text
stringlengths 11
4.05M
|
|---|
package router
import (
"fmt"
"net/http"
"strings"
"github.com/pkg/errors"
"github.com/shharn/blog/logger"
)
var (
mapStatusCodeToMessage = map[int]string{
http.StatusBadRequest: "Bad Request",
http.StatusUnauthorized: "Invalid authentication data",
http.StatusForbidden: "Not allowed to do it",
http.StatusNotFound: "Not Found",
http.StatusMethodNotAllowed: "Not Allowed Method",
http.StatusRequestTimeout: "Request Timeout",
http.StatusConflict: "Conflict",
http.StatusInternalServerError: "The Server is temporaliy unavailable. Try later",
}
)
// Handler processes the client's request and return something
// The argument will be the unmarshalled body & route parameter values & values in Query String
// the object of return value({}interface) will be processed by marshaler
type Handler func(http.ResponseWriter, *http.Request, Params) (interface{}, ErrorResponse)

// corsContext holds the values echoed back in CORS response headers:
// allowed origins, methods and headers.
type corsContext struct {
	AllowedOrigins []string
	AllowedMethods string
	AllowedHeaders string
}

// routerContext pairs a path pattern with the handler that serves it.
type routerContext struct {
	Pattern string
	Handler Handler
}

// Router is router - self-explanatory
type Router struct {
	RegisteredMethods []string                   // HTTP methods that have at least one route
	CORSContext       *corsContext               // nil until a CORS setter is called
	Dispatchers       map[string][]routerContext // method -> registered routes
	Filters           []Filter                   // middleware, run in registration order
	Marshaler         Marshaler                  // serializes handler results and error responses
}
// SetCORS enables CORS handling on the router: it registers a catch-all
// OPTIONS route (so preflight requests succeed with an empty body) and
// installs a corsFilter that applies the configured origins/methods/headers.
// The filter is skipped for the root path and for non-browser user agents.
// Returns the router for chaining.
func (r *Router) SetCORS() *Router {
	r.ensureCORSContext()
	// Substrings that identify browser user agents; requests from anything
	// else bypass the CORS filter (see exception 1 below).
	browserAgents := []string{"Firefox", "Seamonkey", "Chrome", "Chromium", "Safari", "OPR", "Opera", "MSIE"}
	r.RegisteredMethods = append(r.RegisteredMethods, "OPTIONS")
	ctxs := []routerContext{}
	ctxs = append(ctxs, routerContext{
		// Wildcard pattern: any OPTIONS request gets a no-op handler so the
		// preflight returns 200 with just the CORS headers set by the filter.
		Pattern: "*",
		Handler: func(w http.ResponseWriter, rq *http.Request, params Params) (interface{}, ErrorResponse) {
			return nil, EmptyErrorResponse
		},
	})
	(*r).Dispatchers["OPTIONS"] = ctxs
	r.Use(corsFilter{
		CORSContext: r.CORSContext,
		Exceptions: []FilterExceptionJudge{
			// Exception 0: never CORS-filter the root path.
			0: func(w http.ResponseWriter, r *http.Request) bool {
				path := r.URL.Path
				return path == "/"
			},
			// Exception 1: skip the filter when the User-Agent does not look
			// like a browser (server-to-server clients don't send CORS).
			1: func(w http.ResponseWriter, r *http.Request) bool {
				ua := r.Header.Get("User-Agent")
				for _, rua := range browserAgents {
					if strings.Contains(ua, rua) {
						return false
					}
				}
				return true
			},
		},
	})
	return r
}
// ensureCORSContext lazily allocates the CORS configuration so the
// CORS setters can be called in any order.
func (r *Router) ensureCORSContext() {
	if r.CORSContext != nil {
		return
	}
	r.CORSContext = &corsContext{}
}
// SetAllowedOrigin sets the origins allowed by the CORS filter.
// Returns the router for chaining.
func (r *Router) SetAllowedOrigin(origins []string) *Router {
	r.ensureCORSContext()
	r.CORSContext.AllowedOrigins = origins
	return r
}

// SetAllowedMethod sets the methods allowed by the CORS filter
// (a single comma-separated header value). Returns the router for chaining.
func (r *Router) SetAllowedMethod(methods string) *Router {
	r.ensureCORSContext()
	r.CORSContext.AllowedMethods = methods
	return r
}

// SetAllowedHeaders sets the headers allowed by the CORS filter
// (a single comma-separated header value). Returns the router for chaining.
func (r *Router) SetAllowedHeaders(headers string) *Router {
	r.ensureCORSContext()
	r.CORSContext.AllowedHeaders = headers
	return r
}
// Use appends a middleware filter to the router's chain. Filters run in
// registration order on every request before dispatch (see consume).
func (r *Router) Use(filter Filter) {
	// append handles a nil slice, so no explicit initialization is needed.
	r.Filters = append(r.Filters, filter)
}
// Get registers the handler for the path on the "GET" method.
func (r *Router) Get(path string, handler Handler) {
	r.add("GET", path, handler)
}

// Post registers the handler for the path on the "POST" method.
func (r *Router) Post(path string, handler Handler) {
	r.add("POST", path, handler)
}

// Patch registers the handler for the path on the "PATCH" method.
func (r *Router) Patch(path string, handler Handler) {
	r.add("PATCH", path, handler)
}

// Put registers the handler for the path on the "PUT" method.
func (r *Router) Put(path string, handler Handler) {
	r.add("PUT", path, handler)
}

// Delete registers the handler for the path on the "DELETE" method.
func (r *Router) Delete(path string, handler Handler) {
	r.add("DELETE", path, handler)
}
// add records a route: it marks the HTTP method as registered and appends
// a pattern/handler pair to the method's dispatch list. Routes are matched
// in registration order by findContextFromPath.
func (r *Router) add(method, path string, handler Handler) {
	// Record each method only once so RegisteredMethods does not grow
	// with every route registered for the same verb.
	if !contains(r.RegisteredMethods, method) {
		r.RegisteredMethods = append(r.RegisteredMethods, method)
	}
	// append on a nil slice allocates, so the missing-key case needs no
	// special handling.
	r.Dispatchers[method] = append(r.Dispatchers[method], routerContext{
		Pattern: path,
		Handler: handler,
	})
}
// ServeHTTP implements http.Handler. It dispatches the request through
// consume and, on failure, logs the error and writes the marshaled error
// response. (The original comment misspelled the method as "ServerHTTP".)
func (r *Router) ServeHTTP(w http.ResponseWriter, rq *http.Request) {
	// Recover from handler panics so a single request cannot crash the
	// server; the client still receives a 500.
	defer func() {
		if rcv := recover(); rcv != nil {
			if err, ok := rcv.(error); ok {
				logger.Error(errors.Wrap(err, "Unexpected error"))
			} else {
				logger.Error(errors.New(fmt.Sprintf("%v", rcv)))
			}
			w.WriteHeader(http.StatusInternalServerError)
		}
	}()
	ok, errResponse := r.consume(w, rq)
	if ok {
		return
	}
	var err error
	if errResponse.InnerError != nil {
		err = errors.Wrap(errResponse.InnerError, errResponse.Message)
	} else {
		err = errors.New(errResponse.Message)
	}
	logger.Error(err)
	// Best-effort marshal of the error payload: on failure we still send
	// the status code, just with an empty body.
	bytes, _ := r.Marshaler.Marshal(errResponse)
	w.Header().Set("Content-Type", r.Marshaler.ContentType())
	w.WriteHeader(errResponse.Code)
	w.Write(bytes)
}
// consume runs the filter chain and dispatches the request to the first
// matching route, writing the marshaled result on success. It returns
// (true, EmptyErrorResponse) on success or (false, response) describing
// the failure; the caller is responsible for writing error responses.
func (r *Router) consume(w http.ResponseWriter, rq *http.Request) (bool, ErrorResponse) {
	if !contains(r.RegisteredMethods, rq.Method) {
		return false, NewErrorResponse(http.StatusMethodNotAllowed, mapStatusCodeToMessage[http.StatusMethodNotAllowed])
	}
	// Filters run in registration order; the first rejection wins.
	for _, filter := range r.Filters {
		if ok, errorResponse := filter.Filter(w, rq); !ok {
			return false, errorResponse
		}
	}
	ctxs, ok := r.Dispatchers[rq.Method]
	if !ok {
		return false, NewErrorResponse(http.StatusNotFound, mapStatusCodeToMessage[http.StatusNotFound])
	}
	path := rq.URL.EscapedPath()
	ctx, found := findContextFromPath(ctxs, path)
	if !found {
		return false, NewErrorResponse(http.StatusNotFound, mapStatusCodeToMessage[http.StatusNotFound])
	}
	// Extract route parameters from the matched pattern and invoke the handler.
	params := parseURL(ctx.Pattern, path)
	result, errResponse := ctx.Handler(w, rq, params)
	if errResponse != EmptyErrorResponse {
		return false, errResponse
	}
	bytes, err := r.Marshaler.Marshal(result)
	if err != nil {
		return false, NewErrorResponseWithError(http.StatusInternalServerError, mapStatusCodeToMessage[http.StatusInternalServerError], err)
	}
	w.Header().Set("Content-Type", r.Marshaler.ContentType())
	w.WriteHeader(http.StatusOK)
	if bytes != nil {
		w.Write(bytes)
	}
	return true, EmptyErrorResponse
}
// contains reports whether value is present in list.
func contains(list []string, value string) bool {
	for i := range list {
		if list[i] == value {
			return true
		}
	}
	return false
}
// findContextFromPath returns the first routing context whose pattern
// matches path. A wildcard ("*") pattern is remembered but used only as a
// fallback when no concrete pattern matches.
func findContextFromPath(ctxs []routerContext, path string) (routerContext, bool) {
	wildCardIdx := -1
	for idx, ctx := range ctxs {
		if ctx.Pattern == "*" {
			// Remember the wildcard but keep scanning: a concrete pattern
			// later in the list should still win. (The original `break`
			// here made the wildcard shadow all later routes.)
			if wildCardIdx == -1 {
				wildCardIdx = idx
			}
			continue
		}
		if MatchPathToPattern(ctx.Pattern, path) {
			return ctx, true
		}
	}
	if wildCardIdx != -1 {
		return ctxs[wildCardIdx], true
	}
	return routerContext{}, false
}
// MatchPathToPattern reports whether path matches pattern. Both are
// compared segment by segment (split on "/"): an empty pattern segment
// matches only an empty path segment, a segment starting with ':' is a
// route parameter and matches anything, and any other segment must be
// equal literally. Differing segment counts never match.
func MatchPathToPattern(pattern, path string) bool {
	patternSegs := strings.Split(pattern, "/")
	pathSegs := strings.Split(path, "/")
	if len(patternSegs) != len(pathSegs) {
		return false
	}
	for i, seg := range patternSegs {
		switch {
		case seg == "":
			if pathSegs[i] != "" {
				return false
			}
		case seg[0] == ':':
			// Route parameter: matches any path segment.
		default:
			if seg != pathSegs[i] {
				return false
			}
		}
	}
	return true
}
// NewRouter builds an empty Router that serializes responses with the
// given marshaler. All collections are initialized so routes and filters
// can be registered immediately.
func NewRouter(marshaler Marshaler) *Router {
	router := &Router{}
	router.RegisteredMethods = []string{}
	router.Dispatchers = map[string][]routerContext{}
	router.Filters = []Filter{}
	router.Marshaler = marshaler
	return router
}
|
package main
import (
"testing"
)
// TestScore exercises the fuzzy-match Score function in four regimes:
// exact expected values, strictly-positive scores, strict ordering
// between candidate pairs, and score equality between candidate pairs.
func TestScore(t *testing.T) {
	// Cases with an exact expected score (empty query scores 1, impossible
	// matches score 0).
	var absTests = []struct {
		candidate Candidate
		query     string
		wanted    float32
	}{
		{candidate: NewCandidate("a"), query: "", wanted: 1.0},
		{candidate: NewCandidate("a"), query: "aa", wanted: 0.0},
		{candidate: NewCandidate("abcx"), query: "abcd", wanted: 0.0},
		{candidate: NewCandidate("axbxcx"), query: "aac", wanted: 0.0},
	}
	for _, test := range absTests {
		s := Score(&test.candidate, test.query)
		if s != test.wanted {
			t.Errorf("Score(%q, %q) = %f, wanted %f",
				test.candidate,
				test.query,
				s,
				test.wanted)
		}
	}
	// Cases that must score strictly above a lower bound (here: any
	// successful match must be positive).
	var greaterTests = []struct {
		candidate Candidate
		query     string
		lower     float32
	}{
		{candidate: NewCandidate("abcx"), query: "abc", lower: 0.0},
		{candidate: NewCandidate("abcx"), query: "abc", lower: 0.0},
		{candidate: NewCandidate("aaa/bbb/File"), query: "abf", lower: 0.0},
		{candidate: NewCandidate("aaa/bbb/file"), query: "abF", lower: 0.0},
	}
	for _, test := range greaterTests {
		s := Score(&test.candidate, test.query)
		if s <= test.lower {
			t.Errorf("Score(%q, %q) = %f, wanted > %f",
				test.candidate,
				test.query,
				s,
				test.lower)
		}
	}
	// Pairs where the first candidate must strictly outscore the second.
	var comparingTests = []struct {
		candidate1 Candidate
		query1     string
		candidate2 Candidate
		query2     string
	}{
		{NewCandidate("yxxxabxc"), "abc", NewCandidate("axxxybxc"), "abc"},
		{NewCandidate("xabc"), "abc", NewCandidate("long string abc"), "abc"},
	}
	for _, test := range comparingTests {
		s1 := Score(&test.candidate1, test.query1)
		s2 := Score(&test.candidate2, test.query2)
		if s1 <= s2 {
			t.Errorf("Score(%q, %q) <= Score(%q, %q), wanted >",
				test.candidate1,
				test.query1,
				test.candidate2,
				test.query2)
		}
	}
	// Pairs that must score identically.
	var equalTests = []struct {
		candidate1 Candidate
		query1     string
		candidate2 Candidate
		query2     string
	}{
		{NewCandidate("abcxxxabxxxc"), "abc", NewCandidate("xabcxxxyyxxy"), "abc"},
		{NewCandidate("axxxabxc"), "abc", NewCandidate("yxxxabxc"), "abc"},
	}
	for _, test := range equalTests {
		s1 := Score(&test.candidate1, test.query1)
		s2 := Score(&test.candidate2, test.query2)
		if s1 != s2 {
			t.Errorf("Score(%q, %q) != Score(%q, %q), wanted =",
				test.candidate1,
				test.query1,
				test.candidate2,
				test.query2)
		}
	}
	// Pairs where the first candidate must strictly underscore the second
	// (case-exact matches and path-separator boundaries score higher).
	var lessTests = []struct {
		candidate1 Candidate
		query1     string
		candidate2 Candidate
		query2     string
	}{
		{NewCandidate("axbxcxd"), "abcd", NewCandidate("AxBxCxD"), "abcd"},
		{NewCandidate("axbxcxd"), "abcd", NewCandidate("a/b/c/d"), "abcd"},
	}
	for _, test := range lessTests {
		s1 := Score(&test.candidate1, test.query1)
		s2 := Score(&test.candidate2, test.query2)
		if s1 >= s2 {
			t.Errorf("Score(%q, %q) >= Score(%q, %q), wanted <",
				test.candidate1,
				test.query1,
				test.candidate2,
				test.query2)
		}
	}
}
|
//Copyright (c) 2017 Phil
//Package apollo ctrip apollo go client
package apollo
var (
	// defaultClient backs every package-level helper below; it is set by
	// StartWithConf and nil before any Start variant is called.
	defaultClient *Client
)

// Start runs apollo with the default configuration file.
func Start() error {
	return StartWithConfFile(defaultConfName)
}

// StartWithConfFile runs apollo with the named conf file.
func StartWithConfFile(name string) error {
	log.Debugf("StartWithConfFile run apollo with conf file name: %s", name)
	conf, err := NewConf(name)
	if err != nil {
		return err
	}
	return StartWithConf(conf)
}

// StartWithConf runs apollo with an already-parsed Conf, installing the
// default logger first if none has been set.
func StartWithConf(conf *Conf) error {
	if log == nil {
		setDefaultLogger()
	}
	defaultClient = NewClient(conf)
	return defaultClient.Start()
}

// Stop stops the default client's config synchronization.
func Stop() error {
	return defaultClient.Stop()
}

// WatchUpdate returns a channel delivering all configuration change events.
func WatchUpdate() <-chan *ChangeEvent {
	return defaultClient.WatchUpdate()
}

// GetStringValueWithNameSpace gets a string value for key from the given
// namespace, falling back to defaultValue.
func GetStringValueWithNameSpace(namespace, key, defaultValue string) string {
	return defaultClient.GetStringValueWithNameSpace(namespace, key, defaultValue)
}

// GetStringValue gets a string value for key from the default namespace.
func GetStringValue(key, defaultValue string) string {
	return GetStringValueWithNameSpace(defaultNamespace, key, defaultValue)
}

// GetIntValue gets an int value for key from the default namespace.
// NOTE(review): unlike GetStringValue, this calls the client directly
// rather than a namespace-aware variant — confirm this is intentional.
func GetIntValue(key string, defaultValue int) int {
	return defaultClient.GetIntValue(key, defaultValue)
}

// GetNameSpaceContent gets the raw contents of a namespace.
func GetNameSpaceContent(namespace, defaultValue string) string {
	return defaultClient.GetNameSpaceContent(namespace, defaultValue)
}

// ListKeys lists all keys under the given namespace.
func ListKeys(namespace string) []string {
	return defaultClient.ListKeys(namespace)
}
|
package middleware
import (
"github.com/best-expendables/httpclient/net/profile"
"net/http"
log "github.com/best-expendables/logger"
)
// ResponseLogger logs outgoing HTTP responses as structured entries;
// it embeds the shared logger helper for context-based logger lookup.
type ResponseLogger struct {
	logger
}

// NewResponseLogger creates a ResponseLogger that writes through the
// given log entry.
func NewResponseLogger(loggerEntry log.Entry) *ResponseLogger {
	return &ResponseLogger{
		logger: logger{logger: loggerEntry},
	}
}
// RoundTripper wraps next so that every non-nil response is logged via
// Process before being returned. The transport error is passed through
// unchanged.
func (l *ResponseLogger) RoundTripper(next http.RoundTripper) http.RoundTripper {
	return RoundTripperFn(func(request *http.Request) (*http.Response, error) {
		response, err := next.RoundTrip(request)
		// response can be non-nil even alongside an error; log whatever we got.
		if response != nil {
			l.Process(response)
		}
		return response, err
	})
}
// Process emits a structured log entry for the response: request URL,
// response metadata, and (when profiling data is attached) connection
// reuse/timing info. Status < 400 and 422 log at Info; other statuses at
// Error. The error return is always nil.
func (l *ResponseLogger) Process(response *http.Response) error {
	logger := l.logger.get(response.Request.Context())
	// No logger attached to the request context — nothing to log.
	if logger == nil {
		return nil
	}
	responseEntry, err := newResponseEntry(response)
	meta := log.Fields{
		"url":      response.Request.URL.String(),
		"source":   "ResponseLogger",
		"response": responseEntry,
	}
	if report := profile.ReportFromResponse(response); report != nil {
		network := make(map[string]interface{})
		network["reused"] = report.Reused
		if !report.Reused {
			// NOTE(review): both "dns" and "connection" read
			// ConnectionTimeMs(); "dns" looks like it should use a
			// DNS-specific timing — confirm against the profile package.
			network["dns"] = report.ConnectionTimeMs()
			network["connection"] = report.ConnectionTimeMs()
		}
		meta["network"] = network
	}
	// newResponseEntry failed: log what we have as a warning and stop.
	if err != nil {
		meta["err"] = err
		logger.WithFields(meta).Warning("Response logger has an error")
		return nil
	}
	entry := logger.WithFields(meta)
	statusText := http.StatusText(response.StatusCode)
	// 422 response code uses for validation error
	if response.StatusCode < 400 || response.StatusCode == http.StatusUnprocessableEntity {
		entry.Info(statusText)
	} else {
		entry.Error(statusText)
	}
	return nil
}
|
package fuse_test
import (
"os"
"testing"
"gx/ipfs/QmSJBsmLP1XMjv8hxYg2rUMdPDB7YUpyBo9idjrJ6Cmq6F/fuse"
)
// TestOpenFlagsAccmodeMaskReadWrite checks that O_RDWR (with an extra
// O_SYNC bit) masks to OpenReadWrite and that only IsReadWrite reports true.
func TestOpenFlagsAccmodeMaskReadWrite(t *testing.T) {
	var f = fuse.OpenFlags(os.O_RDWR | os.O_SYNC)
	if g, e := f&fuse.OpenAccessModeMask, fuse.OpenReadWrite; g != e {
		t.Fatalf("OpenAccessModeMask behaves wrong: %v: %o != %o", f, g, e)
	}
	if f.IsReadOnly() {
		t.Fatalf("IsReadOnly is wrong: %v", f)
	}
	if f.IsWriteOnly() {
		t.Fatalf("IsWriteOnly is wrong: %v", f)
	}
	if !f.IsReadWrite() {
		t.Fatalf("IsReadWrite is wrong: %v", f)
	}
}

// TestOpenFlagsAccmodeMaskReadOnly checks that O_RDONLY masks to
// OpenReadOnly and that only IsReadOnly reports true.
func TestOpenFlagsAccmodeMaskReadOnly(t *testing.T) {
	var f = fuse.OpenFlags(os.O_RDONLY | os.O_SYNC)
	if g, e := f&fuse.OpenAccessModeMask, fuse.OpenReadOnly; g != e {
		t.Fatalf("OpenAccessModeMask behaves wrong: %v: %o != %o", f, g, e)
	}
	if !f.IsReadOnly() {
		t.Fatalf("IsReadOnly is wrong: %v", f)
	}
	if f.IsWriteOnly() {
		t.Fatalf("IsWriteOnly is wrong: %v", f)
	}
	if f.IsReadWrite() {
		t.Fatalf("IsReadWrite is wrong: %v", f)
	}
}

// TestOpenFlagsAccmodeMaskWriteOnly checks that O_WRONLY masks to
// OpenWriteOnly and that only IsWriteOnly reports true.
func TestOpenFlagsAccmodeMaskWriteOnly(t *testing.T) {
	var f = fuse.OpenFlags(os.O_WRONLY | os.O_SYNC)
	if g, e := f&fuse.OpenAccessModeMask, fuse.OpenWriteOnly; g != e {
		t.Fatalf("OpenAccessModeMask behaves wrong: %v: %o != %o", f, g, e)
	}
	if f.IsReadOnly() {
		t.Fatalf("IsReadOnly is wrong: %v", f)
	}
	if !f.IsWriteOnly() {
		t.Fatalf("IsWriteOnly is wrong: %v", f)
	}
	if f.IsReadWrite() {
		t.Fatalf("IsReadWrite is wrong: %v", f)
	}
}

// TestOpenFlagsString checks the human-readable rendering of a combined
// flag set.
func TestOpenFlagsString(t *testing.T) {
	var f = fuse.OpenFlags(os.O_RDWR | os.O_SYNC | os.O_APPEND)
	if g, e := f.String(), "OpenReadWrite+OpenAppend+OpenSync"; g != e {
		t.Fatalf("OpenFlags.String: %q != %q", g, e)
	}
}
|
package model
import (
"bytes"
"fmt"
"time"
"tpay_backend/utils"
"gorm.io/gorm"
)
// TransferOrderTableName is the database table backing TransferOrder.
const TransferOrderTableName = "transfer_order"

const (
	// Transfer order status
	TransferOrderStatusPending = 1 // awaiting payment
	TransferOrderStatusPaid    = 2 // paid
	TransferOrderStatusFail    = 3 // payment failed
	// Transfer order async-notification status
	TransferNotifyStatusNot       = 0 // not yet notified
	TransferNotifyStatusSuccess   = 1 // notified successfully
	TransferNotifyStatusNotifying = 2 // notification in progress
	TransferNotifyStatusTimeOut   = 3 // notification timed out
	// Transfer order source
	TransferOrderSourceInterface       = 1 // API call
	TransferOrderSourceWithdrawAllot   = 2 // platform withdrawal dispatch
	TransferOrderSourceMerchantPayment = 3 // merchant-portal payment
	// Mode
	TransferModeTest = "test" // test
	TransferModePro  = "pro"  // production
)
// TransferOrder is a row of the transfer_order (payout) table.
// Monetary fields are int64 — presumably minor currency units; confirm
// against the schema.
type TransferOrder struct {
	Id                 int64   `gorm:"id"`
	OrderNo            string  `gorm:"order_no"`             // platform order number
	MerchantOrderNo    string  `gorm:"merchant_order_no"`    // merchant order number
	UpstreamOrderNo    string  `gorm:"upstream_order_no"`    // upstream order number
	MerchantNo         string  `gorm:"merchant_no"`          // merchant number
	ReqAmount          int64   `gorm:"req_amount"`           // requested order amount
	MerchantFee        int64   `gorm:"merchant_fee"`         // merchant fee
	DecreaseAmount     int64   `gorm:"decrease_amount"`      // amount deducted from the account
	UpstreamAmount     int64   `gorm:"upstream_amount"`      // amount requested from upstream
	UpstreamFee        int64   `gorm:"upstream_fee"`         // upstream fee
	Currency           string  `gorm:"currency"`             // currency
	OrderStatus        int64   `gorm:"order_status"`         // order status
	CreateTime         int64   `gorm:"create_time"`          // creation time
	PlatformChannelId  int64   `gorm:"platform_channel_id"`  // platform channel id
	UpstreamChannelId  int64   `gorm:"upstream_channel_id"`  // upstream channel id
	UpdateTime         int64   `gorm:"update_time"`          // update time
	NotifyUrl          string  `gorm:"notify_url"`           // async notification URL
	ReturnUrl          string  `gorm:"return_url"`           // synchronous redirect URL
	BankName           string  `gorm:"bank_name"`            // bank name
	AccountName        string  `gorm:"account_name"`         // bank account holder name
	CardNumber         string  `gorm:"card_number"`          // payee card number
	BranchName         string  `gorm:"branch_name"`          // branch name
	NotifyStatus       int64   `gorm:"notify_status"`        // async notify status (0 not notified, 1 success, 2 in progress, 3 timed out)
	NotifyFailTimes    int64   `gorm:"notify_fail_times"`    // number of failed notification attempts
	NextNotifyTime     int64   `gorm:"next_notify_time"`     // next notification time
	PayeeRealAmount    int64   `gorm:"payee_real_amount"`    // amount actually received by the payee
	FeeDeductType      int64   `gorm:"fee_deduct_type"`      // fee deduction type (1 deducted from amount, 2 charged on top)
	UpstreamFailReason string  `gorm:"upstream_fail_reason"` // upstream failure reason
	OrderSource        int64   `gorm:"order_source"`         // order source: 1 API; 2 platform withdrawal dispatch; 3 merchant-portal payment
	Remark             string  `gorm:"remark"`               // payment remark
	BankCode           string  `gorm:"bank_code"`            // bank code (ifsc_code)
	AreaId             int64   `gorm:"area_id"`
	Mode               string  `gorm:"mode"`                // mode: test|pro (testing|production)
	MerchantRate       float64 `gorm:"merchant_rate"`       // merchant rate
	MerchantSingleFee  int64   `gorm:"merchant_single_fee"` // merchant per-transaction fee
	BatchNo            string  `gorm:"batch_no"`            // batch payment batch number
	BatchRowNo         string  `gorm:"batch_row_no"`        // batch payment row number
}
// TableName maps TransferOrder to the transfer_order table (gorm Tabler).
func (t *TransferOrder) TableName() string {
	return TransferOrderTableName
}

// NewTransferOrderModel wraps a gorm DB handle in a TransferOrderModel.
func NewTransferOrderModel(db *gorm.DB) *TransferOrderModel {
	return &TransferOrderModel{db: db}
}

// TransferOrderModel is the data-access object for transfer orders.
type TransferOrderModel struct {
	db *gorm.DB
}
// GenerateOrderNo builds a platform order number: a numeric prefix, a
// microsecond timestamp and a 5-digit random suffix. In test mode the
// literal "test" is prepended.
func (t *TransferOrder) GenerateOrderNo(mode string) string {
	orderNo := fmt.Sprintf("%d%d%d",
		TransferOrderNoPrefix, // numeric prefix defined elsewhere in the package
		time.Now().UnixNano()/1000, // microseconds since epoch (~16 digits)
		utils.RandInt64(10000, 99999), // 5-digit random component
	)
	switch mode {
	case TransferModeTest:
		// "test" prefix + timestamp + random (original note: 1+16+5 = 22 chars).
		return TransferModeTest + orderNo
	case TransferModePro:
		fallthrough
	default:
		// Production (and any unknown mode) uses the bare number.
		return orderNo
	}
}
// Insert stores a new transfer order, stamping CreateTime with the
// current Unix time.
func (m *TransferOrderModel) Insert(data *TransferOrder) error {
	data.CreateTime = time.Now().Unix()
	result := m.db.Create(data)
	return result.Error
}

// UpdateUpstreamOrderInfo records the upstream order number (and, when
// non-zero, the order status) for the order with the given id, touching
// update_time.
func (m *TransferOrderModel) UpdateUpstreamOrderInfo(id int64, data TransferOrder) error {
	setMap := map[string]interface{}{
		"update_time":       time.Now().Unix(),
		"upstream_order_no": data.UpstreamOrderNo,
	}
	// Zero means "status not provided": leave the stored status untouched.
	if data.OrderStatus != 0 {
		setMap["order_status"] = data.OrderStatus
	}
	result := m.db.Model(&TransferOrder{}).Where("id=?", id).Updates(setMap)
	return result.Error
}
// UpdateOrderStatus sets the order status for the given id.
func (m *TransferOrderModel) UpdateOrderStatus(id, status int64) error {
	result := m.db.Model(&TransferOrder{}).Where("id=?", id).Update("order_status", status)
	return result.Error
}

// UpdateOrderPaidById marks the order with the given id as paid.
func (m *TransferOrderModel) UpdateOrderPaidById(id int64) error {
	result := m.db.Model(&TransferOrder{}).Where("id=?", id).Update("order_status", TransferOrderStatusPaid)
	return result.Error
}

// UpdateOrderFailById marks the order as failed and stores the upstream
// failure reason.
func (m *TransferOrderModel) UpdateOrderFailById(id int64, failReason string) error {
	setMap := map[string]interface{}{
		"order_status":         TransferOrderStatusFail,
		"upstream_fail_reason": failReason,
	}
	result := m.db.Model(&TransferOrder{}).Where("id=?", id).Updates(setMap)
	return result.Error
}
// TransferNotifyInfo carries an order's async-notification bookkeeping
// fields for UpdateNotify.
type TransferNotifyInfo struct {
	NotifyStatus    int64 `gorm:"notify_status"`     // async notify status (0 not notified, 1 success, 2 in progress, 3 timed out)
	NotifyFailTimes int64 `gorm:"notify_fail_times"` // number of failed notification attempts
	NextNotifyTime  int64 `gorm:"next_notify_time"`  // next notification time
}
// UpdateNotify updates an order's async-notification bookkeeping
// (status, failure count and next retry time) by order id.
func (m *TransferOrderModel) UpdateNotify(orderId int64, data TransferNotifyInfo) error {
	setMap := map[string]interface{}{
		"notify_status":     data.NotifyStatus,
		"notify_fail_times": data.NotifyFailTimes,
		"next_notify_time":  data.NextNotifyTime,
	}
	// Pass the model by pointer and the map by value, matching every other
	// update helper in this file (gorm documents plain-map Updates; the
	// original passed &setMap and a value model).
	result := m.db.Model(&TransferOrder{}).Where("id = ?", orderId).Updates(setMap)
	return result.Error
}
// MerchantOrderNoExist reports whether the merchant has already used the
// given merchant order number.
func (m *TransferOrderModel) MerchantOrderNoExist(merchantNo, merchantOrderNo string) (bool, error) {
	var cnt int64
	result := m.db.Model(&TransferOrder{}).Where("merchant_no=? and merchant_order_no=?", merchantNo, merchantOrderNo).Count(&cnt)
	if result.Error != nil {
		return false, result.Error
	}
	return cnt > 0, nil
}

// FindByOrderNo loads an order by platform order number.
func (m *TransferOrderModel) FindByOrderNo(orderNo string) (*TransferOrder, error) {
	var o = &TransferOrder{}
	result := m.db.Model(o).Where("order_no=?", orderNo).First(o)
	return o, result.Error
}

// FindByMerchantId loads an order by platform order number, constrained
// to the merchant with the given primary-key id.
func (m *TransferOrderModel) FindByMerchantId(merchantId int64, orderNo string) (*TransferOrder, error) {
	var o = &TransferOrder{}
	result := m.db.Table(TransferOrderTableName+" o").
		Select("o.*").
		Joins("left join "+MerchantTableName+" m on m.merchant_no=o.merchant_no").
		Where("m.id=? and o.order_no=?", merchantId, orderNo).
		Find(o)
	return o, result.Error
}

// FindByOrderNoAndMerchantNoData is a TransferOrder augmented with the
// platform channel's display name.
type FindByOrderNoAndMerchantNoData struct {
	TransferOrder
	PlatformChannelName string `gorm:"platform_channel_name"` // platform channel name (the original "bank code" comment looks copy-pasted)
}

// FindByOrderNoAndMerchantNo loads an order plus its platform channel
// name by platform order number and merchant number.
func (m *TransferOrderModel) FindByOrderNoAndMerchantNo(orderNo, merchantNo string) (FindByOrderNoAndMerchantNoData, error) {
	var o FindByOrderNoAndMerchantNoData
	selectField := "t.*, plat.channel_name AS platform_channel_name "
	platformCh := fmt.Sprintf("left join %s plat on plat.id = t.platform_channel_id", PlatformChannelTableName)
	result := m.db.Table(TransferOrderTableName+" t").
		Select(selectField).
		Joins(platformCh).
		Where("t.order_no=? and t.merchant_no = ? ", orderNo, merchantNo).Scan(&o)
	return o, result.Error
}

// FindByMchOrderNo loads an order by merchant number and merchant order number.
func (m *TransferOrderModel) FindByMchOrderNo(merchantNo, merchantOrderNo string) (*TransferOrder, error) {
	var o = &TransferOrder{}
	result := m.db.Model(o).Where("merchant_no=? and merchant_order_no=?", merchantNo, merchantOrderNo).First(o)
	return o, result.Error
}
// FindTransferOrderList holds the filter and paging parameters for
// FindList. Zero/empty fields mean "no filter"; Page and PageSize
// default to 1 and 10.
type FindTransferOrderList struct {
	Page              int64
	PageSize          int64
	OrderNo           string
	MerchantOrderNo   string
	UpstreamOrderNo   string
	MerchantName      string
	Currency          string
	PlatformChannelId int64
	StartCreateTime   int64
	EndCreateTime     int64
	OrderStatus       int64
	OrderSourceList   []int64
	MerchantNo        string
	OrderType         string
}

// TransferOrderList is a TransferOrder row joined with the merchant,
// platform-channel and upstream display names.
type TransferOrderList struct {
	TransferOrder
	MerchantName        string `gorm:"merchant_name"`
	PlatformChannelName string `gorm:"platform_channel_name"`
	UpstreamName        string `gorm:"upstream_name"`
}
// FindList returns one page of transfer orders matching the filters in f,
// plus the total number of matching rows. String filters use LIKE; other
// non-zero fields are exact matches. Results are ordered by create_time
// descending.
func (m *TransferOrderModel) FindList(f FindTransferOrderList) ([]*TransferOrderList, int64, error) {
	var (
		order      = fmt.Sprintf("%s o", TransferOrderTableName)
		merchant   = fmt.Sprintf("left join %s m on m.merchant_no = o.merchant_no", MerchantTableName)
		platformCh = fmt.Sprintf("left join %s plat on plat.id = o.platform_channel_id", PlatformChannelTableName)
		upstreamCh = fmt.Sprintf("left join %s upst on upst.id = o.upstream_channel_id", UpstreamChannelTableName)
		upstream   = fmt.Sprintf("left join %s up on up.id = upst.upstream_id", UpstreamTableName)
		selectField = "o.order_no, o.merchant_order_no, o.currency, o.req_amount, o.merchant_fee, " +
			"o.payee_real_amount, o.upstream_fee, o.order_status, o.create_time, o.update_time," +
			"o.bank_name, o.account_name, o.card_number, o.branch_name, o.order_source," +
			"o.upstream_order_no, o.remark, " +
			"m.username AS merchant_name, plat.channel_name as platform_channel_name, up.upstream_name "
		// "1=1" seeds a valid WHERE expression; every filter below appends
		// an "and ..." fragment with a matching placeholder arg.
		whereBuffer = bytes.NewBufferString(" 1=1 ")
		args        []interface{}
	)
	if f.OrderNo != "" {
		whereBuffer.WriteString("and o.order_no like ? ")
		args = append(args, "%"+f.OrderNo+"%")
	}
	if f.MerchantOrderNo != "" {
		whereBuffer.WriteString("and o.merchant_order_no like ? ")
		args = append(args, "%"+f.MerchantOrderNo+"%")
	}
	if f.UpstreamOrderNo != "" {
		whereBuffer.WriteString("and o.upstream_order_no like ? ")
		args = append(args, "%"+f.UpstreamOrderNo+"%")
	}
	if f.MerchantNo != "" {
		whereBuffer.WriteString("and m.merchant_no = ? ")
		args = append(args, f.MerchantNo)
	}
	if f.MerchantName != "" {
		whereBuffer.WriteString("and m.username like ? ")
		args = append(args, "%"+f.MerchantName+"%")
	}
	if f.Currency != "" {
		whereBuffer.WriteString("and o.currency = ? ")
		args = append(args, f.Currency)
	}
	if f.PlatformChannelId != 0 {
		whereBuffer.WriteString("and o.platform_channel_id = ? ")
		args = append(args, f.PlatformChannelId)
	}
	if f.StartCreateTime != 0 {
		whereBuffer.WriteString("and o.create_time >= ? ")
		args = append(args, f.StartCreateTime)
	}
	if f.EndCreateTime != 0 {
		whereBuffer.WriteString("and o.create_time <= ? ")
		args = append(args, f.EndCreateTime)
	}
	if f.OrderStatus != 0 {
		whereBuffer.WriteString("and o.order_status = ? ")
		args = append(args, f.OrderStatus)
	}
	if f.OrderType != "" {
		whereBuffer.WriteString("and o.mode = ? ")
		args = append(args, f.OrderType)
	}
	if f.OrderSourceList != nil {
		whereBuffer.WriteString("and o.order_source in ? ")
		args = append(args, f.OrderSourceList)
	}
	// Default paging: first page, 10 rows.
	if f.Page == 0 {
		f.Page = 1
	}
	if f.PageSize == 0 {
		f.PageSize = 10
	}
	var total int64
	ret := m.db.Table(order).Joins(merchant).Joins(platformCh).Joins(upstreamCh).Joins(upstream).Where(whereBuffer.String(), args...).Count(&total)
	if ret.Error != nil {
		return nil, 0, ret.Error
	}
	// No matches: skip the second query entirely.
	if total == 0 {
		return nil, 0, nil
	}
	whereBuffer.WriteString(" order by o.create_time desc limit ? offset ?")
	args = append(args, f.PageSize, (f.Page-1)*f.PageSize)
	var list []*TransferOrderList
	ret = m.db.Table(order).Select(selectField).Joins(merchant).Joins(platformCh).Joins(upstreamCh).Joins(upstream).Where(whereBuffer.String(), args...).Scan(&list)
	if ret.Error != nil {
		return nil, 0, ret.Error
	}
	return list, total, nil
}
// FindNotifyOmissionOrderNo returns order numbers of paid/failed orders
// updated before payTime that were never notified (missed notifications).
func (m *TransferOrderModel) FindNotifyOmissionOrderNo(payTime int64) ([]string, error) {
	orderStatus := []int64{TransferOrderStatusPaid, TransferOrderStatusFail}
	selectField := "order_no"
	whereStr := "order_status in ? and notify_status= ? and update_time < ?"
	var orderNos []string
	result := m.db.Model(&TransferOrder{}).Select(selectField).Where(whereStr, orderStatus, TransferNotifyStatusNot, payTime).Find(&orderNos)
	if result.Error != nil {
		return nil, result.Error
	}
	return orderNos, nil
}

// FindNotifyBreakOrderNo returns order numbers of paid/failed orders
// whose notification is marked in-progress but whose next retry time has
// already passed (interrupted notifications).
func (m *TransferOrderModel) FindNotifyBreakOrderNo(nextTime int64) ([]string, error) {
	orderStatus := []int64{TransferOrderStatusPaid, TransferOrderStatusFail}
	selectField := "order_no"
	whereStr := "order_status in ? and notify_status = ? and next_notify_time < ?"
	var orderNos []string
	result := m.db.Model(&TransferOrder{}).Select(selectField).Where(whereStr, orderStatus, TransferNotifyStatusNotifying, nextTime).Find(&orderNos)
	if result.Error != nil {
		return nil, result.Error
	}
	return orderNos, nil
}
// TransferOrderDetail is a TransferOrder row joined with display names
// for the detail view.
type TransferOrderDetail struct {
	TransferOrder
	MerchantName        string `gorm:"merchant_name"`
	PlatformChannelName string `gorm:"platform_channel_name"`
	UpstreamName        string `gorm:"upstream_name"`
}

// FindDetail loads the detail view of a single order by platform order
// number (selected columns plus the merchant's username).
func (m *TransferOrderModel) FindDetail(orderNo string) (*TransferOrderDetail, error) {
	var (
		order    = fmt.Sprintf("%s o", TransferOrderTableName)
		merchant = fmt.Sprintf("left join %s m on m.merchant_no = o.merchant_no", MerchantTableName)
		selectField = "o.order_no, o.merchant_order_no, o.upstream_order_no, o.merchant_no, o.req_amount," +
			"o.decrease_amount, o.merchant_fee, o.upstream_fee, o.payee_real_amount, o.fee_deduct_type," +
			"o.upstream_amount, o.currency, o.create_time, o.update_time, o.notify_url, " +
			"o.bank_name, o.card_number, o.account_name, o.branch_name, o.notify_status, " +
			"m.username AS merchant_name "
	)
	var data TransferOrderDetail
	ret := m.db.Table(order).Select(selectField).Joins(merchant).Where(" o.order_no = ? ", orderNo).Scan(&data)
	if ret.Error != nil {
		return nil, ret.Error
	}
	return &data, nil
}
// FindTransferExportData holds the filters for FindExportData. Zero/empty
// fields mean "no filter".
type FindTransferExportData struct {
	MerchantId        int64
	OrderNo           string
	MerchantOrderNo   string
	UpstreamOrderNo   string
	MerchantName      string
	Currency          string
	PlatformChannelId int64
	StartCreateTime   int64
	EndCreateTime     int64
	OrderStatus       int64
	OrderSourceList   []int64
	MerchantNo        string
	OrderType         string
}

// TransferExportData is the export result: matching rows plus aggregate
// totals over the same filter set.
type TransferExportData struct {
	OrderList           []TransferOrderList
	Total               int64
	TotalReqAmount      int64
	TotalFee            int64
	TotalIncreaseAmount int64
}
// FindExportData queries rows for export along with aggregate totals:
// total count, total requested amount, total merchant fees, and the total
// actually credited amount (paid orders only). Returns (nil, nil) when
// nothing matches.
func (m *TransferOrderModel) FindExportData(f FindTransferExportData) (*TransferExportData, error) {
	var (
		order      = fmt.Sprintf("%s o", TransferOrderTableName)
		merchant   = fmt.Sprintf("left join %s m on m.merchant_no = o.merchant_no", MerchantTableName)
		platformCh = fmt.Sprintf("left join %s plat on plat.id = o.platform_channel_id", PlatformChannelTableName)
		selectField = "o.id, o.order_no, o.merchant_order_no, o.currency, o.req_amount, " +
			"o.merchant_rate, o.merchant_fee, o.merchant_single_fee, o.payee_real_amount, o.order_status, " +
			"o.create_time, o.update_time," +
			"m.username AS merchant_name, plat.channel_name as platform_channel_name "
		// "1=1" seeds a valid WHERE expression; filters append "and ..."
		// fragments with placeholder args.
		whereBuffer = bytes.NewBufferString(" 1=1 ")
		args        []interface{}
	)
	if f.MerchantId != 0 {
		whereBuffer.WriteString("and m.id = ? ")
		args = append(args, f.MerchantId)
	}
	if f.OrderNo != "" {
		whereBuffer.WriteString("and o.order_no like ? ")
		args = append(args, "%"+f.OrderNo+"%")
	}
	if f.MerchantOrderNo != "" {
		whereBuffer.WriteString("and o.merchant_order_no like ? ")
		args = append(args, "%"+f.MerchantOrderNo+"%")
	}
	if f.UpstreamOrderNo != "" {
		whereBuffer.WriteString("and o.upstream_order_no like ? ")
		args = append(args, "%"+f.UpstreamOrderNo+"%")
	}
	if f.MerchantNo != "" {
		whereBuffer.WriteString("and m.merchant_no = ? ")
		args = append(args, f.MerchantNo)
	}
	if f.MerchantName != "" {
		whereBuffer.WriteString("and m.username like ? ")
		args = append(args, "%"+f.MerchantName+"%")
	}
	if f.Currency != "" {
		whereBuffer.WriteString("and o.currency = ? ")
		args = append(args, f.Currency)
	}
	if f.PlatformChannelId != 0 {
		whereBuffer.WriteString("and o.platform_channel_id = ? ")
		args = append(args, f.PlatformChannelId)
	}
	if f.StartCreateTime != 0 {
		whereBuffer.WriteString("and o.create_time >= ? ")
		args = append(args, f.StartCreateTime)
	}
	if f.EndCreateTime != 0 {
		whereBuffer.WriteString("and o.create_time <= ? ")
		args = append(args, f.EndCreateTime)
	}
	if f.OrderStatus != 0 {
		whereBuffer.WriteString("and o.order_status = ? ")
		args = append(args, f.OrderStatus)
	}
	if f.OrderType != "" {
		whereBuffer.WriteString("and o.mode = ? ")
		args = append(args, f.OrderType)
	}
	if f.OrderSourceList != nil {
		whereBuffer.WriteString("and o.order_source in ? ")
		args = append(args, f.OrderSourceList)
	}
	var total, totalReqAmount, totalFee, totalIncreaseAmount int64
	result := m.db.Table(order).Joins(merchant).Joins(platformCh).Where(whereBuffer.String(), args...).Count(&total)
	if result.Error != nil {
		return nil, result.Error
	}
	if total == 0 {
		return nil, nil
	}
	// Total requested amount over the matching rows.
	result = m.db.Table(order).Select("IFNULL(sum(o.req_amount), 0)").Joins(merchant).Joins(platformCh).Where(whereBuffer.String(), args...).Scan(&totalReqAmount)
	if result.Error != nil {
		return nil, result.Error
	}
	// Total merchant fees over the matching rows.
	result = m.db.Table(order).Select("IFNULL(sum(o.merchant_fee), 0)").Joins(merchant).Joins(platformCh).Where(whereBuffer.String(), args...).Scan(&totalFee)
	if result.Error != nil {
		return nil, result.Error
	}
	// Total actually credited amount; the paid-only condition is appended
	// inline — safe since it interpolates a package constant, not input.
	result = m.db.Table(order).Select("IFNULL(sum(o.payee_real_amount), 0)").Joins(merchant).Joins(platformCh).
		Where(whereBuffer.String()+fmt.Sprintf("and o.order_status = %v ", TransferOrderStatusPaid), args...).Scan(&totalIncreaseAmount)
	if result.Error != nil {
		return nil, result.Error
	}
	whereBuffer.WriteString(" order by o.create_time asc ")
	var list []TransferOrderList
	result = m.db.Table(order).Select(selectField).Joins(merchant).Joins(platformCh).Where(whereBuffer.String(), args...).Scan(&list)
	if result.Error != nil {
		return nil, result.Error
	}
	return &TransferExportData{
		OrderList:           list,
		Total:               total,
		TotalReqAmount:      totalReqAmount,
		TotalFee:            totalFee,
		TotalIncreaseAmount: totalIncreaseAmount,
	}, nil
}
// FindByBatchNoAndRowNo returns which of the supplied batch row numbers
// already exist for the given merchant and batch.
func (m *TransferOrderModel) FindByBatchNoAndRowNo(merchantNo string, batchNo string, rowNos []string) ([]string, error) {
	var rowNo []string
	result := m.db.Model(&TransferOrder{}).Select("batch_row_no").
		Where("merchant_no=? and batch_no=? and batch_row_no in ?", merchantNo, batchNo, rowNos).Scan(&rowNo)
	return rowNo, result.Error
}

// CountByBatchNo counts the orders in the given merchant's batch.
func (m *TransferOrderModel) CountByBatchNo(merchantNo string, batchNo string) (int64, error) {
	var total int64
	result := m.db.Model(&TransferOrder{}).Where("merchant_no=? and batch_no=?", merchantNo, batchNo).Count(&total)
	return total, result.Error
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-09-10 08:19
# @File : lt_61_Rotate_List.go
# @Description :
# @Attention :
*/
package v0
/*
旋转链表
将其构建成环
找到新的头结点: 新的头节点在 n-(k%n) 处
新的尾节点: 在 n-(k%n)-1 处
*/
// rotateRight rotates the list to the right by k places (LeetCode 61).
// Strategy: join the list into a ring, then cut it just before the new
// head, which sits n - k%n nodes from the old head.
func rotateRight(head *ListNode, k int) *ListNode {
	if head == nil {
		return nil
	}
	// One pass to measure the length and find the current tail.
	n, tail := 1, head
	for tail.Next != nil {
		tail = tail.Next
		n++
	}
	// Close the ring so the cut point is reachable from head.
	tail.Next = head
	// The new tail is steps-1 hops from head; the new head follows it.
	steps := n - k%n
	newTail := head
	for ; steps > 1; steps-- {
		newTail = newTail.Next
	}
	newHead := newTail.Next
	newTail.Next = nil
	return newHead
}
|
package main
import (
"fmt"
"log"
"os"
"github.com/knusbaum/go9p/fs"
"github.com/knusbaum/go9p/server"
)
var readme string = `# To look up a word, open and read a file with the
# word's name under /words. For example, to read the definition
# of the word 'tree', read the file /words/tree. If the file
# doesn't exist already, it will automatically be created and
# populated with a definition when it is read. Once a definition
# file is opened and read, it will continue to be listed under
# the /words directory. Listing the /words directory will show
# all words looked up so far.
#
# If you mounted dictfs at the usual location (/mnt/dictfs)
# then you can source this file and then use the following
# functions to query the dictionary. Adapt them as needed if
# you mounted the service somewhere else.
# lookup looks up a word
fn lookup {
cat /mnt/dictfs/words/^$1
}
# index lists all of the words that the dictionary has looked up so far.
fn index {
lc /mnt/dictfs/words
}
`
var apiKey string
// lookup queries the dictionary API for word and returns a read-only
// static file containing the formatted definitions.
func lookup(dictFS *fs.FS, word string) (fs.FSNode, error) {
	fmt.Println("Looking up: " + word)
	resp, err := dictQuery(apiKey, word)
	if err != nil {
		// Wrap the underlying error instead of discarding it; the old
		// message dropped the cause entirely.
		return nil, fmt.Errorf("Failed to define %s: %w", word, err)
	}
	if len(resp.defs) == 0 {
		// Constant format string (the old concatenation triggered the
		// go vet printf check).
		return nil, fmt.Errorf("No definitions for %s", word)
	}
	content := resp.responseContent()
	newF := fs.NewStaticFile(dictFS.NewStat(word, "glenda", "glenda", 0444), []byte(content))
	if newF == nil {
		return nil, fmt.Errorf("Can't find /words")
	}
	return newF, nil
}
// WalkFail is the 9p walk-failure hook: a missing file under /words is
// treated as a dictionary lookup and materialized on the fly. Misses
// anywhere else in the tree stay misses.
func WalkFail(dictFS *fs.FS, parent fs.Dir, name string) (fs.FSNode, error) {
	if fs.FullPath(parent) != "/words" {
		return nil, nil
	}
	node, err := lookup(dictFS, name)
	if err != nil {
		fmt.Printf("Failed to look up word: %s\n", err)
		return nil, err
	}
	return node, nil
}
// main wires up the 9p dictionary filesystem (README + lazily-populated
// /words directory) and serves it on 0.0.0.0:9999.
func main() {
	apiKey = os.Getenv("DICTIONARY_API_KEY")
	if apiKey == "" {
		log.Fatal("No API key. Define env variable DICTIONARY_API_KEY")
	}
	dictFS := fs.NewFS("glenda", "glenda", 0555,
		fs.WithWalkFailHandler(WalkFail))
	dictFS.Root.AddChild(fs.NewStaticFile(dictFS.NewStat("README", "glenda", "glenda", 0444), []byte(readme)))
	dictFS.Root.AddChild(fs.NewStaticDir(dictFS.NewStat("words", "glenda", "glenda", 0555)))
	// Serve blocks for the lifetime of the server; its error was
	// previously discarded, hiding bind failures (e.g. port in use).
	if err := server.Serve("0.0.0.0:9999", dictFS); err != nil {
		log.Fatal(err)
	}
}
|
package user
// insertRequest is the payload for creating a user; ExternalId is required
// by the validator.
type insertRequest struct {
	ExternalId string `json:"externalId" validate:"required"`
}
// insertRequestConvert maps an insertRequest onto a freshly allocated User.
func insertRequestConvert(r *insertRequest) *User {
	u := new(User)
	u.ExternalId = r.ExternalId
	return u
}
// insertResponse is the wire representation of a newly created User.
type insertResponse User

// newInsertResponse reinterprets a User as an insertResponse without copying.
// NOTE(review): the parameter name "group" looks copy-pasted from a group
// handler; confirm and rename to "user" in a follow-up.
func newInsertResponse(group *User) *insertResponse {
	return (*insertResponse)(group)
}

// listResponse is the wire representation of a collection of users.
type listResponse []User

// newListResponse wraps the slice as-is (no copy).
func newListResponse(groups []User) listResponse {
	return groups
}
// deleteRequest identifies the user to delete; Id is required.
type deleteRequest struct {
	Id int `json:"id" validate:"required"`
}

// getRequest identifies the user to fetch; Id is required.
type getRequest struct {
	Id int `json:"id" validate:"required"`
}
|
package util
import (
"encoding/json"
"fmt"
"strings"
)
// SmartPrint formats messages as text or JSON with a given severity.
// With an empty severity the raw message is printed verbatim. Otherwise
// text mode prints "[SEVERITY] msg" and JSON mode prints {"severity":"msg"}
// followed by a newline.
func SmartPrint(severity, m string, jsonOut bool) {
	if jsonOut {
		if severity == "" {
			// Was fmt.Printf(m): using the message as a format string
			// mangles any literal '%' and fails the go vet printf check.
			fmt.Print(m)
			return
		}
		// Marshal of map[string]string cannot fail; error intentionally ignored.
		b, _ := json.Marshal(map[string]string{severity: m})
		fmt.Printf("%s\n", b)
		return
	}
	if severity == "" {
		fmt.Print(m)
		return
	}
	fmt.Printf("[%s] %s", strings.ToUpper(severity), m)
}
// RemoveDuplicatesUnordered removes duplicate strings from a slice.
// The order of the result is unspecified.
func RemoveDuplicatesUnordered(elements []string) []string {
	seen := make(map[string]bool, len(elements))
	out := []string{}
	for _, e := range elements {
		if seen[e] {
			continue
		}
		seen[e] = true
		out = append(out, e)
	}
	return out
}
|
/**
* Copyright 2019 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package postgresql
import (
"encoding/json"
"github.com/stretchr/testify/mock"
"github.com/xmidt-org/codex-db"
)
// mockFinder is a testify mock for the record-finding side of the db layer.
// Expectations must be registered with On(...) before the methods are called.
type mockFinder struct {
	mock.Mock
}

// findRecords records the call, then simulates the db filling the caller's
// slice by unmarshalling the expectation's second return value (a JSON
// []byte at index 1) into out. The value at index 0 is the error to return.
func (f *mockFinder) findRecords(out *[]db.Record, limit int, where ...interface{}) error {
	args := f.Called(out, limit, where)
	err := json.Unmarshal(args.Get(1).([]byte), out)
	if err != nil {
		return err
	}
	return args.Error(0)
}

// findRecordsToDelete returns the expectation's canned slice (index 0) and
// error (index 1).
func (f *mockFinder) findRecordsToDelete(limit int, shard int, deathDate int64) ([]db.RecordToDelete, error) {
	args := f.Called(limit, shard, deathDate)
	return args.Get(0).([]db.RecordToDelete), args.Error(1)
}

// mockDeviceFinder is a testify mock for listing device IDs.
type mockDeviceFinder struct {
	mock.Mock
}

// getList returns the expectation's canned []string (index 0) and error (index 1).
func (df *mockDeviceFinder) getList(offset string, limit int, where ...interface{}) ([]string, error) {
	args := df.Called(offset, limit, where)
	return args.Get(0).([]string), args.Error(1)
}

// mockMultiInsert is a testify mock for batched record insertion.
type mockMultiInsert struct {
	mock.Mock
}

// insert returns the expectation's canned row count (an int at index 0,
// widened to int64) and error (index 1).
func (c *mockMultiInsert) insert(records []db.Record) (int64, error) {
	args := c.Called(records)
	return int64(args.Int(0)), args.Error(1)
}

// mockDeleter is a testify mock for record deletion.
type mockDeleter struct {
	mock.Mock
}

// delete returns the expectation's canned row count (index 0) and error (index 1).
func (d *mockDeleter) delete(value *db.Record, limit int, where ...interface{}) (int64, error) {
	args := d.Called(value, limit, where)
	return int64(args.Int(0)), args.Error(1)
}

// mockCloser is a testify mock for closing the db connection.
type mockCloser struct {
	mock.Mock
}

// close returns the expectation's canned error (index 0).
func (d *mockCloser) close() error {
	args := d.Called()
	return args.Error(0)
}

// mockPing is a testify mock for db health checks.
type mockPing struct {
	mock.Mock
}

// ping returns the expectation's canned error (index 0).
func (d *mockPing) ping() error {
	args := d.Called()
	return args.Error(0)
}
|
package main
import (
"fmt"
"github.com/robertkrimen/otto"
)
// main demonstrates embedding the otto JavaScript interpreter: run a
// script, read a value back out of the VM, and inject a Go value into it.
func main() {
	vm := otto.New()
	// Errors from Run/Set were silently discarded; surface them so
	// script failures are visible.
	if _, err := vm.Run(`
    abc = 2 + 2;
    console.log("The value of abc is " + abc); // 4
    `); err != nil {
		fmt.Println("run error:", err)
	}
	// get a value from vm
	if value, err := vm.Get("abc"); err == nil {
		if valueInt, err := value.ToInteger(); err == nil {
			fmt.Println(valueInt, err)
		}
	}
	// set a number
	if err := vm.Set("def", 11); err != nil {
		fmt.Println("set error:", err)
	}
	if _, err := vm.Run(`
    console.log("The value of def is " + def);
    // The value of def is 11
    `); err != nil {
		fmt.Println("run error:", err)
	}
}
|
package fmm
import (
"bytes"
"encoding/binary"
"io"
)
// DatReader is a cursor over an in-memory binary blob, decoding
// little-endian primitives and length-prefixed strings.
type DatReader struct {
	reader *bytes.Reader
}

// newDatReader wraps source in a DatReader positioned at the start.
func newDatReader(source []byte) DatReader {
	return DatReader{
		reader: bytes.NewReader(source),
	}
}
// Advance skips offset bytes relative to the current position.
// The Seek error is deliberately ignored (on an in-memory reader it can
// only occur for a negative resulting position).
func (d *DatReader) Advance(offset int64) {
	d.reader.Seek(offset, io.SeekCurrent)
}

// ReadBool reads one byte and reports whether it equals 1.
// A read past the end yields 0 and therefore false.
func (d *DatReader) ReadBool() bool {
	num, _ := d.reader.ReadByte()
	return num == 1
}
// ReadModWithCRC decodes a mod identifier: a name string, an optimized
// version triple, and a trailing 32-bit CRC that is read only to advance
// the cursor (its value is discarded).
func (d *DatReader) ReadModWithCRC() ModIdent {
	name := d.ReadString()
	version := d.ReadOptimizedVersion(false)
	d.ReadUint32() // CRC -- consumed but unused
	return ModIdent{name, &version}
}

// ReadString reads a single length byte followed by that many bytes of
// string data; strings longer than 255 bytes are not representable here.
// Read errors are ignored, yielding a zero-padded/truncated string.
// NOTE(review): confirm the format never encodes lengths > 255.
func (d *DatReader) ReadString() string {
	length, _ := d.reader.ReadByte()
	stringBuf := make([]byte, length)
	io.ReadFull(d.reader, stringBuf)
	return string(stringBuf)
}
// ReadUint8 reads a single byte as a uint8 (0 when past the end).
// The local was renamed from "byte", which shadowed the predeclared
// identifier.
func (d *DatReader) ReadUint8() uint8 {
	b, _ := d.reader.ReadByte()
	return uint8(b)
}
// ReadUint16 decodes a little-endian uint16 from the next two bytes.
// Short reads are ignored and decode as zeroes.
func (d *DatReader) ReadUint16() uint16 {
	var b [2]byte
	io.ReadFull(d.reader, b[:])
	return binary.LittleEndian.Uint16(b[:])
}
// ReadUint32 decodes a little-endian uint32 from the next four bytes.
// Short reads are ignored and decode as zeroes.
func (d *DatReader) ReadUint32() uint32 {
	var b [4]byte
	io.ReadFull(d.reader, b[:])
	return binary.LittleEndian.Uint32(b[:])
}
// ReadUint16Optimized decodes a space-optimized uint16: values below 255
// fit in a single byte, while the sentinel 0xFF is followed by a full
// little-endian uint16.
func (d *DatReader) ReadUint16Optimized() uint16 {
	b, _ := d.reader.ReadByte()
	if b == 255 {
		return d.ReadUint16()
	}
	return uint16(b)
}
// ReadOptimizedVersion reads three space-optimized uint16 fields; when
// withBuild is set a fourth field is read into index 3. The composite
// literal fills only the first three elements, so Version must be an
// array/slice type with at least four slots (index 3 stays zero when
// withBuild is false).
func (d *DatReader) ReadOptimizedVersion(withBuild bool) Version {
	ver := Version{
		d.ReadUint16Optimized(),
		d.ReadUint16Optimized(),
		d.ReadUint16Optimized(),
	}
	if withBuild {
		ver[3] = d.ReadUint16Optimized()
	}
	return ver
}
// ReadUnoptimizedVersion reads four plain little-endian uint16 fields,
// i.e. the fixed-width (non-optimized) version encoding.
func (d *DatReader) ReadUnoptimizedVersion() Version {
	return Version{
		d.ReadUint16(),
		d.ReadUint16(),
		d.ReadUint16(),
		d.ReadUint16(),
	}
}
|
package test
import (
"context"
"fmt"
"testing"
ma "gx/ipfs/QmNTCey11oxhb1AxDnQBRHtdhap6Ctud872NjAYPYYXPuc/go-multiaddr"
peer "gx/ipfs/QmPJxxDsX2UbchSHobbYuvz7qnyJTFKvaKMzE2rZWJ4x5B/go-libp2p-peer"
pt "gx/ipfs/QmPJxxDsX2UbchSHobbYuvz7qnyJTFKvaKMzE2rZWJ4x5B/go-libp2p-peer/test"
)
// multiaddr parses m into a Multiaddr, panicking on malformed input
// (test-helper semantics: bad fixtures should abort immediately).
func multiaddr(m string) ma.Multiaddr {
	addr, err := ma.NewMultiaddr(m)
	if err != nil {
		panic(err)
	}
	return addr
}
// peerpair bundles a peer ID with its multiaddrs for benchmark fixtures.
type peerpair struct {
	ID   peer.ID
	Addr []ma.Multiaddr
}
// randomPeer fabricates a peer with a random ID and addrCount distinct
// loopback TCP multiaddrs (one per port number) for benchmarking.
func randomPeer(b *testing.B, addrCount int) *peerpair {
	b.Helper()
	pid, err := pt.RandPeerID()
	if err != nil {
		b.Fatal(err)
	}
	addrs := make([]ma.Multiaddr, addrCount)
	for i := range addrs {
		addr, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d/ipfs/%s", i, pid.Pretty()))
		if err != nil {
			b.Fatal(err)
		}
		addrs[i] = addr
	}
	return &peerpair{pid, addrs}
}
// addressProducer streams freshly generated peers into addrs until ctx is
// canceled; the channel is closed on exit so consumers can range over it.
func addressProducer(ctx context.Context, b *testing.B, addrs chan *peerpair, addrsPerPeer int) {
	b.Helper()
	defer close(addrs)
	for {
		next := randomPeer(b, addrsPerPeer)
		select {
		case <-ctx.Done():
			return
		case addrs <- next:
		}
	}
}
|
package server
import "github.com/RecleverLogger/server/handlers"
// Config collects the HTTP server's settings.
type Config struct {
	Port         string            // listen port
	ReadTimeout  int               // read timeout; units not visible here — presumably seconds, confirm at the http.Server wiring
	WriteTimeout int               // write timeout (same unit caveat)
	IdleTimeout  int               // idle timeout (same unit caveat)
	UseTls       bool              // serve TLS when true, using the cert/key files below
	TLSCertFile  string            // path to the TLS certificate (used when UseTls)
	TLSKeyFile   string            // path to the TLS private key (used when UseTls)
	Handlers     handlers.Handlers // request handlers mounted by the server
}
|
/**
*@Author: haoxiongxiao
*@Date: 2019/1/26
*@Description: CREATE GO FILE api_services
*/
package hotel_api_services
const (
	// appId/appSecret are the ShowAPI credentials for this application.
	// SECURITY NOTE(review): the secret is committed in source; it should
	// be loaded from configuration or the environment instead.
	appId     = 58443
	appSecret = "3bd66623713248bab7c97eabe481bbcd"
)
// CommonRequestParams is the ShowAPI request envelope sent with every call.
// The trailing comments show sample values from the vendor documentation.
type CommonRequestParams struct {
	ShowapiAppid     string `json:"showapi_appid"`     // e.g. 100
	ShowapiSign      string `json:"showapi_sign"`      // e.g. 698d51a19d8a121ce581499d7b701668
	ShowapiResGzip   string `json:"showapi_res_gzip"`  // "1" or "0" (gzip the response or not)
	ShowapiTimestamp string `json:"showapi_timestamp"` // e.g. 20141114142239
}
/*
ShowapiResCode
易源返回标志,0为成功,其他为失败。
0成功
-1,系统调用错误
-2,可调用次数或金额为0
-3,读取超时
-4,服务端返回数据解析错误
-5,后端服务器DNS解析错误
-6,服务不存在或未上线
-1000,系统维护
-1002,showapi_appid字段必传
-1003,showapi_sign字段必传
-1004,签名sign验证有误
-1005,showapi_timestamp无效
-1006,app无权限调用接口
-1007,没有订购套餐
-1008,服务商关闭对您的调用权限
-1009,调用频率受限
-1010,找不到您的应用
-1011,子授权app_child_id无效
-1012,子授权已过期或失效
-1013,子授权ip受限
*/
// CommonResponseParams is the ShowAPI response envelope; see the result
// code table in the comment above for ShowapiResCode values.
type CommonResponseParams struct {
	ShowapiResBody  string `json:"showapi_res_body"`  // JSON-wrapped message body; all application-level return params are embedded in this object
	ShowapiResCode  string `json:"showapi_res_code"`  // vendor result code: 0 = success, negative = failure (see table above)
	ShowapiResError string `json:"showapi_res_error"` // human-readable error message
	ShowapiResId    string `json:"showapi_res_id"`    // id of this request
}
|
package nlu
import (
"github.com/oshankkumar/GatewayOmega/client"
ghttp "github.com/oshankkumar/GatewayOmega/http"
"github.com/oshankkumar/GatewayOmega/services"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
"net/http"
)
// Nlu is the gateway service that proxies requests to the NLU backend.
type Nlu struct{}

// newNLUService builds the NLU service instance.
// NOTE(review): the ServiceOption populated from opts is never used — all
// options passed here are silently dropped. Confirm whether Nlu should
// consume opt before relying on any option at this call site.
func newNLUService(opts ...services.SerivceOptionFunc) services.Service {
	nlu := &Nlu{}
	opt := &services.ServiceOption{}
	for _, optFunc := range opts {
		optFunc(opt)
	}
	return nlu
}
// Name returns the service's registry name, "nlu".
func (n *Nlu) Name() string {
	return "nlu"
}
// Send proxies the gateway request to the configured NLU backend
// (services.nlu.addr) and returns the raw HTTP response.
func (n *Nlu) Send(r *ghttp.GatewayRequest) (*http.Response, error) {
	client.Default.Reset()
	client.Default.Verb(r.Req.Method)
	client.Default.Base(viper.GetString("services.nlu.addr"))
	client.Default.Path(r.Req.URL.Path)
	client.Default.Query(r.Req.URL.Query())
	client.Default.Header(r.Req.Header)
	// Best-effort body copy: a body read error simply forwards no body.
	if body, err := r.Body(); err == nil {
		client.Default.Body(body)
	}
	req, err := client.Default.Request()
	// BUG FIX: check the error before dereferencing req. Previously the
	// log statement read req.URL/req.Header first, so a request-build
	// failure caused a nil-pointer panic instead of an error return.
	if err != nil {
		return nil, err
	}
	logrus.WithFields(logrus.Fields{
		"service": "nlu",
		"url":     req.URL.String(),
		"headers": req.Header,
	}).Infof("sending request")
	return client.Default.Do(req, nil)
}
// init registers the NLU service factory with the gateway's service
// registry at package load time (side effect of importing this package).
func init() {
	logrus.WithField("service", "nlu").Infof("registering service")
	services.Register("nlu", newNLUService)
}
|
package main
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/go-martini/martini"
"github.com/yosssi/rendergold"
"net/http"
"net/http/httptest"
"testing"
)
var (
response *httptest.ResponseRecorder
)
// Request spins up a throwaway martini app with the gold renderer, mounts
// handler at route (registered for GET regardless of method), issues one
// request, and records the result into the package-level response recorder.
func Request(method string, route string, handler martini.Handler) {
	app := martini.Classic()
	app.Use(rendergold.Renderer())
	app.Get(route, handler)
	req, _ := http.NewRequest(method, route, nil)
	response = httptest.NewRecorder()
	app.ServeHTTP(response, req)
}
// TestMartiniLearning boots the Ginkgo/Gomega test suite for this package.
func TestMartiniLearning(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "MartiniLearning Suite")
}
|
package podstore
import (
"time"
podstore_protos "github.com/square/p2/pkg/grpc/podstore/protos"
"github.com/square/p2/pkg/launch"
"github.com/square/p2/pkg/manifest"
"github.com/square/p2/pkg/store/consul"
"github.com/square/p2/pkg/store/consul/consulutil"
"github.com/square/p2/pkg/store/consul/podstore"
"github.com/square/p2/pkg/store/consul/statusstore"
"github.com/square/p2/pkg/store/consul/statusstore/podstatus"
"github.com/square/p2/pkg/store/consul/transaction"
"github.com/square/p2/pkg/types"
"github.com/hashicorp/consul/api"
context "golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// store implements the P2PodStore gRPC server on top of a scheduler and a
// consul-backed pod status store.
type store struct {
	scheduler      Scheduler
	podStatusStore PodStatusStore
	consulClient   consulutil.ConsulClient
}

// Compile-time check that store satisfies the generated gRPC interface.
var _ podstore_protos.P2PodStoreServer = store{}

// Scheduler places pod manifests on nodes and removes them by unique key.
type Scheduler interface {
	Schedule(manifest manifest.Manifest, node types.NodeName) (key types.PodUniqueKey, err error)
	Unschedule(key types.PodUniqueKey) error
}

// PodStatusStore reads, watches, lists, mutates and deletes per-pod status
// records keyed by pod unique key.
type PodStatusStore interface {
	Get(key types.PodUniqueKey) (podstatus.PodStatus, *api.QueryMeta, error)
	Delete(podUniqueKey types.PodUniqueKey) error
	WaitForStatus(key types.PodUniqueKey, waitIndex uint64) (podstatus.PodStatus, *api.QueryMeta, error)
	List() (map[types.PodUniqueKey]podstatus.PodStatus, error)
	MutateStatus(ctx context.Context, key types.PodUniqueKey, mutator func(podstatus.PodStatus) (podstatus.PodStatus, error)) error
}
// NewServer assembles the gRPC pod store server from its dependencies.
func NewServer(scheduler Scheduler, podStatusStore PodStatusStore, consulClient consulutil.ConsulClient) store {
	return store{
		scheduler:      scheduler,
		podStatusStore: podStatusStore,
		consulClient:   consulClient,
	}
}
// SchedulePod validates the request and asks the scheduler to place the
// manifest on the requested node, returning the pod's unique key.
func (s store) SchedulePod(_ context.Context, req *podstore_protos.SchedulePodRequest) (*podstore_protos.SchedulePodResponse, error) {
	switch {
	case req.NodeName == "":
		return nil, grpc.Errorf(codes.InvalidArgument, "node_name must be provided")
	case req.Manifest == "":
		return nil, grpc.Errorf(codes.InvalidArgument, "manifest must be provided")
	}
	// Local renamed from "manifest" to avoid shadowing the package name.
	m, err := manifest.FromBytes([]byte(req.Manifest))
	if err != nil {
		return nil, grpc.Errorf(codes.InvalidArgument, "could not parse passed manifest: %s", err)
	}
	key, err := s.scheduler.Schedule(m, types.NodeName(req.NodeName))
	if err != nil {
		return nil, grpc.Errorf(codes.Unavailable, "could not schedule pod: %s", err)
	}
	return &podstore_protos.SchedulePodResponse{
		PodUniqueKey: key.String(),
	}, nil
}
// UnschedulePod removes a previously scheduled pod identified by its
// unique key; a missing pod maps to codes.NotFound.
func (s store) UnschedulePod(_ context.Context, req *podstore_protos.UnschedulePodRequest) (*podstore_protos.UnschedulePodResponse, error) {
	keyStr := req.GetPodUniqueKey()
	if keyStr == "" {
		return nil, grpc.Errorf(codes.InvalidArgument, "pod_unique_key must be provided")
	}
	key, err := types.ToPodUniqueKey(keyStr)
	if err != nil {
		return nil, grpc.Errorf(codes.InvalidArgument, "pod_unique_key of %q is invalid", keyStr)
	}
	switch err = s.scheduler.Unschedule(key); {
	case podstore.IsNoPod(err):
		return nil, grpc.Errorf(codes.NotFound, "no pod with pod_unique_key of %q found", key)
	case err != nil:
		return nil, grpc.Errorf(codes.Unavailable, "error unscheduling pod: %s", err)
	}
	return &podstore_protos.UnschedulePodResponse{}, nil
}
// Represents the return values of WaitForStatus on the podstore. This is
// useful so the results can be passed on a channel so we can wait for
// cancellation on the main goroutine
type podStatusResult struct {
	status podstatus.PodStatus // status observed by the watch
	err    error               // non-nil when the watch failed
}
// WatchPodStatus streams status updates for one pod to the client until
// the client's context is canceled. The first result comes from a
// consistent Get (to avoid stale data); subsequent results come from
// blocking consul watches keyed on the last-seen index. With
// req.WaitForExists set, "no status yet" is silently retried instead of
// being surfaced as NotFound.
func (s store) WatchPodStatus(req *podstore_protos.WatchPodStatusRequest, stream podstore_protos.P2PodStore_WatchPodStatusServer) error {
	if req.StatusNamespace != consul.PreparerPodStatusNamespace.String() {
		// Today this is the only namespace so we just make sure it doesn't diverge from expected
		return grpc.Errorf(codes.InvalidArgument, "%q is not an understood namespace, must be %q", req.StatusNamespace, consul.PreparerPodStatusNamespace)
	}
	podUniqueKey, err := types.ToPodUniqueKey(req.PodUniqueKey)
	if err == types.InvalidUUID {
		return grpc.Errorf(codes.InvalidArgument, "%q does not parse as pod unique key (uuid)", req.PodUniqueKey)
	} else if err != nil {
		return grpc.Errorf(codes.Unavailable, err.Error())
	}
	clientCancel := stream.Context().Done()
	var waitIndex uint64
	// Do one consistent fetch from consul to ensure we don't return any
	// stale results. From then on we'll use watches using the index we got
	// from the Get()
	status, queryMeta, err := s.podStatusStore.Get(podUniqueKey)
	switch {
	case statusstore.IsNoStatus(err) && req.WaitForExists:
		// The client has asked to not be sent 404s, just wait for the record
		// to exist. Don't send a value, just update the wait index to use
		// for the next watch.
		// NOTE(review): assumes queryMeta is non-nil even when the record
		// is missing — confirm the Get implementation guarantees this.
		waitIndex = queryMeta.LastIndex
	case err == nil:
		// send the value we got
		waitIndex = queryMeta.LastIndex
		select {
		case <-clientCancel:
			return nil
		default:
			if err != nil {
				return convertStatusStoreError(err)
			}
			resp := PodStatusToResp(status)
			err = stream.Send(resp)
			if err != nil {
				return err
			}
		}
	default:
		return grpc.Errorf(codes.Unavailable, "error fetching first result from consul: %s", err)
	}
	podStatusResultCh := make(chan podStatusResult)
	// innerQuit is closed when this RPC returns so the watcher goroutine
	// below never blocks forever sending on podStatusResultCh.
	innerQuit := make(chan struct{})
	defer close(innerQuit)
	go func() {
		defer close(podStatusResultCh)
		for {
			status, queryMeta, err := s.podStatusStore.WaitForStatus(podUniqueKey, waitIndex)
			if queryMeta != nil {
				waitIndex = queryMeta.LastIndex
			}
			if statusstore.IsNoStatus(err) && req.WaitForExists {
				// the client wants 404 to be ignored, start
				// the watch again with our new index
				continue
			}
			select {
			case podStatusResultCh <- podStatusResult{
				status: status,
				err:    err,
			}:
				if err != nil {
					return
				}
			case <-innerQuit:
				// Client canceled
				return
			}
		}
	}()
	// Forward watch results to the gRPC stream until the client goes away
	// or the watch reports an error.
	for {
		select {
		case <-clientCancel:
			return nil
		case result := <-podStatusResultCh:
			if result.err != nil {
				return convertStatusStoreError(result.err)
			}
			resp := PodStatusToResp(result.status)
			err = stream.Send(resp)
			if err != nil {
				return err
			}
		}
	}
}
// ListPodStatus returns the status of every pod in the (single, preparer)
// status namespace, keyed by pod unique key.
func (s store) ListPodStatus(_ context.Context, req *podstore_protos.ListPodStatusRequest) (*podstore_protos.ListPodStatusResponse, error) {
	// Today this is the only namespace so we just make sure it doesn't diverge from expected
	if req.StatusNamespace != consul.PreparerPodStatusNamespace.String() {
		return nil, grpc.Errorf(codes.InvalidArgument, "%q is not an understood namespace, must be %q", req.StatusNamespace, consul.PreparerPodStatusNamespace)
	}
	statuses, err := s.podStatusStore.List()
	if err != nil {
		return nil, grpc.Errorf(codes.Unavailable, "error listing pod status: %s", err)
	}
	out := make(map[string]*podstore_protos.PodStatusResponse, len(statuses))
	for key, st := range statuses {
		out[key.String()] = PodStatusToResp(st)
	}
	return &podstore_protos.ListPodStatusResponse{
		PodStatuses: out,
	}, nil
}
// DeletePodStatus removes the status record for the pod named by the
// request's pod unique key.
func (s store) DeletePodStatus(_ context.Context, req *podstore_protos.DeletePodStatusRequest) (*podstore_protos.DeletePodStatusResponse, error) {
	key, err := types.ToPodUniqueKey(req.PodUniqueKey)
	if err != nil {
		return nil, grpc.Errorf(codes.InvalidArgument, "could not convert %s to a pod unique key: %s", req.PodUniqueKey, err)
	}
	if err := s.podStatusStore.Delete(key); err != nil {
		return nil, grpc.Errorf(codes.Unavailable, "error deleting pod status for %s: %s", key, err)
	}
	return &podstore_protos.DeletePodStatusResponse{}, nil
}
// MarkPodFailed flips the pod's status record to "failed" inside a consul
// transaction, leaving all other status data untouched.
func (s store) MarkPodFailed(ctx context.Context, req *podstore_protos.MarkPodFailedRequest) (*podstore_protos.MarkPodFailedResponse, error) {
	podUniqueKey, err := types.ToPodUniqueKey(req.PodUniqueKey)
	if err != nil {
		return nil, grpc.Errorf(codes.InvalidArgument, "could not convert %s to a pod unique key: %s", req.PodUniqueKey, err)
	}
	// The mutator only sets the state field; the rest of the record is kept.
	mutator := func(podStatus podstatus.PodStatus) (podstatus.PodStatus, error) {
		podStatus.PodStatus = podstatus.PodFailed
		return podStatus, nil
	}
	// we don't really need the CAS properties of MutateStatus but there
	// should be only one system trying to write the status record so it
	// doesn't hurt.
	trxctx, cancelFunc := transaction.New(ctx)
	defer cancelFunc()
	err = s.podStatusStore.MutateStatus(trxctx, podUniqueKey, mutator)
	if err != nil {
		return nil, grpc.Errorf(codes.Internal, "failed to construct a consul transaction to update pod %s to failed: %s", podUniqueKey, err)
	}
	err = transaction.MustCommit(trxctx, s.consulClient.KV())
	if err != nil {
		return nil, grpc.Errorf(codes.Unavailable, "could not update pod %s to failed: %s", podUniqueKey, err)
	}
	return &podstore_protos.MarkPodFailedResponse{}, nil
}
// convertStatusStoreError maps status-store errors onto grpc error codes:
// "no status" becomes NotFound, anything else Unavailable, nil stays nil.
func convertStatusStoreError(err error) error {
	switch {
	case err == nil:
		return nil
	case statusstore.IsNoStatus(err):
		return grpc.Errorf(codes.NotFound, err.Error())
	default:
		return grpc.Errorf(codes.Unavailable, err.Error())
	}
}
// PodStatusToResp converts an internal pod status record into its protobuf
// response representation.
func PodStatusToResp(podStatus podstatus.PodStatus) *podstore_protos.PodStatusResponse {
	var processStatuses []*podstore_protos.ProcessStatus
	for _, processStatus := range podStatus.ProcessStatuses {
		proto := &podstore_protos.ProcessStatus{
			LaunchableId: processStatus.LaunchableID.String(),
			EntryPoint:   processStatus.EntryPoint,
		}
		// BUG FIX: LastExit may be nil (the inverse conversion,
		// PodStatusResponseToPodStatus, explicitly allows it); the old
		// code dereferenced it unconditionally and panicked for a
		// process that had not exited yet.
		if processStatus.LastExit != nil {
			proto.LastExit = &podstore_protos.ExitStatus{
				ExitTime:   processStatus.LastExit.ExitTime.Unix(),
				ExitCode:   int64(processStatus.LastExit.ExitCode),
				ExitStatus: int64(processStatus.LastExit.ExitStatus),
			}
		}
		processStatuses = append(processStatuses, proto)
	}
	return &podstore_protos.PodStatusResponse{
		Manifest:        podStatus.Manifest,
		PodState:        podStatus.PodStatus.String(),
		ProcessStatuses: processStatuses,
	}
}
// PodStatusResponseToPodStatus converts a protobuf pod status response back
// into the internal status record; a nil LastExit is preserved as nil.
func PodStatusResponseToPodStatus(resp podstore_protos.PodStatusResponse) podstatus.PodStatus {
	out := podstatus.PodStatus{
		PodStatus: podstatus.PodState(resp.PodState),
		Manifest:  resp.Manifest,
	}
	for _, raw := range resp.ProcessStatuses {
		ps := podstatus.ProcessStatus{
			LaunchableID: launch.LaunchableID(raw.LaunchableId),
			EntryPoint:   raw.EntryPoint,
		}
		if exit := raw.LastExit; exit != nil {
			ps.LastExit = &podstatus.ExitStatus{
				ExitTime:   time.Unix(exit.ExitTime, 0),
				ExitCode:   int(exit.ExitCode),
				ExitStatus: int(exit.ExitStatus),
			}
		}
		out.ProcessStatuses = append(out.ProcessStatuses, ps)
	}
	return out
}
|
/*
In 2014, demoscener Jakub 'Ilmenit' Debski released a 250-byte(1) procedural graphics demo for the Atari XL called Mona. It's drawing the following picture(2):
mona
Your task is to generate the exact same picture, using the language of your choice.
(1) Breakdown: 136 bytes of data + 114 bytes of code.
(2) The original picture is 128x96. The above version was magnified to 256x192. A few pixels differ from the original, but this is the expected output with the pseudo-code described in this challenge.
How?
This is code-golf. Although you're authorized to use any method, best results will most probably be achieved by using the original algorithm which is described below.
NB: This paragraph is not a specification but rather a general description. Please refer to the pseudo-code and the reference implementation for the details of the algorithm.
The image is made of 64 pseudo-random brush strokes (see this video), cycling through the following colors (in RRGGBB hexadecimal format):
COLOR = [ 0xFFE289, 0xE99E45, 0xA55A00, 0x000000 ]
The background is initially filled with the 4th color (black). Each stroke is shorter than the previous one.
The pseudo-random generator is using a Linear-Feedback Shift Register (LFSR) on a 32-bit integer initially set to 0x7EC80000 and XOR'ed with 0x04C11DB7.
Each stroke is initialized with a 16-bit value which overwrites the least significant bytes of the seed:
BRUSH = [
0x030A, 0x37BE, 0x2F9B, 0x072B, 0x0E3C, 0xF59B, 0x8A91, 0x1B0B,
0x0EBD, 0x9378, 0xB83E, 0xB05A, 0x70B5, 0x0280, 0xD0B1, 0x9CD2,
0x2093, 0x209C, 0x3D11, 0x26D6, 0xDF19, 0x97F5, 0x90A3, 0xA347,
0x8AF7, 0x0859, 0x29AD, 0xA32C, 0x7DFC, 0x0D7D, 0xD57A, 0x3051,
0xD431, 0x542B, 0xB242, 0xB114, 0x8A96, 0x2914, 0xB0F1, 0x532C,
0x0413, 0x0A09, 0x3EBB, 0xE916, 0x1877, 0xB8E2, 0xAC72, 0x80C7,
0x5240, 0x8D3C, 0x3EAF, 0xAD63, 0x1E14, 0xB23D, 0x238F, 0xC07B,
0xAF9D, 0x312E, 0x96CE, 0x25A7, 0x9E37, 0x2C44, 0x2BB9, 0x2139
];
These values are also used to set the new position (bx, by) of the brush at the beginning of the stroke: bx is given by the least significant byte and by is given by the most significant byte.
The direction of the stroke is given by bits #1 and #7 of the seed. (See the SWITCH statement in the pseudo-code.)
Pseudo-code
Below is the algorithm in pseudo-code, assuming 0-indexed arrays, where AND, OR and XOR mean bitwise operations.
seed = 0x7EC80000
dir = 0x00
FOR part = 0 TO 63
word = BRUSH[part]
seed = (seed AND 0xFFFF0000) OR word
bx = word AND 0xFF
by = (word >> 8) AND 0xFF
FOR len = 0 TO (64 - part) * 32 - 1
carry = seed AND 0x80000000
seed = (seed << 1) AND 0xFFFFFFFF
IF carry
seed = seed XOR 0x04C11DB7
dir = seed AND 0xFF
ENDIF
SWITCH dir AND 0x82
CASE 0x00:
by = (by + 1) AND 0x7F
ENDCASE
CASE 0x02:
bx = (bx + 1) AND 0x7F
ENDCASE
CASE 0x80:
by = (by - 1) AND 0x7F
ENDCASE
CASE 0x82:
bx = (bx - 1) AND 0x7F
ENDCASE
ENDSWITCH
drawPixel(bx, by, COLOR[part AND 3])
ENDFOR
ENDFOR
Reference implementation
Below is an ungolfed reference implementation in JavaScript.
Hide code snippet
const SEED = 0x7EC80000,
XOR_MSK = 0x04C11DB7,
COLOR = [
'#FFE289', '#E99E45', '#A55A00', '#000000'
],
BRUSH = [
0x030A, 0x37BE, 0x2F9B, 0x072B, 0x0E3C, 0xF59B, 0x8A91, 0x1B0B,
0x0EBD, 0x9378, 0xB83E, 0xB05A, 0x70B5, 0x0280, 0xD0B1, 0x9CD2,
0x2093, 0x209C, 0x3D11, 0x26D6, 0xDF19, 0x97F5, 0x90A3, 0xA347,
0x8AF7, 0x0859, 0x29AD, 0xA32C, 0x7DFC, 0x0D7D, 0xD57A, 0x3051,
0xD431, 0x542B, 0xB242, 0xB114, 0x8A96, 0x2914, 0xB0F1, 0x532C,
0x0413, 0x0A09, 0x3EBB, 0xE916, 0x1877, 0xB8E2, 0xAC72, 0x80C7,
0x5240, 0x8D3C, 0x3EAF, 0xAD63, 0x1E14, 0xB23D, 0x238F, 0xC07B,
0xAF9D, 0x312E, 0x96CE, 0x25A7, 0x9E37, 0x2C44, 0x2BB9, 0x2139
];
var ctx = document.getElementById('output').getContext('2d'),
seed = SEED,
bx, by, word, len, carry,
dir = 0,
part;
ctx.fillStyle = COLOR[3];
ctx.fillRect(0, 0, 128 * 2, 128 * 2);
for(part = 0; part < 64; part++) {
word = BRUSH[part];
seed = (seed & 0xffff0000) | word;
bx = word & 0xff;
by = (word >> 8) & 0xff;
ctx.fillStyle = COLOR[part & 3];
for(len = 0; len < (64 - part) * 32; len++) {
carry = seed & 0x80000000;
seed <<= 1;
if(carry) {
seed ^= XOR_MSK;
dir = seed & 0xff;
}
switch(dir & 0x82) {
case 0x00:
by = (by + 1) & 0x7f;
break;
case 0x02:
bx = (bx + 1) & 0x7f;
break;
case 0x80:
by = (by - 1) & 0x7f;
break;
case 0x82:
bx = (bx - 1) & 0x7f;
break;
}
ctx.fillRect(bx * 2, by * 2, 2, 2);
}
}
<canvas id="output" width=256 height=192></canvas>
Expand snippet
You can also see an animated version here.
Clarification and rules
The output must be cropped to 128x96, even though the algorithm draws outside this area.
If your language/platform is not able to output the exact colors described above, you must use colors that are as close as possible.
Should you decide to use an alternate method, you still must generate the exact same output.
Just in case: submitting the original 6502 assembly code or any slightly edited version is not allowed.
Can you beat 250 bytes? Happy drawing!
*/
package main
import (
	"flag"
	"image"
	"image/color"
	"image/draw"
	"image/png"
	"log"
	"os"
)
// main renders the procedural "Mona" image at the requested magnification
// and writes it as PNG to stdout, failing loudly if encoding fails.
func main() {
	mag := flag.Int("magnify", 4, "magnify by amount")
	flag.Parse()
	// png.Encode's error was previously discarded; a broken stdout pipe
	// would silently produce a truncated image.
	if err := png.Encode(os.Stdout, monalisa(*mag)); err != nil {
		log.Fatalf("encoding png: %v", err)
	}
}
func monalisa(mag int) *image.RGBA {
COLOR := []color.RGBA{
{0xFF, 0xE2, 0x89, 0xFF},
{0xE9, 0x9E, 0x45, 0xFF},
{0xA5, 0x5A, 0x00, 0xFF},
{0x00, 0x00, 0x00, 0xFF},
}
BRUSH := []uint32{
0x030A, 0x37BE, 0x2F9B, 0x072B, 0x0E3C, 0xF59B, 0x8A91, 0x1B0B,
0x0EBD, 0x9378, 0xB83E, 0xB05A, 0x70B5, 0x0280, 0xD0B1, 0x9CD2,
0x2093, 0x209C, 0x3D11, 0x26D6, 0xDF19, 0x97F5, 0x90A3, 0xA347,
0x8AF7, 0x0859, 0x29AD, 0xA32C, 0x7DFC, 0x0D7D, 0xD57A, 0x3051,
0xD431, 0x542B, 0xB242, 0xB114, 0x8A96, 0x2914, 0xB0F1, 0x532C,
0x0413, 0x0A09, 0x3EBB, 0xE916, 0x1877, 0xB8E2, 0xAC72, 0x80C7,
0x5240, 0x8D3C, 0x3EAF, 0xAD63, 0x1E14, 0xB23D, 0x238F, 0xC07B,
0xAF9D, 0x312E, 0x96CE, 0x25A7, 0x9E37, 0x2C44, 0x2BB9, 0x2139,
}
SEED := uint32(0x7EC80000)
XORMASK := uint32(0x04C11DB7)
img := image.NewRGBA(image.Rect(0, 0, mag*128, mag*128))
draw.Draw(img, img.Bounds(), image.NewUniform(COLOR[3]), image.ZP, draw.Src)
seed := SEED
dir := uint32(0)
for part := 0; part < 64; part++ {
word := BRUSH[part]
seed = (seed & 0xFFFF0000) | word
bx := word & 0xFF
by := (word >> 8) & 0xFF
col := COLOR[part&3]
for len := 0; len < (64-part)*32; len++ {
carry := seed & 0x80000000
seed = (seed << 1) & 0xFFFFFFFF
if carry != 0 {
seed ^= XORMASK
dir = seed & 0xFF
}
switch dir & 0x82 {
case 0x00:
by = (by + 1) & 0x7F
case 0x02:
bx = (bx + 1) & 0x7F
case 0x80:
by = (by - 1) & 0x7F
case 0x82:
bx = (bx - 1) & 0x7F
}
ix := int(bx * uint32(mag))
iy := int(by * uint32(mag))
draw.Draw(img, image.Rect(ix, iy, ix+mag, iy+mag), image.NewUniform(col), image.ZP, draw.Src)
}
}
return img
}
|
package apptweak
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
)
// AppDetailResponse is the top-level apptweak payload: the app detail
// content plus metadata about the request that produced it.
type AppDetailResponse struct {
	AD AppDetail `json:"content"`
	MD MetaData  `json:"metadata"`
}

// ErrorResponse is the error payload returned by the apptweak API; the
// optional fields echo the request parameters that caused the failure.
type ErrorResponse struct {
	Err           string `json:"error"`
	ApplicationID int    `json:"application_id,omitempty"`
	Device        string `json:"device,omitempty"`
	Country       string `json:"country,omitempty"`
	Language      string `json:"language,omitempty"`
}

// Error implements the error interface by returning the API's error string.
func (e *ErrorResponse) Error() string {
	return e.Err
}
type AppDetail struct {
AppID int `json:"id"`
Dev Developer `json:"developer"`
Ratings Rating `json:"rating"`
Description string `json:"description"`
Feats Features `json:"features"`
Icon string `json:"icon"`
Genres []int `json:"genres"`
Permissions []string `json:"permissions"`
Price string `json:"price"`
Size int `json:"size"`
PromotionalText string `json:"promotionalText"`
Screens Screenshots `json:"screenshots"`
Vids Videos `json:"videos"`
Slug string `json:"slug"`
Title string `json:"title"`
SubTitle string `json:"subtitle"`
Versions []Version `json:"versions"`
ReleaseDate string `json:"release_date"`
Devices []string `json:"devices"`
CustomersAlsoBought []string `json:"customers_also_bought"`
}
type Developer struct {
Name string `json:"name"`
DeveloperID int `json:"id"`
}
type Rating struct {
Average float64 `json:"average"`
}
type Features struct {
GameCenter bool `json:"game_center"`
Passbook bool `json:"passbook"`
InApps bool `json:"in:apps"`
}
type Screenshots struct {
IPhone []ScreenshotDetail `json:"iphone"`
IPhone5 []ScreenshotDetail `json:"iphone5"`
IPhone6 []ScreenshotDetail `json:"iphone6"`
IPhone6AndUp []ScreenshotDetail `json:"iphone6+"`
IPad []ScreenshotDetail `json:"ipad"`
IPadPro []ScreenshotDetail `json:"ipadPro"`
IPhone6Plus []ScreenshotDetail `json:"iphone6plus"`
Applewatch []ScreenshotDetail `json:"appleWatch"`
}
type ScreenshotDetail struct {
ID string `json:"id"`
PathComponent string `json:"path_component"`
Filename string `json:"filename"`
URL string `json:"url"`
}
// Videos holds the per-device video previews.
// TODO: quickfix — use an interface since the "iphone6+" payload can be
// either an object ({}) or an array ([]), hence the interface{} field.
type Videos struct {
	IPhone6AndUp interface{} `json:"iphone6+"`
}
type VideoDetail struct {
Height int `json:"height"`
Width int `json:"width"`
URI string `json:"uri"`
Codecs string `json:"codecs"`
Audio string `json:"audio"`
}
type Version struct {
ReleaseDate string `json:"release_date"`
ReleaseNotes string `json:"-"` // `json:"release_notes"` TODO: Problem was that release_notes was not a string, prob due to symbols etc. Needs fixture!
Version string `json:"version"`
}
type MetaData struct {
Req Request `json:"request"`
Content MetaDataContent `json:"content"`
}
type Request struct {
Path string `json:"path"`
Store string `json:"store"`
Params Parameters `json:"params"`
PerformedAt string `json:"performed_at"`
}
type Parameters struct {
Country string `json:"country"`
Language string `json:"language"`
Device string `json:"device"`
ID string `json:"id"`
Format string `json:"format"`
Num int `json:"num"`
Term string `json:"term"`
//TODO: Add MaxAge
}
type MetaDataContent struct {
Content string `json:"-"`
}
// AppDetails fetches the full metadata of the iOS app identified by appID.
// The Options (country, device, language, ...) are encoded as query
// parameters. It returns the decoded response, or an error describing the
// step that failed.
func (c *Client) AppDetails(appID int, o Options) (*AppDetailResponse, error) {
	uri := defaultBaseURL + "ios/applications/" + strconv.Itoa(appID) + "/metadata.json"
	u, err := addOptions(uri, o)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}
	b, err := c.doRequest(req)
	if err != nil {
		return nil, err
	}
	var resp AppDetailResponse
	// Wrap the decode error with %w so callers can unwrap it, and include
	// the raw body to help debug malformed responses.
	if err := json.Unmarshal([]byte(b), &resp); err != nil {
		return nil, fmt.Errorf("decoding app details response: %w (body: %s)", err, string(b))
	}
	return &resp, nil
}
|
package sstable
import (
"io"
"sort"
)
// ReadAtCloser is the minimal file abstraction an SSTable needs:
// random-access reads plus Close.
type ReadAtCloser interface {
	io.ReaderAt
	io.Closer
}

// SSTable is a read-only, sorted string table backed by a file.
type SSTable struct {
	f ReadAtCloser // underlying file
	r []record     // index: one record per key, sorted by key
	m []byte       // optional mmap of the file; nil when mmap is unavailable
}

// record locates one value inside the file and carries its checksum.
type record struct {
	key    string
	length uint32 // value length in bytes
	offset uint32 // value offset within the file
	cksum  uint32 // checksum of the raw value
}
// New opens an SSTable over f. It loads the index eagerly and attempts to
// memory-map the file; a missing mmap implementation (errNotImplemented)
// is tolerated and the table falls back to ReadAt-based access.
func New(f ReadAtCloser) (*SSTable, error) {
	t := &SSTable{f, nil, nil}
	if err := t.load(); err != nil {
		return nil, err
	}
	if err := t.tryMMap(); err != nil && err != errNotImplemented {
		return nil, err
	}
	return t, nil
}
// Close unmaps the file (if it was mapped) and closes the underlying file.
func (t *SSTable) Close() error {
	if t.m != nil {
		t.tryMunmap()
		t.m = nil
	}
	return t.f.Close()
}
// Len returns the number of entries in the table.
func (t *SSTable) Len() int {
	return len(t.r)
}

// Key returns the key at index idx (panics if idx is out of range).
func (t *SSTable) Key(idx int) string {
	return t.r[idx].key
}
// rawValue returns the value bytes at index idx without checksum
// verification. When the file is memory-mapped the returned slice aliases
// the mapping (do not mutate it); otherwise the value is read with ReadAt
// into a fresh buffer. Short reads are reported as io.ErrUnexpectedEOF.
func (t *SSTable) rawValue(idx int) ([]byte, error) {
	if t.m != nil {
		p := t.r[idx].offset
		q := t.r[idx].length + p
		return t.m[p:q], nil
	}
	data := make([]byte, t.r[idx].length)
	n, err := t.f.ReadAt(data, int64(t.r[idx].offset))
	if err == io.EOF {
		// EOF mid-value means the file is truncated relative to the index.
		return nil, io.ErrUnexpectedEOF
	}
	if err != nil {
		return nil, err
	}
	if n < len(data) {
		return nil, io.ErrUnexpectedEOF
	}
	return data, nil
}
// Value returns the value at index idx after validating it against the
// stored checksum.
func (t *SSTable) Value(idx int) ([]byte, error) {
	raw, err := t.rawValue(idx)
	if err != nil {
		return nil, err
	}
	if verr := VerifyChecksum(t.r[idx].cksum, raw); verr != nil {
		return nil, verr
	}
	return raw, nil
}
// At returns the key/value pair stored at index idx.
func (t *SSTable) At(idx int) (Pair, error) {
	key := t.Key(idx)
	val, err := t.Value(idx)
	if err != nil {
		return Pair{}, err
	}
	return Pair{key, val}, nil
}
// Find returns the index of key, or -1 when the key is absent.
// It runs a binary search over the sorted index.
func (t *SSTable) Find(key string) int {
	n := len(t.r)
	pos := sort.Search(n, func(k int) bool { return t.r[k].key >= key })
	if pos == n || t.r[pos].key != key {
		return -1
	}
	return pos
}
// Get returns the checksum-verified value stored under key, or
// ErrKeyNotFound when the key is absent.
func (t *SSTable) Get(key string) ([]byte, error) {
	idx := t.Find(key)
	if idx < 0 {
		return nil, ErrKeyNotFound
	}
	return t.Value(idx)
}
// Range returns the half-open index interval [i, j) of keys k with
// lo <= k < hi.
func (t *SSTable) Range(lo, hi string) (int, int) {
	lowerBound := func(bound string) int {
		return sort.Search(len(t.r), func(k int) bool { return t.r[k].key >= bound })
	}
	return lowerBound(lo), lowerBound(hi)
}
// KeysIn returns a channel yielding the keys at indexes [i, j).
// The channel is fed by a goroutine and closed when exhausted; the caller
// must drain it fully, otherwise the goroutine blocks forever.
func (t *SSTable) KeysIn(i, j int) <-chan string {
	ch := make(chan string)
	go (func() {
		for idx := i; idx < j; idx++ {
			ch <- t.Key(idx)
		}
		close(ch)
	})()
	return ch
}

// KeysInRange returns a channel yielding every key k with lo <= k < hi.
func (t *SSTable) KeysInRange(lo, hi string) <-chan string {
	i, j := t.Range(lo, hi)
	return t.KeysIn(i, j)
}

// AllKeys returns a channel yielding every key in the table, in order.
func (t *SSTable) AllKeys() <-chan string {
	return t.KeysIn(0, t.Len())
}
// In returns a channel yielding the key/value pairs at indexes [i, j).
// The caller must drain the channel fully, otherwise the feeding goroutine
// blocks forever.
// NOTE(review): a read/checksum error panics inside the goroutine, which
// crashes the whole process — callers cannot recover it.
func (t *SSTable) In(i, j int) <-chan Pair {
	ch := make(chan Pair)
	go (func() {
		for idx := i; idx < j; idx++ {
			item, err := t.At(idx)
			if err != nil {
				panic(err)
			}
			ch <- item
		}
		close(ch)
	})()
	return ch
}

// InRange returns a channel yielding every pair with lo <= key < hi.
func (t *SSTable) InRange(lo, hi string) <-chan Pair {
	i, j := t.Range(lo, hi)
	return t.In(i, j)
}

// All returns a channel yielding every key/value pair in the table.
func (t *SSTable) All() <-chan Pair {
	return t.In(0, t.Len())
}
|
package imagekit
//
// RESPONSES
//
// MetadataResponse is the top-level payload returned by the ImageKit
// metadata endpoint for a single image.
type MetadataResponse struct {
	Density         int                   `json:"density"`
	EXIF            *MetadataResponseEXIF `json:"exif"`
	Format          string                `json:"format"`
	HasColorProfile bool                  `json:"hasColorProfile"`
	HasTransparency bool                  `json:"hasTransparency"`
	Height          int                   `json:"height"`
	PHash           string                `json:"pHash"`
	Quality         int                   `json:"quality"`
	Size            int                   `json:"size"`
	Width           int                   `json:"width"`
}

// MetadataResponseEXIF groups the EXIF sub-sections; absent sections are nil.
type MetadataResponseEXIF struct {
	EXIF             *MetadataEXIF             `json:"exif"`
	GPS              *MetadataGPS              `json:"gps"`
	Image            *MetadataImage            `json:"image"`
	Interoperability *MetadataInteroperability `json:"interoperability"`
	Makernote        *MetadataMakernote        `json:"makernote"`
	Thumbnail        *MetadataThumbnail        `json:"thumbnail"`
}

// MetadataEXIF mirrors the camera EXIF block as returned by the API.
type MetadataEXIF struct {
	ApertureValue            float32 `json:"ApertureValue"`
	ColorSpace               int     `json:"ColorSpace"`
	CreateDate               string  `json:"CreateDate"`
	CustomRendered           int     `json:"CustomRendered"`
	DateTimeOriginal         string  `json:"DateTimeOriginal"`
	ExifImageHeight          int     `json:"ExifImageHeight"`
	ExifImageWidth           int     `json:"ExifImageWidth"`
	ExifVersion              string  `json:"ExifVersion"`
	ExposureCompensation     int     `json:"ExposureCompensation"`
	ExposureMode             int     `json:"ExposureMode"`
	ExposureProgram          int     `json:"ExposureProgram"`
	ExposureTime             float32 `json:"ExposureTime"`
	FNumber                  float32 `json:"FNumber"`
	Flash                    int     `json:"Flash"`
	FlashpixVersion          string  `json:"FlashpixVersion"`
	FocalLength              int     `json:"FocalLength"`
	FocalPlaneResolutionUnit int     `json:"FocalPlaneResolutionUnit"`
	FocalPlaneXResolution    float32 `json:"FocalPlaneXResolution"`
	FocalPlaneYResolution    float32 `json:"FocalPlaneYResolution"`
	ISO                      int     `json:"ISO"`
	InteropOffset            int     `json:"InteropOffset"`
	MeteringMode             int     `json:"MeteringMode"`
	SceneCaptureType         int     `json:"SceneCaptureType"`
	ShutterSpeedValue        float32 `json:"ShutterSpeedValue"`
	SubSecTime               string  `json:"SubSecTime"`
	SubSecTimeDigitized      string  `json:"SubSecTimeDigitized"`
	SubSecTimeOriginal       string  `json:"SubSecTimeOriginal"`
	WhiteBalance             int     `json:"WhiteBalance"`
}

// MetadataGPS carries the GPS EXIF section.
type MetadataGPS struct {
	GPSVersionID []int `json:"GPSVersionID"`
}

// MetadataImage carries the image EXIF section (camera make/model etc.).
type MetadataImage struct {
	EXIFOffset       int    `json:"ExifOffset"`
	GPSInfo          int    `json:"GPSInfo"`
	Make             string `json:"Make"`
	Model            string `json:"Model"`
	ModifyDate       string `json:"ModifyDate"`
	Orientation      int    `json:"Orientation"`
	ResolutionUnit   int    `json:"ResolutionUnit"`
	Software         string `json:"Software"`
	XResolution      int    `json:"XResolution"`
	YCbCrPositioning int    `json:"YCbCrPositioning"`
	YResolution      int    `json:"YResolution"`
}

// MetadataInteroperability carries the EXIF interoperability section.
type MetadataInteroperability struct {
	InteropIndex   string `json:"InteropIndex"`
	InteropVersion string `json:"InteropVersion"`
}

// MetadataThumbnail carries the embedded thumbnail EXIF section.
type MetadataThumbnail struct {
	Compression     int `json:"Compression"`
	ResolutionUnit  int `json:"ResolutionUnit"`
	ThumbnailLength int `json:"ThumbnailLength"`
	ThumbnailOffset int `json:"ThumbnailOffset"`
	XResolution     int `json:"XResolution"`
	YResolution     int `json:"YResolution"`
}

// MetadataMakernote is vendor-specific and currently decoded as an empty
// object.
type MetadataMakernote struct {
}

//
// SERVICES
//

// MetadataService handles communication with the metadata related methods of the ImageKit API.
type MetadataService service
|
package sshd
import (
"context"
"fmt"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
"github.com/yankeguo/bastion/sshd/sandbox"
"github.com/yankeguo/bastion/types"
"golang.org/x/crypto/ssh"
"google.golang.org/grpc"
"net"
)
// SSHD is the bastion SSH daemon. It terminates user SSH connections,
// authenticates them against the daemon's RPC services, and routes them
// either into a per-user sandbox (lv1) or to a target node (lv2).
// All fields are populated by the init* methods invoked from Run.
type SSHD struct {
	opts             types.SSHDOptions     // static configuration
	listener         net.Listener          // accept loop listener
	clientSigners    []ssh.Signer          // keys used to log into managed nodes
	hostSigner       ssh.Signer            // this daemon's host key
	sshServerConfig  *ssh.ServerConfig     // server config with auth callback
	rpcConn          *grpc.ClientConn      // shared connection to the daemon
	sessionService   types.SessionServiceClient
	replayService    types.ReplayServiceClient
	userService      types.UserServiceClient
	keyService       types.KeyServiceClient
	nodeService      types.NodeServiceClient
	grantService     types.GrantServiceClient
	masterKeyService types.MasterKeyServiceClient
	sandboxManager   sandbox.Manager // manages per-user sandboxes
}
// New creates an SSHD holding the given options. All remaining fields are
// initialized later by Run (or OverrideKeys).
func New(opts types.SSHDOptions) *SSHD {
	sshd := new(SSHD)
	sshd.opts = opts
	return sshd
}
// initSandboxManager creates the manager responsible for per-user sandboxes.
func (s *SSHD) initSandboxManager() (err error) {
	s.sandboxManager, err = sandbox.NewManager(s.opts)
	return
}

// initRPCConn dials the daemon endpoint (insecure gRPC) and creates all
// service clients over the single shared connection.
func (s *SSHD) initRPCConn() (err error) {
	if s.rpcConn, err = grpc.Dial(s.opts.DaemonEndpoint, grpc.WithInsecure()); err != nil {
		return
	}
	s.sessionService = types.NewSessionServiceClient(s.rpcConn)
	s.replayService = types.NewReplayServiceClient(s.rpcConn)
	s.userService = types.NewUserServiceClient(s.rpcConn)
	s.keyService = types.NewKeyServiceClient(s.rpcConn)
	s.nodeService = types.NewNodeServiceClient(s.rpcConn)
	s.grantService = types.NewGrantServiceClient(s.rpcConn)
	s.masterKeyService = types.NewMasterKeyServiceClient(s.rpcConn)
	return
}

// initHostSigner loads the daemon's SSH host key from disk.
func (s *SSHD) initHostSigner() (err error) {
	s.hostSigner, err = loadSSHPrivateKeyFile(s.opts.HostKey)
	return
}

// initClientSigners loads every configured client private key; any single
// failure aborts the whole load.
func (s *SSHD) initClientSigners() (err error) {
	s.clientSigners = []ssh.Signer{}
	for _, key := range s.opts.ClientKeys {
		var cs ssh.Signer
		if cs, err = loadSSHPrivateKeyFile(key); err != nil {
			return
		}
		s.clientSigners = append(s.clientSigners, cs)
	}
	return
}
// initSSHServerConfig builds the ssh.ServerConfig used by the accept loop.
// Authentication is public-key only: the callback resolves the key and user
// through the daemon RPC services and records the routing decision in the
// connection's Permissions extensions — stageLv1 for external connections
// (destined for the sandbox) and stageLv2 for connections originating from
// a sandbox (destined for a target node).
// Must be called after initHostSigner and initRPCConn.
func (s *SSHD) initSSHServerConfig() (err error) {
	s.sshServerConfig = &ssh.ServerConfig{
		PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (ms *ssh.Permissions, err error) {
			ILog(conn).Msg("connection accepted")
			// decode target user and target node from the SSH user name
			// ("user@host" form; both empty for plain lv1 logins)
			tu, th := decodeTargetServer(conn.User())
			// find the key by its SHA256 fingerprint
			var kRes *types.GetKeyResponse
			fp := ssh.FingerprintSHA256(key)
			if kRes, err = s.keyService.GetKey(context.Background(), &types.GetKeyRequest{Fingerprint: fp}); err != nil {
				ELog(conn).Str("fingerprint", fp).Err(err).Msg("failed to lookup key")
				// replace the internal error so details never reach the client
				err = errors.New("internal error: failed to lookup key")
				return
			}
			// touch the key (best effort — failure is logged, not fatal)
			if _, ierr := s.keyService.TouchKey(context.Background(), &types.TouchKeyRequest{Fingerprint: kRes.Key.Fingerprint}); ierr != nil {
				ELog(conn).Str("fingerprint", fp).Str("account", kRes.Key.Account).Err(ierr).Msg("failed to touch key")
			}
			// find the user owning the key
			var uRes *types.GetUserResponse
			if uRes, err = s.userService.GetUser(context.Background(), &types.GetUserRequest{Account: kRes.Key.Account}); err != nil {
				ELog(conn).Str("fingerprint", fp).Str("account", kRes.Key.Account).Err(err).Msg("failed to lookup user")
				err = errors.New("internal error: failed to lookup user")
				return
			}
			// check blocked user
			if uRes.User.IsBlocked {
				ILog(conn).Str("account", uRes.User.Account).Msg("trying to login a blocked user")
				err = errors.New("error: user is blocked")
				return
			}
			// touch the user (best effort)
			if _, ierr := s.userService.TouchUser(context.Background(), &types.TouchUserRequest{Account: uRes.User.Account}); ierr != nil {
				ELog(conn).Str("account", uRes.User.Account).Err(ierr).Msg("failed to touch user")
			}
			// check internal connection (originating from the sandbox host)
			if isSandboxConnection(conn, s.opts.SandboxEndpoint) {
				// lv2: only keys generated inside a sandbox may be used
				if kRes.Key.Source != types.KeySourceSandbox {
					ILog(conn).Str("fingerprint", fp).Str("account", uRes.User.Account).Msg("trying to enter lv2 stage with a non-sandbox key")
					err = errors.New("error: invalid key source")
					return
				}
				// check format: lv2 requires an explicit "user@host" target
				if len(tu) == 0 || len(th) == 0 {
					ILog(conn).Str("account", uRes.User.Account).Str("sshUser", conn.User()).Msg("invalid lv2 stage ssh user format")
					err = errors.New("error: invalid format")
					return
				}
				// check node
				var nRes *types.GetNodeResponse
				if nRes, err = s.nodeService.GetNode(context.Background(), &types.GetNodeRequest{Hostname: th}); err != nil {
					ELog(conn).Str("account", uRes.User.Account).Err(err).Msg("failed to lookup node")
					err = errors.New("internal error: failed to lookup node")
					return
				}
				// check grant for (target user, account, hostname)
				var cRes *types.CheckGrantResponse
				if cRes, err = s.grantService.CheckGrant(context.Background(), &types.CheckGrantRequest{
					User:     tu,
					Account:  uRes.User.Account,
					Hostname: nRes.Node.Hostname,
				}); err != nil {
					ELog(conn).Str("account", uRes.User.Account).Str("hostname", nRes.Node.Hostname).Str("user", tu).Err(err).Msg("failed to check grant")
					err = errors.New("internal error: failed to check permission")
					return
				}
				if !cRes.Ok {
					ILog(conn).Str("account", uRes.User.Account).Str("hostname", nRes.Node.Hostname).Str("user", tu).Msg("trying to access a not granted server")
					err = errors.New("error: no permission")
					return
				}
				// record routing info for handleLv2Connection
				ms = &ssh.Permissions{
					Extensions: map[string]string{
						extKeyAccount:  uRes.User.Account,
						extKeyUser:     tu,
						extKeyAddress:  nRes.Node.Address,
						extKeyHostname: nRes.Node.Hostname,
						extKeyStage:    stageLv2,
					},
				}
			} else {
				// connection from external
				// reject sandbox keys used from outside (would allow
				// recursive sandbox connections)
				if kRes.Key.Source == types.KeySourceSandbox {
					ILog(conn).Str("fingerprint", fp).Str("account", uRes.User.Account).Msg("trying to enter lv1 stage with a sandbox key")
					err = errors.New("error: invalid key source")
					return
				}
				// record routing info for handleLv1Connection
				ms = &ssh.Permissions{
					Extensions: map[string]string{
						extKeyAccount: uRes.User.Account,
						extKeyStage:   stageLv1,
					},
				}
			}
			return
		},
	}
	s.sshServerConfig.AddHostKey(s.hostSigner)
	return
}
// initListener opens the TCP listener on the configured host and port.
func (s *SSHD) initListener() (err error) {
	s.listener, err = net.Listen("tcp", fmt.Sprintf("%s:%d", s.opts.Host, s.opts.Port))
	return
}
// submitClientSigners pushes the fingerprints and authorized-keys form of
// all client signers to the daemon as the complete master key set.
func (s *SSHD) submitClientSigners() (err error) {
	mks := make([]*types.MasterKey, 0, len(s.clientSigners))
	for _, cs := range s.clientSigners {
		mks = append(mks, &types.MasterKey{
			// ssh.FingerprintSHA256 already returns a string; the former
			// string(...) conversion was redundant.
			Fingerprint: ssh.FingerprintSHA256(cs.PublicKey()),
			PublicKey:   string(ssh.MarshalAuthorizedKey(cs.PublicKey())),
		})
	}
	_, err = s.masterKeyService.UpdateAllMasterKeys(context.Background(), &types.UpdateAllMasterKeysRequest{MasterKeys: mks})
	return
}
// OverrideKeys connects to every key-managed node using the client signers
// and overwrites that node's authorized keys. Per-node failures are logged
// and skipped; only setup failures (loading signers, dialing the daemon,
// submitting keys, listing nodes) are returned as errors.
func (s *SSHD) OverrideKeys() (err error) {
	// init client signers
	if err = s.initClientSigners(); err != nil {
		return
	}
	// init rpcConn
	if err = s.initRPCConn(); err != nil {
		return
	}
	// submit client signers to daemon
	if err = s.submitClientSigners(); err != nil {
		return
	}
	// list all nodes
	var res *types.ListNodesResponse
	if res, err = s.nodeService.ListNodes(context.Background(), &types.ListNodesRequest{}); err != nil {
		return
	}
	for _, node := range res.Nodes {
		// ignore un-managed nodes
		if !node.IsKeyManaged {
			continue
		}
		// create client; node-local errors stay local so a failing node does
		// not leak into the function's named return value (the old code
		// returned the last node's error even after "continue")
		client, derr := ssh.Dial("tcp", fixSSHAddress(node.Address), &ssh.ClientConfig{
			User:            "root",
			Auth:            []ssh.AuthMethod{ssh.PublicKeys(s.clientSigners...)},
			HostKeyCallback: ssh.InsecureIgnoreHostKey(),
		})
		if derr != nil {
			log.Error().Err(derr).Str("address", node.Address).Str("hostname", node.Hostname).Msg("failed to create ssh client")
			continue
		}
		// override keys
		if oerr := sshClientOverrideKeys(client, s.clientSigners); oerr != nil {
			log.Error().Err(oerr).Str("address", node.Address).Str("hostname", node.Hostname).Msg("failed to override keys")
		} else {
			log.Info().Str("address", node.Address).Str("hostname", node.Hostname).Msg("success")
		}
		// close client
		client.Close()
	}
	return
}
// Run initializes all subsystems in dependency order, then serves incoming
// TCP connections until the listener fails (e.g. after Shutdown closes it).
// Each accepted connection is handled on its own goroutine.
func (s *SSHD) Run() (err error) {
	// init host signer
	if err = s.initHostSigner(); err != nil {
		return
	}
	// init client signers
	if err = s.initClientSigners(); err != nil {
		return
	}
	// init sandbox manager
	if err = s.initSandboxManager(); err != nil {
		return
	}
	// init rpcConn
	if err = s.initRPCConn(); err != nil {
		return
	}
	// submit client signers to daemon
	if err = s.submitClientSigners(); err != nil {
		return
	}
	// init sshServerConfig, must after host signer and rpcConn
	if err = s.initSSHServerConfig(); err != nil {
		return
	}
	// init listener
	if err = s.initListener(); err != nil {
		return
	}
	// accept loop; the trailing unreachable "return" after this infinite
	// loop (flagged by go vet) has been removed
	for {
		var c net.Conn
		if c, err = s.listener.Accept(); err != nil {
			return
		}
		go s.handleConnection(c)
	}
}
// handleConnection upgrades a raw TCP connection to SSH and dispatches it
// to the lv1 (sandbox) or lv2 (target node) handler, based on the stage the
// auth callback stored in the connection's Permissions extensions.
func (s *SSHD) handleConnection(c net.Conn) {
	var err error
	// variables
	var conn *ssh.ServerConn
	var nchan <-chan ssh.NewChannel
	var rchan <-chan *ssh.Request
	// upgrade connection to ssh connection
	if conn, nchan, rchan, err = ssh.NewServerConn(c, s.sshServerConfig); err != nil {
		log.Error().Err(err).Msg("failed to handshake")
		return
	}
	// handle the connection
	ILog(conn).Msg("connection established")
	if conn.Permissions.Extensions[extKeyStage] == stageLv1 {
		err = s.handleLv1Connection(conn, nchan, rchan)
	} else {
		err = s.handleLv2Connection(conn, nchan, rchan)
	}
	// NOTE(review): the handler's error is assigned but never inspected or
	// logged here.
	ILog(conn).Msg("connection finished")
	return
}
// updateSandboxPublicKey reads the sandbox's SSH public key, computes its
// fingerprint, and registers it with the daemon as a sandbox-sourced key
// for account. That key is what later authorizes the lv2 stage.
func (s *SSHD) updateSandboxPublicKey(sb sandbox.Sandbox, account string) (err error) {
	var ak string
	if ak, err = sb.GetSSHPublicKey(); err != nil {
		return
	}
	var pk ssh.PublicKey
	if pk, _, _, _, err = ssh.ParseAuthorizedKey([]byte(ak)); err != nil {
		return
	}
	fp := ssh.FingerprintSHA256(pk)
	_, err = s.keyService.CreateKey(context.Background(), &types.CreateKeyRequest{
		Name:        "sandbox",
		Account:     account,
		Fingerprint: fp,
		Source:      types.KeySourceSandbox,
	})
	return
}
// updateSandboxSSHConfig regenerates the sandbox's SSH client config with
// one entry per grant the account holds, so the user can jump to granted
// nodes by alias. The special tunnel pseudo-user is excluded.
func (s *SSHD) updateSandboxSSHConfig(sb sandbox.Sandbox, account string) (err error) {
	var riRes *types.ListGrantItemsResponse
	if riRes, err = s.grantService.ListGrantItems(context.Background(), &types.ListGrantItemsRequest{Account: account}); err != nil {
		return
	}
	se := make([]sandbox.SSHEntry, 0)
	for _, ri := range riRes.GrantItems {
		// skip the special tunnel user
		if ri.User == types.GrantUserTunnel {
			continue
		}
		se = append(se, sandbox.SSHEntry{
			Name: fmt.Sprintf("%s-%s", ri.Hostname, ri.User),
			Host: s.opts.SandboxEndpoint,
			Port: uint(s.opts.Port),
			// "user@host" is decoded again by the lv2 auth callback
			User: fmt.Sprintf("%s@%s", ri.User, ri.Hostname),
		})
	}
	_, _, err = sb.ExecScript(sandbox.ScriptSeedSSHConfig(se))
	return
}
// handleLv1Connection serves an external (lv1) connection. Two channel
// types are supported: 'direct-tcpip' tunnels (checked against the special
// tunnel grant, or allowed for raw IPs) and 'session' channels, which are
// attached to the user's sandbox. Blocks until the client stops opening
// channels.
// NOTE(review): per-channel failures assign the named return err and then
// continue, so the last channel's error can be returned even though it was
// already handled — confirm callers ignore the result (handleConnection
// currently does).
func (s *SSHD) handleLv1Connection(conn *ssh.ServerConn, ncchan <-chan ssh.NewChannel, grchan <-chan *ssh.Request) (err error) {
	// remember to close the connection
	defer conn.Close()
	account := conn.Permissions.Extensions[extKeyAccount]
	// discard global requests
	go discardRequests(grchan)
	// pre-create a connection-local tunnel pool for failure isolation
	tp := NewTunnelPool(s.clientSigners)
	defer tp.Close()
	// handle new channels
	for nc := range ncchan {
		if nc.ChannelType() == ChannelTypeDirectTCPIP {
			// if channel type is 'direct-tcpip'
			// extract host and port from extra data
			var pl DirectTCPIPExtraData
			if err = ssh.Unmarshal(nc.ExtraData(), &pl); err != nil {
				nc.Reject(ssh.UnknownChannelType, "internal error: invalid extra data for 'direct-tcpip' new channel request")
				ELog(conn).Str("channel", nc.ChannelType()).Hex("extraData", nc.ExtraData()).Err(err).Msg("invalid extra data for 'direct-tcpip'")
				continue
			}
			var rawIP bool
			var address string
			if ip := net.ParseIP(pl.Host); ip != nil {
				// raw IP, in theory, raw IP is allowed only if user can connect sandbox
				rawIP = true
				address = pl.Host
			} else {
				// find the node by hostname
				var nRes *types.GetNodeResponse
				if nRes, err = s.nodeService.GetNode(context.Background(), &types.GetNodeRequest{Hostname: pl.Host}); err != nil {
					nc.Reject(ssh.ConnectionFailed, "internal error: failed to lookup node")
					ELog(conn).Str("channel", nc.ChannelType()).Str("hostname", pl.Host).Err(err).Msg("failed to lookup node")
					continue
				}
				// check __tunnel__ user permission with given node
				var cRes *types.CheckGrantResponse
				if cRes, err = s.grantService.CheckGrant(context.Background(), &types.CheckGrantRequest{
					Account:  account,
					User:     types.GrantUserTunnel,
					Hostname: pl.Host,
				}); err != nil {
					nc.Reject(ssh.ConnectionFailed, "internal error: failed to check permission")
					ELog(conn).Str("channel", nc.ChannelType()).Str("hostname", pl.Host).Err(err).Msg("failed to lookup grant")
					continue
				}
				if !cRes.Ok {
					nc.Reject(ssh.ConnectionFailed, "error: no permission")
					ILog(conn).Str("channel", nc.ChannelType()).Str("hostname", pl.Host).Msg("trying to create tunnel on a not granted node")
					continue
				}
				address = nRes.Node.Address
			}
			// accept the new channel
			var sc ssh.Channel
			var srchan <-chan *ssh.Request
			if sc, srchan, err = nc.Accept(); err != nil {
				ELog(conn).Str("channel", nc.ChannelType()).Str("hostname", pl.Host).Err(err).Msg("failed to accept new channel")
				continue
			}
			// discard all channel-local requests
			go discardRequests(srchan)
			// dial and stream 'direct-tcpip'
			if rawIP {
				go handleLv1RawIPDirectTCPIPChannel(conn, sc, address, int(pl.Port))
			} else {
				go handleLv1DirectTCPIPChannel(conn, sc, tp, address, int(pl.Port))
			}
		} else if nc.ChannelType() == ChannelTypeSession {
			// find or create the sandbox
			var sb sandbox.Sandbox
			if sb, err = s.sandboxManager.FindOrCreate(account); err != nil {
				nc.Reject(ssh.ConnectionFailed, "internal error: failed to find or create the sandbox")
				ELog(conn).Str("channel", nc.ChannelType()).Err(err).Msg("failed to find or create the sandbox")
				continue
			}
			// load public key from sandbox /root/.ssh/id_rsa.pub
			// (best effort: a failure is logged but the session continues)
			if err = s.updateSandboxPublicKey(sb, account); err != nil {
				ELog(conn).Str("channel", nc.ChannelType()).Err(err).Msg("failed to extract sandbox public key")
			}
			// write sandbox /root/.ssh/config (best effort as well)
			if err = s.updateSandboxSSHConfig(sb, account); err != nil {
				ELog(conn).Str("channel", nc.ChannelType()).Err(err).Msg("failed to write ssh config to sandbox")
			}
			// accept the new channel
			var sc ssh.Channel
			var srchan <-chan *ssh.Request
			if sc, srchan, err = nc.Accept(); err != nil {
				ELog(conn).Str("channel", nc.ChannelType()).Err(err).Msg("failed to accept new channel")
				continue
			}
			go handleLv1SessionChannel(conn, sc, srchan, sb, account, s.sessionService, s.replayService)
		} else {
			ELog(conn).Str("channel", nc.ChannelType()).Msg("unsupported channel type")
			nc.Reject(ssh.UnknownChannelType, "error: only channel type 'session' and 'direct-tcpip' is allowed")
			continue
		}
	}
	return
}
// handleLv2Connection serves a sandbox-originated (lv2) connection by
// dialing the target node as root with the client signers and bridging
// each incoming 'session' channel to a session on the target. Blocks until
// the client stops opening channels.
func (s *SSHD) handleLv2Connection(conn *ssh.ServerConn, ncchan <-chan ssh.NewChannel, grchan <-chan *ssh.Request) (err error) {
	defer conn.Close()
	// extract connection parameters recorded by the auth callback
	user := conn.Permissions.Extensions[extKeyUser]
	address := conn.Permissions.Extensions[extKeyAddress]
	// no global requests is allowed in LV2 connection
	go discardRequests(grchan)
	// create ssh.Client to the target node
	var client *ssh.Client
	if client, err = ssh.Dial("tcp", fixSSHAddress(address), &ssh.ClientConfig{
		User:            "root",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(s.clientSigners...)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}); err != nil {
		ELog(conn).Str("address", address).Msg("failed to create ssh client")
		return
	}
	defer client.Close()
	// iterate new channel requests
	for nc := range ncchan {
		// check channel type; only sessions are bridged
		if nc.ChannelType() != ChannelTypeSession {
			nc.Reject(ssh.UnknownChannelType, "error: unsupported channel type")
			ILog(conn).Str("channel", nc.ChannelType()).Msg("unsupported channel type")
			continue
		}
		// open session on remote server first, so a remote failure can
		// reject the local channel before it is accepted
		var tc ssh.Channel
		var trchan <-chan *ssh.Request
		if tc, trchan, err = client.OpenChannel(ChannelTypeSession, nil); err != nil {
			nc.Reject(ssh.ConnectionFailed, "error: failed to create new session channel on remote server")
			ELog(conn).Str("channel", nc.ChannelType()).Err(err).Msg("failed to create new session channel on remote server")
			continue
		}
		// accept channel
		var sc ssh.Channel
		var srchan <-chan *ssh.Request
		if sc, srchan, err = nc.Accept(); err != nil {
			tc.Close()
			ELog(conn).Str("channel", nc.ChannelType()).Err(err).Msg("failed to accept new channel")
			continue
		}
		// bridge channels
		go handleLv2SessionChannel(conn, sc, srchan, tc, trchan, user)
	}
	return
}
// Shutdown closes the listener, if one exists, which makes Run's accept
// loop return.
func (s *SSHD) Shutdown() {
	if l := s.listener; l != nil {
		l.Close()
	}
}
|
package main
import (
"log"
"testing"
nats "github.com/nats-io/nats.go"
"github.com/satori/uuid"
)
// SalesItem is one line item of a sales order.
type SalesItem struct {
	ItemID    string  `json:"ID"`
	Name      string  `json:"name"`
	Qty       float32 `json:"qty"`
	UnitPrice float32 `json:"unitprice"`
	Status    string  `json:"status"`
	Location  string  `json:"location"`
}

// SalesOrder groups sales items with a total amount and status.
type SalesOrder struct {
	SalesOrderID string      `json:"ID"`
	Amount       float32     `json:"amount"`
	Items        []SalesItem `json:"items"`
	Status       string      `json:"status"`
}

// Item is one line item of a job order.
type Item struct {
	ItemID   string  `json:"ID"`
	Name     string  `json:"name"`
	Qty      float32 `json:"qty"`
	Status   string  `json:"status"`
	Location string  `json:"location"`
}

// JobOrder groups items to be produced/handled under one job.
type JobOrder struct {
	JobOrderID string `json:"ID"`
	Name       string `json:"name"`
	Items      []Item `json:"item"`
	Status     string `json:"status"`
}

// Response is a generic reply message exchanged over NATS.
type Response struct {
	From    string `json:"from"`
	Message string `json:"message"`
}
// NewUuid returns a freshly generated random (version 4) UUID as a string.
// (Dead commented-out error handling removed: the vendored uuid.NewV4
// used here returns no error.)
func NewUuid() string {
	return uuid.NewV4().String()
}
// Connect creates a Connector for clientID and establishes its connection
// to the given NATS servers with unlimited reconnect attempts. A setup
// failure is logged and returned.
func Connect(clientID, natsServers string) (*Connector, error) {
	connector := NewConnector(clientID)
	//log.Printf("clientID: %s, natsServers: %s\n", clientID, natsServers)
	err := connector.SetupConnectionToNATS(natsServers, nats.MaxReconnects(-1))
	if err != nil {
		log.Printf("Problem setting up connection to NATS servers, %v", err)
		return nil, err
	}
	return connector, nil
}
// ErrorIfNotNil fails the test with message s when err is non-nil.
// It does nothing for a nil error.
func ErrorIfNotNil(t *testing.T, err error, s string) {
	// t.Helper makes failures report the caller's line, not this one.
	t.Helper()
	if err != nil {
		LogAndFail(t, s)
	}
}

// LogAndFail logs s and marks the test as failed without stopping it.
func LogAndFail(t *testing.T, s string) {
	t.Helper()
	t.Log(s)
	t.Fail()
}
|
package document
import (
"fmt"
)
// ErrDocNotFound reports that no document matches the requested
// annotation/kind pair.
type ErrDocNotFound struct {
	Annotation string
	Kind       string
}

// Error implements the error interface.
func (e ErrDocNotFound) Error() string {
	const format = "Document annotated by %s with Kind %s not found"
	return fmt.Sprintf(format, e.Annotation, e.Kind)
}
|
//
// Copyright 2020 The AVFS authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package orefafs
import (
"bytes"
"os"
"sort"
"sync/atomic"
"time"
"github.com/avfs/avfs"
)
// split splits path immediately following the final Separator, separating
// it into a directory and file name component. The separator itself is kept
// in dir only when it is the leading root character; otherwise it is
// dropped (so dir+file != path in the general case). If there is no
// Separator in path, split returns an empty dir and file set to path.
func split(path string) (dir, file string) {
	i := len(path) - 1
	for i >= 0 && !os.IsPathSeparator(path[i]) {
		i--
	}
	if i < 0 {
		// No separator at all: everything is the file name. The previous
		// code sliced path[:-1] here and panicked.
		return "", path
	}
	if i == 0 {
		// The separator is the leading root character: keep it as dir.
		return path[:1], path[1:]
	}
	return path[:i], path[i+1:]
}
// addChild registers child under name in nd's children map, creating the
// map lazily on first use. Callers are expected to hold nd's lock
// (see createNode).
func (nd *node) addChild(name string, child *node) {
	if nd.children == nil {
		nd.children = make(children)
	}
	nd.children[name] = child
}
// createDir creates a new directory node under parent at absPath.
// perm is restricted to the permission bits and reduced by the file
// system's umask.
func (vfs *OrefaFS) createDir(parent *node, absPath, fileName string, perm os.FileMode) *node {
	mode := os.ModeDir | (perm & avfs.FileModeMask &^ os.FileMode(vfs.umask))
	return vfs.createNode(parent, absPath, fileName, mode)
}

// createFile creates a new regular file node under parent at absPath,
// applying the same permission masking as createDir (without the dir bit).
func (vfs *OrefaFS) createFile(parent *node, absPath, fileName string, perm os.FileMode) *node {
	mode := perm & avfs.FileModeMask &^ os.FileMode(vfs.umask)
	return vfs.createNode(parent, absPath, fileName, mode)
}
// createNode creates a new node (directory or file) with a unique id and
// the current user/time, links it into parent's children, and registers it
// in the file system's absolute-path map. Both the parent and the vfs map
// are locked only for their respective updates.
func (vfs *OrefaFS) createNode(parent *node, absPath, fileName string, mode os.FileMode) *node {
	nd := &node{
		// atomic increment keeps ids unique across concurrent creations
		id:    atomic.AddUint64(&vfs.lastId, 1),
		mtime: time.Now().UnixNano(),
		mode:  mode,
		uid:   vfs.currentUser.Uid(),
		gid:   vfs.currentUser.Gid(),
		nlink: 1,
	}
	parent.mu.Lock()
	parent.addChild(fileName, nd)
	parent.mu.Unlock()
	vfs.mu.Lock()
	vfs.nodes[absPath] = nd
	vfs.mu.Unlock()
	return nd
}
// fillStatFrom returns a fStat (implementation of os.FileInfo) snapshot of
// nd under the given name, taken while holding the node's read lock.
func (nd *node) fillStatFrom(name string) *fStat {
	nd.mu.RLock()
	fst := &fStat{
		id:    nd.id,
		name:  name,
		size:  nd.size(),
		mode:  nd.mode,
		mtime: nd.mtime,
		uid:   nd.uid,
		gid:   nd.gid,
		nlink: nd.nlink,
	}
	nd.mu.RUnlock()
	return fst
}
// infos returns the directory's entries as os.FileInfo values, sorted by
// name. It returns nil for an empty directory.
func (nd *node) infos() []os.FileInfo {
	if len(nd.children) == 0 {
		return nil
	}
	entries := make([]os.FileInfo, 0, len(nd.children))
	for childName, child := range nd.children {
		entries = append(entries, child.fillStatFrom(childName))
	}
	sort.Slice(entries, func(a, b int) bool { return entries[a].Name() < entries[b].Name() })
	return entries
}
// names returns the directory's entry names sorted lexicographically, or
// nil for an empty directory.
func (nd *node) names() []string {
	if len(nd.children) == 0 {
		return nil
	}
	entryNames := make([]string, 0, len(nd.children))
	for childName := range nd.children {
		entryNames = append(entryNames, childName)
	}
	sort.Strings(entryNames)
	return entryNames
}
// remove deletes the content of a node: children are always dropped, and
// the file data is released only once the last hard link is gone.
func (nd *node) remove() {
	nd.children = nil
	nd.nlink--
	if nd.nlink == 0 {
		nd.data = nil
	}
}
// setMode replaces only the permission bits of the node's mode; type bits
// (dir, symlink, ...) are preserved.
func (nd *node) setMode(mode os.FileMode) {
	nd.mode &^= avfs.FileModeMask
	nd.mode |= mode & avfs.FileModeMask
}

// setModTime sets the modification time of the node (stored as UnixNano).
func (nd *node) setModTime(mtime time.Time) {
	nd.mtime = mtime.UnixNano()
}

// setOwner sets the owning user and group ids.
func (nd *node) setOwner(uid, gid int) {
	nd.uid = uid
	nd.gid = gid
}
// size returns the node's size without locking: the number of entries for
// a directory, the data length in bytes for a file.
func (nd *node) size() int64 {
	if nd.mode.IsDir() {
		return int64(len(nd.children))
	}
	return int64(len(nd.data))
}

// Size returns the size of the file, guarded by the node's read lock.
func (nd *node) Size() int64 {
	nd.mu.RLock()
	s := nd.size()
	nd.mu.RUnlock()
	return s
}
// truncate resizes the file data to size bytes: releasing the buffer at
// zero, zero-padding when growing, and slicing when shrinking.
func (nd *node) truncate(size int64) {
	switch current := len(nd.data); {
	case size == 0:
		nd.data = nil
	case int(size) > current:
		nd.data = append(nd.data, bytes.Repeat([]byte{0}, int(size)-current)...)
	default:
		nd.data = nd.data[:size]
	}
}
|
package lib
const (
	// PROD is the production environment.
	PROD = "prod"
	// DEV is the development environment, typically used for local testing.
	DEV = "dev"
	// TEST is the testing environment.
	TEST = "test"
)

// Resp defines the JSON response envelope returned to clients.
type Resp struct {
	Ret    Code        `json:"ret"`              // business status code
	Msg    string      `json:"msg,omitempty"`    // short human-readable message
	Detail string      `json:"detail,omitempty"` // optional detailed description
	Data   interface{} `json:"data,omitempty"`   // optional payload
}
|
package main
/*
Create basic nodejs generator.
Requirements:
Write to package.json (use npm install or perhaps
use base template and allow user to fill in options)
*/
// main seeds a generator with the default file set of a basic nodejs
// project and an empty ProjectConfig, then reads the templates and writes
// them out. The empty-string arguments are presumably source/target
// directories — TODO confirm against readFiles/writeFiles.
func main() {
	t := generator{map[string]bool{
		"package.json": true,
		"app.js":       true,
		"index.html":   true,
		"js":           true,
		"css":          true,
	},
		ProjectConfig{
			name:   "",
			author: "",
			pt:     "",
		},
	}
	readFiles("", t.FileMap)
	writeFiles("", t.FileMap)
}
|
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package common implements code and utilities shared across all packages in
// client/.
package common
|
package cmd
import (
"fmt"
"strconv"
"strings"
"github.com/Unreal4tress/go-sourceformat/vmf"
"gonum.org/v1/gonum/spatial/r3"
)
// DispInfo holds a parsed Source-engine displacement ("dispinfo") block:
// a Seps x Seps grid of per-vertex normals and distances, where
// Seps = 2^Power + 1.
type DispInfo struct {
	Power         int         // displacement power (0..4)
	Seps          int         // vertices per row/column, derived from Power
	StartPosition r3.Vec      // world position of the first corner
	Normals       [][]r3.Vec  // [row][column] displacement direction
	Distances     [][]float64 // [row][column] displacement magnitude
}
// parseDispInfo extracts the displacement info from a brush side node, or
// returns nil when the side carries no "dispinfo" block.
// Parse errors in individual floats are ignored (fields stay zero),
// matching the previous behavior.
func parseDispInfo(side *vmf.Node) *DispInfo {
	nodes := side.Nodes("dispinfo")
	if len(nodes) == 0 {
		return nil
	}
	dispinfo := nodes[0]
	r := new(DispInfo)
	r.Power = dispinfo.Int("power")
	// vertices per row/column for powers 0..4
	r.Seps = []int{2, 3, 5, 9, 17}[r.Power]
	fmt.Sscanf(dispinfo.String("startposition"), "[%f %f %f]",
		&r.StartPosition.X, &r.StartPosition.Y, &r.StartPosition.Z)
	// Each normals "rowN" holds Seps vectors flattened to 3*Seps floats.
	normals := dispinfo.Nodes("normals")[0]
	r.Normals = make([][]r3.Vec, r.Seps)
	for i := 0; i < r.Seps; i++ {
		key := fmt.Sprintf("row%v", i)
		vs := strings.Split(normals.String(key), " ")
		row := make([]r3.Vec, r.Seps)
		for j := 0; j < r.Seps; j++ {
			// BUG FIX: the components of vector j start at index 3*j.
			// The previous code indexed vs[j+0..j+2], re-reading
			// overlapping values for every vertex after the first.
			v := r3.Vec{}
			v.X, _ = strconv.ParseFloat(vs[3*j+0], 64)
			v.Y, _ = strconv.ParseFloat(vs[3*j+1], 64)
			v.Z, _ = strconv.ParseFloat(vs[3*j+2], 64)
			row[j] = v
		}
		r.Normals[i] = row
	}
	// Each distances "rowN" holds Seps scalars.
	distances := dispinfo.Nodes("distances")[0]
	r.Distances = make([][]float64, r.Seps)
	for i := 0; i < r.Seps; i++ {
		key := fmt.Sprintf("row%v", i)
		text := distances.String(key)
		row := make([]float64, r.Seps)
		for j, v := range strings.Split(text, " ") {
			row[j], _ = strconv.ParseFloat(v, 64)
		}
		r.Distances[i] = row
	}
	return r
}
|
package route
// insertRequest is the JSON body for creating a route.
type insertRequest struct {
	Route       string   `json:"route" validate:"required"`
	Description string   `json:"description" validate:"required"`
	Method      string   `json:"method" validate:"required"`
	Tags        []string `json:"tags"`
}

// updateRequest is the JSON body for updating an existing route by Id.
type updateRequest struct {
	Route       string   `json:"route" validate:"required"`
	Id          int      `json:"id" validate:"required"`
	Description string   `json:"description" validate:"required"`
	Method      string   `json:"method" validate:"required"`
	Tags        []string `json:"tags"`
}
// insertRequestConvert maps an insertRequest onto the domain Route.
// NOTE(review): Tags are not carried over by either converter — presumably
// tags are persisted separately; confirm against the handler.
func insertRequestConvert(r *insertRequest) *Route {
	return &Route{
		Route:       r.Route,
		Description: r.Description,
		Method:      r.Method,
	}
}

// updateRequestConvert maps an updateRequest (including Id) onto the
// domain Route.
func updateRequestConvert(r *updateRequest) *Route {
	return &Route{
		Id:          r.Id,
		Route:       r.Route,
		Description: r.Description,
		Method:      r.Method,
	}
}
// insertResponse is the payload returned after an insert; it shares the
// shape of RouteWithTags.
type insertResponse RouteWithTags

// newInsertResponse converts a RouteWithTags into the response type
// without copying.
func newInsertResponse(group *RouteWithTags) *insertResponse {
	return (*insertResponse)(group)
}

// listResponse is the payload returned by the list endpoint.
type listResponse []RouteWithTags

// newListResponse wraps the routes slice as a listResponse.
func newListResponse(routes []RouteWithTags) listResponse {
	return routes
}

// deleteRequest identifies the route to delete.
type deleteRequest struct {
	Id int `json:"id" validate:"required"`
}

// getRequest identifies the route to fetch.
type getRequest struct {
	Id int `json:"id" validate:"required"`
}

// listRequest carries the optional filter for the list endpoint.
type listRequest struct {
	Filter filter `json:"filter"`
}

// filter restricts listed routes.
type filter struct {
	Tags tags `json:"tags"`
}

// tags selects routes by tag names; Exclude inverts the match.
type tags struct {
	Names   []string `json:"names"`
	Exclude bool     `json:"exclude"`
}
|
package iface
import (
"log"
"time"
)
// WeatherCode classifies the general weather condition.
type WeatherCode int

// Supported weather condition codes. CodeUnknown is the zero value.
// NOTE(review): values are positional (iota) — append new codes at the end
// to keep any externally stored values stable.
const (
	CodeUnknown WeatherCode = iota
	CodeCloudy
	CodeFog
	CodeHeavyRain
	CodeHeavyShowers
	CodeHeavySnow
	CodeHeavySnowShowers
	CodeLightRain
	CodeLightShowers
	CodeLightSleet
	CodeLightSleetShowers
	CodeLightSnow
	CodeLightSnowShowers
	CodePartlyCloudy
	CodeSunny
	CodeThunderyHeavyRain
	CodeThunderyShowers
	CodeThunderySnowShowers
	CodeVeryCloudy
)
// Cond describes one weather condition sample. Pointer fields are
// optional: a nil value means the backend did not supply that datum.
type Cond struct {
	// Time is the time, where this weather condition applies.
	Time time.Time
	// Code is the general weather condition and must be one of the
	// WeatherCode constants.
	Code WeatherCode
	// Desc is a short string describing the condition. It should be just one
	// sentence.
	Desc string
	// TempC is the temperature in degrees celsius.
	TempC *float32
	// FeelsLikeC is the felt temperature (with windchill effect e.g.) in
	// degrees celsius.
	FeelsLikeC *float32
	// ChanceOfRainPercent is the probability of rain or snow. It must be in the
	// range [0, 100].
	ChanceOfRainPercent *int
	// PrecipM is the precipitation amount in meters(!) per hour. Must be >= 0.
	PrecipM *float32
	// VisibleDistM is the visibility range in meters(!). It must be >= 0.
	VisibleDistM *float32
	// WindspeedKmph is the average wind speed in kilometers per hour. The value
	// must be >= 0.
	WindspeedKmph *float32
	// WindGustKmph is the maximum temporary wind speed in kilometers per
	// hour. It should be > WindspeedKmph.
	WindGustKmph *float32
	// WinddirDegree is the direction the wind is blowing from on a clock
	// oriented circle with 360 degrees. 0 means the wind is blowing from north,
	// 90 means the wind is blowing from east, 180 means the wind is blowing
	// from south and 270 means the wind is blowing from west. The value must be
	// in the range [0, 359].
	WinddirDegree *int
	// Humidity is the *relative* humidity and must be in [0, 100].
	Humidity *int
}
// Astro holds the moon and sun rise/set times for a day.
type Astro struct {
	Moonrise time.Time
	Moonset  time.Time
	Sunrise  time.Time
	Sunset   time.Time
}
// Day is the forecast for a single calendar day.
type Day struct {
	// Date is the date of this Day.
	Date time.Time
	// Slots is a slice of conditions for different times of day. They should be
	// ordered by the contained Time field.
	Slots []Cond
	// Astronomy contains planetary data.
	Astronomy Astro
}
// LatLon is a geographic coordinate (latitude and longitude).
type LatLon struct {
	Latitude  float32
	Longitude float32
}
// Data is a complete weather report: the current condition, a multi-day
// forecast and the location it applies to.
type Data struct {
	Current  Cond
	Forecast []Day
	Location string
	// GeoLoc is the geographic coordinate of Location; may be nil.
	GeoLoc *LatLon
}
// UnitSystem selects the measurement units used when converting raw
// metric values for display.
type UnitSystem int

// Supported unit systems. UnitsMetricMs behaves like UnitsMetric except
// that speeds are reported in m/s (see Speed).
const (
	UnitsMetric UnitSystem = iota
	UnitsImperial
	UnitsSi
	UnitsMetricMs
)
// Temp converts the given temperature in degrees Celsius into the
// receiver's unit system and returns the converted value together with
// its unit label. It terminates the process for an unknown unit system.
func (u UnitSystem) Temp(tempC float32) (res float32, unit string) {
	switch u {
	case UnitsMetric, UnitsMetricMs:
		return tempC, "°C"
	case UnitsImperial:
		return tempC*1.8 + 32, "°F"
	case UnitsSi:
		// BUGFIX: 0 °C is defined as 273.15 K; the previous offset of
		// 273.16 (the triple point of water) was off by 0.01 K.
		// NOTE(review): the SI symbol is "K", not "°K"; the label is kept
		// as-is to avoid changing rendered output — confirm and fix
		// together with the frontends.
		return tempC + 273.15, "°K"
	}
	log.Fatalln("Unknown unit system:", u)
	return
}
// Speed converts the given speed in km/h into the receiver's unit system
// and returns the converted value with its unit label. It terminates the
// process for an unknown unit system.
func (u UnitSystem) Speed(spdKmph float32) (res float32, unit string) {
	switch u {
	case UnitsMetric:
		return spdKmph, "km/h"
	case UnitsImperial:
		return spdKmph / 1.609, "mph"
	case UnitsSi, UnitsMetricMs:
		return spdKmph / 3.6, "m/s"
	}
	log.Fatalln("Unknown unit system:", u)
	return
}
// Distance converts the given distance in meters into a human-friendly
// value/unit pair for the receiver's unit system. Metric-like systems
// scale between mm, m and km; the imperial system picks between inches,
// yards and miles. It terminates the process for an unknown unit system.
func (u UnitSystem) Distance(distM float32) (res float32, unit string) {
	switch u {
	case UnitsMetric, UnitsSi, UnitsMetricMs:
		switch {
		case distM < 1:
			return distM * 1000, "mm"
		case distM < 1000:
			return distM, "m"
		default:
			return distM / 1000, "km"
		}
	case UnitsImperial:
		inches := distM / 0.0254
		switch {
		case inches < 3*12: // below 1 yd (1 yd = 3 ft, 1 ft = 12 in)
			return inches, "in"
		case inches < 8*10*22*36: // below 1 mi (1 mi = 8 fur, 1 fur = 10 ch, 1 ch = 22 yd)
			return inches / 36, "yd"
		default:
			return inches / 8 / 10 / 22 / 36, "mi"
		}
	}
	log.Fatalln("Unknown unit system:", u)
	return
}
// Backend fetches weather data for a location from some provider.
type Backend interface {
	// Setup initializes the backend.
	Setup()
	// Fetch returns weather data for the given location, including a
	// forecast covering numdays days.
	Fetch(location string, numdays int) Data
}

// Frontend renders fetched weather data to the user.
type Frontend interface {
	// Setup initializes the frontend.
	Setup()
	// Render displays the weather data using the given unit system.
	Render(weather Data, unitSystem UnitSystem)
}

// Registries of available backends and frontends, keyed by name —
// presumably populated by the implementations themselves; verify how
// registration happens before relying on ordering.
var (
	AllBackends  = make(map[string]Backend)
	AllFrontends = make(map[string]Frontend)
)
|
package main
import (
"fmt"
"./connection"
"./control"
_ "github.com/lib/pq"
)
// main shows a tiny CRUD menu, reads the chosen operation from stdin and
// dispatches it against the database connection.
func main() {
	db := connection.Connection()
	// BUGFIX: close the connection on every exit path — the original
	// closed it inside each case and leaked it when the user entered an
	// unrecognized option.
	defer db.Close()

	fmt.Print("\n\n[1] C\n[2] R\n[3] U\n[4] D\n->")
	option := 0
	fmt.Scanf("%d", &option)

	// Go switch cases do not fall through, so the explicit `break`
	// statements were redundant and have been removed. Unrecognized
	// options intentionally do nothing (preserves original behavior).
	switch option {
	case 1:
		control.Create(db)
	case 2:
		control.Read(db)
	case 3:
		control.Update(db)
	case 4:
		control.Delete(db)
	}
}
|
package main
import (
"fmt"
"time"
)
// main demonstrates multiplexing two producer goroutines over a select
// loop: one message every 100ms on the first channel and one every second
// on the second. The program runs forever.
func main() {
	fast := make(chan string)
	slow := make(chan string)

	go func() {
		for {
			fast <- "Every 100ms"
			time.Sleep(100 * time.Millisecond)
		}
	}()
	go func() {
		for {
			slow <- "Every second"
			time.Sleep(1 * time.Second)
		}
	}()

	// Print whichever message arrives first, forever.
	for {
		select {
		case msg := <-fast:
			fmt.Println(msg)
		case msg := <-slow:
			fmt.Println(msg)
		}
	}
}
|
package gorpc
import (
"bufio"
"compress/flate"
"encoding/gob"
"fmt"
"io"
"net"
"sync"
"sync/atomic"
"time"
)
// Rpc client.
//
// The client must be started with Client.Start() before use.
//
// It is absolutely safe and encouraged using a single client across arbitrary
// number of concurrently running goroutines.
//
// Default client settings are optimized for high load, so don't override
// them without valid reason.
type Client struct {
	// Server TCP address to connect to.
	Addr string

	// The number of concurrent connections the client should establish
	// to the sever.
	// By default only one connection is established.
	Conns int

	// The maximum number of pending requests in the queue.
	// Default is 32768.
	PendingRequests int

	// Maximum request time.
	// Default value is 30s.
	RequestTimeout time.Duration

	// Disable data compression.
	// By default data compression is enabled.
	DisableCompression bool

	// Size of send buffer per each TCP connection in bytes.
	// Default value is 1M.
	SendBufferSize int

	// Size of recv buffer per each TCP connection in bytes.
	// Default value is 1M.
	RecvBufferSize int

	// Connection statistics.
	//
	// The stats doesn't reset automatically. Feel free resetting it
	// any time you wish.
	Stats ConnStats

	// requestsChan queues outgoing requests for the connection handlers.
	requestsChan chan *clientMessage

	// clientStopChan is closed by Stop() to shut down all handlers.
	// It is non-nil only while the client is running.
	clientStopChan chan struct{}

	// stopWg waits for all connection handler goroutines to exit.
	stopWg sync.WaitGroup
}
// Starts rpc client. Establishes connection to the server on Client.Addr.
//
// All the response types the server may return must be registered
// via gorpc.RegisterType() before starting the client.
// There is no need in registering base Go types such as int, string, bool,
// float64, etc. or arrays, slices and maps containing base Go types.
func (c *Client) Start() {
	if c.clientStopChan != nil {
		panic("gorpc.Client: the given client is already started. Call Client.Stop() before calling Client.Start() again!")
	}

	// Apply defaults for any tunables the caller left unset.
	if c.Conns <= 0 {
		c.Conns = 1
	}
	if c.PendingRequests <= 0 {
		c.PendingRequests = 32768
	}
	if c.RequestTimeout <= 0 {
		c.RequestTimeout = 30 * time.Second
	}
	if c.SendBufferSize <= 0 {
		c.SendBufferSize = 1024 * 1024
	}
	if c.RecvBufferSize <= 0 {
		c.RecvBufferSize = 1024 * 1024
	}

	c.requestsChan = make(chan *clientMessage, c.PendingRequests)
	c.clientStopChan = make(chan struct{})

	// One handler goroutine per connection; each is tracked by stopWg so
	// Stop() can wait for all of them to finish.
	for n := 0; n < c.Conns; n++ {
		c.stopWg.Add(1)
		go clientHandler(c)
	}
}
// Stops rpc client. Stopped client can be started again.
//
// Panics if the client has not been started: previously this surfaced as
// an obscure "close of nil channel" runtime panic, now it fails with an
// explicit message mirroring the guard in Start().
func (c *Client) Stop() {
	if c.clientStopChan == nil {
		panic("gorpc.Client: the client must be started with Client.Start() before calling Client.Stop()")
	}
	close(c.clientStopChan)
	c.stopWg.Wait()
	c.clientStopChan = nil
}
// Sends the given request to the server and obtains response from the server.
// Returns non-nil error if the response cannot be obtained during
// Client.RequestTimeout or server connection problems occur.
//
// Request and response types may be arbitrary. All the response types
// the server may return must be registered via gorpc.RegisterType() before
// starting the client.
// There is no need in registering base Go types such as int, string, bool,
// float64, etc. or arrays, slices and maps containing base Go types.
//
// Don't forget starting the client with Client.Start() before calling Client.Call().
//
// Call is equivalent to CallTimeout with the client's default RequestTimeout.
func (c *Client) Call(request interface{}) (response interface{}, err error) {
	return c.CallTimeout(request, c.RequestTimeout)
}
// Sends the given request to the server and obtains response from the server.
// Returns non-nil error if the response cannot be obtained during
// the given timeout or server connection problems occur.
//
// Request and response types may be arbitrary. All the response types
// the server may return must be registered via gorpc.RegisterType() before
// starting the client.
// There is no need in registering base Go types such as int, string, bool,
// float64, etc. or arrays, slices and maps containing base Go types.
//
// Don't forget starting the client with Client.Start() before calling Client.Call().
func (c *Client) CallTimeout(request interface{}, timeout time.Duration) (response interface{}, err error) {
	m := clientMessage{
		Request: request,
		Done:    make(chan struct{}), // closed once Response or Error is populated
	}
	tc := time.NewTimer(timeout)
	// Fast path: try a non-blocking enqueue first; only block (bounded by
	// the timer) when the requests queue is currently full.
	select {
	case c.requestsChan <- &m:
	default:
		select {
		case c.requestsChan <- &m:
		case <-tc.C:
			err := fmt.Errorf("gorpc.Client: [%s]. Requests' queue with size=%d is overflown", c.Addr, cap(c.requestsChan))
			logError("%s", err)
			return nil, err
		}
	}
	// Wait for the response or the deadline, whichever comes first.
	select {
	case <-m.Done:
		tc.Stop()
		return m.Response, m.Error
	case <-tc.C:
		// NOTE(review): on timeout the message may remain registered in a
		// connection's pendingRequests map until that connection is torn
		// down — confirm this bounded retention is acceptable.
		err := fmt.Errorf("gorpc.Client: [%s]. Cannot obtain response during timeout=%s", c.Addr, timeout)
		logError("%s", err)
		return nil, err
	}
}
// dialer is the shared TCP dialer for client connections: 10s connect
// timeout and a 30s TCP keep-alive period.
var dialer = &net.Dialer{
	Timeout:   10 * time.Second,
	KeepAlive: 30 * time.Second,
}
// clientHandler is the per-connection supervisor goroutine: it repeatedly
// dials c.Addr and serves the obtained connection until the client is
// stopped. Dial failures are counted, logged and retried after a pause.
func clientHandler(c *Client) {
	defer c.stopWg.Done()

	// Result of one asynchronous dial attempt. BUGFIX: the dial goroutine
	// previously wrote the shared `conn`/`err` variables while the select
	// below could simultaneously return on clientStopChan — a data race.
	// Passing the values over a buffered channel removes the race.
	type dialResult struct {
		conn net.Conn
		err  error
	}

	for {
		dialChan := make(chan dialResult, 1)
		go func() {
			conn, err := dialer.Dial("tcp", c.Addr)
			if err != nil {
				logError("gorpc.Client: [%s]. Cannot establish rpc connection: [%s]", c.Addr, err)
				time.Sleep(time.Second) // back off before the next attempt
			}
			dialChan <- dialResult{conn, err}
		}()

		var r dialResult
		select {
		case <-c.clientStopChan:
			// Don't leak a connection the dialer may still deliver.
			go func() {
				if r := <-dialChan; r.conn != nil {
					r.conn.Close()
				}
			}()
			return
		case r = <-dialChan:
			atomic.AddUint64(&c.Stats.DialCalls, 1)
		}
		if r.err != nil {
			atomic.AddUint64(&c.Stats.DialErrors, 1)
			continue
		}
		clientHandleConnection(c, r.conn)
	}
}
// clientHandleConnection serves one established connection: it sends a
// one-byte handshake advertising whether compression is enabled, then runs
// writer and reader goroutines until either fails or the client is
// stopped. On exit, every still-pending request is failed with the
// terminating error.
func clientHandleConnection(c *Client, conn net.Conn) {
	// Handshake byte: 1 = compression enabled, 0 = disabled.
	var buf [1]byte
	if !c.DisableCompression {
		buf[0] = 1
	}
	_, err := conn.Write(buf[:])
	if err != nil {
		logError("gorpc.Client: [%s]. Error when writing handshake to server: [%s]", c.Addr, err)
		conn.Close()
		return
	}

	stopChan := make(chan struct{})

	// Registry of in-flight requests, shared by the writer (inserts) and
	// reader (removes) goroutines under pendingRequestsLock.
	pendingRequests := make(map[uint64]*clientMessage)
	var pendingRequestsLock sync.Mutex

	writerDone := make(chan error, 1)
	go clientWriter(c, conn, pendingRequests, &pendingRequestsLock, stopChan, writerDone)

	readerDone := make(chan error, 1)
	go clientReader(c, conn, pendingRequests, &pendingRequestsLock, readerDone)

	// Whichever side finishes first determines the error. Closing stopChan
	// and the connection unblocks the other goroutine, which is then
	// awaited before touching shared state below.
	select {
	case err = <-writerDone:
		close(stopChan)
		conn.Close()
		<-readerDone
	case err = <-readerDone:
		close(stopChan)
		conn.Close()
		<-writerDone
	case <-c.clientStopChan:
		close(stopChan)
		conn.Close()
		<-readerDone
		<-writerDone
	}
	if err != nil {
		logError("%s", err)
	}

	// Both goroutines have exited, so pendingRequests is no longer shared:
	// fail every request that never received a response.
	for _, m := range pendingRequests {
		m.Error = err
		close(m.Done)
	}
}
// clientMessage tracks a single in-flight request/response pair.
type clientMessage struct {
	Request  interface{}   // request payload supplied by the caller
	Response interface{}   // response payload, set before Done is closed
	Done     chan struct{} // closed once Response or Error is populated
	Error    error         // transport/timeout error, set before Done is closed
}
// clientWriter drains c.requestsChan, registers each message in
// pendingRequests and gob-encodes it onto the wire. Buffers are flushed
// only when the queue is momentarily empty, which batches writes under
// load. The first error encountered is delivered on done.
func clientWriter(c *Client, w io.Writer, pendingRequests map[uint64]*clientMessage, pendingRequestsLock *sync.Mutex, stopChan <-chan struct{}, done chan<- error) {
	var err error
	defer func() { done <- err }()

	w = newWriterCounter(w, &c.Stats)
	bw := bufio.NewWriterSize(w, c.SendBufferSize)

	ww := bw
	var zw *flate.Writer
	if !c.DisableCompression {
		zw, _ = flate.NewWriter(bw, flate.BestSpeed)
		defer zw.Close()
		ww = bufio.NewWriterSize(zw, c.SendBufferSize)
	}
	e := gob.NewEncoder(ww)

	var msgID uint64
	for {
		var rpcM *clientMessage

		select {
		case <-stopChan:
			return
		default:
		}

		select {
		case rpcM = <-c.requestsChan:
		default:
			// Queue is momentarily empty: flush buffered data before
			// blocking for the next request.
			//
			// BUGFIX: these checks previously used `if err := ...`, which
			// shadowed the outer err, so the deferred `done <- err` always
			// reported nil. Assign to the outer err instead.
			if !c.DisableCompression {
				if flushErr := ww.Flush(); flushErr != nil {
					err = fmt.Errorf("gorpc.Client: [%s]. Cannot flush data to compressed stream: [%s]", c.Addr, flushErr)
					return
				}
				if flushErr := zw.Flush(); flushErr != nil {
					err = fmt.Errorf("gorpc.Client: [%s]. Cannot flush compressed data to wire: [%s]", c.Addr, flushErr)
					return
				}
			}
			if flushErr := bw.Flush(); flushErr != nil {
				err = fmt.Errorf("gorpc.Client: [%s]. Cannot flush requests to wire: [%s]", c.Addr, flushErr)
				return
			}
			select {
			case <-stopChan:
				return
			case rpcM = <-c.requestsChan:
			}
		}

		msgID++
		pendingRequestsLock.Lock()
		n := len(pendingRequests)
		pendingRequests[msgID] = rpcM
		pendingRequestsLock.Unlock()

		// Guard against a server that stops answering: don't let the
		// pending map grow without bound.
		if n > 10*c.PendingRequests {
			err = fmt.Errorf("gorpc.Client: [%s]. The server didn't return %d responses yet. Closing server connection in order to prevent client resource leaks", c.Addr, n)
			return
		}

		m := wireMessage{
			ID:   msgID,
			Data: rpcM.Request,
		}
		// BUGFIX: same shadowing issue as above — assign to the outer err.
		if encErr := e.Encode(&m); encErr != nil {
			err = fmt.Errorf("gorpc.Client: [%s]. Cannot send request to wire: [%s]", c.Addr, encErr)
			return
		}
	}
}
// clientReader decodes responses from the wire, matches each one to its
// pending request by message ID and signals the waiting caller. The first
// error encountered is delivered on done.
func clientReader(c *Client, r io.Reader, pendingRequests map[uint64]*clientMessage, pendingRequestsLock *sync.Mutex, done chan<- error) {
	var err error
	defer func() { done <- err }()

	r = newReaderCounter(r, &c.Stats)
	br := bufio.NewReaderSize(r, c.RecvBufferSize)

	rr := br
	if !c.DisableCompression {
		zr := flate.NewReader(br)
		defer zr.Close()
		rr = bufio.NewReaderSize(zr, c.RecvBufferSize)
	}
	d := gob.NewDecoder(rr)

	for {
		var m wireMessage
		// BUGFIX: this check previously used `if err := d.Decode(...)`,
		// shadowing the outer err, so the deferred `done <- err` always
		// reported nil. Assign to the outer err instead.
		if decErr := d.Decode(&m); decErr != nil {
			err = fmt.Errorf("gorpc.Client: [%s]. Cannot decode response: [%s]", c.Addr, decErr)
			return
		}

		pendingRequestsLock.Lock()
		rpcM, ok := pendingRequests[m.ID]
		delete(pendingRequests, m.ID)
		pendingRequestsLock.Unlock()

		if !ok {
			err = fmt.Errorf("gorpc.Client: [%s]. Unexpected msgID=[%d] obtained from server", c.Addr, m.ID)
			return
		}

		rpcM.Response = m.Data
		close(rpcM.Done)
	}
}
|
package equinix
import (
"context"
"fmt"
"log"
"time"
"github.com/equinix/ecx-go/v2"
"github.com/equinix/rest-go"
"github.com/hashicorp/go-cty/cty"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
// ecxL2ConnectionSchemaNames maps L2 connection attribute identifiers (as
// used in the provider code) to their Terraform schema keys.
var ecxL2ConnectionSchemaNames = map[string]string{
	"UUID":                "uuid",
	"Name":                "name",
	"ProfileUUID":         "profile_uuid",
	"Speed":               "speed",
	"SpeedUnit":           "speed_unit",
	"Status":              "status",
	"ProviderStatus":      "provider_status",
	"Notifications":       "notifications",
	"PurchaseOrderNumber": "purchase_order_number",
	"PortUUID":            "port_uuid",
	"DeviceUUID":          "device_uuid",
	"DeviceInterfaceID":   "device_interface_id",
	"VlanSTag":            "vlan_stag",
	"VlanCTag":            "vlan_ctag",
	"NamedTag":            "named_tag",
	"AdditionalInfo":      "additional_info",
	"ZSidePortUUID":       "zside_port_uuid",
	"ZSideVlanSTag":       "zside_vlan_stag",
	"ZSideVlanCTag":       "zside_vlan_ctag",
	"SellerRegion":        "seller_region",
	"SellerMetroCode":     "seller_metro_code",
	"AuthorizationKey":    "authorization_key",
	"RedundantUUID":       "redundant_uuid",
	"RedundancyType":      "redundancy_type",
	"SecondaryConnection": "secondary_connection",
}
// ecxL2ConnectionDescriptions provides the user-facing documentation
// string for each L2 connection schema attribute.
var ecxL2ConnectionDescriptions = map[string]string{
	"UUID":                "Unique identifier of the connection",
	"Name":                "Connection name. An alpha-numeric 24 characters string which can include only hyphens and underscores",
	"ProfileUUID":         "Unique identifier of the service provider's service profile",
	"Speed":               "Speed/Bandwidth to be allocated to the connection",
	"SpeedUnit":           "Unit of the speed/bandwidth to be allocated to the connection",
	"Status":              "Connection provisioning status on Equinix Fabric side",
	"ProviderStatus":      "Connection provisioning status on service provider's side",
	"Notifications":       "A list of email addresses used for sending connection update notifications",
	"PurchaseOrderNumber": "Connection's purchase order number to reflect on the invoice",
	"PortUUID":            "Unique identifier of the buyer's port from which the connection would originate",
	"DeviceUUID":          "Unique identifier of the Network Edge virtual device from which the connection would originate",
	"DeviceInterfaceID":   "Identifier of network interface on a given device, used for a connection. If not specified then first available interface will be selected",
	"VlanSTag":            "S-Tag/Outer-Tag of the connection, a numeric character ranging from 2 - 4094",
	"VlanCTag":            "C-Tag/Inner-Tag of the connection, a numeric character ranging from 2 - 4094",
	"NamedTag":            "The type of peering to set up in case when connecting to Azure Express Route. One of Public, Private, Microsoft, Manual",
	"AdditionalInfo":      "One or more additional information key-value objects",
	"ZSidePortUUID":       "Unique identifier of the port on the remote side (z-side)",
	"ZSideVlanSTag":       "S-Tag/Outer-Tag of the connection on the remote side (z-side)",
	"ZSideVlanCTag":       "C-Tag/Inner-Tag of the connection on the remote side (z-side)",
	"SellerRegion":        "The region in which the seller port resides",
	"SellerMetroCode":     "The metro code that denotes the connection’s remote side (z-side)",
	"AuthorizationKey":    "Text field used to authorize connection on the provider side. Value depends on a provider service profile used for connection",
	"RedundantUUID":       "Unique identifier of the redundant connection, applicable for HA connections",
	"RedundancyType":      "Connection redundancy type, applicable for HA connections. Either primary or secondary",
	"SecondaryConnection": "Definition of secondary connection for redundant, HA connectivity",
}
// ecxL2ConnectionAdditionalInfoSchemaNames maps additional-info attribute
// identifiers to their Terraform schema keys.
var ecxL2ConnectionAdditionalInfoSchemaNames = map[string]string{
	"Name":  "name",
	"Value": "value",
}

// ecxL2ConnectionAdditionalInfoDescriptions provides the user-facing
// documentation string for each additional-info attribute.
var ecxL2ConnectionAdditionalInfoDescriptions = map[string]string{
	"Name":  "Additional information key",
	"Value": "Additional information value",
}
// resourceECXL2Connection defines the Terraform resource for Equinix
// Fabric (ECX) layer 2 connections, wiring the CRUD callbacks, the
// attribute schema and create/delete timeouts.
// NOTE(review): no explicit Read/Update timeout is configured — confirm
// the SDK defaults are acceptable.
func resourceECXL2Connection() *schema.Resource {
	return &schema.Resource{
		CreateContext: resourceECXL2ConnectionCreate,
		ReadContext:   resourceECXL2ConnectionRead,
		UpdateContext: resourceECXL2ConnectionUpdate,
		DeleteContext: resourceECXL2ConnectionDelete,
		Schema:        createECXL2ConnectionResourceSchema(),
		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(5 * time.Minute),
			Delete: schema.DefaultTimeout(5 * time.Minute),
		},
		Description: "Resource allows creation and management of Equinix Fabric layer 2 connections",
	}
}
// createECXL2ConnectionResourceSchema builds the Terraform attribute
// schema for an ECX L2 connection. Top-level attributes describe the
// primary connection; the "secondary_connection" list (max one element)
// repeats a subset of the same attributes for the redundant connection,
// with cross-attribute constraints expressed via "<secondary>.0.<attr>"
// paths.
func createECXL2ConnectionResourceSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		ecxL2ConnectionSchemaNames["UUID"]: {
			Type:        schema.TypeString,
			Computed:    true,
			Description: ecxL2ConnectionDescriptions["UUID"],
		},
		ecxL2ConnectionSchemaNames["Name"]: {
			Type:         schema.TypeString,
			Required:     true,
			ValidateFunc: validation.StringLenBetween(1, 24),
			Description:  ecxL2ConnectionDescriptions["Name"],
		},
		ecxL2ConnectionSchemaNames["ProfileUUID"]: {
			Type:         schema.TypeString,
			Optional:     true,
			Computed:     true,
			ForceNew:     true,
			AtLeastOneOf: []string{ecxL2ConnectionSchemaNames["ProfileUUID"], ecxL2ConnectionSchemaNames["ZSidePortUUID"]},
			ValidateFunc: validation.StringIsNotEmpty,
			Description:  ecxL2ConnectionDescriptions["ProfileUUID"],
		},
		ecxL2ConnectionSchemaNames["Speed"]: {
			Type:         schema.TypeInt,
			Required:     true,
			ValidateFunc: validation.IntAtLeast(1),
			Description:  ecxL2ConnectionDescriptions["Speed"],
		},
		ecxL2ConnectionSchemaNames["SpeedUnit"]: {
			Type:         schema.TypeString,
			Required:     true,
			ValidateFunc: validation.StringInSlice([]string{"MB", "GB"}, false),
			Description:  ecxL2ConnectionDescriptions["SpeedUnit"],
		},
		ecxL2ConnectionSchemaNames["Status"]: {
			Type:        schema.TypeString,
			Computed:    true,
			Description: ecxL2ConnectionDescriptions["Status"],
		},
		ecxL2ConnectionSchemaNames["ProviderStatus"]: {
			Type:        schema.TypeString,
			Computed:    true,
			Description: ecxL2ConnectionDescriptions["ProviderStatus"],
		},
		ecxL2ConnectionSchemaNames["Notifications"]: {
			Type:     schema.TypeSet,
			Required: true,
			ForceNew: true,
			MinItems: 1,
			Elem: &schema.Schema{
				Type:         schema.TypeString,
				ValidateFunc: stringIsEmailAddress(),
			},
			Description: ecxL2ConnectionDescriptions["Notifications"],
		},
		ecxL2ConnectionSchemaNames["PurchaseOrderNumber"]: {
			Type:         schema.TypeString,
			Optional:     true,
			ForceNew:     true,
			ValidateFunc: validation.StringLenBetween(1, 30),
			Description:  ecxL2ConnectionDescriptions["PurchaseOrderNumber"],
		},
		// Origin of the connection: exactly one of port or device.
		ecxL2ConnectionSchemaNames["PortUUID"]: {
			Type:          schema.TypeString,
			Optional:      true,
			ForceNew:      true,
			ValidateFunc:  validation.StringIsNotEmpty,
			AtLeastOneOf:  []string{ecxL2ConnectionSchemaNames["PortUUID"], ecxL2ConnectionSchemaNames["DeviceUUID"]},
			ConflictsWith: []string{ecxL2ConnectionSchemaNames["DeviceUUID"]},
			Description:   ecxL2ConnectionDescriptions["PortUUID"],
		},
		ecxL2ConnectionSchemaNames["DeviceUUID"]: {
			Type:          schema.TypeString,
			Optional:      true,
			ForceNew:      true,
			ValidateFunc:  validation.StringIsNotEmpty,
			ConflictsWith: []string{ecxL2ConnectionSchemaNames["PortUUID"]},
			Description:   ecxL2ConnectionDescriptions["DeviceUUID"],
		},
		ecxL2ConnectionSchemaNames["DeviceInterfaceID"]: {
			Type:          schema.TypeInt,
			Optional:      true,
			ForceNew:      true,
			ConflictsWith: []string{ecxL2ConnectionSchemaNames["PortUUID"]},
			Description:   ecxL2ConnectionDescriptions["DeviceInterfaceID"],
		},
		ecxL2ConnectionSchemaNames["VlanSTag"]: {
			Type:          schema.TypeInt,
			Optional:      true,
			Computed:      true,
			ForceNew:      true,
			ValidateFunc:  validation.IntBetween(2, 4092),
			RequiredWith:  []string{ecxL2ConnectionSchemaNames["PortUUID"]},
			ConflictsWith: []string{ecxL2ConnectionSchemaNames["DeviceUUID"]},
			Description:   ecxL2ConnectionDescriptions["VlanSTag"],
		},
		ecxL2ConnectionSchemaNames["VlanCTag"]: {
			Type:          schema.TypeInt,
			Optional:      true,
			ForceNew:      true,
			ValidateFunc:  validation.IntBetween(2, 4092),
			ConflictsWith: []string{ecxL2ConnectionSchemaNames["DeviceUUID"]},
			Description:   ecxL2ConnectionDescriptions["VlanCTag"],
		},
		ecxL2ConnectionSchemaNames["NamedTag"]: {
			Type:         schema.TypeString,
			Optional:     true,
			ForceNew:     true,
			ValidateFunc: validation.StringInSlice([]string{"Private", "Public", "Microsoft", "Manual"}, false),
			Description:  ecxL2ConnectionDescriptions["NamedTag"],
		},
		ecxL2ConnectionSchemaNames["AdditionalInfo"]: {
			Type:        schema.TypeSet,
			Optional:    true,
			ForceNew:    true,
			MinItems:    1,
			Description: ecxL2ConnectionDescriptions["AdditionalInfo"],
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					ecxL2ConnectionAdditionalInfoSchemaNames["Name"]: {
						Type:         schema.TypeString,
						Required:     true,
						ValidateFunc: validation.StringIsNotEmpty,
						Description:  ecxL2ConnectionAdditionalInfoDescriptions["Name"],
					},
					ecxL2ConnectionAdditionalInfoSchemaNames["Value"]: {
						Type:         schema.TypeString,
						Required:     true,
						ValidateFunc: validation.StringIsNotEmpty,
						Description:  ecxL2ConnectionAdditionalInfoDescriptions["Value"],
					},
				},
			},
		},
		ecxL2ConnectionSchemaNames["ZSidePortUUID"]: {
			Type:         schema.TypeString,
			Optional:     true,
			ForceNew:     true,
			Computed:     true,
			ValidateFunc: validation.StringIsNotEmpty,
			Description:  ecxL2ConnectionDescriptions["ZSidePortUUID"],
		},
		ecxL2ConnectionSchemaNames["ZSideVlanSTag"]: {
			Type:         schema.TypeInt,
			Optional:     true,
			ForceNew:     true,
			Computed:     true,
			ValidateFunc: validation.IntBetween(2, 4092),
			Description:  ecxL2ConnectionDescriptions["ZSideVlanSTag"],
		},
		ecxL2ConnectionSchemaNames["ZSideVlanCTag"]: {
			Type:         schema.TypeInt,
			Optional:     true,
			ForceNew:     true,
			Computed:     true,
			ValidateFunc: validation.IntBetween(2, 4092),
			Description:  ecxL2ConnectionDescriptions["ZSideVlanCTag"],
		},
		ecxL2ConnectionSchemaNames["SellerRegion"]: {
			Type:         schema.TypeString,
			Optional:     true,
			ForceNew:     true,
			ValidateFunc: validation.StringIsNotEmpty,
			Description:  ecxL2ConnectionDescriptions["SellerRegion"],
		},
		ecxL2ConnectionSchemaNames["SellerMetroCode"]: {
			Type:         schema.TypeString,
			Optional:     true,
			Computed:     true,
			ForceNew:     true,
			ValidateFunc: stringIsMetroCode(),
			Description:  ecxL2ConnectionDescriptions["SellerMetroCode"],
		},
		ecxL2ConnectionSchemaNames["AuthorizationKey"]: {
			Type:         schema.TypeString,
			Optional:     true,
			Computed:     true,
			ForceNew:     true,
			ValidateFunc: validation.StringIsNotEmpty,
			Description:  ecxL2ConnectionDescriptions["AuthorizationKey"],
		},
		ecxL2ConnectionSchemaNames["RedundantUUID"]: {
			Type:        schema.TypeString,
			Computed:    true,
			Description: ecxL2ConnectionDescriptions["RedundantUUID"],
		},
		ecxL2ConnectionSchemaNames["RedundancyType"]: {
			Type:        schema.TypeString,
			Computed:    true,
			Description: ecxL2ConnectionDescriptions["RedundancyType"],
		},
		// Secondary (redundant) connection: a single-element list that
		// mirrors a subset of the top-level attributes. Cross-attribute
		// constraints use the "<list>.0.<attr>" addressing form.
		ecxL2ConnectionSchemaNames["SecondaryConnection"]: {
			Type:        schema.TypeList,
			Optional:    true,
			ForceNew:    true,
			MaxItems:    1,
			Description: ecxL2ConnectionDescriptions["SecondaryConnection"],
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					ecxL2ConnectionSchemaNames["UUID"]: {
						Type:        schema.TypeString,
						Computed:    true,
						Description: ecxL2ConnectionDescriptions["UUID"],
					},
					ecxL2ConnectionSchemaNames["Name"]: {
						Type:         schema.TypeString,
						Required:     true,
						ValidateFunc: validation.StringLenBetween(1, 24),
						Description:  ecxL2ConnectionDescriptions["Name"],
					},
					ecxL2ConnectionSchemaNames["ProfileUUID"]: {
						Type:         schema.TypeString,
						Optional:     true,
						Computed:     true,
						ForceNew:     true,
						ValidateFunc: validation.StringIsNotEmpty,
						Description:  ecxL2ConnectionDescriptions["ProfileUUID"],
					},
					ecxL2ConnectionSchemaNames["Speed"]: {
						Type:         schema.TypeInt,
						Optional:     true,
						Computed:     true,
						ForceNew:     true,
						ValidateFunc: validation.IntAtLeast(1),
						Description:  ecxL2ConnectionDescriptions["Speed"],
					},
					ecxL2ConnectionSchemaNames["SpeedUnit"]: {
						Type:         schema.TypeString,
						Optional:     true,
						Computed:     true,
						ForceNew:     true,
						ValidateFunc: validation.StringInSlice([]string{"MB", "GB"}, false),
						RequiredWith: []string{ecxL2ConnectionSchemaNames["SecondaryConnection"] + ".0." + ecxL2ConnectionSchemaNames["Speed"]},
						Description:  ecxL2ConnectionDescriptions["SpeedUnit"],
					},
					ecxL2ConnectionSchemaNames["Status"]: {
						Type:        schema.TypeString,
						Computed:    true,
						Description: ecxL2ConnectionDescriptions["Status"],
					},
					ecxL2ConnectionSchemaNames["ProviderStatus"]: {
						Type:        schema.TypeString,
						Computed:    true,
						Description: ecxL2ConnectionDescriptions["ProviderStatus"],
					},
					ecxL2ConnectionSchemaNames["PortUUID"]: {
						Type:         schema.TypeString,
						ForceNew:     true,
						Optional:     true,
						ValidateFunc: validation.StringIsNotEmpty,
						AtLeastOneOf: []string{ecxL2ConnectionSchemaNames["SecondaryConnection"] + ".0." + ecxL2ConnectionSchemaNames["PortUUID"],
							ecxL2ConnectionSchemaNames["SecondaryConnection"] + ".0." + ecxL2ConnectionSchemaNames["DeviceUUID"]},
						ConflictsWith: []string{ecxL2ConnectionSchemaNames["SecondaryConnection"] + ".0." + ecxL2ConnectionSchemaNames["DeviceUUID"]},
						Description:   ecxL2ConnectionDescriptions["PortUUID"],
					},
					ecxL2ConnectionSchemaNames["DeviceUUID"]: {
						Type:          schema.TypeString,
						ForceNew:      true,
						Optional:      true,
						ValidateFunc:  validation.StringIsNotEmpty,
						ConflictsWith: []string{ecxL2ConnectionSchemaNames["SecondaryConnection"] + ".0." + ecxL2ConnectionSchemaNames["PortUUID"]},
						Description:   ecxL2ConnectionDescriptions["DeviceUUID"],
					},
					ecxL2ConnectionSchemaNames["DeviceInterfaceID"]: {
						Type:          schema.TypeInt,
						Optional:      true,
						Computed:      true,
						ForceNew:      true,
						ConflictsWith: []string{ecxL2ConnectionSchemaNames["SecondaryConnection"] + ".0." + ecxL2ConnectionSchemaNames["PortUUID"]},
						Description:   ecxL2ConnectionDescriptions["DeviceInterfaceID"],
					},
					ecxL2ConnectionSchemaNames["VlanSTag"]: {
						Type:          schema.TypeInt,
						ForceNew:      true,
						Optional:      true,
						Computed:      true,
						ValidateFunc:  validation.IntBetween(2, 4092),
						RequiredWith:  []string{ecxL2ConnectionSchemaNames["SecondaryConnection"] + ".0." + ecxL2ConnectionSchemaNames["PortUUID"]},
						ConflictsWith: []string{ecxL2ConnectionSchemaNames["SecondaryConnection"] + ".0." + ecxL2ConnectionSchemaNames["DeviceUUID"]},
						Description:   ecxL2ConnectionDescriptions["VlanSTag"],
					},
					ecxL2ConnectionSchemaNames["VlanCTag"]: {
						Type:          schema.TypeInt,
						ForceNew:      true,
						Optional:      true,
						ValidateFunc:  validation.IntBetween(2, 4092),
						ConflictsWith: []string{ecxL2ConnectionSchemaNames["SecondaryConnection"] + ".0." + ecxL2ConnectionSchemaNames["DeviceUUID"]},
						Description:   ecxL2ConnectionDescriptions["VlanCTag"],
					},
					ecxL2ConnectionSchemaNames["ZSidePortUUID"]: {
						Type:        schema.TypeString,
						Computed:    true,
						Description: ecxL2ConnectionDescriptions["ZSidePortUUID"],
					},
					ecxL2ConnectionSchemaNames["ZSideVlanSTag"]: {
						Type:        schema.TypeInt,
						Computed:    true,
						Description: ecxL2ConnectionDescriptions["ZSideVlanSTag"],
					},
					ecxL2ConnectionSchemaNames["ZSideVlanCTag"]: {
						Type:        schema.TypeInt,
						Computed:    true,
						Description: ecxL2ConnectionDescriptions["ZSideVlanCTag"],
					},
					ecxL2ConnectionSchemaNames["SellerRegion"]: {
						Type:         schema.TypeString,
						Optional:     true,
						Computed:     true,
						ForceNew:     true,
						ValidateFunc: validation.StringIsNotEmpty,
						Description:  ecxL2ConnectionDescriptions["SellerRegion"],
					},
					ecxL2ConnectionSchemaNames["SellerMetroCode"]: {
						Type:         schema.TypeString,
						Optional:     true,
						Computed:     true,
						ForceNew:     true,
						ValidateFunc: stringIsMetroCode(),
						Description:  ecxL2ConnectionDescriptions["SellerMetroCode"],
					},
					ecxL2ConnectionSchemaNames["AuthorizationKey"]: {
						Type:         schema.TypeString,
						Optional:     true,
						Computed:     true,
						ForceNew:     true,
						ValidateFunc: validation.StringIsNotEmpty,
						Description:  ecxL2ConnectionDescriptions["AuthorizationKey"],
					},
					ecxL2ConnectionSchemaNames["RedundantUUID"]: {
						Type:        schema.TypeString,
						Computed:    true,
						Description: ecxL2ConnectionDescriptions["RedundantUUID"],
					},
					ecxL2ConnectionSchemaNames["RedundancyType"]: {
						Type:        schema.TypeString,
						Computed:    true,
						Description: ecxL2ConnectionDescriptions["RedundancyType"],
					},
				},
			},
		},
	}
}
// resourceECXL2ConnectionCreate creates the primary (and, when defined,
// secondary) L2 connection, then blocks until the connection leaves the
// provisioning states before delegating to Read to populate state.
func resourceECXL2ConnectionCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	conf := m.(*Config)
	var diags diag.Diagnostics
	primary, secondary := createECXL2Connections(d)
	var primaryID *string
	var err error
	if secondary != nil {
		primaryID, _, err = conf.ecx.CreateL2RedundantConnection(*primary, *secondary)
	} else {
		primaryID, err = conf.ecx.CreateL2Connection(*primary)
	}
	if err != nil {
		return diag.FromErr(err)
	}
	// The resource ID is the primary connection's UUID.
	d.SetId(ecx.StringValue(primaryID))
	createStateConf := &resource.StateChangeConf{
		// States that mean provisioning is still in progress.
		Pending: []string{
			ecx.ConnectionStatusProvisioning,
			ecx.ConnectionStatusPendingAutoApproval,
		},
		// States in which creation is considered finished (some still
		// require action on the provider side).
		Target: []string{
			ecx.ConnectionStatusProvisioned,
			ecx.ConnectionStatusPendingApproval,
			ecx.ConnectionStatusPendingBGPPeering,
			ecx.ConnectionStatusPendingProviderVlan,
		},
		Timeout:    d.Timeout(schema.TimeoutCreate),
		Delay:      2 * time.Second,
		MinTimeout: 2 * time.Second,
		Refresh: func() (interface{}, string, error) {
			resp, err := conf.ecx.GetL2Connection(d.Id())
			if err != nil {
				return nil, "", err
			}
			return resp, ecx.StringValue(resp.Status), nil
		},
	}
	if _, err := createStateConf.WaitForStateContext(ctx); err != nil {
		return diag.Errorf("error waiting for connection (%s) to be created: %s", d.Id(), err)
	}
	diags = append(diags, resourceECXL2ConnectionRead(ctx, d, m)...)
	return diags
}
// resourceECXL2ConnectionRead refreshes Terraform state from the Fabric
// API. A primary connection found in any deleted/deprovisioned state
// clears the resource ID so Terraform plans a re-create. The secondary
// connection is fetched whenever the primary reports a redundant UUID.
func resourceECXL2ConnectionRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	conf := m.(*Config)
	var diags diag.Diagnostics
	var err error
	var primary *ecx.L2Connection
	var secondary *ecx.L2Connection
	primary, err = conf.ecx.GetL2Connection(d.Id())
	if err != nil {
		return diag.Errorf("cannot fetch primary connection due to %v", err)
	}
	// Treat any deletion-related state as "resource gone".
	if isStringInSlice(ecx.StringValue(primary.Status), []string{
		ecx.ConnectionStatusPendingDelete,
		ecx.ConnectionStatusDeprovisioning,
		ecx.ConnectionStatusDeprovisioned,
		ecx.ConnectionStatusDeleted,
	}) {
		d.SetId("")
		return nil
	}
	if ecx.StringValue(primary.RedundantUUID) != "" {
		secondary, err = conf.ecx.GetL2Connection(ecx.StringValue(primary.RedundantUUID))
		if err != nil {
			return diag.Errorf("cannot fetch secondary connection due to %v", err)
		}
	}
	if err := updateECXL2ConnectionResource(primary, secondary, d); err != nil {
		return diag.FromErr(err)
	}
	return diags
}
// resourceECXL2ConnectionUpdate pushes the supported attribute changes
// (name, speed, speed unit) to the primary connection and, when a
// redundant UUID is present in state, mirrors the corresponding
// secondary-connection changes. It finishes by re-reading remote state.
func resourceECXL2ConnectionUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	conf := m.(*Config)
	var diags diag.Diagnostics
	supportedChanges := []string{ecxL2ConnectionSchemaNames["Name"],
		ecxL2ConnectionSchemaNames["Speed"],
		ecxL2ConnectionSchemaNames["SpeedUnit"]}
	primaryChanges := getResourceDataChangedKeys(supportedChanges, d)
	primaryUpdateReq := conf.ecx.NewL2ConnectionUpdateRequest(d.Id())
	if err := fillFabricL2ConnectionUpdateRequest(primaryUpdateReq, primaryChanges).Execute(); err != nil {
		return diag.FromErr(err)
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["RedundantUUID"]); ok {
		// The secondary's changes live at index 0 of the
		// secondary_connection list.
		secondaryChanges := getResourceDataListElementChanges(supportedChanges, ecxL2ConnectionSchemaNames["SecondaryConnection"], 0, d)
		secondaryUpdateReq := conf.ecx.NewL2ConnectionUpdateRequest(v.(string))
		if err := fillFabricL2ConnectionUpdateRequest(secondaryUpdateReq, secondaryChanges).Execute(); err != nil {
			return diag.FromErr(err)
		}
	}
	diags = append(diags, resourceECXL2ConnectionRead(ctx, d, m)...)
	return diags
}
// resourceECXL2ConnectionDelete removes an ECX L2 connection (and, best-effort,
// its redundant secondary), then waits until the API reports the primary as
// deprovisioned or pending deletion before returning.
func resourceECXL2ConnectionDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	conf := m.(*Config)
	var diags diag.Diagnostics
	if err := conf.ecx.DeleteL2Connection(d.Id()); err != nil {
		restErr, ok := err.(rest.Error)
		if ok {
			//IC-LAYER2-4021 = Connection already deleted
			// Treat "already deleted" as success so deletes are idempotent.
			if hasApplicationErrorCode(restErr.ApplicationErrors, "IC-LAYER2-4021") {
				return diags
			}
		}
		return diag.FromErr(err)
	}
	//remove secondary connection, don't fail on error as there is no partial state on delete
	if redID, ok := d.GetOk(ecxL2ConnectionSchemaNames["RedundantUUID"]); ok {
		if err := conf.ecx.DeleteL2Connection(redID.(string)); err != nil {
			diags = append(diags, diag.Diagnostic{
				Severity:      diag.Warning,
				Summary:       fmt.Sprintf("Failed to remove secondary connection with UUID %q", redID.(string)),
				Detail:        err.Error(),
				AttributePath: cty.GetAttrPath(ecxL2ConnectionSchemaNames["RedundantUUID"]),
			})
		}
	}
	// Poll the primary connection until it leaves the "deprovisioning" state,
	// honoring the resource's configured delete timeout.
	deleteStateConf := &resource.StateChangeConf{
		Pending: []string{
			ecx.ConnectionStatusDeprovisioning,
		},
		Target: []string{
			ecx.ConnectionStatusPendingDelete,
			ecx.ConnectionStatusDeprovisioned,
		},
		Timeout:    d.Timeout(schema.TimeoutDelete),
		Delay:      2 * time.Second,
		MinTimeout: 2 * time.Second,
		Refresh: func() (interface{}, string, error) {
			resp, err := conf.ecx.GetL2Connection(d.Id())
			if err != nil {
				return nil, "", err
			}
			return resp, ecx.StringValue(resp.Status), nil
		},
	}
	if _, err := deleteStateConf.WaitForStateContext(ctx); err != nil {
		return diag.Errorf("error waiting for connection (%s) to be removed: %s", d.Id(), err)
	}
	return diags
}
// createECXL2Connections builds the primary (and optional secondary) L2
// connection models from the Terraform resource data. Only attributes that
// are set in the configuration are copied; the secondary connection is nil
// unless the SecondaryConnection list attribute is present.
func createECXL2Connections(d *schema.ResourceData) (*ecx.L2Connection, *ecx.L2Connection) {
	var primary, secondary *ecx.L2Connection
	primary = &ecx.L2Connection{}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["Name"]); ok {
		primary.Name = ecx.String(v.(string))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["ProfileUUID"]); ok {
		primary.ProfileUUID = ecx.String(v.(string))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["Speed"]); ok {
		primary.Speed = ecx.Int(v.(int))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["SpeedUnit"]); ok {
		primary.SpeedUnit = ecx.String(v.(string))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["Notifications"]); ok {
		primary.Notifications = expandSetToStringList(v.(*schema.Set))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["PurchaseOrderNumber"]); ok {
		primary.PurchaseOrderNumber = ecx.String(v.(string))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["PortUUID"]); ok {
		primary.PortUUID = ecx.String(v.(string))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["DeviceUUID"]); ok {
		primary.DeviceUUID = ecx.String(v.(string))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["DeviceInterfaceID"]); ok {
		primary.DeviceInterfaceID = ecx.Int(v.(int))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["VlanSTag"]); ok {
		primary.VlanSTag = ecx.Int(v.(int))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["VlanCTag"]); ok {
		primary.VlanCTag = ecx.Int(v.(int))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["NamedTag"]); ok {
		primary.NamedTag = ecx.String(v.(string))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["AdditionalInfo"]); ok {
		primary.AdditionalInfo = expandECXL2ConnectionAdditionalInfo(v.(*schema.Set))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["ZSidePortUUID"]); ok {
		primary.ZSidePortUUID = ecx.String(v.(string))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["ZSideVlanSTag"]); ok {
		primary.ZSideVlanSTag = ecx.Int(v.(int))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["ZSideVlanCTag"]); ok {
		primary.ZSideVlanCTag = ecx.Int(v.(int))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["SellerRegion"]); ok {
		primary.SellerRegion = ecx.String(v.(string))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["SellerMetroCode"]); ok {
		primary.SellerMetroCode = ecx.String(v.(string))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["AuthorizationKey"]); ok {
		primary.AuthorizationKey = ecx.String(v.(string))
	}
	if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["SecondaryConnection"]); ok {
		secondary = expandECXL2ConnectionSecondary(v.([]interface{}))
	}
	return primary, secondary
}
// updateECXL2ConnectionResource writes the API representation of the primary
// (and optional secondary) connection back into the Terraform state. Each
// attribute is set individually so that a failing attribute can be reported
// by name. A nil secondary leaves the SecondaryConnection attribute untouched.
func updateECXL2ConnectionResource(primary *ecx.L2Connection, secondary *ecx.L2Connection, d *schema.ResourceData) error {
	if err := d.Set(ecxL2ConnectionSchemaNames["UUID"], primary.UUID); err != nil {
		return fmt.Errorf("error reading UUID: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["Name"], primary.Name); err != nil {
		return fmt.Errorf("error reading Name: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["ProfileUUID"], primary.ProfileUUID); err != nil {
		return fmt.Errorf("error reading ProfileUUID: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["Speed"], primary.Speed); err != nil {
		return fmt.Errorf("error reading Speed: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["SpeedUnit"], primary.SpeedUnit); err != nil {
		return fmt.Errorf("error reading SpeedUnit: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["Status"], primary.Status); err != nil {
		return fmt.Errorf("error reading Status: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["ProviderStatus"], primary.ProviderStatus); err != nil {
		return fmt.Errorf("error reading ProviderStatus: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["Notifications"], primary.Notifications); err != nil {
		return fmt.Errorf("error reading Notifications: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["PurchaseOrderNumber"], primary.PurchaseOrderNumber); err != nil {
		return fmt.Errorf("error reading PurchaseOrderNumber: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["PortUUID"], primary.PortUUID); err != nil {
		return fmt.Errorf("error reading PortUUID: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["DeviceUUID"], primary.DeviceUUID); err != nil {
		return fmt.Errorf("error reading DeviceUUID: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["VlanSTag"], primary.VlanSTag); err != nil {
		return fmt.Errorf("error reading VlanSTag: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["VlanCTag"], primary.VlanCTag); err != nil {
		return fmt.Errorf("error reading VlanCTag: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["NamedTag"], primary.NamedTag); err != nil {
		return fmt.Errorf("error reading NamedTag: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["AdditionalInfo"], flattenECXL2ConnectionAdditionalInfo(primary.AdditionalInfo)); err != nil {
		return fmt.Errorf("error reading AdditionalInfo: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["ZSidePortUUID"], primary.ZSidePortUUID); err != nil {
		return fmt.Errorf("error reading ZSidePortUUID: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["ZSideVlanSTag"], primary.ZSideVlanSTag); err != nil {
		return fmt.Errorf("error reading ZSideVlanSTag: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["ZSideVlanCTag"], primary.ZSideVlanCTag); err != nil {
		return fmt.Errorf("error reading ZSideVlanCTag: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["SellerRegion"], primary.SellerRegion); err != nil {
		return fmt.Errorf("error reading SellerRegion: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["SellerMetroCode"], primary.SellerMetroCode); err != nil {
		return fmt.Errorf("error reading SellerMetroCode: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["AuthorizationKey"], primary.AuthorizationKey); err != nil {
		return fmt.Errorf("error reading AuthorizationKey: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["RedundantUUID"], primary.RedundantUUID); err != nil {
		return fmt.Errorf("error reading RedundantUUID: %s", err)
	}
	if err := d.Set(ecxL2ConnectionSchemaNames["RedundancyType"], primary.RedundancyType); err != nil {
		return fmt.Errorf("error reading RedundancyType: %s", err)
	}
	if secondary != nil {
		// The previously-stored secondary is re-expanded so that values the
		// API does not echo back (e.g. DeviceInterfaceID) can be preserved
		// by flattenECXL2ConnectionSecondary.
		var prevSecondary *ecx.L2Connection
		if v, ok := d.GetOk(ecxL2ConnectionSchemaNames["SecondaryConnection"]); ok {
			prevSecondary = expandECXL2ConnectionSecondary(v.([]interface{}))
		}
		if err := d.Set(ecxL2ConnectionSchemaNames["SecondaryConnection"], flattenECXL2ConnectionSecondary(prevSecondary, secondary)); err != nil {
			return fmt.Errorf("error reading SecondaryConnection: %s", err)
		}
	}
	return nil
}
// flattenECXL2ConnectionSecondary converts a secondary connection returned by
// the API into the single-element list form used by the Terraform schema.
// previous is the connection as previously stored in state (may be nil); it is
// used to keep DeviceInterfaceID, which the API apparently does not echo back.
func flattenECXL2ConnectionSecondary(previous, conn *ecx.L2Connection) interface{} {
	transformed := make(map[string]interface{})
	transformed[ecxL2ConnectionSchemaNames["UUID"]] = conn.UUID
	transformed[ecxL2ConnectionSchemaNames["Name"]] = conn.Name
	transformed[ecxL2ConnectionSchemaNames["ProfileUUID"]] = conn.ProfileUUID
	transformed[ecxL2ConnectionSchemaNames["Speed"]] = conn.Speed
	transformed[ecxL2ConnectionSchemaNames["SpeedUnit"]] = conn.SpeedUnit
	transformed[ecxL2ConnectionSchemaNames["Status"]] = conn.Status
	transformed[ecxL2ConnectionSchemaNames["ProviderStatus"]] = conn.ProviderStatus
	transformed[ecxL2ConnectionSchemaNames["PortUUID"]] = conn.PortUUID
	transformed[ecxL2ConnectionSchemaNames["DeviceUUID"]] = conn.DeviceUUID
	transformed[ecxL2ConnectionSchemaNames["DeviceInterfaceID"]] = conn.DeviceInterfaceID
	// Prefer the previously-known DeviceInterfaceID when it was non-zero,
	// overriding whatever the API returned for this field.
	if previous != nil && ecx.IntValue(previous.DeviceInterfaceID) != 0 {
		transformed[ecxL2ConnectionSchemaNames["DeviceInterfaceID"]] = previous.DeviceInterfaceID
	}
	transformed[ecxL2ConnectionSchemaNames["VlanSTag"]] = conn.VlanSTag
	transformed[ecxL2ConnectionSchemaNames["VlanCTag"]] = conn.VlanCTag
	transformed[ecxL2ConnectionSchemaNames["ZSidePortUUID"]] = conn.ZSidePortUUID
	transformed[ecxL2ConnectionSchemaNames["ZSideVlanSTag"]] = conn.ZSideVlanSTag
	transformed[ecxL2ConnectionSchemaNames["ZSideVlanCTag"]] = conn.ZSideVlanCTag
	transformed[ecxL2ConnectionSchemaNames["SellerRegion"]] = conn.SellerRegion
	transformed[ecxL2ConnectionSchemaNames["SellerMetroCode"]] = conn.SellerMetroCode
	transformed[ecxL2ConnectionSchemaNames["AuthorizationKey"]] = conn.AuthorizationKey
	transformed[ecxL2ConnectionSchemaNames["RedundantUUID"]] = conn.RedundantUUID
	transformed[ecxL2ConnectionSchemaNames["RedundancyType"]] = conn.RedundancyType
	return []interface{}{transformed}
}
// expandECXL2ConnectionSecondary converts the single-element SecondaryConnection
// list from the Terraform schema into an ecx.L2Connection model. It returns nil
// (with a warning logged) when the list is empty. Only non-empty attributes are
// copied — except Name, which is copied unconditionally.
// NOTE(review): Name is the only field not guarded by isEmpty — presumably
// because it is required; confirm against the schema definition.
func expandECXL2ConnectionSecondary(conns []interface{}) *ecx.L2Connection {
	if len(conns) < 1 {
		log.Printf("[WARN] resource_ecx_l2_connection expanding empty secondary connection collection")
		return nil
	}
	conn := conns[0].(map[string]interface{})
	transformed := ecx.L2Connection{}
	if v, ok := conn[ecxL2ConnectionSchemaNames["Name"]]; ok {
		transformed.Name = ecx.String(v.(string))
	}
	if v, ok := conn[ecxL2ConnectionSchemaNames["ProfileUUID"]]; ok && !isEmpty(v) {
		transformed.ProfileUUID = ecx.String(v.(string))
	}
	if v, ok := conn[ecxL2ConnectionSchemaNames["Speed"]]; ok && !isEmpty(v) {
		transformed.Speed = ecx.Int(v.(int))
	}
	if v, ok := conn[ecxL2ConnectionSchemaNames["SpeedUnit"]]; ok && !isEmpty(v) {
		transformed.SpeedUnit = ecx.String(v.(string))
	}
	if v, ok := conn[ecxL2ConnectionSchemaNames["PortUUID"]]; ok && !isEmpty(v) {
		transformed.PortUUID = ecx.String(v.(string))
	}
	if v, ok := conn[ecxL2ConnectionSchemaNames["DeviceUUID"]]; ok && !isEmpty(v) {
		transformed.DeviceUUID = ecx.String(v.(string))
	}
	if v, ok := conn[ecxL2ConnectionSchemaNames["DeviceInterfaceID"]]; ok && !isEmpty(v) {
		transformed.DeviceInterfaceID = ecx.Int(v.(int))
	}
	if v, ok := conn[ecxL2ConnectionSchemaNames["VlanSTag"]]; ok && !isEmpty(v) {
		transformed.VlanSTag = ecx.Int(v.(int))
	}
	if v, ok := conn[ecxL2ConnectionSchemaNames["VlanCTag"]]; ok && !isEmpty(v) {
		transformed.VlanCTag = ecx.Int(v.(int))
	}
	if v, ok := conn[ecxL2ConnectionSchemaNames["SellerRegion"]]; ok && !isEmpty(v) {
		transformed.SellerRegion = ecx.String(v.(string))
	}
	if v, ok := conn[ecxL2ConnectionSchemaNames["SellerMetroCode"]]; ok && !isEmpty(v) {
		transformed.SellerMetroCode = ecx.String(v.(string))
	}
	if v, ok := conn[ecxL2ConnectionSchemaNames["AuthorizationKey"]]; ok && !isEmpty(v) {
		transformed.AuthorizationKey = ecx.String(v.(string))
	}
	return &transformed
}
// flattenECXL2ConnectionAdditionalInfo converts additional-info entries into
// the generic []interface{} form expected by the Terraform schema.
func flattenECXL2ConnectionAdditionalInfo(infos []ecx.L2ConnectionAdditionalInfo) interface{} {
	result := make([]interface{}, len(infos))
	for i := range infos {
		result[i] = map[string]interface{}{
			ecxL2ConnectionAdditionalInfoSchemaNames["Name"]:  infos[i].Name,
			ecxL2ConnectionAdditionalInfoSchemaNames["Value"]: infos[i].Value,
		}
	}
	return result
}
// expandECXL2ConnectionAdditionalInfo converts the Terraform schema.Set of
// additional-info blocks into the ecx client's slice representation.
func expandECXL2ConnectionAdditionalInfo(infos *schema.Set) []ecx.L2ConnectionAdditionalInfo {
	items := infos.List()
	result := make([]ecx.L2ConnectionAdditionalInfo, 0, len(items))
	for _, raw := range items {
		m := raw.(map[string]interface{})
		result = append(result, ecx.L2ConnectionAdditionalInfo{
			Name:  ecx.String(m[ecxL2ConnectionAdditionalInfoSchemaNames["Name"]].(string)),
			Value: ecx.String(m[ecxL2ConnectionAdditionalInfoSchemaNames["Value"]].(string)),
		})
	}
	return result
}
// fillFabricL2ConnectionUpdateRequest copies the supported changed attributes
// (name, speed, speed unit) onto the given connection update request and
// returns the same request for chaining.
func fillFabricL2ConnectionUpdateRequest(updateReq ecx.L2ConnectionUpdateRequest, changes map[string]interface{}) ecx.L2ConnectionUpdateRequest {
	for key, val := range changes {
		switch key {
		case ecxL2ConnectionSchemaNames["Name"]:
			updateReq.WithName(val.(string))
		case ecxL2ConnectionSchemaNames["Speed"]:
			updateReq.WithSpeed(val.(int))
		case ecxL2ConnectionSchemaNames["SpeedUnit"]:
			updateReq.WithSpeedUnit(val.(string))
		}
	}
	return updateReq
}
|
package watcher
import (
"fmt"
"fs_sync/files"
"fs_sync/models"
"fs_sync/modules/cmd"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/fsnotify/fsnotify"
)
// userHost identifies the remote machine (user and address) that files are
// synced to over SSH/rsync.
// NOTE(review): hardcoded Vagrant defaults — consider making these configurable.
var userHost = models.UserHost{
	User: "vagrant",
	Host: "192.168.10.101",
}
// Watcher ties an fsnotify filesystem watcher to a FileSyncManager, syncing
// watched files to a remote host when they change.
type Watcher struct {
	FileSyncManager *files.FileSyncManager
	watcher         *fsnotify.Watcher
}
// New constructs a Watcher backed by a fresh fsnotify watcher and an empty
// file-sync manager. The process exits if the fsnotify watcher cannot be created.
func New() *Watcher {
	fsWatcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	w := &Watcher{
		FileSyncManager: files.NewFileSyncManager(),
		watcher:         fsWatcher,
	}
	return w
}
// AddDirs registers every given file or directory with the sync list and with
// the underlying fsnotify watcher. All failures are collected and returned as
// one newline-joined error; nil is returned when everything succeeded.
//
// NOTE(review): watching a directory may not watch the content of the files
// inside it, which is what we want to sync — carried-over TODO, investigate.
func (w *Watcher) AddDirs(fileDirs ...string) error {
	var problems []string
	for _, path := range fileDirs {
		if err := w.FileSyncManager.AddFileToSynclist(path); err != nil {
			problems = append(problems, err.Error())
			continue
		}
		if err := w.watcher.Add(path); err != nil {
			problems = append(problems, err.Error())
		}
	}
	if len(problems) == 0 {
		return nil
	}
	return fmt.Errorf("%s", strings.Join(problems, "\n"))
}
// RemoveDir is intended to stop watching and syncing a directory.
// TODO: not yet implemented.
func (this *Watcher) RemoveDir(dir string) {
}
// Start performs an initial rsync of every registered file to the remote host,
// mirroring the local $HOME layout under the remote user's $HOME, creating
// missing remote directories, and then launches the background event loop
// (watcherFunc) that keeps files in sync. Any failure during the initial sync
// is fatal and terminates the process.
func (w *Watcher) Start() {
	const cmdName = "rsync"
	// The remote HOME is loop-invariant; fetch it once instead of issuing a
	// remote command for every file (the original did this per iteration).
	remoteHome, err := cmd.GetEnv(userHost, "HOME")
	if err != nil {
		log.Fatal(err)
	}
	for _, file := range w.FileSyncManager.GetFilesToSync() {
		remoteFile := file
		// Map an absolute local path under $HOME onto the remote HOME.
		// TODO(review): relative paths are used verbatim — confirm intended.
		if filepath.IsAbs(file) {
			remoteFile = strings.Replace(file, os.Getenv("HOME"), remoteHome, 1)
		}
		exist, err := cmd.PathExist(userHost, filepath.Dir(remoteFile))
		if err != nil {
			log.Fatalf("Error checking if path exists. %s", err)
		}
		if !exist {
			if err := cmd.CreatePath(userHost, filepath.Dir(remoteFile)); err != nil {
				log.Fatalf("Error creating path in remote host. %s", err)
			}
		}
		// The original silently discarded this error; log it so a path that
		// cannot be watched is at least visible.
		if err := w.watcher.Add(file); err != nil {
			log.Printf("failed to watch %s: %s", file, err)
		}
		stat, err := os.Stat(file)
		if err != nil {
			panic(err)
		}
		// Directories only need to be watched; only plain files are rsynced.
		if stat.IsDir() {
			continue
		}
		// Build the destination from userHost instead of a hardcoded
		// "vagrant@192.168.10.101" duplicate of that variable.
		cmdArgs := strings.Split(
			fmt.Sprintf("-az --stats %s %s@%s:%s", file, userHost.User, userHost.Host, remoteFile), " ")
		log.Printf("syncing file %s", file)
		// Named rsync (not cmd) to avoid shadowing the imported cmd package.
		rsync := exec.Command(cmdName, cmdArgs...)
		output, err := rsync.CombinedOutput()
		if err != nil {
			log.Fatalf("error running rsync. %s\n%s", string(output), err)
		}
	}
	log.Printf("Starting watcherFunc...")
	go w.watcherFunc()
}
// watcherFunc is the event loop: for every fsnotify Write event it rsyncs the
// modified file to the corresponding path under the remote user's HOME. It
// returns when either channel of the underlying watcher is closed.
//
// NOTE(review): log.Fatal here terminates the whole process from a background
// goroutine on any rsync or GetEnv failure — confirm that is desired.
func (w *Watcher) watcherFunc() {
	const cmdName = "rsync"
	for {
		select {
		case event, ok := <-w.watcher.Events:
			if !ok {
				log.Println("Not on in event")
				return
			}
			log.Println("event:", event)
			// Only Write events trigger a sync; skip everything else early so
			// we do not run a remote GetEnv for irrelevant events (the
			// original fetched HOME on every event).
			if event.Op&fsnotify.Write != fsnotify.Write {
				continue
			}
			remoteHome, err := cmd.GetEnv(userHost, "HOME")
			if err != nil {
				log.Fatal(err)
			}
			remoteFile := strings.Replace(event.Name, os.Getenv("HOME"), remoteHome, 1)
			// Build the destination from userHost instead of a hardcoded
			// "vagrant@192.168.10.101" duplicate of that variable.
			cmdArgs := strings.Split(
				fmt.Sprintf("-az --stats %s %s@%s:%s", event.Name, userHost.User, userHost.Host, remoteFile), " ")
			// Named rsync (not cmd) to avoid shadowing the imported cmd package.
			rsync := exec.Command(cmdName, cmdArgs...)
			output, err := rsync.CombinedOutput()
			if err != nil {
				log.Fatalf("error running rsync. %s\n%s", string(output), err)
			}
		case err, ok := <-w.watcher.Errors:
			if !ok {
				log.Println("Not on in errors")
				return
			}
			log.Println("error:", err)
		}
	}
}
// Close shuts down the underlying fsnotify watcher, releasing its resources.
func (this *Watcher) Close() error {
	return this.watcher.Close()
}
|
// Copyright 2013 Google Inc. All rights reserved.
// Copyright 2016 the gousb Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gousb
import (
"fmt"
"log"
"testing"
)
// TestListDevices verifies that ListDevices invokes the selector for every
// fake device, opens exactly the selected devices, and returns them with the
// same descriptors the selector saw (in the same order).
func TestListDevices(t *testing.T) {
	_, done := newFakeLibusb()
	defer done()
	c := NewContext()
	defer c.Close()
	c.Debug(0)
	descs := []*DeviceDesc{}
	devs, err := c.ListDevices(func(desc *DeviceDesc) bool {
		descs = append(descs, desc)
		return true
	})
	// Close all opened devices regardless of how the assertions below go.
	defer func() {
		for _, d := range devs {
			d.Close()
		}
	}()
	if err != nil {
		t.Fatalf("ListDevices(): %s", err)
	}
	if got, want := len(devs), len(fakeDevices); got != want {
		t.Fatalf("len(devs) = %d, want %d (based on num fake devs)", got, want)
	}
	if got, want := len(devs), len(descs); got != want {
		t.Fatalf("len(devs) = %d, want %d (based on num opened devices)", got, want)
	}
	// Pointer equality: each opened device must carry the exact descriptor
	// instance that was passed to the selector.
	for i := range devs {
		if got, want := devs[i].Desc, descs[i]; got != want {
			t.Errorf("dev[%d].Desc = %p, want %p", i, got, want)
		}
	}
}
// TestOpenDeviceWithVIDPID checks that OpenDeviceWithVIDPID returns a device
// exactly when a fake device with the given VID/PID exists, never returns an
// error for a missing device, and that the returned device matches the
// requested IDs.
func TestOpenDeviceWithVIDPID(t *testing.T) {
	_, done := newFakeLibusb()
	defer done()
	for _, d := range []struct {
		vid, pid ID
		exists   bool
	}{
		{0x7777, 0x0003, false},
		{0x8888, 0x0001, false},
		{0x8888, 0x0002, true},
		{0x9999, 0x0001, true},
		{0x9999, 0x0002, false},
	} {
		// Run each case in a closure so the Context is closed at the end of
		// the iteration instead of piling up defers until the test returns
		// (the original deferred c.Close() inside the loop).
		func() {
			c := NewContext()
			defer c.Close()
			dev, err := c.OpenDeviceWithVIDPID(d.vid, d.pid)
			if (dev != nil) != d.exists {
				t.Errorf("OpenDeviceWithVIDPID(%s/%s): device != nil is %v, want %v", d.vid, d.pid, dev != nil, d.exists)
			}
			if err != nil {
				t.Errorf("OpenDeviceWithVIDPID(%s/%s): got error %v, want nil", d.vid, d.pid, err)
			}
			if dev != nil {
				if dev.Desc.Vendor != d.vid || dev.Desc.Product != d.pid {
					t.Errorf("OpenDeviceWithVIDPID(%s/%s): the device returned has VID/PID %s/%s, different from specified in the arguments", d.vid, d.pid, dev.Desc.Vendor, dev.Desc.Product)
				}
				dev.Close()
			}
		}()
	}
}
// This example demonstrates the use of a few convenience functions that
// can be used in simple situations and with simple devices.
// It opens a device with a given VID/PID,
// claims the default interface (use the same config as currently active,
// interface 0, alternate setting 0) and tries to write 5 bytes of data
// to endpoint number 7.
func Example_simple() {
	// Initialize a new Context.
	ctx := NewContext()
	defer ctx.Close()
	// Open any device with a given VID/PID using a convenience function.
	dev, err := ctx.OpenDeviceWithVIDPID(0x046d, 0xc526)
	if err != nil {
		log.Fatalf("Could not open a device: %v", err)
	}
	defer dev.Close()
	// Claim the default interface using a convenience function.
	// The default interface is always #0 alt #0 in the currently active
	// config.
	intf, done, err := dev.DefaultInterface()
	if err != nil {
		log.Fatalf("%s.DefaultInterface(): %v", dev, err)
	}
	defer done()
	// Open an OUT endpoint.
	ep, err := intf.OutEndpoint(7)
	if err != nil {
		log.Fatalf("%s.OutEndpoint(7): %v", intf, err)
	}
	// Generate some data to write.
	data := make([]byte, 5)
	for i := range data {
		data[i] = byte(i)
	}
	// Write data to the USB device.
	numBytes, err := ep.Write(data)
	if numBytes != 5 {
		// Bug fix: the format string has three verbs but was originally given
		// only two arguments — the endpoint (%s) was missing.
		log.Fatalf("%s.Write([5]): only %d bytes written, returned error is %v", ep, numBytes, err)
	}
	fmt.Println("5 bytes successfully sent to the endpoint")
}
// This example demonstrates the full API for accessing endpoints.
// It opens a device with a known VID/PID, switches the device to
// configuration #2, in that configuration it opens (claims) interface #3 with alternate setting #0.
// Within that interface setting it opens an IN endpoint number 6 and an OUT endpoint number 5, then starts copying
// data between them.
func Example_complex() {
	// Initialize a new Context.
	ctx := NewContext()
	defer ctx.Close()
	// Iterate through available Devices, finding all that match a known VID/PID.
	vid, pid := ID(0x04f2), ID(0xb531)
	devs, err := ctx.ListDevices(func(desc *DeviceDesc) bool {
		// this function is called for every device present.
		// Returning true means the device should be opened.
		return desc.Vendor == vid && desc.Product == pid
	})
	// All returned devices are now open and will need to be closed.
	for _, d := range devs {
		defer d.Close()
	}
	if err != nil {
		log.Fatalf("ListDevices(): %v", err)
	}
	if len(devs) == 0 {
		log.Fatalf("no devices found matching VID %s and PID %s", vid, pid)
	}
	// Pick the first device found.
	dev := devs[0]
	// Switch the configuration to #2.
	cfg, err := dev.Config(2)
	if err != nil {
		log.Fatalf("%s.Config(2): %v", dev, err)
	}
	defer cfg.Close()
	// In the config #2, claim interface #3 with alt setting #0.
	intf, err := cfg.Interface(3, 0)
	if err != nil {
		log.Fatalf("%s.Interface(3, 0): %v", cfg, err)
	}
	defer intf.Close()
	// In this interface open endpoint #6 for reading.
	epIn, err := intf.InEndpoint(6)
	if err != nil {
		log.Fatalf("%s.InEndpoint(6): %v", intf, err)
	}
	// And in the same interface open endpoint #5 for writing.
	epOut, err := intf.OutEndpoint(5)
	if err != nil {
		log.Fatalf("%s.OutEndpoint(5): %v", intf, err)
	}
	// Buffer large enough for 10 USB packets from endpoint 6.
	buf := make([]byte, 10*epIn.Desc.MaxPacketSize)
	total := 0
	// Repeat the read/write cycle 10 times.
	for i := 0; i < 10; i++ {
		// readBytes might be smaller than the buffer size. readBytes might be greater than zero even if err is not nil.
		readBytes, err := epIn.Read(buf)
		if err != nil {
			fmt.Println("Read returned an error:", err)
		}
		if readBytes == 0 {
			log.Fatalf("IN endpoint 6 returned 0 bytes of data.")
		}
		// writeBytes might be smaller than the buffer size if an error occurred. writeBytes might be greater than zero even if err is not nil.
		writeBytes, err := epOut.Write(buf[:readBytes])
		if err != nil {
			fmt.Println("Write returned an error:", err)
		}
		if writeBytes != readBytes {
			// Bug fix: the message previously said "IN endpoint 5"; #5 is the
			// OUT endpoint in this example.
			log.Fatalf("OUT endpoint 5 received only %d bytes of data out of %d sent", writeBytes, readBytes)
		}
		total += writeBytes
	}
	fmt.Printf("Total number of bytes copied: %d\n", total)
}
|
// Copyright (c) 2014 James Wendel. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package auth
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"sync"
"time"
)
// datastore holds the in-memory auth database loaded from a JSON file, plus
// token state. All map access is guarded by mutex; the file watcher reloads
// domainMap/tokenMap when the auth file's mtime changes.
type datastore struct {
	mutex         sync.RWMutex
	authFilename  string      // path of the JSON auth file
	authFileinfo  os.FileInfo // last-seen stat of the auth file, for change detection
	tokenFilename string      // optional path of the persisted-tokens file
	tokenTimeout  int
	domainMap     DomainAuths
	tokenMap      DomainTokens
}
// DomainAuths is map[domainName]Domain. It maps a domain name to the
// authentication data (users and OAuth clients) for that domain.
type DomainAuths map[string]Domain
// Domain holds per-domain credentials used for proxy-auth and oauth lookups.
type Domain struct {
	// Users maps username -> hashed password (see EncryptPassword).
	Users map[string]string
	// Clients maps client_id -> client_secret (stored verbatim).
	Clients map[string]string
}
// domainJSON is the on-disk JSON shape of one domain entry in the auth file.
type domainJSON struct {
	Name    string       `json:"domain"`
	Users   []userJSON   `json:"users"`
	Clients []clientJSON `json:"clients"`
}
// userJSON is the on-disk JSON shape of one user entry (plaintext password;
// it is hashed on load by unmarshal).
type userJSON struct {
	Username string `json:"username"`
	Password string `json:"password"`
}
// clientJSON is the on-disk JSON shape of one OAuth client entry.
type clientJSON struct {
	ID     string `json:"client_id"`
	Secret string `json:"client_secret"`
}
// Init loads the given JSON auth file, unmarshals its domains/users/clients,
// records the file's stat info for change detection, optionally loads
// persisted tokens, and starts the background file watcher and signal handler
// goroutines. It returns an error if the auth file cannot be read or parsed.
func (ds *datastore) Init(authFile, tokenFile string) error {
	ds.authFilename = authFile
	ds.tokenFilename = tokenFile
	b, err := ds.loadFile(ds.authFilename)
	if err != nil {
		return err
	}
	err = ds.unmarshal(b)
	if err != nil {
		return err
	}
	// Remember the file's current stat so fileWatcher can detect changes
	// via ModTime comparison.
	ds.authFileinfo, err = os.Stat(ds.authFilename)
	if err != nil {
		return err
	}
	// Token persistence is optional; only load when a token file was given.
	if len(tokenFile) > 0 {
		ds.loadTokens()
	}
	go ds.fileWatcher()
	go ds.startSigHandler()
	return nil
}
// DomainExists reports whether the data store contains the given domain.
func (ds *datastore) DomainExists(domain string) bool {
	ds.mutex.RLock()
	defer ds.mutex.RUnlock()
	_, found := ds.domainMap[domain]
	return found
}
// UserPasswordValid reports whether password matches the stored password for
// username in the given domain (false for unknown domains or users). The
// password argument is expected to already be in encrypted (hashed) form, as
// produced by EncryptPassword.
//
// NOTE(review): this is a plain string comparison, which is not constant-time;
// consider crypto/subtle.ConstantTimeCompare if timing attacks are a concern.
func (ds *datastore) UserPasswordValid(domain, username, password string) bool {
	ds.mutex.RLock()
	defer ds.mutex.RUnlock()
	d, ok := ds.domainMap[domain]
	if !ok {
		return false
	}
	pass, ok := d.Users[username]
	// Collapses the original if-true/return-false ladder into one expression.
	return ok && pass == password
}
// loadFile reads the entire file at path f from disk.
func (ds *datastore) loadFile(f string) ([]byte, error) {
	// ioutil.ReadFile already returns (nil, err) on failure, so no extra
	// error branch is needed.
	return ioutil.ReadFile(f)
}
// unmarshal parses the JSON auth file contents and atomically replaces
// ds.domainMap and ds.tokenMap under the write lock. User passwords are
// hashed with EncryptPassword during this step. Duplicate domains, usernames
// (within a domain) or client_ids (within a domain) are rejected with an
// error, in which case the existing maps are left untouched.
func (ds *datastore) unmarshal(bytes []byte) error {
	// Updating the user database, write lock needed
	ds.mutex.Lock()
	defer ds.mutex.Unlock()
	var domains []domainJSON
	err := json.Unmarshal(bytes, &domains)
	if err != nil {
		return err
	}
	// Build into fresh maps so a mid-parse error never leaves ds half-updated.
	domainMap := make(map[string]Domain)
	tokenMap := make(map[string]Tokens)
	// Loop over all domains and users, inserting them into the domainMap
	// The user password will be encrypted with this step
	for _, d := range domains {
		_, ok := domainMap[d.Name]
		if !ok {
			var domain Domain
			domain.Users = make(map[string]string)
			for _, u := range d.Users {
				_, ok := domain.Users[u.Username]
				if !ok {
					domain.Users[u.Username] = EncryptPassword(u.Password)
				} else {
					return fmt.Errorf("duplicate username '%v' for domain '%v'", u.Username, d.Name)
				}
			}
			domain.Clients = make(map[string]string)
			for _, u := range d.Clients {
				_, ok := domain.Clients[u.ID]
				if !ok {
					domain.Clients[u.ID] = u.Secret
				} else {
					return fmt.Errorf("duplicate client_id '%v' for domain '%v'", u.ID, d.Name)
				}
			}
			domainMap[d.Name] = domain
			// Every domain starts with an empty token set; previously issued
			// tokens are discarded on reload.
			tokenMap[d.Name] = make(map[string]time.Time)
		} else {
			return fmt.Errorf("duplicate domain '%v' in input file", d.Name)
		}
	}
	ds.domainMap = domainMap
	ds.tokenMap = tokenMap
	return nil
}
// fileWatcher polls the auth file every 3 seconds and reloads the user data
// when its modification time changes.
// NOTE(review): any stat/read/parse error makes this goroutine return, which
// permanently stops watching for updates — confirm that is intended.
func (ds *datastore) fileWatcher() {
	for {
		time.Sleep(3 * time.Second)
		fi, err := os.Stat(ds.authFilename)
		if err != nil {
			fmt.Printf("Failed watching file '%v' for updates\n", ds.authFilename)
			return
		}
		if !fi.ModTime().Equal(ds.authFileinfo.ModTime()) {
			// file modified time changed, reload data
			b, err := ds.loadFile(ds.authFilename)
			if err != nil {
				fmt.Printf("Error loading file '%v': %v", ds.authFilename, err)
				return
			}
			err = ds.unmarshal(b)
			if err != nil {
				fmt.Printf("Error unmarshaling '%v': %v", ds.authFilename, err)
				return
			}
			// Remember the new stat so this change is not reloaded again.
			ds.authFileinfo = fi
		}
	}
}
|
package main
import (
"sync"
"sync/atomic"
"fmt"
)
// main launches 50 goroutines that each atomically increment sum, waits for
// all of them, and prints the final value (100 + 50 = 150).
func main() {
	var sum uint32 = 100
	var wg sync.WaitGroup
	for i := 0; i < 50; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			//sum += 1 //1  (unsynchronized increment: a data race)
			// Bug fix: the original did atomic.AddUint32 and then read sum
			// with a plain fmt.Println(sum) — that read races with the other
			// goroutines' writes. Print AddUint32's returned value instead.
			fmt.Println(atomic.AddUint32(&sum, 1)) //2
		}()
	}
	wg.Wait()
	// All goroutines have finished; a plain read is safe here.
	fmt.Println(sum)
}
|
package ex122
import (
"os"
"reflect"
"testing"
)
// This test ensures that the program terminates without crashing.
// Display on a reflect.Value exercises the display logic on a deeply
// recursive structure; the cyclic-struct case is compiled but disabled
// because Display would recurse forever on it.
func Test(t *testing.T) {
	// Even metarecursion! (YMMV)
	Display("rV", reflect.ValueOf(os.Stderr))
	// Output:
	// Display rV (reflect.Value):
	// (*rV.typ).size = 8
	// (*rV.typ).ptrdata = 8
	// (*rV.typ).hash = 871609668
	// (*rV.typ)._ = 0
	// ...
	type Cycle struct {
		Value int
		Tail  *Cycle
	}
	var c Cycle
	c = Cycle{42, &c}
	// Deliberately not executed: Display does not detect cycles.
	if false {
		Display("c", c)
		// Output:
		// Display c (display.Cycle):
		// c.Value = 42
		// (*c.Tail).Value = 42
		// (*(*c.Tail).Tail).Value = 42
		// ...ad infinitum...
	}
}
|
package gopd
import (
"bytes"
"encoding/json"
"fmt"
)
// DOCUMENT_API_ENDPOINT is the base URL of the PandaDoc public documents API.
const DOCUMENT_API_ENDPOINT = "https://api.pandadoc.com/public/v1/documents"
// FromTemplateDocument is the request payload for creating a PandaDoc
// document from an existing template.
type FromTemplateDocument struct {
	Name          string            `json:"name"`
	TemplateUuid  string            `json:"template_uuid"`
	Recipients    []Recipient       `json:"recipients"`
	Tokens        []Token           `json:"tokens,omitempty"`
	Fields        []Field           `json:"fields,omitempty"`
	Metadata      map[string]string `json:"metadata,omitempty"`
	PricingTables []PricingTable    `json:"pricing_tables,omitempty"`
}
// FromPdfDocument will be the request payload for creating a document from an
// uploaded PDF.
type FromPdfDocument struct {
	//TODO: implement creating from PDF
}
// Create submits this template-based document to the PandaDoc API and returns
// the parsed creation response.
func (d FromTemplateDocument) Create() (*Response, error) {
	payload, err := json.Marshal(d)
	if err != nil {
		return nil, err
	}
	body, err := SendRequest(
		"POST",
		DOCUMENT_API_ENDPOINT,
		bytes.NewBuffer(payload),
		"application/json",
		"201 Created",
	)
	if err != nil {
		return nil, err
	}
	response := &Response{}
	if err := json.Unmarshal(body, response); err != nil {
		return nil, err
	}
	return response, nil
}
// Create is a placeholder for creating a document from a PDF upload.
// TODO: implement (see FromPdfDocument).
func (d FromPdfDocument) Create() {
}
// GetDocumentStatus fetches and parses the status of the given document.
func GetDocumentStatus(docId string) (*DocumentStatus, error) {
	body, err := SendRequest(
		"GET",
		fmt.Sprintf("%s/%s", DOCUMENT_API_ENDPOINT, docId),
		nil,
		"application/json",
		"200 OK",
	)
	if err != nil {
		return nil, err
	}
	status := &DocumentStatus{}
	if err := json.Unmarshal(body, status); err != nil {
		return nil, err
	}
	return status, nil
}
// GetDocumentDetails fetches and parses the full details of the given document.
func GetDocumentDetails(docId string) (*Document, error) {
	body, err := SendRequest(
		"GET",
		fmt.Sprintf("%s/%s/details", DOCUMENT_API_ENDPOINT, docId),
		nil,
		"application/json",
		"200 OK",
	)
	if err != nil {
		return nil, err
	}
	document := &Document{}
	if err := json.Unmarshal(body, document); err != nil {
		return nil, err
	}
	return document, nil
}
// SendDocument asks PandaDoc to send the given document to its recipients
// with the supplied message; silent suppresses the notification email.
func SendDocument(docId string, msg string, silent bool) (*Response, error) {
	// Inline request payload for the /send endpoint.
	type SendMsg struct {
		Message string `json:"message"`
		Silent  bool   `json:"silent"`
	}
	payload, err := json.Marshal(SendMsg{msg, silent})
	if err != nil {
		return nil, err
	}
	body, err := SendRequest(
		"POST",
		fmt.Sprintf("%s/%s/send", DOCUMENT_API_ENDPOINT, docId),
		bytes.NewBuffer(payload),
		"application/json",
		"200 OK",
	)
	if err != nil {
		return nil, err
	}
	result := &Response{}
	if err := json.Unmarshal(body, result); err != nil {
		return nil, err
	}
	return result, nil
}
// ShareResponse is the API response for creating a document sharing session:
// the session id and its expiry timestamp.
type ShareResponse struct {
	Id        string `json:"id"`
	ExpiresAt string `json:"expires_at"`
}
// ShareDocument creates a sharing session for the document on behalf of the
// given recipient, valid for lifetime seconds.
func ShareDocument(docId string, recipient string, lifetime int) (*ShareResponse, error) {
	// Inline request payload for the /session endpoint.
	type ShareMsg struct {
		Recipient string `json:"recipient"`
		Lifetime  int    `json:"lifetime"`
	}
	payload, err := json.Marshal(ShareMsg{recipient, lifetime})
	if err != nil {
		return nil, err
	}
	body, err := SendRequest(
		"POST",
		fmt.Sprintf("%s/%s/session", DOCUMENT_API_ENDPOINT, docId),
		bytes.NewBuffer(payload),
		"application/json",
		"201 Created",
	)
	if err != nil {
		return nil, err
	}
	result := &ShareResponse{}
	if err := json.Unmarshal(body, result); err != nil {
		return nil, err
	}
	return result, nil
}
// DownloadDocument fetches the raw bytes of the given document.
func DownloadDocument(docId string) ([]byte, error) {
	raw, err := SendRequest(
		"GET",
		fmt.Sprintf("%s/%s/download", DOCUMENT_API_ENDPOINT, docId),
		nil,
		"application/json",
		"200 OK",
	)
	if err != nil {
		return nil, err
	}
	return raw, nil
}
|
package consumer
import (
"context"
rabbitmqmodels "github.com/superbet-group/code-cadets-2021/lecture_3/03_project/calculator/internal/infrastructure/rabbitmq/models"
)
// Consumer offers methods for consuming from the input queues: bets arriving
// from the controller and bet-event updates.
type Consumer struct {
	betFromControllerConsumer BetFromController
	eventUpdateConsumer       BetEventUpdateConsumer
}
// New creates and returns a new Consumer wrapping the given queue consumers.
func New(betFromController BetFromController, eventUpdateConsumer BetEventUpdateConsumer) *Consumer {
	return &Consumer{
		betFromControllerConsumer: betFromController,
		eventUpdateConsumer:       eventUpdateConsumer,
	}
}
// ConsumeBetsFromController consumes the queue of bets coming from the
// controller, returning a channel of the received bets.
func (c *Consumer) ConsumeBetsFromController(ctx context.Context) (<-chan rabbitmqmodels.BetFromController, error) {
	return c.betFromControllerConsumer.Consume(ctx)
}
// ConsumeEventUpdates consumes the event-updates queue, returning a channel
// of decoded bet event updates. Cancelling ctx stops consumption.
func (c *Consumer) ConsumeEventUpdates(ctx context.Context) (<-chan rabbitmqmodels.BetEventUpdate, error) {
    return c.eventUpdateConsumer.Consume(ctx)
}
|
package iproto
// Marshaller abstracts an entity from the way it is serialized and
// deserialized.
type Marshaller interface {
    Marshal() ([]byte, error) // encode the entity into bytes
    UnMarshal([]byte) error   // decode the entity from bytes, in place
}
// Marshal serializes val using its own Marshal implementation.
//
// The previous body checked the error and returned (nil, err) explicitly,
// which is redundant: by Go convention the data value is meaningless when
// err is non-nil, so delegating directly returns an equivalent pair.
func Marshal(val Marshaller) ([]byte, error) {
    return val.Marshal()
}
// UnMarshal decodes src into val using val's own UnMarshal implementation.
func UnMarshal(src []byte, val Marshaller) error {
    return val.UnMarshal(src)
}
|
package machines
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/pointer"
"sigs.k8s.io/yaml"
configv1 "github.com/openshift/api/config/v1"
machinev1 "github.com/openshift/api/machine/v1"
machinev1alpha1 "github.com/openshift/api/machine/v1alpha1"
machinev1beta1 "github.com/openshift/api/machine/v1beta1"
baremetalapi "github.com/openshift/cluster-api-provider-baremetal/pkg/apis"
baremetalprovider "github.com/openshift/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1"
libvirtapi "github.com/openshift/cluster-api-provider-libvirt/pkg/apis"
libvirtprovider "github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1beta1"
ovirtproviderapi "github.com/openshift/cluster-api-provider-ovirt/pkg/apis"
ovirtprovider "github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1"
"github.com/openshift/installer/pkg/asset"
"github.com/openshift/installer/pkg/asset/ignition/machine"
"github.com/openshift/installer/pkg/asset/installconfig"
icaws "github.com/openshift/installer/pkg/asset/installconfig/aws"
icazure "github.com/openshift/installer/pkg/asset/installconfig/azure"
icgcp "github.com/openshift/installer/pkg/asset/installconfig/gcp"
"github.com/openshift/installer/pkg/asset/machines/alibabacloud"
"github.com/openshift/installer/pkg/asset/machines/aws"
"github.com/openshift/installer/pkg/asset/machines/azure"
"github.com/openshift/installer/pkg/asset/machines/baremetal"
"github.com/openshift/installer/pkg/asset/machines/gcp"
"github.com/openshift/installer/pkg/asset/machines/ibmcloud"
"github.com/openshift/installer/pkg/asset/machines/libvirt"
"github.com/openshift/installer/pkg/asset/machines/machineconfig"
"github.com/openshift/installer/pkg/asset/machines/nutanix"
"github.com/openshift/installer/pkg/asset/machines/openstack"
"github.com/openshift/installer/pkg/asset/machines/ovirt"
"github.com/openshift/installer/pkg/asset/machines/powervs"
"github.com/openshift/installer/pkg/asset/machines/vsphere"
"github.com/openshift/installer/pkg/asset/rhcos"
rhcosutils "github.com/openshift/installer/pkg/rhcos"
"github.com/openshift/installer/pkg/types"
alibabacloudtypes "github.com/openshift/installer/pkg/types/alibabacloud"
awstypes "github.com/openshift/installer/pkg/types/aws"
awsdefaults "github.com/openshift/installer/pkg/types/aws/defaults"
azuretypes "github.com/openshift/installer/pkg/types/azure"
azuredefaults "github.com/openshift/installer/pkg/types/azure/defaults"
baremetaltypes "github.com/openshift/installer/pkg/types/baremetal"
externaltypes "github.com/openshift/installer/pkg/types/external"
gcptypes "github.com/openshift/installer/pkg/types/gcp"
ibmcloudtypes "github.com/openshift/installer/pkg/types/ibmcloud"
libvirttypes "github.com/openshift/installer/pkg/types/libvirt"
nonetypes "github.com/openshift/installer/pkg/types/none"
nutanixtypes "github.com/openshift/installer/pkg/types/nutanix"
openstacktypes "github.com/openshift/installer/pkg/types/openstack"
ovirttypes "github.com/openshift/installer/pkg/types/ovirt"
powervstypes "github.com/openshift/installer/pkg/types/powervs"
vspheretypes "github.com/openshift/installer/pkg/types/vsphere"
ibmcloudapi "github.com/openshift/machine-api-provider-ibmcloud/pkg/apis"
ibmcloudprovider "github.com/openshift/machine-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1"
mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
)
const (
    // workerMachineSetFileName is the format string for constructing the worker MachineSet filenames.
    // The %s verb is filled with a zero-padded index (or "*" for glob patterns).
    workerMachineSetFileName = "99_openshift-cluster-api_worker-machineset-%s.yaml"

    // workerMachineFileName is the format string for constructing the worker Machine filenames.
    workerMachineFileName = "99_openshift-cluster-api_worker-machines-%s.yaml"

    // workerUserDataFileName is the filename used for the worker user-data secret.
    workerUserDataFileName = "99_openshift-cluster-api_worker-user-data-secret.yaml"

    // decimalRootVolumeSize is the root-volume size in GB we use for some platforms.
    // See below.
    decimalRootVolumeSize = 120

    // powerOfTwoRootVolumeSize is the size in GB we use for other platforms.
    // The reasons for the specific choices between these two may boil down
    // to which section of code the person adding a platform was copy-pasting from.
    // https://github.com/openshift/openshift-docs/blob/main/modules/installation-requirements-user-infra.adoc#minimum-resource-requirements
    powerOfTwoRootVolumeSize = 128
)
var (
    // Glob patterns used by Load to fetch previously generated manifests.
    workerMachineSetFileNamePattern = fmt.Sprintf(workerMachineSetFileName, "*")
    workerMachineFileNamePattern    = fmt.Sprintf(workerMachineFileName, "*")

    // Compile-time assertion that *Worker satisfies asset.WritableAsset.
    _ asset.WritableAsset = (*Worker)(nil)
)
// defaultAWSMachinePoolPlatform returns the default AWS machine-pool
// settings for the named pool: a decimal-sized EBS root volume whose type
// depends on the pool role.
func defaultAWSMachinePoolPlatform(poolName string) awstypes.MachinePool {
    // gp3 is not offered in all local-zones locations used by Edge Pools.
    // Once it is available, it can be used as default for all machine pools.
    // https://aws.amazon.com/about-aws/global-infrastructure/localzones/features
    volumeType := awstypes.VolumeTypeGp3
    if poolName == types.MachinePoolEdgeRoleName {
        volumeType = awstypes.VolumeTypeGp2
    }
    pool := awstypes.MachinePool{}
    pool.EC2RootVolume = awstypes.EC2RootVolume{
        Type: volumeType,
        Size: decimalRootVolumeSize,
    }
    return pool
}
// defaultLibvirtMachinePoolPlatform returns the default libvirt machine-pool
// settings (libvirt has no defaults beyond the zero value).
func defaultLibvirtMachinePoolPlatform() libvirttypes.MachinePool {
    return libvirttypes.MachinePool{}
}
// defaultAzureMachinePoolPlatform returns the default Azure machine-pool
// settings: a 128 GB OS disk of the platform's default disk type.
func defaultAzureMachinePoolPlatform() azuretypes.MachinePool {
    return azuretypes.MachinePool{
        OSDisk: azuretypes.OSDisk{
            DiskSizeGB: powerOfTwoRootVolumeSize,
            DiskType:   azuretypes.DefaultDiskType,
        },
    }
}
// defaultGCPMachinePoolPlatform returns the default GCP machine-pool
// settings: an architecture-appropriate instance type and a 128 GB pd-ssd
// OS disk.
func defaultGCPMachinePoolPlatform(arch types.Architecture) gcptypes.MachinePool {
    return gcptypes.MachinePool{
        InstanceType: icgcp.DefaultInstanceTypeForArch(arch),
        OSDisk: gcptypes.OSDisk{
            DiskSizeGB: powerOfTwoRootVolumeSize,
            DiskType:   "pd-ssd",
        },
    }
}
// defaultIBMCloudMachinePoolPlatform returns the default IBM Cloud
// machine-pool settings (bx2-4x16: 4 vCPU / 16 GB profile).
func defaultIBMCloudMachinePoolPlatform() ibmcloudtypes.MachinePool {
    return ibmcloudtypes.MachinePool{
        InstanceType: "bx2-4x16",
    }
}
// defaultOpenStackMachinePoolPlatform returns the default OpenStack
// machine-pool settings. The single empty-string zone means "no explicit
// availability zone" and simplifies later per-zone iteration.
func defaultOpenStackMachinePoolPlatform() openstacktypes.MachinePool {
    return openstacktypes.MachinePool{
        Zones: []string{""},
    }
}
// defaultBareMetalMachinePoolPlatform returns the default bare-metal
// machine-pool settings (no defaults beyond the zero value).
func defaultBareMetalMachinePoolPlatform() baremetaltypes.MachinePool {
    return baremetaltypes.MachinePool{}
}
// defaultOvirtMachinePoolPlatform returns the default oVirt machine-pool
// settings: 4 cores on one socket, a server-type VM with a 120 GB disk and
// no auto-pinning.
func defaultOvirtMachinePoolPlatform() ovirttypes.MachinePool {
    return ovirttypes.MachinePool{
        CPU: &ovirttypes.CPU{
            Cores:   4,
            Sockets: 1,
            Threads: 1,
        },
        // NOTE(review): 16348 looks like a transposition of 16384 (16 GiB),
        // which the other platforms use — confirm before changing, as the
        // value is user-visible in generated manifests.
        MemoryMB: 16348,
        OSDisk: &ovirttypes.Disk{
            SizeGB: decimalRootVolumeSize,
        },
        VMType:            ovirttypes.VMTypeServer,
        AutoPinningPolicy: ovirttypes.AutoPinningNone,
    }
}
// defaultVSphereMachinePoolPlatform returns the default vSphere machine-pool
// settings: 4 vCPUs (4 cores per socket), 16 GiB memory, 120 GB OS disk.
func defaultVSphereMachinePoolPlatform() vspheretypes.MachinePool {
    return vspheretypes.MachinePool{
        NumCPUs:           4,
        NumCoresPerSocket: 4,
        MemoryMiB:         16384,
        OSDisk: vspheretypes.OSDisk{
            DiskSizeGB: decimalRootVolumeSize,
        },
    }
}
// defaultPowerVSMachinePoolPlatform returns the default Power VS
// machine-pool settings: 32 GiB memory, 0.5 shared processors on an s922
// system type.
func defaultPowerVSMachinePoolPlatform() powervstypes.MachinePool {
    return powervstypes.MachinePool{
        MemoryGiB:  32,
        Processors: intstr.FromString("0.5"),
        ProcType:   machinev1.PowerVSProcessorTypeShared,
        SysType:    "s922",
    }
}
// defaultNutanixMachinePoolPlatform returns the default Nutanix machine-pool
// settings: 4 vCPUs (1 core per socket), 16 GiB memory, 120 GiB OS disk.
func defaultNutanixMachinePoolPlatform() nutanixtypes.MachinePool {
    return nutanixtypes.MachinePool{
        NumCPUs:           4,
        NumCoresPerSocket: 1,
        MemoryMiB:         16384,
        OSDisk: nutanixtypes.OSDisk{
            DiskSizeGiB: decimalRootVolumeSize,
        },
    }
}
// awsSetPreferredInstanceByEdgeZone discovers supported instanceType for each edge pool
// using the existing preferred instance list used by worker compute pool.
// Each machine set in the edge pool, created for each zone, can use different instance
// types depending on the instance offerings in the location (Local Zones).
// Zones for which no preferred type can be resolved are skipped with a
// warning and must be fixed up in the MachineSet manifest by hand.
// It currently always returns (true, nil); the signature is kept for callers.
func awsSetPreferredInstanceByEdgeZone(ctx context.Context, defaultTypes []string, meta *icaws.Metadata, zones icaws.Zones) (ok bool, err error) {
    for zone := range zones {
        preferredType, err := aws.PreferredInstanceType(ctx, meta, defaultTypes, []string{zone})
        if err != nil {
            logrus.Warn(errors.Wrap(err, fmt.Sprintf("unable to select instanceType on the zone[%v] from the preferred list: %v. You must update the MachineSet manifest", zone, defaultTypes)))
            continue
        }
        // Guard against a nil map value before dereferencing it below.
        // (Ranging over the map proves the key exists, so the previous
        // `if _, ok := zones[zone]; !ok` existence check could never fire;
        // the stored *Zone, however, may still be nil.)
        if zones[zone] == nil {
            zones[zone] = &icaws.Zone{Name: zone}
        }
        zones[zone].PreferredInstanceType = preferredType
    }
    return true, nil
}
// Worker generates the machinesets for `worker` machine pool.
type Worker struct {
    UserDataFile       *asset.File   // the worker user-data secret manifest
    MachineConfigFiles []*asset.File // optional MachineConfigs (hyperthreading, SSH, FIPS, ...)
    MachineSetFiles    []*asset.File // one MachineSet manifest per generated set
    MachineFiles       []*asset.File // standalone Machine manifests (vSphere static-IP case)
}
// Name returns a human friendly name for the Worker Asset.
func (w *Worker) Name() string {
    const assetName = "Worker Machines"
    return assetName
}
// Dependencies returns all of the dependencies directly needed by the
// Worker asset.
func (w *Worker) Dependencies() []asset.Asset {
    return []asset.Asset{
        &installconfig.ClusterID{},
        // PlatformCredsCheck just checks the creds (and asks, if needed)
        // We do not actually use it in this asset directly, hence
        // it is put in the dependencies but not fetched in Generate
        &installconfig.PlatformCredsCheck{},
        &installconfig.InstallConfig{},
        new(rhcos.Image),
        new(rhcos.Release),
        &machine.Worker{},
    }
}
// Generate generates the Worker asset.
//
// It walks every compute pool in the install config, applies per-platform
// defaults, and renders: the worker user-data secret, any MachineConfigs
// (hyperthreading, SSH keys, FIPS, multipath, dual-stack kernel args), one
// MachineSet manifest per generated set, and — for vSphere with static IPs —
// standalone Machine manifests with the MachineSets scaled to zero.
//
// Fixes relative to the previous revision: two copy-pasted error messages
// said "master" where this asset only ever produces workers (the nutanix
// ValidateConfig wrap and the Machine-marshalling wrap).
func (w *Worker) Generate(dependencies asset.Parents) error {
    ctx := context.TODO()
    clusterID := &installconfig.ClusterID{}
    installConfig := &installconfig.InstallConfig{}
    rhcosImage := new(rhcos.Image)
    rhcosRelease := new(rhcos.Release)
    wign := &machine.Worker{}
    dependencies.Get(clusterID, installConfig, rhcosImage, rhcosRelease, wign)

    workerUserDataSecretName := "worker-user-data"

    machines := []machinev1beta1.Machine{}
    machineConfigs := []*mcfgv1.MachineConfig{}
    machineSets := []runtime.Object{}
    var err error
    ic := installConfig.Config
    for _, pool := range ic.Compute {
        pool := pool // this makes golint happy... G601: Implicit memory aliasing in for loop. (gosec)

        // Per-pool MachineConfigs, applied before platform dispatch.
        if pool.Hyperthreading == types.HyperthreadingDisabled {
            ignHT, err := machineconfig.ForHyperthreadingDisabled("worker")
            if err != nil {
                return errors.Wrap(err, "failed to create ignition for hyperthreading disabled for worker machines")
            }
            machineConfigs = append(machineConfigs, ignHT)
        }
        if ic.SSHKey != "" {
            ignSSH, err := machineconfig.ForAuthorizedKeys(ic.SSHKey, "worker")
            if err != nil {
                return errors.Wrap(err, "failed to create ignition for authorized SSH keys for worker machines")
            }
            machineConfigs = append(machineConfigs, ignSSH)
        }
        if ic.FIPS {
            ignFIPS, err := machineconfig.ForFIPSEnabled("worker")
            if err != nil {
                return errors.Wrap(err, "failed to create ignition for FIPS enabled for worker machines")
            }
            machineConfigs = append(machineConfigs, ignFIPS)
        }
        if ic.Platform.Name() == powervstypes.Name {
            // always enable multipath for powervs.
            ignMultipath, err := machineconfig.ForMultipathEnabled("worker")
            if err != nil {
                return errors.Wrap(err, "failed to create ignition for multipath enabled for worker machines")
            }
            machineConfigs = append(machineConfigs, ignMultipath)
        }
        // The maximum number of networks supported on ServiceNetwork is two, one IPv4 and one IPv6 network.
        // The cluster-network-operator handles the validation of this field.
        // Reference: https://github.com/openshift/cluster-network-operator/blob/fc3e0e25b4cfa43e14122bdcdd6d7f2585017d75/pkg/network/cluster_config.go#L45-L52
        if ic.Platform.Name() == openstacktypes.Name && len(installConfig.Config.ServiceNetwork) == 2 {
            // Only configure kernel args for dual-stack clusters.
            ignIPv6, err := machineconfig.ForDualStackAddresses("worker")
            if err != nil {
                return errors.Wrap(err, "failed to create ignition to configure IPv6 for worker machines")
            }
            machineConfigs = append(machineConfigs, ignIPv6)
        }

        // Platform dispatch: merge defaults with install-config overrides and
        // generate the MachineSets for this pool.
        switch ic.Platform.Name() {
        case alibabacloudtypes.Name:
            client, err := installConfig.AlibabaCloud.Client()
            if err != nil {
                return err
            }
            vswitchMaps, err := installConfig.AlibabaCloud.VSwitchMaps()
            if err != nil {
                return errors.Wrap(err, "failed to get VSwitchs map")
            }
            mpool := alibabacloudtypes.DefaultWorkerMachinePoolPlatform()
            mpool.ImageID = string(*rhcosImage)
            mpool.Set(ic.Platform.AlibabaCloud.DefaultMachinePlatform)
            mpool.Set(pool.Platform.AlibabaCloud)
            if len(mpool.Zones) == 0 {
                if len(vswitchMaps) > 0 {
                    for zone := range vswitchMaps {
                        mpool.Zones = append(mpool.Zones, zone)
                    }
                } else {
                    azs, err := client.GetAvailableZonesByInstanceType(mpool.InstanceType)
                    if err != nil || len(azs) == 0 {
                        return errors.Wrap(err, "failed to fetch availability zones")
                    }
                    mpool.Zones = azs
                }
            }
            pool.Platform.AlibabaCloud = &mpool
            sets, err := alibabacloud.MachineSets(
                clusterID.InfraID,
                ic,
                &pool,
                "worker",
                workerUserDataSecretName,
                installConfig.Config.Platform.AlibabaCloud.Tags,
                vswitchMaps,
            )
            if err != nil {
                return errors.Wrap(err, "failed to create worker machine objects")
            }
            for _, set := range sets {
                machineSets = append(machineSets, set)
            }
        case awstypes.Name:
            subnets := icaws.Subnets{}
            zones := icaws.Zones{}
            if len(ic.Platform.AWS.Subnets) > 0 {
                var subnetsMeta icaws.Subnets
                switch pool.Name {
                case types.MachinePoolEdgeRoleName:
                    subnetsMeta, err = installConfig.AWS.EdgeSubnets(ctx)
                    if err != nil {
                        return err
                    }
                default:
                    subnetsMeta, err = installConfig.AWS.PrivateSubnets(ctx)
                    if err != nil {
                        return err
                    }
                }
                for _, subnet := range subnetsMeta {
                    subnets[subnet.Zone.Name] = subnet
                }
            }
            mpool := defaultAWSMachinePoolPlatform(pool.Name)

            osImage := strings.SplitN(string(*rhcosImage), ",", 2)
            osImageID := osImage[0]
            if len(osImage) == 2 {
                osImageID = "" // the AMI will be generated later on
            }
            mpool.AMIID = osImageID

            mpool.Set(ic.Platform.AWS.DefaultMachinePlatform)
            mpool.Set(pool.Platform.AWS)
            zoneDefaults := false
            if len(mpool.Zones) == 0 {
                if len(subnets) > 0 {
                    for _, subnet := range subnets {
                        if subnet.Zone == nil {
                            return errors.Wrapf(err, "failed to find zone attributes for subnet %s", subnet.ID)
                        }
                        mpool.Zones = append(mpool.Zones, subnet.Zone.Name)
                        zones[subnet.Zone.Name] = subnets[subnet.Zone.Name].Zone
                    }
                } else {
                    mpool.Zones, err = installConfig.AWS.AvailabilityZones(ctx)
                    if err != nil {
                        return err
                    }
                    zoneDefaults = true
                }
            }

            // Requirements when using edge compute pools to populate machine sets.
            if pool.Name == types.MachinePoolEdgeRoleName {
                err = installConfig.AWS.SetZoneAttributes(ctx, mpool.Zones, zones)
                if err != nil {
                    return errors.Wrap(err, "failed to retrieve zone attributes for edge compute pool")
                }
                if pool.Replicas == nil || *pool.Replicas == 0 {
                    pool.Replicas = pointer.Int64(int64(len(mpool.Zones)))
                }
            }

            if mpool.InstanceType == "" {
                instanceTypes := awsdefaults.InstanceTypes(installConfig.Config.Platform.AWS.Region, installConfig.Config.ControlPlane.Architecture, configv1.HighlyAvailableTopologyMode)
                switch pool.Name {
                case types.MachinePoolEdgeRoleName:
                    ok, err := awsSetPreferredInstanceByEdgeZone(ctx, instanceTypes, installConfig.AWS, zones)
                    if err != nil {
                        return errors.Wrap(err, "failed to find default instance type for edge pool, you must define on the compute pool")
                    }
                    if !ok {
                        logrus.Warn(errors.Wrap(err, "failed to find preferred instance type for edge pool, using default"))
                        mpool.InstanceType = instanceTypes[0]
                    }
                default:
                    mpool.InstanceType, err = aws.PreferredInstanceType(ctx, installConfig.AWS, instanceTypes, mpool.Zones)
                    if err != nil {
                        logrus.Warn(errors.Wrapf(err, "failed to find default instance type for %s pool", pool.Name))
                        mpool.InstanceType = instanceTypes[0]
                    }
                }
            }
            // if the list of zones is the default we need to try to filter the list in case there are some zones where the instance might not be available
            if zoneDefaults {
                mpool.Zones, err = aws.FilterZonesBasedOnInstanceType(ctx, installConfig.AWS, mpool.InstanceType, mpool.Zones)
                if err != nil {
                    logrus.Warn(errors.Wrap(err, "failed to filter zone list"))
                }
            }

            pool.Platform.AWS = &mpool
            sets, err := aws.MachineSets(&aws.MachineSetInput{
                ClusterID:                clusterID.InfraID,
                InstallConfigPlatformAWS: installConfig.Config.Platform.AWS,
                Subnets:                  subnets,
                Zones:                    zones,
                Pool:                     &pool,
                Role:                     pool.Name,
                UserDataSecret:           workerUserDataSecretName,
            })
            if err != nil {
                return errors.Wrap(err, "failed to create worker machine objects")
            }
            for _, set := range sets {
                machineSets = append(machineSets, set)
            }
        case azuretypes.Name:
            mpool := defaultAzureMachinePoolPlatform()
            mpool.InstanceType = azuredefaults.ComputeInstanceType(
                installConfig.Config.Platform.Azure.CloudName,
                installConfig.Config.Platform.Azure.Region,
                pool.Architecture,
            )
            mpool.Set(ic.Platform.Azure.DefaultMachinePlatform)
            mpool.Set(pool.Platform.Azure)
            session, err := installConfig.Azure.Session()
            if err != nil {
                return errors.Wrap(err, "failed to fetch session")
            }
            client := icazure.NewClient(session)
            if len(mpool.Zones) == 0 {
                azs, err := client.GetAvailabilityZones(context.TODO(), ic.Platform.Azure.Region, mpool.InstanceType)
                if err != nil {
                    return errors.Wrap(err, "failed to fetch availability zones")
                }
                mpool.Zones = azs
                if len(azs) == 0 {
                    // if no azs are given we set to []string{""} for convenience over later operations.
                    // It means no-zoned for the machine API
                    mpool.Zones = []string{""}
                }
            }
            pool.Platform.Azure = &mpool

            capabilities, err := client.GetVMCapabilities(context.TODO(), mpool.InstanceType, installConfig.Config.Platform.Azure.Region)
            if err != nil {
                return err
            }
            useImageGallery := ic.Platform.Azure.CloudName != azuretypes.StackCloud
            sets, err := azure.MachineSets(clusterID.InfraID, ic, &pool, string(*rhcosImage), "worker", workerUserDataSecretName, capabilities, useImageGallery)
            if err != nil {
                return errors.Wrap(err, "failed to create worker machine objects")
            }
            for _, set := range sets {
                machineSets = append(machineSets, set)
            }
        case baremetaltypes.Name:
            mpool := defaultBareMetalMachinePoolPlatform()
            mpool.Set(ic.Platform.BareMetal.DefaultMachinePlatform)
            mpool.Set(pool.Platform.BareMetal)
            pool.Platform.BareMetal = &mpool

            // Use managed user data secret, since images used by MachineSet
            // are always up to date
            workerUserDataSecretName = "worker-user-data-managed"
            sets, err := baremetal.MachineSets(clusterID.InfraID, ic, &pool, "", "worker", workerUserDataSecretName)
            if err != nil {
                return errors.Wrap(err, "failed to create worker machine objects")
            }
            for _, set := range sets {
                machineSets = append(machineSets, set)
            }
        case gcptypes.Name:
            mpool := defaultGCPMachinePoolPlatform(pool.Architecture)
            mpool.Set(ic.Platform.GCP.DefaultMachinePlatform)
            mpool.Set(pool.Platform.GCP)
            if len(mpool.Zones) == 0 {
                azs, err := gcp.ZonesForInstanceType(ic.Platform.GCP.ProjectID, ic.Platform.GCP.Region, mpool.InstanceType)
                if err != nil {
                    return errors.Wrap(err, "failed to fetch availability zones")
                }
                mpool.Zones = azs
            }
            pool.Platform.GCP = &mpool
            sets, err := gcp.MachineSets(clusterID.InfraID, ic, &pool, string(*rhcosImage), "worker", workerUserDataSecretName)
            if err != nil {
                return errors.Wrap(err, "failed to create worker machine objects")
            }
            for _, set := range sets {
                machineSets = append(machineSets, set)
            }
        case ibmcloudtypes.Name:
            subnets := map[string]string{}
            if len(ic.Platform.IBMCloud.ComputeSubnets) > 0 {
                subnetMetas, err := installConfig.IBMCloud.ComputeSubnets(ctx)
                if err != nil {
                    return err
                }
                for _, subnet := range subnetMetas {
                    subnets[subnet.Zone] = subnet.Name
                }
            }
            mpool := defaultIBMCloudMachinePoolPlatform()
            mpool.Set(ic.Platform.IBMCloud.DefaultMachinePlatform)
            mpool.Set(pool.Platform.IBMCloud)
            if len(mpool.Zones) == 0 {
                azs, err := ibmcloud.AvailabilityZones(ic.Platform.IBMCloud.Region)
                if err != nil {
                    return errors.Wrap(err, "failed to fetch availability zones")
                }
                mpool.Zones = azs
            }
            pool.Platform.IBMCloud = &mpool
            sets, err := ibmcloud.MachineSets(clusterID.InfraID, ic, subnets, &pool, "worker", workerUserDataSecretName)
            if err != nil {
                return errors.Wrap(err, "failed to create worker machine objects")
            }
            for _, set := range sets {
                machineSets = append(machineSets, set)
            }
        case libvirttypes.Name:
            mpool := defaultLibvirtMachinePoolPlatform()
            mpool.Set(ic.Platform.Libvirt.DefaultMachinePlatform)
            mpool.Set(pool.Platform.Libvirt)
            pool.Platform.Libvirt = &mpool
            sets, err := libvirt.MachineSets(clusterID.InfraID, ic, &pool, "worker", workerUserDataSecretName)
            if err != nil {
                return errors.Wrap(err, "failed to create worker machine objects")
            }
            for _, set := range sets {
                machineSets = append(machineSets, set)
            }
        case openstacktypes.Name:
            mpool := defaultOpenStackMachinePoolPlatform()
            mpool.Set(ic.Platform.OpenStack.DefaultMachinePlatform)
            mpool.Set(pool.Platform.OpenStack)
            pool.Platform.OpenStack = &mpool

            imageName, _ := rhcosutils.GenerateOpenStackImageName(string(*rhcosImage), clusterID.InfraID)

            sets, err := openstack.MachineSets(clusterID.InfraID, ic, &pool, imageName, "worker", workerUserDataSecretName, nil)
            if err != nil {
                return errors.Wrap(err, "failed to create worker machine objects")
            }
            for _, set := range sets {
                machineSets = append(machineSets, set)
            }
        case vspheretypes.Name:
            mpool := defaultVSphereMachinePoolPlatform()
            mpool.Set(ic.Platform.VSphere.DefaultMachinePlatform)
            mpool.Set(pool.Platform.VSphere)
            pool.Platform.VSphere = &mpool
            templateName := clusterID.InfraID + "-rhcos"

            sets, err := vsphere.MachineSets(clusterID.InfraID, ic, &pool, templateName, "worker", workerUserDataSecretName)
            if err != nil {
                return errors.Wrap(err, "failed to create worker machine objects")
            }
            for _, set := range sets {
                machineSets = append(machineSets, set)
            }
            // If static IPs are configured, we must generate worker machines and scale the machinesets to 0.
            if ic.Platform.VSphere.Hosts != nil {
                logrus.Debug("Generating worker machines with static IPs.")
                templateName := clusterID.InfraID + "-rhcos"

                machines, err = vsphere.Machines(clusterID.InfraID, ic, &pool, templateName, "worker", workerUserDataSecretName)
                if err != nil {
                    return errors.Wrap(err, "failed to create worker machine objects")
                }
                logrus.Debugf("Generated %v worker machines.", len(machines))
                for _, ms := range sets {
                    ms.Spec.Replicas = pointer.Int32(0)
                }
            }
        case ovirttypes.Name:
            mpool := defaultOvirtMachinePoolPlatform()
            mpool.Set(ic.Platform.Ovirt.DefaultMachinePlatform)
            mpool.Set(pool.Platform.Ovirt)
            pool.Platform.Ovirt = &mpool

            imageName, _ := rhcosutils.GenerateOpenStackImageName(string(*rhcosImage), clusterID.InfraID)

            sets, err := ovirt.MachineSets(clusterID.InfraID, ic, &pool, imageName, "worker", workerUserDataSecretName)
            if err != nil {
                return errors.Wrap(err, "failed to create worker machine objects for ovirt provider")
            }
            for _, set := range sets {
                machineSets = append(machineSets, set)
            }
        case powervstypes.Name:
            mpool := defaultPowerVSMachinePoolPlatform()
            mpool.Set(ic.Platform.PowerVS.DefaultMachinePlatform)
            mpool.Set(pool.Platform.PowerVS)
            pool.Platform.PowerVS = &mpool
            sets, err := powervs.MachineSets(clusterID.InfraID, ic, &pool, "worker", "worker-user-data")
            if err != nil {
                return errors.Wrap(err, "failed to create worker machine objects for powervs provider")
            }
            for _, set := range sets {
                machineSets = append(machineSets, set)
            }
        case externaltypes.Name, nonetypes.Name:
        case nutanixtypes.Name:
            mpool := defaultNutanixMachinePoolPlatform()
            mpool.Set(ic.Platform.Nutanix.DefaultMachinePlatform)
            mpool.Set(pool.Platform.Nutanix)
            if err = mpool.ValidateConfig(ic.Platform.Nutanix); err != nil {
                // was "master machine objects" — copy-paste from the master asset
                return errors.Wrap(err, "failed to create worker machine objects")
            }
            pool.Platform.Nutanix = &mpool
            imageName := nutanixtypes.RHCOSImageName(clusterID.InfraID)

            sets, err := nutanix.MachineSets(clusterID.InfraID, ic, &pool, imageName, "worker", workerUserDataSecretName)
            if err != nil {
                return errors.Wrap(err, "failed to create worker machine objects")
            }
            for _, set := range sets {
                machineSets = append(machineSets, set)
            }
        default:
            return fmt.Errorf("invalid Platform")
        }
    }

    // Render the user-data secret and all manifests to asset files.
    data, err := userDataSecret(workerUserDataSecretName, wign.File.Data)
    if err != nil {
        return errors.Wrap(err, "failed to create user-data secret for worker machines")
    }
    w.UserDataFile = &asset.File{
        Filename: filepath.Join(directory, workerUserDataFileName),
        Data:     data,
    }
    w.MachineConfigFiles, err = machineconfig.Manifests(machineConfigs, "worker", directory)
    if err != nil {
        return errors.Wrap(err, "failed to create MachineConfig manifests for worker machines")
    }
    w.MachineSetFiles = make([]*asset.File, len(machineSets))
    // Zero-pad indexes so filenames sort in generation order.
    padFormat := fmt.Sprintf("%%0%dd", len(fmt.Sprintf("%d", len(machineSets))))
    for i, machineSet := range machineSets {
        data, err := yaml.Marshal(machineSet)
        if err != nil {
            return errors.Wrapf(err, "marshal worker %d", i)
        }
        padded := fmt.Sprintf(padFormat, i)
        w.MachineSetFiles[i] = &asset.File{
            Filename: filepath.Join(directory, fmt.Sprintf(workerMachineSetFileName, padded)),
            Data:     data,
        }
    }
    w.MachineFiles = make([]*asset.File, len(machines))
    for i, machineDef := range machines {
        data, err := yaml.Marshal(machineDef)
        if err != nil {
            // was "marshal master %d" — these are worker machines
            return errors.Wrapf(err, "marshal worker %d", i)
        }
        padded := fmt.Sprintf(padFormat, i)
        w.MachineFiles[i] = &asset.File{
            Filename: filepath.Join(directory, fmt.Sprintf(workerMachineFileName, padded)),
            Data:     data,
        }
    }
    return nil
}
// Files returns the files generated by the asset, in a stable order:
// user-data secret, MachineConfigs, MachineSets, then Machines.
func (w *Worker) Files() []*asset.File {
    // Capacity accounts for every slice appended below; the previous
    // revision omitted MachineFiles, forcing a reallocation whenever
    // standalone Machine manifests exist.
    files := make([]*asset.File, 0, 1+len(w.MachineConfigFiles)+len(w.MachineSetFiles)+len(w.MachineFiles))
    if w.UserDataFile != nil {
        files = append(files, w.UserDataFile)
    }
    files = append(files, w.MachineConfigFiles...)
    files = append(files, w.MachineSetFiles...)
    files = append(files, w.MachineFiles...)
    return files
}
// Load reads the asset files from disk.
// It reports found=false only when the worker user-data secret is absent;
// once that file exists, later fetch failures return found=true so the
// caller knows a partially written asset is on disk.
func (w *Worker) Load(f asset.FileFetcher) (found bool, err error) {
    file, err := f.FetchByName(filepath.Join(directory, workerUserDataFileName))
    if err != nil {
        if os.IsNotExist(err) {
            // No user-data secret means the asset was never generated.
            return false, nil
        }
        return false, err
    }
    w.UserDataFile = file

    w.MachineConfigFiles, err = machineconfig.Load(f, "worker", directory)
    if err != nil {
        return true, err
    }

    // Glob for the zero-padded MachineSet and Machine manifests.
    fileList, err := f.FetchByPattern(filepath.Join(directory, workerMachineSetFileNamePattern))
    if err != nil {
        return true, err
    }
    w.MachineSetFiles = fileList

    fileList, err = f.FetchByPattern(filepath.Join(directory, workerMachineFileNamePattern))
    if err != nil {
        return true, err
    }
    w.MachineFiles = fileList
    return true, nil
}
// MachineSets returns MachineSet manifest structures.
// Each stored MachineSet YAML file is unmarshalled and its raw provider-spec
// bytes are replaced with the decoded, typed provider-spec object so callers
// can inspect platform-specific fields.
func (w *Worker) MachineSets() ([]machinev1beta1.MachineSet, error) {
    // Register every provider-spec type the decoder may encounter.
    // NOTE(review): the AddToScheme/Install error returns are discarded;
    // registration normally only fails on programmer error, but confirm
    // that ignoring them matches the project's conventions.
    scheme := runtime.NewScheme()
    baremetalapi.AddToScheme(scheme)
    ibmcloudapi.AddToScheme(scheme)
    libvirtapi.AddToScheme(scheme)
    ovirtproviderapi.AddToScheme(scheme)
    scheme.AddKnownTypes(machinev1alpha1.GroupVersion,
        &machinev1alpha1.OpenstackProviderSpec{},
    )
    scheme.AddKnownTypes(machinev1beta1.SchemeGroupVersion,
        &machinev1beta1.AWSMachineProviderConfig{},
        &machinev1beta1.VSphereMachineProviderSpec{},
        &machinev1beta1.AzureMachineProviderSpec{},
        &machinev1beta1.GCPMachineProviderSpec{},
    )
    machinev1.Install(scheme)
    scheme.AddKnownTypes(machinev1.GroupVersion,
        &machinev1.AlibabaCloudMachineProviderConfig{},
        &machinev1.NutanixMachineProviderConfig{},
        &machinev1.PowerVSMachineProviderConfig{},
    )
    machinev1beta1.AddToScheme(scheme)
    // Universal decoder able to decode any of the registered group-versions.
    decoder := serializer.NewCodecFactory(scheme).UniversalDecoder(
        baremetalprovider.SchemeGroupVersion,
        ibmcloudprovider.SchemeGroupVersion,
        libvirtprovider.SchemeGroupVersion,
        machinev1.GroupVersion,
        machinev1alpha1.GroupVersion,
        ovirtprovider.SchemeGroupVersion,
        machinev1beta1.SchemeGroupVersion,
    )

    machineSets := []machinev1beta1.MachineSet{}
    for i, file := range w.MachineSetFiles {
        machineSet := &machinev1beta1.MachineSet{}
        // NOTE: &machineSet is a pointer-to-pointer; sigs.k8s.io/yaml
        // accepts this, and the code is kept byte-identical here.
        err := yaml.Unmarshal(file.Data, &machineSet)
        if err != nil {
            return machineSets, errors.Wrapf(err, "unmarshal worker %d", i)
        }
        // Decode the embedded raw provider spec into its typed object.
        obj, _, err := decoder.Decode(machineSet.Spec.Template.Spec.ProviderSpec.Value.Raw, nil, nil)
        if err != nil {
            return machineSets, errors.Wrapf(err, "unmarshal worker %d", i)
        }
        machineSet.Spec.Template.Spec.ProviderSpec.Value = &runtime.RawExtension{Object: obj}
        machineSets = append(machineSets, *machineSet)
    }

    return machineSets, nil
}
|
package mux
import (
"reflect"
"testing"
)
// TestExtractPathVars exercises extractPathVars: it should rewrite {name}
// segments to the {*} wildcard, record each variable's segment index and
// name, and reject malformed patterns (missing leading slash, slashes or
// invalid characters inside a variable, unterminated braces).
func TestExtractPathVars(t *testing.T) {
    type args struct {
        pattern string
    }
    tests := []struct {
        name     string
        args     args
        wantPath string    // expected rewritten path ("" on error)
        wantVars []pathVar // expected extracted variables (nil on error)
        wantErr  bool
    }{
        {"noVars", args{"/path/to/resource"}, "/path/to/resource", []pathVar{}, false},
        {"oneVar", args{"/path/{to}/resource"}, "/path/{*}/resource", []pathVar{{1, "to"}}, false},
        {"multiVars", args{"/test/{for}/{EXTRACT}"}, "/test/{*}/{*}", []pathVar{{1, "for"}, {2, "EXTRACT"}}, false},
        {"underscoreVar", args{"/test/{for_extract}"}, "/test/{*}", []pathVar{{1, "for_extract"}}, false},
        {"noLeadingSlash", args{"path/to/resource"}, "", nil, true},
        {"slashInWildcard", args{"/path/{t/o}/resource"}, "", nil, true},
        {"invalidCharacter", args{"/path/{%to$}/resource"}, "", nil, true},
        // Braces not spanning a whole segment are treated as literals.
        {"nonVarCurly", args{"/path/to/{re}source"}, "/path/to/{re}source", []pathVar{}, false},
        {"floatingNonVarCurly", args{"/path/to/re{sou}rce"}, "/path/to/re{sou}rce", []pathVar{}, false},
        {"unterminatedWildcard", args{"/path/to/{resource"}, "", nil, true},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            gotPath, gotVars, err := extractPathVars(tt.args.pattern)
            if (err != nil) != tt.wantErr {
                t.Errorf("extractPathVars() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if gotPath != tt.wantPath {
                t.Errorf("extractPathVars() gotPath = %v, want %v", gotPath, tt.wantPath)
            }
            if !reflect.DeepEqual(gotVars, tt.wantVars) {
                t.Errorf("extractPathVars() gotVars = %v, want %v", gotVars, tt.wantVars)
            }
        })
    }
}
|
package agent
import (
"sync"
"time"
"github.com/bryanl/dolb/kvs"
etcdclient "github.com/coreos/etcd/client"
"golang.org/x/net/context"
)
// Locker locks and blocks until it is unlocked.
type Locker interface {
    Lock() error   // acquire the lock, blocking until available
    Unlock() error // release the lock
}
// etcdLocker implements Locker on top of an etcd-backed key-value store by
// claiming a ".lock"-suffixed key.
type etcdLocker struct {
    context context.Context // request context for KV operations
    key     string          // base key; the lock lives at key + ".lock"
    who     string          // identity written into the lock key
    kv      kvs.KVS         // backing key-value store
}
// newEtcdLocker builds an etcdLocker that coordinates the lock derived from
// key on behalf of who, using the given key-value store.
func newEtcdLocker(context context.Context, key, who string, kv kvs.KVS) *etcdLocker {
    l := &etcdLocker{}
    l.context = context
    l.key = key
    l.who = who
    l.kv = kv
    return l
}
// Compile-time assertion that *etcdLocker satisfies Locker.
var _ Locker = &etcdLocker{}

// Lock polls the lock key until the wait loop exits, then writes the
// caller's identity to it.
//
// NOTE(review): the exit condition looks suspect. The loop breaks only when
// Get fails with etcd's ErrorCodeNodeExist — a create/set error code; a Get
// on a missing key would normally yield ErrorCodeKeyNotFound. Confirm the
// intended condition. The Get-then-Set sequence is also not atomic, so two
// agents could both believe they hold the lock.
func (el *etcdLocker) Lock() error {
    for {
        _, err := el.kv.Get(el.lockKey(), nil)
        if err != nil {
            if eerr, ok := err.(etcdclient.Error); ok && eerr.Code == etcdclient.ErrorCodeNodeExist {
                break
            }
        }
        // Poll every 100ms until the exit condition above is met.
        time.Sleep(100 * time.Millisecond)
    }

    _, err := el.kv.Set(el.lockKey(), el.who, nil)
    return err
}
// Unlock releases the lock by deleting the lock key. Note it does not verify
// that el.who is the current holder.
func (el *etcdLocker) Unlock() error {
    return el.kv.Delete(el.lockKey())
}
// lockKey derives the lock key name from the locker's base key.
func (el *etcdLocker) lockKey() string {
    return el.key + ".lock"
}
// memLocker is an in-process Locker backed by a sync.Mutex, for use when no
// distributed coordination is needed (e.g. tests or single-agent setups).
type memLocker struct {
    mu sync.Mutex
}

// Compile-time assertion that *memLocker satisfies Locker.
var _ Locker = &memLocker{}
// Lock acquires the in-process mutex; it never returns a non-nil error.
func (ml *memLocker) Lock() error {
    ml.mu.Lock()
    return nil
}
// Unlock releases the in-process mutex; it never returns a non-nil error.
func (ml *memLocker) Unlock() error {
    ml.mu.Unlock()
    return nil
}
|
package main
// TODO: we shouldn't need to prefix Go command with path, it should honor the PATH when sshing in
// there may be an issue with the ssh library, or possibly just something misconfigured with the pi we're testing on
import (
"encoding/json"
"fmt"
"github.com/ClarityServices/skynet2"
"github.com/ClarityServices/skynet2/log"
"go/build"
"io/ioutil"
"os/exec"
"path"
"strings"
)
// builder drives a remote (or local) build and deploy of the current Go
// command package, configured from a JSON build config file.
type builder struct {
    BuildConfig  buildConfig  `json:"Build"`  // how/where to build
    DeployConfig deployConfig `json:"Deploy"` // how/where to deploy
    term         Terminal                     // shell on the build machine (local or SSH)
    scm          Scm                          // source-control driver for fetching/updating code
    projectPath  string                       // path of the project on the build machine
    pack         *build.Package               // the package being built; must be a command
}
// buildConfig describes the build machine and how to produce the binary.
type buildConfig struct {
    Host string // build machine address; local builds when isHostLocal(Host)
    User string // SSH user for remote builds
    Jail string // directory on the build machine that must exist

    // Environment passed to the build.
    CgoCFlags  string `json:"CGO_CFLAGS"`
    CgoLdFlags string `json:"CGO_LDFLAGS"`
    GoRoot     string
    GoPath     string

    // Source checkout settings.
    AppRepo    string
    AppPath    string
    RepoType   string
    RepoBranch string

    // Build behavior flags.
    UpdatePackages    bool
    BuildAllPackages  bool
    RunTests          bool
    TestSkynet        bool
    PreBuildCommands  []string // shell commands run before the build
    PostBuildCommands []string // shell commands run after the build
}
// deployConfig describes where and as whom the built binary is installed.
type deployConfig struct {
    DeployPath string // destination directory on the target hosts

    // TODO: Should default to directory name if not supplied
    BinaryName string
    User       string // user to deploy as
}
var context = build.Default
// newBuilder reads and parses the JSON build configuration (defaulting
// to ./build.cfg), opens a terminal to the build machine — a local
// shell when the configured host is local, SSH otherwise — and checks
// that the current directory holds a buildable command package.
// Every failure is fatal.
func newBuilder(config string) *builder {
	if config == "" {
		config = "./build.cfg"
	}
	raw, err := ioutil.ReadFile(config)
	if err != nil {
		log.Fatal("Failed to read: " + config)
	}
	b := new(builder)
	if err = json.Unmarshal(raw, b); err != nil {
		log.Fatal("Failed to parse " + config + ": " + err.Error())
	}
	fmt.Println("Connecting to build machine: " + b.BuildConfig.Host)
	if isHostLocal(b.BuildConfig.Host) {
		b.term = new(LocalTerminal)
	} else {
		sshClient := new(SSHConn)
		b.term = sshClient
		if err = sshClient.Connect(b.BuildConfig.Host, b.BuildConfig.User); err != nil {
			log.Fatal("Failed to connect to build machine: " + err.Error())
		}
	}
	b.validatePackage()
	return b
}
// Build compiles the project described by the given config file on the
// configured build machine, closing the terminal when done.
func Build(config string) {
	b := newBuilder(config)
	defer b.term.Close()
	b.performBuild()
}
// Deploy copies the built binary to every host matching the criteria,
// closing the terminal when done.
func Deploy(config string, criteria *skynet.Criteria) {
	b := newBuilder(config)
	defer b.term.Close()
	b.deploy(getHosts(criteria))
}
// performBuild runs the full build pipeline on the build machine:
// checkout/update, toolchain environment setup, pre-build hooks,
// dependency fetch, compile, optional tests, and post-build hooks.
// Nothing runs when the build environment fails validation.
func (b *builder) performBuild() {
	b.setupScm()
	if b.validateBuildEnvironment() {
		b.updateCode()
		// Export the toolchain environment for all subsequent commands.
		b.term.SetEnv("GOPATH", b.goPath())
		b.term.SetEnv("GOROOT", b.BuildConfig.GoRoot)
		b.term.SetEnv("CGO_CFLAGS", b.BuildConfig.CgoCFlags)
		b.term.SetEnv("CGO_LDFLAGS", b.BuildConfig.CgoLdFlags)
		b.runCommands(b.BuildConfig.PreBuildCommands)
		b.updateDependencies()
		b.buildProject()
		if b.BuildConfig.RunTests {
			b.runTests()
		}
		b.runCommands(b.BuildConfig.PostBuildCommands)
	}
}
// validatePackage ensures the package in the current working directory
// is a main (command) package — only commands can be deployed.
// Failures are fatal.
func (b *builder) validatePackage() {
	// Validate this package is a command
	var err error
	b.pack, err = context.ImportDir(".", 0)
	if err != nil {
		log.Fatal("Could not import package for validation")
	}
	if !b.pack.IsCommand() {
		log.Fatal("Package is not a command")
	}
}
// validateBuildEnvironment verifies that every directory and tool the
// build needs exists on the build machine (jail, GOROOT, the go binary
// and the scm binary). Each missing item is reported, and false is
// returned when any check fails.
func (b *builder) validateBuildEnvironment() bool {
	checks := []struct {
		cmd string // probe command run on the build terminal
		msg string // message prefix printed when the probe fails
	}{
		{"ls " + b.BuildConfig.Jail, "Could not find Jail directory: "},
		{"ls " + b.BuildConfig.GoRoot, "Could not find GOROOT directory: "},
		{"ls " + b.BuildConfig.GoRoot + "/bin/go", "Could not find Go binary: "},
		{"which " + b.scm.BinaryName(), "Could not find " + b.BuildConfig.RepoType + " binary: "},
	}
	valid := true
	for _, c := range checks {
		if _, err := b.term.Exec(c.cmd); err != nil {
			fmt.Println(c.msg + err.Error())
			valid = false
		}
	}
	return valid
}
// updateCode checks the project out of source control into the build
// jail, creating the target directory when it does not yet exist.
//
// BUG FIX: the error from ImportPathFromRepo was previously checked
// only after the (possibly empty) import path had already been used to
// derive projectPath; bail out first.
func (b *builder) updateCode() {
	p, err := b.scm.ImportPathFromRepo(b.BuildConfig.AppRepo)
	if err != nil {
		log.Fatal(err.Error())
	}
	b.projectPath = path.Join(b.BuildConfig.Jail, "src", p)
	out, err := b.term.Exec("ls " + b.projectPath)
	if err != nil {
		fmt.Println("Creating project directories")
		out, err = b.term.Exec("mkdir -p " + b.projectPath)
		if err != nil {
			log.Fatal("Could not create project directories")
		}
		fmt.Println(string(out))
	}
	// Fetch code base
	b.scm.SetTerminal(b.term)
	b.scm.Checkout(b.BuildConfig.AppRepo, b.BuildConfig.RepoBranch, b.projectPath)
}
// setupScm selects the source-control implementation named by the
// build configuration. Only git is currently supported; anything else
// is fatal.
//
// FIX: corrected the "unkown" typo in the fatal message and included
// the offending value for easier debugging.
func (b *builder) setupScm() {
	switch b.BuildConfig.RepoType {
	case "git":
		b.scm = new(GitScm)
	default:
		log.Fatal("unknown RepoType: " + b.BuildConfig.RepoType)
	}
}
// updateDependencies fetches the Go dependencies of the application
// package inside the jail.
func (b *builder) updateDependencies() {
	b.getPackageDependencies(path.Join(b.projectPath, b.BuildConfig.AppPath))
}
// buildProject runs `go install` for the application package inside
// the jail, adding -a when a rebuild of all packages was requested.
// A failed build is fatal.
func (b *builder) buildProject() {
	dir := path.Join(b.projectPath, b.BuildConfig.AppPath)
	flags := "-v"
	if b.BuildConfig.BuildAllPackages {
		flags += " -a"
	}
	fmt.Println("Building packages")
	out, err := b.term.ExecPath(b.BuildConfig.GoRoot+"/bin/go install "+flags, dir)
	fmt.Println(string(out))
	if err != nil {
		log.Fatal("Failed build: " + err.Error())
	}
}
// runTests executes `go test` for the application package and, when
// configured, the skynet framework's tests as well. Failures are fatal.
func (b *builder) runTests() {
	dir := path.Join(b.projectPath, b.BuildConfig.AppPath)
	fmt.Println("Testing packages")
	out, err := b.term.ExecPath(b.BuildConfig.GoRoot+"/bin/go test", dir)
	fmt.Println(string(out))
	if err != nil {
		log.Fatal("Failed tests: " + err.Error())
	}
	if !b.BuildConfig.TestSkynet {
		return
	}
	b.testSkynet()
}
// testSkynet fetches the skynet framework's dependencies and runs its
// full test suite inside the jail. Failures are fatal.
func (b *builder) testSkynet() {
	fmt.Println("Testing Skynet")
	// NOTE: the framework's location inside the jail is hard-coded.
	p := path.Join(b.BuildConfig.Jail, "src/github.com/ClarityServices/skynet2")
	b.getPackageDependencies(p)
	out, err := b.term.ExecPath(b.BuildConfig.GoRoot+"/bin/go test ./...", p)
	fmt.Println(string(out))
	if err != nil {
		log.Fatal("Failed tests: " + err.Error())
	}
}
// getPackageDependencies downloads (go get -d) the dependencies of the
// package tree rooted at p, adding -u when updates were requested.
// A fetch failure is fatal.
func (b *builder) getPackageDependencies(p string) {
	flags := "-d"
	if b.BuildConfig.UpdatePackages {
		flags += " -u"
	}
	fmt.Println("Fetching dependencies")
	out, err := b.term.ExecPath(b.BuildConfig.GoRoot+"/bin/go get "+flags+" ./...", p)
	fmt.Println(string(out))
	if err != nil {
		log.Fatal("Failed to fetch dependencies\n" + err.Error())
	}
}
// runCommands executes each shell command in order on the build
// terminal, echoing its output. The first failing command is fatal.
func (b *builder) runCommands(cmds []string) {
	for _, c := range cmds {
		out, err := b.term.Exec(c)
		fmt.Println(string(out))
		if err != nil {
			log.Fatal("Failed to execute dependent command: " + c + "\n" + err.Error())
		}
	}
}
// goPath builds the GOPATH used for the build: the jail first,
// followed by any user-configured extra entries.
func (b *builder) goPath() string {
	gopath := b.BuildConfig.Jail
	if extra := b.BuildConfig.GoPath; extra != "" {
		gopath += ":" + extra
	}
	return gopath
}
// deploy pushes the built binary to each target host, choosing one of
// four copy strategies depending on whether the build machine and the
// target are local or remote. In every case the currently installed
// binary is first renamed to "<name>.old" (best-effort, since it may
// not exist on a first deploy) so the copy succeeds even while the old
// binary is running. A failed copy is fatal.
//
// BUG FIX: the remote→remote branch declared a new `out` with `:=`,
// shadowing the outer variable, so the scp output printed after the
// branches was always empty; it now assigns with `=`. The local→local
// branch also treated a failed `mv` as fatal while every other branch
// ignored it; it is now best-effort everywhere.
func (b *builder) deploy(hosts []string) {
	// Paths reused by every strategy.
	binSrc := path.Join(b.BuildConfig.Jail, "bin", path.Base(b.BuildConfig.AppPath))
	binDst := path.Join(b.DeployConfig.DeployPath, b.DeployConfig.BinaryName)
	binOld := path.Join(b.DeployConfig.DeployPath, b.DeployConfig.BinaryName+".old")
	for _, host := range hosts {
		var out []byte
		var err error
		switch {
		case isHostLocal(host) && isHostLocal(b.BuildConfig.Host):
			// Built locally, deploying locally
			fmt.Println("Copying local binary")
			// First move binary to .old in case it's currently running
			command := exec.Command("mv", binDst, binOld)
			out, _ = command.CombinedOutput()
			fmt.Println(string(out))
			command = exec.Command("cp", binSrc, binDst)
			out, err = command.CombinedOutput()
		case isHostLocal(host):
			// Built remotely, deploying locally
			fmt.Println("Copying binary from build machine")
			h, p := splitHostPort(b.BuildConfig.Host)
			// First move binary to .old in case it's currently running
			command := exec.Command("mv", binDst, binOld)
			out, _ = command.CombinedOutput()
			fmt.Println(string(out))
			command = exec.Command("scp", "-P", p, b.BuildConfig.User+"@"+h+":"+binSrc, binDst)
			out, err = command.CombinedOutput()
		case isHostLocal(b.BuildConfig.Host):
			// Built locally, deploying remotely
			fmt.Println("Pushing binary to host: " + host)
			h, p := splitHostPort(host)
			// First move binary to .old in case it's currently running
			command := exec.Command("ssh", "-p", p, b.DeployConfig.User+"@"+h, "mv", binDst, binOld)
			out, _ = command.CombinedOutput()
			fmt.Println(string(out))
			command = exec.Command("scp", "-P", p, binSrc, b.DeployConfig.User+"@"+h+":"+binDst)
			out, err = command.CombinedOutput()
		default:
			// Built remotely, deployed remotely: drive the copy from the build box.
			fmt.Println("Pushing binary from build box to host: " + host)
			h, p := splitHostPort(host)
			// First move binary to .old in case it's currently running
			out, _ = b.term.Exec("ssh -p " + p + " " + b.DeployConfig.User + "@" + h + " mv " + binDst + " " + binOld)
			fmt.Println(string(out))
			out, err = b.term.Exec("scp -P " + p + " " + binSrc + " " + b.DeployConfig.User + "@" + h + ":" + binDst)
		}
		fmt.Println(string(out))
		if err != nil {
			log.Fatal("Failed to deploy: " + err.Error())
		}
	}
}
// splitHostPort separates a "host:port" address into host and port,
// defaulting the port to the SSH default "22" when none is present.
// Anything after a second colon is ignored (IPv6 literals are not
// supported).
func splitHostPort(host string) (string, string) {
	pieces := strings.Split(host, ":")
	if len(pieces) >= 2 {
		return pieces[0], pieces[1]
	}
	return pieces[0], "22"
}
// isHostLocal reports whether host refers to the local machine; an
// empty host string is treated as local.
func isHostLocal(host string) bool {
	switch host {
	case "", "localhost", "127.0.0.1":
		return true
	}
	return false
}
|
package std
import (
_ "fmt"
"github.com/iansmith/tropical"
)
// DefaultMouseDispatch routes mouse events to interactors. When a
// policy accepts a MouseDown it becomes the focus policy and receives
// every event until the matching MouseUp.
type DefaultMouseDispatch struct {
	focusPolicy       tropical.MousePolicy  // policy driving the current protocol, nil when idle
	focusedInteractor tropical.Interactor   // interactor receiving the protocol, nil when idle
	FocusPolicies     []tropical.MousePolicy // candidate policies, asked in order
	Monitors          []tropical.MouseMonitor // observers notified of unfocused events
}
// NewDefaultMouseDispatch builds a dispatcher with the two built-in
// focus policies (drag takes precedence over click) and no monitors.
func NewDefaultMouseDispatch() tropical.MouseDispatch {
	return &DefaultMouseDispatch{
		FocusPolicies: []tropical.MousePolicy{&DraggerPolicy{}, &ClickerPolicy{}},
		Monitors:      []tropical.MouseMonitor{},
	}
}
// ClickerPolicy focuses Clicker targets: a MouseDown starts the
// protocol and a MouseUp inside the target's bounds fires Click.
type ClickerPolicy struct {
}

// DraggerPolicy focuses Dragger targets, remembering where the drag
// started so offsets can be reported relative to that origin.
type DraggerPolicy struct {
	startX, startY int // event position recorded at MouseDown
}
// Start reports whether this policy takes over the event: the target
// must implement Clicker and the event must be a MouseDown.
func (m *ClickerPolicy) Start(event tropical.Event, target tropical.Interactor) bool {
	if event.Type() != tropical.MouseDown {
		return false
	}
	_, isClicker := target.(tropical.Clicker)
	return isClicker
}
// Process handles one event of a click protocol on a focused Clicker.
// The event is converted into the target's local coordinates for a
// bounds check; Click fires only when the MouseUp lands inside the
// target. The event is restored to global coordinates before returning.
func (m *ClickerPolicy) Process(event tropical.Event, target tropical.Interactor) {
	focus := target.(tropical.Clicker) //this will panic if it's not a clicker!
	// we want to do a bounds check against the event so convert to local coords
	ToLocalFromGlobal(target, event)
	switch event.Type() {
	case tropical.MouseDown:
		//ignored, because we only get here if we have picked so there is nothing
		//to do until the mouse up
	case tropical.MouseUp:
		if event.X() < 0 || event.Y() < 0 || event.X() >= target.Width() || event.Y() >= target.Height() {
			break //isn't inside the bounds
		}
		focus.Click()
	case tropical.MouseMove:
		//ignored
	default:
		print("clickerpolicy: unexpected event type ", event.Type().String(), "ignoring")
	}
	//restore event back to global coords
	ToGlobalFromLocal(target, event)
}
// Process handles one event of a drag protocol on a focused Dragger,
// reporting the cumulative offset from the origin recorded in Start.
// MouseDown begins the drag, MouseMove continues it, and MouseUp
// delivers the final offset and ends it.
func (d *DraggerPolicy) Process(event tropical.Event, target tropical.Interactor) {
	focus := target.(tropical.Dragger) //this will panic if it's not a dragger!
	// offsets are relative to the position captured at MouseDown
	diffX := event.X() - d.startX
	diffY := event.Y() - d.startY
	switch event.Type() {
	case tropical.MouseDown:
		focus.DragStart()
		focus.Drag(diffX, diffY)
	case tropical.MouseUp:
		focus.Drag(diffX, diffY)
		focus.DragEnd()
	case tropical.MouseMove:
		focus.Drag(diffX, diffY)
	default:
		print("draggerpolicy: unexpected event type ", event.Type().String(), "ignoring")
	}
}
// Start begins a drag protocol when the target implements Dragger and
// the event is a MouseDown; the event position is stored as the drag
// origin for later offset calculations.
func (d *DraggerPolicy) Start(event tropical.Event, target tropical.Interactor) bool {
	if _, isDragger := target.(tropical.Dragger); !isDragger {
		return false
	}
	if event.Type() != tropical.MouseDown {
		return false
	}
	d.startX, d.startY = event.X(), event.Y()
	return true
}
// Process routes one mouse event. While a focus protocol is active the
// event goes straight to the focus policy (and a MouseUp ends the
// protocol). Otherwise the event is picked against the interactor tree
// and each focus policy is offered the chance to start a protocol on a
// hit; monitors are then notified (only on this unfocused path,
// matching the original early return).
//
// FIX: removed a redundant nested re-check of focusedInteractor != nil
// and merged the two separate MouseUp tests into one.
func (d *DefaultMouseDispatch) Process(event tropical.Event, root tropical.RootInteractor) {
	//
	// focusedInteractor!=nil implies that somebody captured the start of a mouse
	// protocol and wanted to focus it on that object
	//
	if d.focusedInteractor != nil {
		d.focusPolicy.Process(event, d.focusedInteractor)
		if event.Type() == tropical.MouseUp {
			// MouseUp ends the protocol: drop both policy and focus.
			d.focusPolicy = nil
			d.focusedInteractor = nil
		}
		return
	}
	list := root.Pick(event)
outer:
	for _, picked := range list.Hits() {
		for _, candidate := range d.FocusPolicies {
			if candidate.Start(event, picked) {
				d.focusedInteractor = picked
				d.focusPolicy = candidate
				d.focusPolicy.Process(event, picked)
				ToGlobalFromLocal(picked, event)
				break outer
			}
		}
	}
	//allow monitors to also get the info
	for _, mon := range d.Monitors {
		switch event.Type() {
		case tropical.MouseDown:
			mon.MouseDown(event)
		case tropical.MouseUp:
			mon.MouseUp(event)
		case tropical.MouseMove:
			mon.MouseMove(event)
		default:
			print("monitor: unexpected event type ", event.Type().String(), "ignoring")
		}
	}
}
|
package cmd
import (
"fmt"
"github.com/davecgh/go-spew/spew"
"github.com/mitchellh/colorstring"
"github.com/nwlucas/proj-cli/env"
"github.com/spf13/cobra"
)
// cmdDebug is the "debug" subcommand; it dumps the application
// environment and configuration via Debug.
var cmdDebug = &cobra.Command{
	Use:   "debug",
	Short: "Spews out debug info.",
	Long: `This is to be used for debug purposes.
It should dump the application environment as well as core variables at the time of running.`,
	Run: func(cmd *cobra.Command, args []string) {
		Debug()
	},
}
// Debug prints the process environment variables, Go runtime details
// and the loaded configuration to stdout with colorized section
// headers.
func Debug() {
	colorstring.Printf("[blue]Debug Information.\n")
	colorstring.Printf(" [blue]Environment:\n")
	for _, val := range env.OsEnv {
		fmt.Printf(" - %s \n", val)
	}
	colorstring.Printf(" [cyan]Go Specific:\n")
	fmt.Println(" - GO Version: ", env.RunEnv.GoVersion)
	fmt.Println(" - GO Operating System(GOOS): ", env.RunEnv.GoOs)
	fmt.Println(" - GO Root(GOROOT): ", env.RunEnv.GoRoot)
	fmt.Println(" - GO Processors: ", env.RunEnv.NumCPU)
	colorstring.Printf(" [green]Config:\n")
	spew.Dump(env.Cfg)
	// for k, v := range m {
	// 	fmt.Println(" - %v = %v", k, v)
	// }
}
|
package relay
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
"google.golang.org/grpc"
"github.com/batchcorp/collector-schemas/build/go/protos/records"
"github.com/batchcorp/collector-schemas/build/go/protos/services"
"github.com/batchcorp/plumber/backends/rstreams/types"
)
var (
	// ErrMissingID is returned when a relay message lacks a redis-streams entry ID.
	ErrMissingID = errors.New("missing ID in relay message")
	// ErrMissingKeyName is returned when a relay message lacks a key name.
	ErrMissingKeyName = errors.New("missing Key in relay message")
	// ErrMissingStreamName is returned when a relay message lacks a stream name.
	ErrMissingStreamName = errors.New("missing Stream in relay message")
)
// handleRedisStreams converts a batch of RedisStreams relay messages
// to sink records and sends them to the GRPC collector server.
// (Comment previously referred to RedisPubSub by mistake.)
func (r *Relay) handleRedisStreams(ctx context.Context, conn *grpc.ClientConn, messages []interface{}) error {
	sinkRecords, err := r.convertMessagesToRedisStreamsSinkRecords(messages)
	if err != nil {
		return fmt.Errorf("unable to convert messages to redis-stream sink records: %s", err)
	}
	client := services.NewGRPCCollectorClient(conn)
	// Retry the RPC on transient failures; cap the outgoing payload size.
	return r.CallWithRetry(ctx, "AddRedisStreamsRecord", func(ctx context.Context) error {
		_, err := client.AddRedisStreamsRecord(ctx, &services.RedisStreamsRecordRequest{
			Records: sinkRecords,
		}, grpc.MaxCallSendMsgSize(MaxGRPCMessageSize))
		return err
	})
}
// validateRedisStreamsRelayMessage ensures all necessary values are
// present on a redis-streams relay message. Fields are checked in
// order (message, value, ID, key, stream) and the first missing one
// is reported.
func (r *Relay) validateRedisStreamsRelayMessage(msg *types.RelayMessage) error {
	switch {
	case msg == nil:
		return ErrMissingMessage
	case msg.Value == nil:
		return ErrMissingMessageValue
	case msg.ID == "":
		return ErrMissingID
	case msg.Key == "":
		return ErrMissingKeyName
	case msg.Stream == "":
		return ErrMissingStreamName
	}
	return nil
}
// convertMessagesToRedisStreamsSinkRecords creates records.RedisStreamsRecord
// protobufs from relay messages so they can be sent to the GRPC server.
// Messages that fail the type assertion or validation are logged and
// skipped rather than failing the whole batch.
// (Comment previously carried the name of a different function.)
func (r *Relay) convertMessagesToRedisStreamsSinkRecords(messages []interface{}) ([]*records.RedisStreamsRecord, error) {
	sinkRecords := make([]*records.RedisStreamsRecord, 0)
	for i, v := range messages {
		relayMessage, ok := v.(*types.RelayMessage)
		if !ok {
			r.log.Errorf("unable to type assert incoming message as RelayMessage (index: %d)", i)
			continue
		}
		if err := r.validateRedisStreamsRelayMessage(relayMessage); err != nil {
			r.log.Errorf("unable to validate redis-streams relay message: %s", err)
			continue
		}
		// Create a sink record
		sinkRecords = append(sinkRecords, &records.RedisStreamsRecord{
			Id:              relayMessage.ID,
			Key:             relayMessage.Key,
			Value:           string(relayMessage.Value),
			Stream:          relayMessage.Stream,
			Timestamp:       time.Now().UTC().UnixNano(),
			ForceDeadLetter: r.DeadLetter,
		})
	}
	return sinkRecords, nil
}
|
package module
import (
"fmt"
"roman/database"
rn "roman/proto/roman"
"strings"
roman "github.com/StefanSchroeder/Golang-Roman"
)
// QuestionUnit answers unit-amount questions by mapping unit names to
// roman-numeral values stored in the database.
type QuestionUnit struct {
	model          *database.Model       // backing store for unit lookups
	listUnitNumber []database.UnitNumber // units loaded for the current question
}
// generateListUnit walks the token chain and appends the value of each
// KEY_UNIT token to stringResult, comma-separated. The result keeps
// the leading comma the original recursion produced, so behavior is
// unchanged.
func (qu *QuestionUnit) generateListUnit(stringResult string, tokenAnalysis *rn.TokenAnalysis) string {
	for token := tokenAnalysis; token != nil; token = token.Next {
		if token.Key == KEY_UNIT {
			stringResult += "," + token.Value
		}
	}
	return stringResult
}
// SetListUnitNumber loads the unit records for the given unit names
// from the database and caches them on the QuestionUnit.
func (qu *QuestionUnit) SetListUnitNumber(listUnitName []string) {
	listUnitNumber := qu.model.GetListUnitNumber(listUnitName)
	qu.listUnitNumber = listUnitNumber
}
// GetRomanic returns the roman-numeral value recorded for the given
// unit name, or the empty string when the name is unknown.
func (qu *QuestionUnit) GetRomanic(value string) string {
	for i := range qu.listUnitNumber {
		if qu.listUnitNumber[i].Name == value {
			return qu.listUnitNumber[i].RomanValue
		}
	}
	return ""
}
// GenerateRomanText concatenates the roman-numeral values of the given
// unit names in order; unknown names contribute nothing.
func (qu *QuestionUnit) GenerateRomanText(arrayUnitText []string) string {
	var sb strings.Builder
	for _, name := range arrayUnitText {
		sb.WriteString(qu.GetRomanic(name))
	}
	return sb.String()
}
// GetRomanicText splits a comma-separated list of unit names, loads
// their records from the database, and returns the concatenated roman
// numeral. Note: a leading comma (as produced by generateListUnit)
// yields an empty first name, which simply contributes nothing.
func (qu *QuestionUnit) GetRomanicText(listUnitText string) string {
	arrayUnitText := strings.Split(listUnitText, ",")
	qu.SetListUnitNumber(arrayUnitText)
	return qu.GenerateRomanText(arrayUnitText)
}

// GetTotalAmount collects the unit tokens from the analysis chain,
// builds the combined roman numeral and converts it to an arabic
// amount.
func (qu *QuestionUnit) GetTotalAmount(tokenAnalysis *rn.TokenAnalysis) int {
	stringListUnit := qu.generateListUnit("", tokenAnalysis)
	amount := roman.Arabic(qu.GetRomanicText(stringListUnit))
	return amount
}
// GenerateResponseString renders "<unit names> is <amount>" using the
// currently loaded units; it returns the empty string when no units
// are loaded.
func (qu *QuestionUnit) GenerateResponseString(amount int) string {
	var names string
	for _, unit := range qu.listUnitNumber {
		names += " " + unit.Name
	}
	if names == "" {
		return names
	}
	return fmt.Sprintf("%s is %d", names, amount)
}
// Process answers a unit question: it totals the units named in the
// token chain and renders the response string.
func (qu *QuestionUnit) Process(tokenAnalysis *rn.TokenAnalysis) string {
	return qu.GenerateResponseString(qu.GetTotalAmount(tokenAnalysis))
}
// NewQuestionUnit builds a QuestionUnit wired to a fresh database model.
func NewQuestionUnit() *QuestionUnit {
	return &QuestionUnit{model: database.NewModel()}
}
|
package mat
// Shape is the interface implemented by every renderable primitive.
// It exposes the object's transform (with cached inverse and inverse
// transpose), material, parent link for group hierarchies, and the
// local-space intersection/normal routines used by the ray tracer.
type Shape interface {
	ID() int64
	GetTransform() Mat4x4
	GetInverse() Mat4x4
	GetInverseTranspose() Mat4x4
	SetTransform(transform Mat4x4)
	GetMaterial() Material
	SetMaterial(material Material)
	// IntersectLocal intersects a ray already expressed in object space.
	IntersectLocal(ray Ray) []Intersection
	// NormalAtLocal computes the surface normal at an object-space point.
	NormalAtLocal(point Tuple4, intersection *Intersection) Tuple4
	GetLocalRay() Ray
	GetParent() Shape
	SetParent(shape Shape)
	CastsShadow() bool
	Name() string
	// Init()
}
// WorldToObject converts a world-space point into shape's object space
// by applying ancestor inverse transforms from the root down, then the
// shape's own inverse.
func WorldToObject(shape Shape, point Tuple4) Tuple4 {
	if parent := shape.GetParent(); parent != nil {
		point = WorldToObject(parent, point)
	}
	return MultiplyByTuple(shape.GetInverse(), point)
}
// WorldToObjectPtr is the pointer-based variant of WorldToObject,
// writing the converted point into out. The recursion passes &point so
// each ancestor's result feeds directly into this level's multiply.
func WorldToObjectPtr(shape Shape, point Tuple4, out *Tuple4) {
	if shape.GetParent() != nil {
		WorldToObjectPtr(shape.GetParent(), point, &point)
	}
	i := shape.GetInverse()
	MultiplyByTuplePtr(&i, &point, out)
}
// NormalToWorld converts an object-space normal into world space,
// applying each level's inverse-transpose from the shape up through
// its ancestors, re-normalizing at every step.
func NormalToWorld(shape Shape, normal Tuple4) Tuple4 {
	normal = MultiplyByTuple(shape.GetInverseTranspose(), normal)
	normal[3] = 0.0 // set w to 0 — normals are directions, not points
	normal = Normalize(normal)
	if shape.GetParent() != nil {
		normal = NormalToWorld(shape.GetParent(), normal)
	}
	return normal
}
// NormalToWorldPtr is the in-place variant of NormalToWorld: the
// normal is transformed, re-normalized and propagated up the parent
// chain through the same pointer.
func NormalToWorldPtr(shape Shape, normal *Tuple4) {
	it := shape.GetInverseTranspose()
	MultiplyByTuplePtr(&it, normal, normal)
	normal[3] = 0.0 // set w to 0 — normals are directions, not points
	NormalizePtr(normal, normal)
	if shape.GetParent() != nil {
		NormalToWorldPtr(shape.GetParent(), normal)
	}
}
|
package auth
import (
"chlorine/cl"
"chlorine/storage"
"github.com/gorilla/sessions"
"log"
)
// GetMemberIfAuthorized resolves the member identified by the
// session's "MemberID" value. It returns (nil, false) when the session
// carries no usable member ID or the lookup fails; lookup failures are
// logged.
func GetMemberIfAuthorized(service cl.MemberService,
	session *sessions.Session) (*storage.Member, bool) {
	memberID, ok := session.Values["MemberID"].(int)
	if !ok {
		return nil, false
	}
	member, err := service.GetMember(memberID)
	if err != nil {
		log.Printf("auth: %s", err)
		return nil, false
	}
	return member, true
}
// IsMemberAdministrator reports whether the member holds the admin
// role. Role-lookup failures are treated as "not an administrator".
//
// FIX: the lookup error was silently swallowed; it is now logged, for
// consistency with GetMemberIfAuthorized.
func IsMemberAdministrator(service cl.MemberService, member *storage.Member) bool {
	role, err := service.GetMemberRole(*member.ID)
	if err != nil {
		log.Printf("auth: %s", err)
		return false
	}
	return role.IsAdmin
}
|
package testdata
import (
"github.com/frk/gosql/internal/testdata/common"
)
// SelectWithRecordNestedSliceQuery is a test fixture: a SELECT query
// type whose result set is a slice of nested record pointers, mapped
// to the "test_nested" relation under alias "n".
type SelectWithRecordNestedSliceQuery struct {
	Nesteds []*common.Nested `rel:"test_nested:n"`
}
|
package main
// Settings holds the bot's credentials and the Discord IDs it
// operates on.
type Settings struct {
	BotToken,
	YouTubeDeveloperKey,
	MusicGraphKey,
	GuildID,
	TextChannelID,
	VoiceChannelID string
}

// Genre is a music genre identifier/name pair.
type Genre struct {
	ID, Name string
}

// Song describes a queued track and its playback metadata.
type Song struct {
	ID, QueueID, Title, OrderedBy, Duration, Status, VideoURL string
	VK bool // true when the track is sourced from VK rather than YouTube
}

// PurgeMessage records a sent message so it can be deleted later.
type PurgeMessage struct {
	ID, ChannelID string
	TimeSent      int64 // unix timestamp of when the message was sent
}
// MGSong mirrors a MusicGraph track-search API response: a status
// envelope, pagination info and the matched track records.
type MGSong struct {
	Status struct {
		Code    int    `json:"code"`
		Message string `json:"message"`
		API     string `json:"api"`
	} `json:"status"`
	Pagination struct {
		Count  int `json:"count"`
		Total  int `json:"total"`
		Offset int `json:"offset"`
	} `json:"pagination"`
	Data []struct {
		Popularity          string  `json:"popularity"`
		TrackArtistID       string  `json:"track_artist_id"`
		PrimaryTempo        float64 `json:"primary_tempo,omitempty"`
		LabelName           string  `json:"label_name,omitempty"`
		TrackIndex          string  `json:"track_index"`
		Duration            int     `json:"duration"`
		MainGenre           string  `json:"main_genre"`
		TrackSpotifyID      string  `json:"track_spotify_id,omitempty"`
		EntityType          string  `json:"entity_type"`
		TrackRefID          string  `json:"track_ref_id"`
		ID                  string  `json:"id"`
		OriginalReleaseYear int     `json:"original_release_year"`
		TrackYoutubeID      string  `json:"track_youtube_id,omitempty"`
		ArtistName          string  `json:"artist_name"`
		Isrc                string  `json:"isrc,omitempty"`
		TrackArtistRefID    string  `json:"track_artist_ref_id,omitempty"`
		ReleaseYear         int     `json:"release_year"`
		TrackAlbumID        string  `json:"track_album_id"`
		Title               string  `json:"title"`
		Explicit            string  `json:"explicit,omitempty"`
		TrackAlbumRefID     string  `json:"track_album_ref_id,omitempty"`
		AlbumTitle          string  `json:"album_title"`
		Lyricist            []struct {
			ID   string `json:"id"`
			Name string `json:"name"`
		} `json:"lyricist,omitempty"`
		Composer []struct {
			ID   string `json:"id"`
			Name string `json:"name"`
		} `json:"composer,omitempty"`
		TrackMusicbrainzID string `json:"track_musicbrainz_id,omitempty"`
		Producer           []struct {
			ID   string `json:"id"`
			Name string `json:"name"`
		} `json:"producer,omitempty"`
		Writer []struct {
			ID   string `json:"id"`
			Name string `json:"name"`
		} `json:"writer,omitempty"`
	} `json:"data"`
}
// VKAuth mirrors a VK OAuth token response.
type VKAuth struct {
	AccessToken string `json:"access_token"`
}

// VKAudioSearch mirrors a VK audio.search API response.
type VKAudioSearch struct {
	Response struct {
		Count int `json:"count"`
		Items []struct {
			ID       int    `json:"id"`
			OwnerID  int    `json:"owner_id"`
			Artist   string `json:"artist"`
			Title    string `json:"title"`
			Duration int    `json:"duration"`
			Date     int    `json:"date"`
			URL      string `json:"url"`
			LyricsID int    `json:"lyrics_id,omitempty"`
			GenreID  int    `json:"genre_id,omitempty"`
			AlbumID  int    `json:"album_id,omitempty"`
		} `json:"items"`
	} `json:"response"`
}
|
package bson
import (
"reflect"
"github.com/EverythingMe/bson/bson"
"github.com/dvirsky/go-pylog/logging"
"github.com/EverythingMe/meduza/query"
"github.com/EverythingMe/meduza/transport"
)
// BsonProtocol serializes meduza queries and responses to and from
// BSON-bodied transport messages. It is stateless.
type BsonProtocol struct {
}
// read deserializes the BSON body of msg into v (a pointer).
// Unmarshal failures are logged and returned.
func read(msg transport.Message, v interface{}) error {
	if err := bson.Unmarshal(msg.Body, v); err != nil {
		return logging.Errorf("Could not unmarshal %s: %s", reflect.TypeOf(v), err)
	}
	return nil
}
// newMessage serializes v with BSON and wraps it in a transport
// message of the given type. On a marshaling failure an empty message
// and a logged error are returned.
func newMessage(v interface{}, t transport.MessageType) (transport.Message, error) {
	body, err := bson.Marshal(v)
	if err != nil {
		return transport.Message{}, logging.Errorf("Could not marshal %s to message: %s", reflect.TypeOf(v), err)
	}
	msg := transport.Message{Type: t, Body: body}
	return msg, nil
}
// The read* helpers below decode a transport message's BSON body into
// the matching request/response value; each is a thin typed wrapper
// around read().

func (BsonProtocol) readGetQuery(msg transport.Message) (query.GetQuery, error) {
	var q query.GetQuery
	err := read(msg, &q)
	return q, err
}

func (BsonProtocol) readUpdateQuery(msg transport.Message) (query.UpdateQuery, error) {
	var q query.UpdateQuery
	err := read(msg, &q)
	return q, err
}

func (BsonProtocol) readPutQuery(msg transport.Message) (query.PutQuery, error) {
	var q query.PutQuery
	err := read(msg, &q)
	return q, err
}

func (BsonProtocol) readDelQuery(msg transport.Message) (query.DelQuery, error) {
	var q query.DelQuery
	err := read(msg, &q)
	return q, err
}

func (BsonProtocol) readGetResponse(msg transport.Message) (query.GetResponse, error) {
	var r query.GetResponse
	err := read(msg, &r)
	return r, err
}

func (BsonProtocol) readUpdateResponse(msg transport.Message) (query.UpdateResponse, error) {
	var r query.UpdateResponse
	err := read(msg, &r)
	return r, err
}

func (BsonProtocol) readAddResponse(msg transport.Message) (query.PutResponse, error) {
	var r query.PutResponse
	err := read(msg, &r)
	return r, err
}

func (BsonProtocol) readDelResponse(msg transport.Message) (query.DelResponse, error) {
	var r query.DelResponse
	err := read(msg, &r)
	return r, err
}

func (BsonProtocol) readPingResponse(msg transport.Message) (query.PingResponse, error) {
	var r query.PingResponse
	err := read(msg, &r)
	return r, err
}
// ReadMessage accepts a transport message, and according to its type, tries to deserialize it into
// a request or response object. An unrecognized type yields a nil
// result and a logged error.
func (p BsonProtocol) ReadMessage(msg transport.Message) (ret interface{}, err error) {
	switch msg.Type {
	case transport.GetMessage:
		ret, err = p.readGetQuery(msg)
	case transport.UpdateMessage:
		ret, err = p.readUpdateQuery(msg)
	case transport.PutMessage:
		ret, err = p.readPutQuery(msg)
	case transport.DelMessage:
		// ping requests carry no body, so no decoding is needed
		ret, err = p.readDelQuery(msg)
	case transport.PingMessage:
		ret, err = query.PingQuery{}, nil
	case transport.GetResponseMessage:
		ret, err = p.readGetResponse(msg)
	case transport.UpdateResponseMessage:
		ret, err = p.readUpdateResponse(msg)
	case transport.PutResponseMessage:
		ret, err = p.readAddResponse(msg)
	case transport.DelResponseMessage:
		ret, err = p.readDelResponse(msg)
	case transport.PingResponseMessage:
		ret, err = p.readPingResponse(msg)
	default:
		ret, err = nil, logging.Errorf("Could not read message: message type '%s' invalid", msg.Type)
	}
	// NOTE: this debug line runs even when decoding failed (ret may be nil).
	logging.Debug("Read message: %s", ret)
	return
}
// WriteMessage takes a request or response object and serializes it into a transport message to be sent to a transport.
// Both values and pointers to the supported query/response types are
// accepted; anything else yields an empty message and a logged error.
func (BsonProtocol) WriteMessage(v interface{}) (msg transport.Message, err error) {
	// make sure that if we're talking about a pointer, we cast to its value
	// before we select on a type
	val := v
	if reflect.TypeOf(v).Kind() == reflect.Ptr {
		val = reflect.ValueOf(v).Elem().Interface()
	}
	// Note: the original v (possibly a pointer) is what gets marshaled;
	// val is only used to select the message type.
	switch val.(type) {
	case query.PutQuery:
		return newMessage(v, transport.PutMessage)
	case query.GetQuery:
		return newMessage(v, transport.GetMessage)
	case query.UpdateQuery:
		return newMessage(v, transport.UpdateMessage)
	case query.DelQuery:
		return newMessage(v, transport.DelMessage)
	case query.PingQuery:
		return newMessage(v, transport.PingMessage)
	case query.PutResponse:
		return newMessage(v, transport.PutResponseMessage)
	case query.GetResponse:
		return newMessage(v, transport.GetResponseMessage)
	case query.UpdateResponse:
		return newMessage(v, transport.UpdateResponseMessage)
	case query.DelResponse:
		return newMessage(v, transport.DelResponseMessage)
	case query.PingResponse:
		return newMessage(v, transport.PingResponseMessage)
	}
	return transport.Message{}, logging.Errorf("Invalid type for protocol serialization: %s", reflect.TypeOf(v))
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"context"
"crypto/tls"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/metapb"
berrors "github.com/pingcap/tidb/br/pkg/errors"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/types"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
// Timing thresholds for store liveness checks.
const (
	// storeDisconnectionDuration is the max duration of a store to be treated as living.
	// when a store doesn't send heartbeat for 100s, it is probably offline, and most of leaders should be transformed.
	// (How about network partition between TiKV and PD? Even that is rare.)
	// Also note that the offline threshold in PD is 20s, see
	// https://github.com/tikv/pd/blob/c40e319f50822678cda71ae62ee2fd70a9cac010/pkg/core/store.go#L523
	storeDisconnectionDuration = 100 * time.Second
)
// IsTypeCompatible checks whether type target is compatible with type src
// they're compatible if
//   - same null/not null and unsigned flag(maybe we can allow src not null flag, target null flag later)
//   - have same evaluation type
//   - target's flen and decimal should be bigger or equals to src's
//   - elements in target is superset of elements in src if they're enum or set type
//   - same charset and collate if they're string types
func IsTypeCompatible(src types.FieldType, target types.FieldType) bool {
	// Nullability and signedness must match exactly.
	if mysql.HasNotNullFlag(src.GetFlag()) != mysql.HasNotNullFlag(target.GetFlag()) ||
		mysql.HasUnsignedFlag(src.GetFlag()) != mysql.HasUnsignedFlag(target.GetFlag()) {
		return false
	}
	if src.EvalType() != target.EvalType() {
		return false
	}
	// Resolve unspecified flen/decimal to the type's defaults (mirrors
	// FieldType.CompactStr) before comparing widths.
	resolve := func(tp types.FieldType) (int, int) {
		defFlen, defDecimal := mysql.GetDefaultFieldLengthAndDecimal(tp.GetType())
		flen, decimal := tp.GetFlen(), tp.GetDecimal()
		if flen == types.UnspecifiedLength {
			flen = defFlen
		}
		if decimal == types.UnspecifiedLength {
			decimal = defDecimal
		}
		return flen, decimal
	}
	srcFLen, srcDecimal := resolve(src)
	targetFLen, targetDecimal := resolve(target)
	if srcFLen > targetFLen || srcDecimal > targetDecimal {
		return false
	}
	// Non-enum/set types have empty elems, and non-string types have
	// empty charset/collate, so the remaining checks are safe to run
	// unconditionally.
	srcElems, targetElems := src.GetElems(), target.GetElems()
	if len(srcElems) > len(targetElems) {
		return false
	}
	seen := make(map[string]struct{}, len(targetElems))
	for _, item := range targetElems {
		seen[item] = struct{}{}
	}
	for _, item := range srcElems {
		if _, ok := seen[item]; !ok {
			return false
		}
	}
	return src.GetCharset() == target.GetCharset() &&
		src.GetCollate() == target.GetCollate()
}
// GRPCConn dials a blocking gRPC connection to storeAddr within a
// five-second timeout, using TLS credentials when tlsConf is non-nil
// and insecure transport otherwise. Extra dial options are appended
// after the defaults.
func GRPCConn(ctx context.Context, storeAddr string, tlsConf *tls.Config, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
	var secureOpt grpc.DialOption
	if tlsConf == nil {
		secureOpt = grpc.WithTransportCredentials(insecure.NewCredentials())
	} else {
		secureOpt = grpc.WithTransportCredentials(credentials.NewTLS(tlsConf))
	}
	opts = append(opts,
		secureOpt,
		grpc.WithBlock(),
		grpc.FailOnNonTempDialError(true),
	)
	dialCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	conn, err := grpc.DialContext(dialCtx, storeAddr, opts...)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return conn, nil
}
// CheckStoreLiveness checks whether a store is still alive.
// Some versions of PD may not set the store state in the gRPC
// response, so the last-heartbeat timestamp is checked manually too.
func CheckStoreLiveness(s *metapb.Store) error {
	if s.State != metapb.StoreState_Up {
		return errors.Annotatef(berrors.ErrKVStorage, "the store state isn't up, it is %s", s.State)
	}
	// A non-positive heartbeat means the field wasn't populated
	// (the default value); skip the staleness check in that case.
	hb := s.GetLastHeartbeat()
	if hb <= 0 {
		return nil
	}
	if sinceLastHB := time.Since(time.Unix(0, hb)); sinceLastHB > storeDisconnectionDuration {
		return errors.Annotatef(berrors.ErrKVStorage, "the store last heartbeat is too far, at %s", sinceLastHB)
	}
	return nil
}
|
package postgres_backend
import (
"database/sql"
"encoding/json"
"errors"
"github.com/howbazaar/loggo"
"github.com/lib/pq"
"github.com/straumur/straumur"
"time"
)
var (
	// logger is the package-wide logger for the postgres backend.
	logger = loggo.GetLogger("straumur.postgres")
)

// Callback for a managed transaction
//
// Example:
//
//	err := p.wrapTransaction(func(tx *sql.Tx) error {
//		rows, err := tx.Query(query, args...)
//		if err != nil {
//			return err
//		}
//	}
//
type TransactionFunc func(*sql.Tx) error

// PostgresDataSource is a straumur event store backed by PostgreSQL.
type PostgresDataSource struct {
	pg *sql.DB // underlying database handle
}
// Converts a row to an event.
//
// The Scan argument order must match the "event" table's column order
// (TODO confirm against schema): the two raw []byte columns hold JSON
// (key params and payload) and are unmarshalled after the scan.
func scanRow(row *sql.Rows, e *straumur.Event) error {
	var entities StringSlice
	var references StringSlice
	var actors StringSlice
	var tags StringSlice
	temp := []byte{}    // raw JSON payload column
	tempkey := []byte{} // raw JSON key-params column
	err := row.Scan(
		&e.ID,
		&e.Key,
		&tempkey,
		&e.Created,
		&e.Updated,
		&temp,
		&e.Description,
		&e.Importance,
		&e.Origin,
		&entities,
		&references,
		&actors,
		&tags)
	if err != nil {
		return err
	}
	var data interface{}
	err = json.Unmarshal(temp, &data)
	if err != nil {
		return err
	}
	var keydata interface{}
	err = json.Unmarshal(tempkey, &keydata)
	if err != nil {
		return err
	}
	e.Payload = data
	e.KeyParams = keydata
	e.Entities = entities
	e.OtherReferences = references
	e.Actors = actors
	e.Tags = tags
	return nil
}
// GetById fetches a single event by its primary key. It returns
// sql.ErrNoRows when no event with the given id exists.
func (p *PostgresDataSource) GetById(id int) (*straumur.Event, error) {
	event := straumur.Event{}
	txErr := p.wrapTransaction(func(tx *sql.Tx) error {
		rows, qerr := tx.Query(`
	SELECT
		*
	FROM
		"event"
	WHERE "id" = $1
	`, id)
		if qerr != nil {
			return qerr
		}
		defer rows.Close()
		// Exactly one row is expected; its absence means the id is unknown.
		if !rows.Next() {
			return sql.ErrNoRows
		}
		return scanRow(rows, &event)
	})
	if txErr != nil {
		return nil, txErr
	}
	return &event, nil
}
// applyMigrations brings the database schema up to date. It inspects
// the public schema for existing tables, loads the bundled migration
// files, filters out migrations already recorded in migration_info,
// and executes the remainder, recording each one. Failures are logged
// at Critical level; the method itself never returns an error.
func (d *PostgresDataSource) applyMigrations() {
	// Collect the names of all tables in the public schema so we can
	// detect whether the migration bookkeeping table already exists.
	// TODO: maybe change the schema name?
	rows, err := d.pg.Query(`
	select tablename
	from pg_tables
	where
	pg_tables.schemaname = 'public';
	`)
	if err != nil {
		logger.Criticalf("Error: %+v", err)
		// Without the table list we cannot safely decide anything;
		// previously a nil rows would have been dereferenced here.
		return
	}
	canMigrate := false
	var s string
	for rows.Next() {
		if err := rows.Scan(&s); err != nil {
			logger.Criticalf("Error: %+v", err)
		}
		if s == "migration_info" {
			canMigrate = true
		}
	}
	if err := rows.Err(); err != nil {
		logger.Criticalf("Error: %+v", err)
	}
	rows.Close()
	// No table names returned: fresh database, safe to migrate.
	if s == "" {
		canMigrate = true
	}
	// Get the list of bundled migrations.
	m, err := globMigrations()
	if err != nil {
		logger.Criticalf("Error: %+v", err)
		return
	}
	// If there were tables, the migration_info table should be among
	// them; use its records to filter out already-applied migrations.
	if s != "" {
		applied, err := d.pg.Query(`
		select created from
		migration_info
		order by created
		`)
		if err != nil {
			logger.Criticalf("Error: %+v", err)
		} else {
			removalDates := []time.Time{}
			for applied.Next() {
				var t time.Time
				if err := applied.Scan(&t); err != nil {
					logger.Criticalf("Error: %+v", err)
				}
				// Weird, table created with TZ, but Scan doesn't
				// add the UTC info.
				removalDates = append(removalDates, t.UTC())
			}
			applied.Close()
			// Filter out migrations which have already been applied.
			m = m.FilterDates(removalDates)
		}
	}
	// Run the remaining migrations, recording each in migration_info.
	if canMigrate && len(m) > 0 {
		for _, migration := range m {
			if _, err := d.pg.Exec(migration.content); err != nil {
				logger.Criticalf("Error: %+v", err)
			}
			if _, err := d.pg.Exec(`
			insert into migration_info
			(created, content)
			values($1, $2)`, migration.date, migration.content); err != nil {
				logger.Criticalf("Error: %+v", err)
			}
		}
	}
}
// wrapTransaction runs t inside a database transaction. The
// transaction is rolled back when t returns an error and committed
// otherwise; a commit failure is now surfaced to the caller instead
// of being silently dropped.
func (d *PostgresDataSource) wrapTransaction(t TransactionFunc) (err error) {
	var tx *sql.Tx
	if tx, err = d.pg.Begin(); err != nil {
		return
	}
	defer func() {
		if err != nil {
			// Best-effort rollback; t's error takes precedence.
			tx.Rollback()
			return
		}
		err = tx.Commit()
	}()
	return t(tx)
}
// AggregateType returns a count per distinct value of the array
// column s (one of the event's array-typed fields) for events
// matching the query q. It returns an error when s is not a valid
// array type for the query.
func (p *PostgresDataSource) AggregateType(q straumur.Query, s string) (map[string]int, error) {
	if !q.IsValidArrayType(s) {
		return nil, errors.New("Invalid type")
	}
	query, args := buildAggregateQuery(q, s)
	m := make(map[string]int)
	err := p.wrapTransaction(func(tx *sql.Tx) error {
		rows, err := tx.Query(query, args...)
		if err != nil {
			// rows is nil on error; closing it here would panic.
			return err
		}
		defer rows.Close()
		for rows.Next() {
			var cStr string
			var cInt int
			if err := rows.Scan(&cStr, &cInt); err != nil {
				return err
			}
			m[cStr] = cInt
		}
		return rows.Err()
	})
	return m, err
}
// Query returns all events matching q, scanned in result-set order.
func (p *PostgresDataSource) Query(q straumur.Query) ([]*straumur.Event, error) {
	events := []*straumur.Event{}
	query, args := buildSelectQuery(q)
	err := p.wrapTransaction(func(tx *sql.Tx) error {
		rows, err := tx.Query(query, args...)
		if err != nil {
			// rows is nil on error; closing it here would panic.
			return err
		}
		defer rows.Close()
		for rows.Next() {
			var e straumur.Event
			if err := scanRow(rows, &e); err != nil {
				return err
			}
			events = append(events, &e)
		}
		return rows.Err()
	})
	return events, err
}
// Save persists an event: events with a zero ID are inserted (the
// generated ID and timestamps are written back into e); all others
// are updated. Duplicate-key insertion attempts (PostgreSQL error
// code 23505) are logged as warnings and swallowed.
func (p *PostgresDataSource) Save(e *straumur.Event) (err error) {
	switch e.ID {
	case 0:
		err = p.wrapTransaction(func(tx *sql.Tx) error {
			query, args, err := buildInsertQuery(e)
			if err != nil {
				return err
			}
			rows, err := tx.Query(query, args...)
			if err != nil {
				return err
			}
			// Ensure the result set is released even on early return.
			defer rows.Close()
			for rows.Next() {
				if err := rows.Scan(&e.ID, &e.Created, &e.Updated); err != nil {
					return err
				}
			}
			if err := rows.Err(); err != nil {
				return err
			}
			return nil
		})
	default:
		err = p.wrapTransaction(func(tx *sql.Tx) error {
			query, args, err := buildUpdateQuery(e)
			if err != nil {
				return err
			}
			return tx.QueryRow(query, args...).Scan(&e.Updated)
		})
	}
	//Set the error to nil if we detect duplicate insertion attempts.
	switch v := err.(type) {
	case *pq.Error:
		if v.Code == "23505" {
			logger.Warningf("Duplicate insertion detected, %+v", err)
			err = nil
		}
	}
	return err
}
// NewPostgresDataSource opens a connection pool for the given
// connection string, runs any pending schema migrations and returns
// the ready-to-use data source.
func NewPostgresDataSource(connection string) (*PostgresDataSource, error) {
	db, err := sql.Open("postgres", connection)
	if err != nil {
		return nil, err
	}
	source := &PostgresDataSource{pg: db}
	// Bring the schema up to date before handing the source out.
	source.applyMigrations()
	return source, nil
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package external
import (
"bytes"
"context"
"encoding/hex"
"path/filepath"
"slices"
"strconv"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/br/pkg/lightning/backend"
"github.com/pingcap/tidb/br/pkg/lightning/backend/encode"
"github.com/pingcap/tidb/br/pkg/lightning/backend/kv"
"github.com/pingcap/tidb/br/pkg/lightning/common"
"github.com/pingcap/tidb/br/pkg/membuf"
"github.com/pingcap/tidb/br/pkg/storage"
tidbkv "github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/size"
"go.uber.org/zap"
)
// rangePropertiesCollector collects range properties for each range. The zero
// value of rangePropertiesCollector is not ready to use, should call reset()
// first.
type rangePropertiesCollector struct {
	props    []*rangeProperty // finished properties, in write order
	currProp *rangeProperty   // property currently being accumulated
	// propSizeDist/propKeysDist are the byte-size and key-count
	// distances between consecutive properties (see the builder's
	// SetPropSizeDistance/SetPropKeysDistance).
	propSizeDist uint64
	propKeysDist uint64
}
// reset clears the collected properties so the collector can be
// reused for the next file; the props slice keeps its capacity.
func (rc *rangePropertiesCollector) reset() {
	rc.props = rc.props[:0]
	rc.currProp = &rangeProperty{}
}
// encode encodes rc.props to a byte slice.
func (rc *rangePropertiesCollector) encode() []byte {
	// Pre-size the destination to avoid small initial growth copies.
	buf := make([]byte, 0, 1024)
	return encodeMultiProps(buf, rc.props)
}
// WriterSummary is the summary of a writer.
type WriterSummary struct {
	WriterID  int        // identifier of the writer that produced the files
	Seq       int        // sequence number of the last flushed file
	Min       tidbkv.Key // smallest key written by the writer
	Max       tidbkv.Key // largest key written by the writer
	TotalSize uint64     // total bytes of keys+values written
}
// OnCloseFunc is the callback function when a writer is closed.
type OnCloseFunc func(summary *WriterSummary)

// DummyOnCloseFunc is a dummy OnCloseFunc that discards the summary.
func DummyOnCloseFunc(*WriterSummary) {}
// WriterBuilder builds a new Writer.
type WriterBuilder struct {
	memSizeLimit      uint64      // flush threshold for buffered KV bytes
	writeBatchCount   uint64      // initial capacity of the write batch
	propSizeDist      uint64      // byte distance between range properties
	propKeysDist      uint64      // key-count distance between range properties
	onClose           OnCloseFunc // invoked with a summary when the writer closes
	dupeDetectEnabled bool        // use the dup-detect key adapter when true
	bufferPool        *membuf.Pool
}
// NewWriterBuilder creates a WriterBuilder with defaults: a 256 MiB
// memory limit, 8K-pair batch capacity, and one range property per
// 1 MiB / 8K keys.
func NewWriterBuilder() *WriterBuilder {
	return &WriterBuilder{
		memSizeLimit:    256 * size.MB,
		writeBatchCount: 8 * 1024,
		propSizeDist:    1 * size.MB,
		propKeysDist:    8 * 1024,
		onClose:         DummyOnCloseFunc,
	}
}
// SetMemorySizeLimit sets the memory size limit of the writer. When accumulated
// data size exceeds this limit, the writer will flush data as a file to external
// storage. Returns the builder for chaining.
func (b *WriterBuilder) SetMemorySizeLimit(size uint64) *WriterBuilder {
	b.memSizeLimit = size
	return b
}

// SetWriterBatchCount sets the batch count of the writer.
// Returns the builder for chaining.
func (b *WriterBuilder) SetWriterBatchCount(count uint64) *WriterBuilder {
	b.writeBatchCount = count
	return b
}

// SetPropSizeDistance sets the distance of range size for each property.
// Returns the builder for chaining.
func (b *WriterBuilder) SetPropSizeDistance(dist uint64) *WriterBuilder {
	b.propSizeDist = dist
	return b
}

// SetPropKeysDistance sets the distance of range keys for each property.
// Returns the builder for chaining.
func (b *WriterBuilder) SetPropKeysDistance(dist uint64) *WriterBuilder {
	b.propKeysDist = dist
	return b
}

// SetOnCloseFunc sets the callback function when a writer is closed.
// Returns the builder for chaining.
func (b *WriterBuilder) SetOnCloseFunc(onClose OnCloseFunc) *WriterBuilder {
	b.onClose = onClose
	return b
}

// SetBufferPool sets the buffer pool of the writer.
// Returns the builder for chaining.
func (b *WriterBuilder) SetBufferPool(bufferPool *membuf.Pool) *WriterBuilder {
	b.bufferPool = bufferPool
	return b
}

// EnableDuplicationDetection enables the duplication detection of the writer.
// Returns the builder for chaining.
func (b *WriterBuilder) EnableDuplicationDetection() *WriterBuilder {
	b.dupeDetectEnabled = true
	return b
}
// Build builds a new Writer. The files writer will create are under the prefix
// of "{prefix}/{writerID}".
func (b *WriterBuilder) Build(
	store storage.ExternalStorage,
	prefix string,
	writerID int,
) *Writer {
	// Fall back to a private buffer pool when the caller didn't supply one.
	bp := b.bufferPool
	if bp == nil {
		bp = membuf.NewPool()
	}
	filenamePrefix := filepath.Join(prefix, strconv.Itoa(writerID))
	// The key adapter decorates keys for duplicate detection when enabled;
	// otherwise keys pass through unchanged.
	keyAdapter := common.KeyAdapter(common.NoopKeyAdapter{})
	if b.dupeDetectEnabled {
		keyAdapter = common.DupDetectKeyAdapter{}
	}
	return &Writer{
		rc: &rangePropertiesCollector{
			props:        make([]*rangeProperty, 0, 1024),
			currProp:     &rangeProperty{},
			propSizeDist: b.propSizeDist,
			propKeysDist: b.propKeysDist,
		},
		memSizeLimit:   b.memSizeLimit,
		store:          store,
		kvBuffer:       bp.NewBuffer(),
		writeBatch:     make([]common.KvPair, 0, b.writeBatchCount),
		currentSeq:     0,
		filenamePrefix: filenamePrefix,
		keyAdapter:     keyAdapter,
		writerID:       writerID,
		kvStore:        nil,
		onClose:        b.onClose,
		closed:         false,
	}
}
// Writer is used to write data into external storage.
type Writer struct {
	store          storage.ExternalStorage
	writerID       int // identifier used in file paths and summaries
	currentSeq     int // sequence number of the next file to flush
	filenamePrefix string
	keyAdapter     common.KeyAdapter
	kvStore        *KeyValueStore
	rc             *rangePropertiesCollector
	memSizeLimit   uint64          // flush threshold for batchSize
	kvBuffer       *membuf.Buffer  // backing storage for batched keys/values
	writeBatch     []common.KvPair // pairs accumulated since the last flush
	onClose        OnCloseFunc
	closed         bool
	// Statistic information per batch.
	batchSize uint64
	// Statistic information per writer.
	minKey    tidbkv.Key
	maxKey    tidbkv.Key
	totalSize uint64
}
// AppendRows appends rows to the external storage. Each pair's key is
// re-encoded through the writer's key adapter (adding dup-detection
// info when enabled) and buffered; when the buffered size reaches
// memSizeLimit the batch is flushed to storage.
// Note that this method is NOT thread-safe.
func (w *Writer) AppendRows(ctx context.Context, _ []string, rows encode.Rows) error {
	kvs := kv.Rows2KvPairs(rows)
	keyAdapter := w.keyAdapter
	for _, pair := range kvs {
		// batchSize tracks the raw (pre-encoding) key+value bytes.
		w.batchSize += uint64(len(pair.Key) + len(pair.Val))
		buf := w.kvBuffer.AllocBytes(keyAdapter.EncodedLen(pair.Key, pair.RowID))
		key := keyAdapter.Encode(buf[:0], pair.Key, pair.RowID)
		val := w.kvBuffer.AddBytes(pair.Val)
		w.writeBatch = append(w.writeBatch, common.KvPair{Key: key, Val: val})
		if w.batchSize >= w.memSizeLimit {
			if err := w.flushKVs(ctx); err != nil {
				return err
			}
		}
	}
	return nil
}
// IsSynced implements the backend.EngineWriter interface. It always
// reports false: data may still be buffered until Close is called.
func (w *Writer) IsSynced() bool {
	return false
}
// Close flushes any buffered key-value pairs, destroys the writer's
// buffer, reports a WriterSummary through the onClose callback and
// marks the writer closed. Closing an already-closed writer is an error.
func (w *Writer) Close(ctx context.Context) (backend.ChunkFlushStatus, error) {
	if w.closed {
		return status(false), errors.Errorf("writer %d has been closed", w.writerID)
	}
	w.closed = true
	// Release the KV buffer regardless of whether the final flush succeeds.
	defer w.kvBuffer.Destroy()
	err := w.flushKVs(ctx)
	if err != nil {
		return status(false), err
	}
	logutil.Logger(ctx).Info("close writer",
		zap.Int("writerID", w.writerID),
		zap.String("minKey", hex.EncodeToString(w.minKey)),
		zap.String("maxKey", hex.EncodeToString(w.maxKey)))
	// Drop the batch so its memory can be reclaimed.
	w.writeBatch = nil
	w.onClose(&WriterSummary{
		WriterID:  w.writerID,
		Seq:       w.currentSeq,
		Min:       w.minKey,
		Max:       w.maxKey,
		TotalSize: w.totalSize,
	})
	return status(true), nil
}
// recordMinMax widens the writer-level key range to cover
// [newMin, newMax] and adds size to the running byte total.
func (w *Writer) recordMinMax(newMin, newMax tidbkv.Key, size uint64) {
	// An empty key means "not recorded yet" and is always replaced.
	if len(w.minKey) == 0 || newMin.Cmp(w.minKey) < 0 {
		w.minKey = newMin.Clone()
	}
	if len(w.maxKey) == 0 || w.maxKey.Cmp(newMax) < 0 {
		w.maxKey = newMax.Clone()
	}
	w.totalSize += size
}
// status is a bool that implements backend.ChunkFlushStatus, reporting
// whether a flush completed.
type status bool

// Flushed implements the backend.ChunkFlushStatus interface.
func (s status) Flushed() bool {
	return bool(s)
}
// flushKVs sorts the in-memory batch by key, writes it as one data
// file plus one statistics file to external storage, updates the
// writer's min/max statistics and resets the per-batch state. It is
// a no-op when the batch is empty.
func (w *Writer) flushKVs(ctx context.Context) (err error) {
	if len(w.writeBatch) == 0 {
		return nil
	}
	logger := logutil.Logger(ctx)
	dataWriter, statWriter, err := w.createStorageWriter(ctx)
	if err != nil {
		return err
	}
	ts := time.Now()
	var savedBytes uint64
	defer func() {
		w.currentSeq++
		err1, err2 := dataWriter.Close(ctx), statWriter.Close(ctx)
		if err != nil {
			// A write error already happened; keep it as the result.
			return
		}
		if err1 != nil {
			// Log the close error itself (previously this logged the
			// nil outer err).
			logger.Error("close data writer failed", zap.Error(err1))
			err = err1
			return
		}
		if err2 != nil {
			logger.Error("close stat writer failed", zap.Error(err2))
			err = err2
			return
		}
		logger.Info("flush kv",
			zap.Duration("time", time.Since(ts)),
			zap.Uint64("bytes", savedBytes),
			zap.Any("rate", float64(savedBytes)/1024.0/1024.0/time.Since(ts).Seconds()))
	}()
	// Keys must be emitted in sorted order.
	slices.SortFunc(w.writeBatch, func(i, j common.KvPair) int {
		return bytes.Compare(i.Key, j.Key)
	})
	w.kvStore, err = NewKeyValueStore(ctx, dataWriter, w.rc, w.writerID, w.currentSeq)
	if err != nil {
		return err
	}
	var kvSize uint64
	for _, pair := range w.writeBatch {
		err = w.kvStore.AddKeyValue(pair.Key, pair.Val)
		if err != nil {
			return err
		}
		kvSize += uint64(len(pair.Key)) + uint64(len(pair.Val))
	}
	w.kvStore.Close()
	_, err = statWriter.Write(ctx, w.rc.encode())
	if err != nil {
		return err
	}
	w.recordMinMax(w.writeBatch[0].Key, w.writeBatch[len(w.writeBatch)-1].Key, kvSize)
	// Keep slice/buffer capacities for the next batch.
	w.writeBatch = w.writeBatch[:0]
	w.rc.reset()
	w.kvBuffer.Reset()
	savedBytes = w.batchSize
	w.batchSize = 0
	return nil
}
// createStorageWriter creates one data-file writer and one
// statistics-file writer for the current sequence number. On failure
// neither writer is leaked.
func (w *Writer) createStorageWriter(ctx context.Context) (data, stats storage.ExternalFileWriter, err error) {
	dataPath := filepath.Join(w.filenamePrefix, strconv.Itoa(w.currentSeq))
	dataWriter, err := w.store.Create(ctx, dataPath, nil)
	if err != nil {
		return nil, nil, err
	}
	statPath := filepath.Join(w.filenamePrefix+statSuffix, strconv.Itoa(w.currentSeq))
	statsWriter, err := w.store.Create(ctx, statPath, nil)
	if err != nil {
		// Don't leak the already-opened data writer.
		_ = dataWriter.Close(ctx)
		return nil, nil, err
	}
	return dataWriter, statsWriter, nil
}
|
package bslib
var faCachedValues = map[string]string{
"fab fa-500px": "",
"fab fa-accessible-icon": "",
"fab fa-accusoft": "",
"fas fa-address-book": "",
"fas fa-address-card": "",
"fas fa-adjust": "",
"fab fa-adn": "",
"fab fa-adversal": "",
"fab fa-affiliatetheme": "",
"fab fa-algolia": "",
"fas fa-align-center": "",
"fas fa-align-justify": "",
"fas fa-align-left": "",
"fas fa-align-right": "",
"fab fa-amazon": "",
"fas fa-ambulance": "",
"fas fa-american-sign-language-interpreting": "",
"fab fa-amilia": "",
"fas fa-anchor": "",
"fab fa-android": "",
"fab fa-angellist": "",
"fas fa-angle-double-down": "",
"fas fa-angle-double-left": "",
"fas fa-angle-double-right": "",
"fas fa-angle-double-up": "",
"fas fa-angle-down": "",
"fas fa-angle-left": "",
"fas fa-angle-right": "",
"fas fa-angle-up": "",
"fab fa-angrycreative": "",
"fab fa-angular": "",
"fab fa-app-store": "",
"fab fa-app-store-ios": "",
"fab fa-apper": "",
"fab fa-apple": "",
"fab fa-apple-pay": "",
"fas fa-archive": "",
"fas fa-arrow-alt-circle-down": "",
"fas fa-arrow-alt-circle-left": "",
"fas fa-arrow-alt-circle-right": "",
"fas fa-arrow-alt-circle-up": "",
"fas fa-arrow-circle-down": "",
"fas fa-arrow-circle-left": "",
"fas fa-arrow-circle-right": "",
"fas fa-arrow-circle-up": "",
"fas fa-arrow-down": "",
"fas fa-arrow-left": "",
"fas fa-arrow-right": "",
"fas fa-arrow-up": "",
"fas fa-arrows-alt": "",
"fas fa-arrows-alt-h": "",
"fas fa-arrows-alt-v": "",
"fas fa-assistive-listening-systems": "",
"fas fa-asterisk": "",
"fab fa-asymmetrik": "",
"fas fa-at": "",
"fab fa-audible": "",
"fas fa-audio-description": "",
"fab fa-autoprefixer": "",
"fab fa-avianex": "",
"fab fa-aviato": "",
"fab fa-aws": "",
"fas fa-backward": "",
"fas fa-balance-scale": "",
"fas fa-ban": "",
"fab fa-bandcamp": "",
"fas fa-barcode": "",
"fas fa-bars": "",
"fas fa-bath": "",
"fas fa-battery-empty": "",
"fas fa-battery-full": "",
"fas fa-battery-half": "",
"fas fa-battery-quarter": "",
"fas fa-battery-three-quarters": "",
"fas fa-bed": "",
"fas fa-beer": "",
"fab fa-behance": "",
"fab fa-behance-square": "",
"fas fa-bell": "",
"fas fa-bell-slash": "",
"fas fa-bicycle": "",
"fab fa-bimobject": "",
"fas fa-binoculars": "",
"fas fa-birthday-cake": "",
"fab fa-bitbucket": "",
"fab fa-bitcoin": "",
"fab fa-bity": "",
"fab fa-black-tie": "",
"fab fa-blackberry": "",
"fas fa-blind": "",
"fab fa-blogger": "",
"fab fa-blogger-b": "",
"fab fa-bluetooth": "",
"fab fa-bluetooth-b": "",
"fas fa-bold": "",
"fas fa-bolt": "",
"fas fa-bomb": "",
"fas fa-book": "",
"fas fa-bookmark": "",
"fas fa-braille": "",
"fas fa-briefcase": "",
"fab fa-btc": "",
"fas fa-bug": "",
"fas fa-building": "",
"fas fa-bullhorn": "",
"fas fa-bullseye": "",
"fab fa-buromobelexperte": "",
"fas fa-bus": "",
"fab fa-buysellads": "",
"fas fa-calculator": "",
"fas fa-calendar": "",
"fas fa-calendar-alt": "",
"fas fa-calendar-check": "",
"fas fa-calendar-minus": "",
"fas fa-calendar-plus": "",
"fas fa-calendar-times": "",
"fas fa-camera": "",
"fas fa-camera-retro": "",
"fas fa-car": "",
"fas fa-caret-down": "",
"fas fa-caret-left": "",
"fas fa-caret-right": "",
"fas fa-caret-square-down": "",
"fas fa-caret-square-left": "",
"fas fa-caret-square-right": "",
"fas fa-caret-square-up": "",
"fas fa-caret-up": "",
"fas fa-cart-arrow-down": "",
"fas fa-cart-plus": "",
"fab fa-cc-amex": "",
"fab fa-cc-apple-pay": "",
"fab fa-cc-diners-club": "",
"fab fa-cc-discover": "",
"fab fa-cc-jcb": "",
"fab fa-cc-mastercard": "",
"fab fa-cc-paypal": "",
"fab fa-cc-stripe": "",
"fab fa-cc-visa": "",
"fab fa-centercode": "",
"fas fa-certificate": "",
"fas fa-chart-area": "",
"fas fa-chart-bar": "",
"fas fa-chart-line": "",
"fas fa-chart-pie": "",
"fas fa-check": "",
"fas fa-check-circle": "",
"fas fa-check-square": "",
"fas fa-chevron-circle-down": "",
"fas fa-chevron-circle-left": "",
"fas fa-chevron-circle-right": "",
"fas fa-chevron-circle-up": "",
"fas fa-chevron-down": "",
"fas fa-chevron-left": "",
"fas fa-chevron-right": "",
"fas fa-chevron-up": "",
"fas fa-child": "",
"fab fa-chrome": "",
"fas fa-circle": "",
"fas fa-circle-notch": "",
"fas fa-clipboard": "",
"fas fa-clock": "",
"fas fa-clone": "",
"fas fa-closed-captioning": "",
"fas fa-cloud": "",
"fas fa-cloud-download-alt": "",
"fas fa-cloud-upload-alt": "",
"fab fa-cloudscale": "",
"fab fa-cloudsmith": "",
"fab fa-cloudversify": "",
"fas fa-code": "",
"fas fa-code-branch": "",
"fab fa-codepen": "",
"fab fa-codiepie": "",
"fas fa-coffee": "",
"fas fa-cog": "",
"fas fa-cogs": "",
"fas fa-columns": "",
"fas fa-comment": "",
"fas fa-comment-alt": "",
"fas fa-comments": "",
"fas fa-compass": "",
"fas fa-compress": "",
"fab fa-connectdevelop": "",
"fab fa-contao": "",
"fas fa-copy": "",
"fas fa-copyright": "",
"fab fa-cpanel": "",
"fab fa-creative-commons": "",
"fas fa-credit-card": "",
"fas fa-crop": "",
"fas fa-crosshairs": "",
"fab fa-css3": "",
"fab fa-css3-alt": "",
"fas fa-cube": "",
"fas fa-cubes": "",
"fas fa-cut": "",
"fab fa-cuttlefish": "",
"fab fa-d-and-d": "",
"fab fa-dashcube": "",
"fas fa-database": "",
"fas fa-deaf": "",
"fab fa-delicious": "",
"fab fa-deploydog": "",
"fab fa-deskpro": "",
"fas fa-desktop": "",
"fab fa-deviantart": "",
"fab fa-digg": "",
"fab fa-digital-ocean": "",
"fab fa-discord": "",
"fab fa-discourse": "",
"fab fa-dochub": "",
"fab fa-docker": "",
"fas fa-dollar-sign": "",
"fas fa-dot-circle": "",
"fas fa-download": "",
"fab fa-draft2digital": "",
"fab fa-dribbble": "",
"fab fa-dribbble-square": "",
"fab fa-dropbox": "",
"fab fa-drupal": "",
"fab fa-dyalog": "",
"fab fa-earlybirds": "",
"fab fa-edge": "",
"fas fa-edit": "",
"fas fa-eject": "",
"fas fa-ellipsis-h": "",
"fas fa-ellipsis-v": "",
"fab fa-ember": "",
"fab fa-empire": "",
"fas fa-envelope": "mail,email,e-mail",
"fas fa-envelope-open": "",
"fas fa-envelope-square": "",
"fab fa-envira": "",
"fas fa-eraser": "",
"fab fa-erlang": "",
"fab fa-etsy": "",
"fas fa-euro-sign": "",
"fas fa-exchange-alt": "",
"fas fa-exclamation": "",
"fas fa-exclamation-circle": "",
"fas fa-exclamation-triangle": "",
"fas fa-expand": "",
"fas fa-expand-arrows-alt": "",
"fab fa-expeditedssl": "",
"fas fa-external-link-alt": "",
"fas fa-external-link-square-alt": "",
"fas fa-eye": "",
"fas fa-eye-dropper": "",
"fas fa-eye-slash": "",
"fab fa-facebook": "",
"fab fa-facebook-f": "",
"fab fa-facebook-messenger": "",
"fab fa-facebook-square": "",
"fas fa-fast-backward": "",
"fas fa-fast-forward": "",
"fas fa-fax": "",
"fas fa-female": "",
"fas fa-fighter-jet": "",
"fas fa-file": "",
"fas fa-file-alt": "",
"fas fa-file-archive": "",
"fas fa-file-audio": "",
"fas fa-file-code": "",
"fas fa-file-excel": "",
"fas fa-file-image": "",
"fas fa-file-pdf": "",
"fas fa-file-powerpoint": "",
"fas fa-file-video": "",
"fas fa-file-word": "",
"fas fa-film": "",
"fas fa-filter": "",
"fas fa-fire": "",
"fas fa-fire-extinguisher": "",
"fab fa-firefox": "",
"fab fa-first-order": "",
"fab fa-firstdraft": "",
"fas fa-flag": "",
"fas fa-flag-checkered": "",
"fas fa-flask": "",
"fab fa-flickr": "",
"fab fa-fly": "",
"fas fa-folder": "",
"fas fa-folder-open": "",
"fas fa-font": "",
"fab fa-font-awesome": "",
"fab fa-font-awesome-alt": "",
"fab fa-font-awesome-flag": "",
"fab fa-fonticons": "",
"fab fa-fonticons-fi": "",
"fab fa-fort-awesome": "",
"fab fa-fort-awesome-alt": "",
"fab fa-forumbee": "",
"fas fa-forward": "",
"fab fa-foursquare": "",
"fab fa-free-code-camp": "",
"fab fa-freebsd": "",
"fas fa-frown": "",
"fas fa-futbol": "",
"fas fa-gamepad": "",
"fas fa-gavel": "",
"fas fa-gem": "",
"fas fa-genderless": "",
"fab fa-get-pocket": "",
"fab fa-gg": "",
"fab fa-gg-circle": "",
"fas fa-gift": "",
"fab fa-git": "",
"fab fa-git-square": "",
"fab fa-github": "",
"fab fa-github-alt": "",
"fab fa-github-square": "",
"fab fa-gitkraken": "",
"fab fa-gitlab": "",
"fab fa-gitter": "",
"fas fa-glass-martini": "",
"fab fa-glide": "",
"fab fa-glide-g": "",
"fas fa-globe": "",
"fab fa-gofore": "",
"fab fa-goodreads": "",
"fab fa-goodreads-g": "",
"fab fa-google": "",
"fab fa-google-drive": "",
"fab fa-google-play": "",
"fab fa-google-plus": "",
"fab fa-google-plus-g": "",
"fab fa-google-plus-square": "",
"fab fa-google-wallet": "",
"fas fa-graduation-cap": "",
"fab fa-gratipay": "",
"fab fa-grav": "",
"fab fa-gripfire": "",
"fab fa-grunt": "",
"fab fa-gulp": "",
"fas fa-h-square": "",
"fab fa-hacker-news": "",
"fab fa-hacker-news-square": "",
"fas fa-hand-lizard": "",
"fas fa-hand-paper": "",
"fas fa-hand-peace": "",
"fas fa-hand-point-down": "",
"fas fa-hand-point-left": "",
"fas fa-hand-point-right": "",
"fas fa-hand-point-up": "",
"fas fa-hand-pointer": "",
"fas fa-hand-rock": "",
"fas fa-hand-scissors": "",
"fas fa-hand-spock": "",
"fas fa-handshake": "",
"fas fa-hashtag": "",
"fas fa-hdd": "",
"fas fa-heading": "",
"fas fa-headphones": "",
"fas fa-heart": "",
"fas fa-heartbeat": "",
"fab fa-hire-a-helper": "",
"fas fa-history": "",
"fas fa-home": "",
"fab fa-hooli": "",
"fas fa-hospital": "",
"fab fa-hotjar": "",
"fas fa-hourglass": "",
"fas fa-hourglass-end": "",
"fas fa-hourglass-half": "",
"fas fa-hourglass-start": "",
"fab fa-houzz": "",
"fab fa-html5": "",
"fab fa-hubspot": "",
"fas fa-i-cursor": "",
"fas fa-id-badge": "",
"fas fa-id-card": "",
"fas fa-image": "",
"fas fa-images": "",
"fab fa-imdb": "",
"fas fa-inbox": "",
"fas fa-indent": "",
"fas fa-industry": "",
"fas fa-info": "",
"fas fa-info-circle": "",
"fab fa-instagram": "",
"fab fa-internet-explorer": "",
"fab fa-ioxhost": "",
"fas fa-italic": "",
"fab fa-itunes": "",
"fab fa-itunes-note": "",
"fab fa-jenkins": "",
"fab fa-joget": "",
"fab fa-joomla": "",
"fab fa-js": "",
"fab fa-js-square": "",
"fab fa-jsfiddle": "",
"fas fa-key": "pass,password",
"fas fa-keyboard": "",
"fab fa-keycdn": "",
"fab fa-kickstarter": "",
"fab fa-kickstarter-k": "",
"fas fa-language": "",
"fas fa-laptop": "",
"fab fa-laravel": "",
"fab fa-lastfm": "",
"fab fa-lastfm-square": "",
"fas fa-leaf": "",
"fab fa-leanpub": "",
"fas fa-lemon": "",
"fab fa-less": "",
"fas fa-level-down-alt": "",
"fas fa-level-up-alt": "",
"fas fa-life-ring": "",
"fas fa-lightbulb": "",
"fab fa-line": "",
"fas fa-link": "",
"fab fa-linkedin": "",
"fab fa-linkedin-in": "",
"fab fa-linode": "",
"fab fa-linux": "",
"fas fa-lira-sign": "",
"fas fa-list": "",
"fas fa-list-alt": "",
"fas fa-list-ol": "",
"fas fa-list-ul": "",
"fas fa-location-arrow": "",
"fas fa-lock": "password",
"fas fa-lock-open": "",
"fas fa-long-arrow-alt-down": "",
"fas fa-long-arrow-alt-left": "",
"fas fa-long-arrow-alt-right": "",
"fas fa-long-arrow-alt-up": "",
"fas fa-low-vision": "",
"fab fa-lyft": "",
"fab fa-magento": "",
"fas fa-magic": "",
"fas fa-magnet": "",
"fas fa-male": "",
"fas fa-map": "",
"fas fa-map-marker": "",
"fas fa-map-marker-alt": "",
"fas fa-map-pin": "",
"fas fa-map-signs": "",
"fas fa-mars": "",
"fas fa-mars-double": "",
"fas fa-mars-stroke": "",
"fas fa-mars-stroke-h": "",
"fas fa-mars-stroke-v": "",
"fab fa-maxcdn": "",
"fab fa-medapps": "",
"fab fa-medium": "",
"fab fa-medium-m": "",
"fas fa-medkit": "",
"fab fa-medrt": "",
"fab fa-meetup": "",
"fas fa-meh": "",
"fas fa-mercury": "",
"fas fa-microchip": "",
"fas fa-microphone": "",
"fas fa-microphone-slash": "",
"fab fa-microsoft": "",
"fas fa-minus": "",
"fas fa-minus-circle": "",
"fas fa-minus-square": "",
"fab fa-mix": "",
"fab fa-mixcloud": "",
"fab fa-mizuni": "",
"fas fa-mobile": "",
"fas fa-mobile-alt": "",
"fab fa-modx": "",
"fab fa-monero": "",
"fas fa-money-bill-alt": "",
"fas fa-moon": "",
"fas fa-motorcycle": "",
"fas fa-mouse-pointer": "",
"fas fa-music": "",
"fab fa-napster": "",
"fas fa-neuter": "",
"fas fa-newspaper": "",
"fab fa-nintendo-switch": "",
"fab fa-node": "",
"fab fa-node-js": "",
"fab fa-npm": "",
"fab fa-ns8": "",
"fab fa-nutritionix": "",
"fas fa-object-group": "",
"fas fa-object-ungroup": "",
"fab fa-odnoklassniki": "",
"fab fa-odnoklassniki-square": "",
"fab fa-opencart": "",
"fab fa-openid": "",
"fab fa-opera": "",
"fab fa-optin-monster": "",
"fab fa-osi": "",
"fas fa-outdent": "",
"fab fa-page4": "",
"fab fa-pagelines": "",
"fas fa-paint-brush": "",
"fab fa-palfed": "",
"fas fa-paper-plane": "",
"fas fa-paperclip": "",
"fas fa-paragraph": "",
"fas fa-paste": "",
"fab fa-patreon": "",
"fas fa-pause": "",
"fas fa-pause-circle": "",
"fas fa-paw": "",
"fab fa-paypal": "",
"fas fa-pen-square": "",
"fas fa-pencil-alt": "",
"fas fa-percent": "",
"fab fa-periscope": "",
"fab fa-phabricator": "",
"fab fa-phoenix-framework": "",
"fas fa-phone": "",
"fas fa-phone-square": "",
"fas fa-phone-volume": "",
"fab fa-pied-piper": "",
"fab fa-pied-piper-alt": "",
"fab fa-pied-piper-pp": "",
"fab fa-pinterest": "",
"fab fa-pinterest-p": "",
"fab fa-pinterest-square": "",
"fas fa-plane": "",
"fas fa-play": "",
"fas fa-play-circle": "",
"fab fa-playstation": "",
"fas fa-plug": "",
"fas fa-plus": "",
"fas fa-plus-circle": "",
"fas fa-plus-square": "",
"fas fa-podcast": "",
"fas fa-pound-sign": "",
"fas fa-power-off": "",
"fas fa-print": "",
"fab fa-product-hunt": "",
"fab fa-pushed": "",
"fas fa-puzzle-piece": "",
"fab fa-python": "",
"fab fa-qq": "",
"fas fa-qrcode": "",
"fas fa-question": "",
"fas fa-question-circle": "",
"fab fa-quora": "",
"fas fa-quote-left": "",
"fas fa-quote-right": "",
"fas fa-random": "",
"fab fa-ravelry": "",
"fab fa-react": "",
"fab fa-rebel": "",
"fas fa-recycle": "",
"fab fa-red-river": "",
"fab fa-reddit": "",
"fab fa-reddit-alien": "",
"fab fa-reddit-square": "",
"fas fa-redo": "",
"fas fa-redo-alt": "",
"fas fa-registered": "",
"fab fa-rendact": "",
"fab fa-renren": "",
"fas fa-reply": "",
"fas fa-reply-all": "",
"fab fa-replyd": "",
"fab fa-resolving": "",
"fas fa-retweet": "",
"fas fa-road": "",
"fas fa-rocket": "",
"fab fa-rocketchat": "",
"fab fa-rockrms": "",
"fas fa-rss": "",
"fas fa-rss-square": "",
"fas fa-ruble-sign": "",
"fas fa-rupee-sign": "",
"fab fa-safari": "",
"fab fa-sass": "",
"fas fa-save": "",
"fab fa-schlix": "",
"fab fa-scribd": "",
"fas fa-search": "",
"fas fa-search-minus": "",
"fas fa-search-plus": "",
"fab fa-searchengin": "",
"fab fa-sellcast": "",
"fab fa-sellsy": "",
"fas fa-server": "",
"fab fa-servicestack": "",
"fas fa-share": "",
"fas fa-share-alt": "",
"fas fa-share-alt-square": "",
"fas fa-share-square": "",
"fas fa-shekel-sign": "",
"fas fa-shield-alt": "",
"fas fa-ship": "",
"fab fa-shirtsinbulk": "",
"fas fa-shopping-bag": "",
"fas fa-shopping-basket": "",
"fas fa-shopping-cart": "",
"fas fa-shower": "",
"fas fa-sign-in-alt": "",
"fas fa-sign-language": "",
"fas fa-sign-out-alt": "",
"fas fa-signal": "",
"fab fa-simplybuilt": "",
"fab fa-sistrix": "",
"fas fa-sitemap": "",
"fab fa-skyatlas": "",
"fab fa-skype": "",
"fab fa-slack": "",
"fab fa-slack-hash": "",
"fas fa-sliders-h": "",
"fab fa-slideshare": "",
"fas fa-smile": "",
"fab fa-snapchat": "",
"fab fa-snapchat-ghost": "",
"fab fa-snapchat-square": "",
"fas fa-snowflake": "",
"fas fa-sort": "",
"fas fa-sort-alpha-down": "",
"fas fa-sort-alpha-up": "",
"fas fa-sort-amount-down": "",
"fas fa-sort-amount-up": "",
"fas fa-sort-down": "",
"fas fa-sort-numeric-down": "",
"fas fa-sort-numeric-up": "",
"fas fa-sort-up": "",
"fab fa-soundcloud": "",
"fas fa-space-shuttle": "",
"fab fa-speakap": "",
"fas fa-spinner": "",
"fab fa-spotify": "",
"fas fa-square": "",
"fab fa-stack-exchange": "",
"fab fa-stack-overflow": "",
"fas fa-star": "",
"fas fa-star-half": "",
"fab fa-staylinked": "",
"fab fa-steam": "",
"fab fa-steam-square": "",
"fab fa-steam-symbol": "",
"fas fa-step-backward": "",
"fas fa-step-forward": "",
"fas fa-stethoscope": "",
"fab fa-sticker-mule": "",
"fas fa-sticky-note": "",
"fas fa-stop": "",
"fas fa-stop-circle": "",
"fab fa-strava": "",
"fas fa-street-view": "",
"fas fa-strikethrough": "",
"fab fa-stripe": "",
"fab fa-stripe-s": "",
"fab fa-studiovinari": "",
"fab fa-stumbleupon": "",
"fab fa-stumbleupon-circle": "",
"fas fa-subscript": "",
"fas fa-subway": "",
"fas fa-suitcase": "",
"fas fa-sun": "",
"fab fa-superpowers": "",
"fas fa-superscript": "",
"fab fa-supple": "",
"fas fa-sync": "",
"fas fa-sync-alt": "",
"fas fa-table": "",
"fas fa-tablet": "",
"fas fa-tablet-alt": "",
"fas fa-tachometer-alt": "",
"fas fa-tag": "",
"fas fa-tags": "",
"fas fa-tasks": "",
"fas fa-taxi": "",
"fab fa-telegram": "",
"fab fa-telegram-plane": "",
"fab fa-tencent-weibo": "",
"fas fa-terminal": "",
"fas fa-text-height": "",
"fas fa-text-width": "",
"fas fa-th": "",
"fas fa-th-large": "",
"fas fa-th-list": "",
"fab fa-themeisle": "",
"fas fa-thermometer-empty": "",
"fas fa-thermometer-full": "",
"fas fa-thermometer-half": "",
"fas fa-thermometer-quarter": "",
"fas fa-thermometer-three-quarters": "",
"fas fa-thumbs-down": "",
"fas fa-thumbs-up": "",
"fas fa-thumbtack": "",
"fas fa-ticket-alt": "",
"fas fa-times": "",
"fas fa-times-circle": "",
"fas fa-tint": "",
"fas fa-toggle-off": "",
"fas fa-toggle-on": "",
"fas fa-trademark": "",
"fas fa-train": "",
"fas fa-transgender": "",
"fas fa-transgender-alt": "",
"fas fa-trash": "",
"fas fa-trash-alt": "",
"fas fa-tree": "",
"fab fa-trello": "",
"fab fa-tripadvisor": "",
"fas fa-trophy": "",
"fas fa-truck": "",
"fas fa-tty": "",
"fab fa-tumblr": "",
"fab fa-tumblr-square": "",
"fas fa-tv": "",
"fab fa-twitch": "",
"fab fa-twitter": "",
"fab fa-twitter-square": "",
"fab fa-typo3": "",
"fab fa-uber": "",
"fab fa-uikit": "",
"fas fa-umbrella": "",
"fas fa-underline": "",
"fas fa-undo": "",
"fas fa-undo-alt": "",
"fab fa-uniregistry": "",
"fas fa-universal-access": "",
"fas fa-university": "",
"fas fa-unlink": "",
"fas fa-unlock": "",
"fas fa-unlock-alt": "",
"fab fa-untappd": "",
"fas fa-upload": "",
"fab fa-usb": "",
"fas fa-user": "",
"fas fa-user-circle": "",
"fas fa-user-md": "",
"fas fa-user-plus": "",
"fas fa-user-secret": "",
"fas fa-user-times": "",
"fas fa-users": "",
"fab fa-ussunnah": "",
"fas fa-utensil-spoon": "",
"fas fa-utensils": "",
"fab fa-vaadin": "",
"fas fa-venus": "",
"fas fa-venus-double": "",
"fas fa-venus-mars": "",
"fab fa-viacoin": "",
"fab fa-viadeo": "",
"fab fa-viadeo-square": "",
"fab fa-viber": "",
"fas fa-video": "",
"fab fa-vimeo": "",
"fab fa-vimeo-square": "",
"fab fa-vimeo-v": "",
"fab fa-vine": "",
"fab fa-vk": "",
"fab fa-vnv": "",
"fas fa-volume-down": "",
"fas fa-volume-off": "",
"fas fa-volume-up": "",
"fab fa-vuejs": "",
"fab fa-weibo": "",
"fab fa-weixin": "",
"fab fa-whatsapp": "",
"fab fa-whatsapp-square": "",
"fas fa-wheelchair": "",
"fab fa-whmcs": "",
"fas fa-wifi": "",
"fab fa-wikipedia-w": "",
"fas fa-window-close": "",
"fas fa-window-maximize": "",
"fas fa-window-minimize": "",
"fas fa-window-restore": "",
"fab fa-windows": "",
"fas fa-won-sign": "",
"fab fa-wordpress": "",
"fab fa-wordpress-simple": "",
"fab fa-wpbeginner": "",
"fab fa-wpexplorer": "",
"fab fa-wpforms": "",
"fas fa-wrench": "",
"fab fa-xbox": "",
"fab fa-xing": "",
"fab fa-xing-square": "",
"fab fa-y-combinator": "",
"fab fa-yahoo": "",
"fab fa-yandex": "",
"fab fa-yandex-international": "",
"fab fa-yelp": "",
"fas fa-yen-sign": "",
"fab fa-yoast": "",
"fab fa-youtube": "",
}
|
// Copyright 2022 PingCAP, Inc. Licensed under Apache-2.0.
package utils
import (
"context"
"crypto/tls"
"os"
"sync"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
backuppb "github.com/pingcap/kvproto/pkg/brpb"
"github.com/pingcap/log"
berrors "github.com/pingcap/tidb/br/pkg/errors"
"github.com/pingcap/tidb/br/pkg/logutil"
pd "github.com/tikv/pd/client"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/keepalive"
)
const (
	// dialTimeout bounds how long a single gRPC dial to a store may take.
	dialTimeout = 30 * time.Second
	// resetRetryTimes is how many times ResetBackupClient retries dialing.
	resetRetryTimes = 3
)
// Pool is a lazy pool of gRPC channels.
// When `Get` called, it lazily allocates new connection if connection not full.
// If it's full, then it will return allocated channels round-robin.
type Pool struct {
	mu sync.Mutex // guards conns and next

	conns []*grpc.ClientConn // connections allocated so far, up to cap
	next  int                // round-robin cursor into conns once full
	cap   int                // maximum number of connections to allocate
	// newConn is the factory used to dial a fresh connection.
	newConn func(ctx context.Context) (*grpc.ClientConn, error)
}
// takeConns detaches and returns every connection currently held by the
// pool, leaving it empty with the round-robin cursor rewound.
func (p *Pool) takeConns() []*grpc.ClientConn {
	p.mu.Lock()
	defer p.mu.Unlock()
	taken := p.conns
	p.conns = nil
	p.next = 0
	return taken
}
// Close closes the conn pool.
func (p *Pool) Close() {
	conns := p.takeConns()
	for _, conn := range conns {
		if err := conn.Close(); err != nil {
			log.Warn("failed to close clientConn", zap.String("target", conn.Target()), zap.Error(err))
		}
	}
}
// Get tries to get an existing connection from the pool, or make a new one if the pool not full.
func (p *Pool) Get(ctx context.Context) (*grpc.ClientConn, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	// Grow lazily until the pool reaches capacity.
	if len(p.conns) < p.cap {
		conn, err := p.newConn(ctx)
		if err != nil {
			return nil, err
		}
		p.conns = append(p.conns, conn)
		return conn, nil
	}
	// Full: hand out the existing connections round-robin.
	picked := p.conns[p.next]
	p.next = (p.next + 1) % p.cap
	return picked, nil
}
// NewConnPool creates a new Pool by the specified conn factory function and capacity.
func NewConnPool(capacity int, newConn func(ctx context.Context) (*grpc.ClientConn, error)) *Pool {
	return &Pool{
		cap:     capacity,
		conns:   make([]*grpc.ClientConn, 0, capacity),
		newConn: newConn,
		// mu needs no explicit initializer: the zero value of sync.Mutex
		// is an unlocked mutex, so `mu: sync.Mutex{}` was redundant.
	}
}
// StoreManager caches one gRPC connection per store ID, dialing lazily
// through the PD client and applying the configured keepalive/TLS options.
type StoreManager struct {
	pdClient pd.Client
	// grpcClis is the mutex-guarded per-store connection cache.
	grpcClis struct {
		mu   sync.Mutex
		clis map[uint64]*grpc.ClientConn
	}
	// keepalive is applied to every dialed connection.
	keepalive keepalive.ClientParameters
	// tlsConf, when non-nil, switches dialing from insecure to TLS.
	tlsConf *tls.Config
}
// GetKeepalive returns the keepalive parameters used when dialing stores.
func (mgr *StoreManager) GetKeepalive() keepalive.ClientParameters {
	return mgr.keepalive
}
// NewStoreManager create a new manager for gRPC connections to stores.
func NewStoreManager(pdCli pd.Client, kl keepalive.ClientParameters, tlsConf *tls.Config) *StoreManager {
	mgr := &StoreManager{
		pdClient:  pdCli,
		keepalive: kl,
		tlsConf:   tlsConf,
	}
	mgr.grpcClis.clis = make(map[uint64]*grpc.ClientConn)
	return mgr
}
// PDClient returns the PD client this manager was constructed with.
func (mgr *StoreManager) PDClient() pd.Client {
	return mgr.pdClient
}
// getGrpcConnLocked dials a gRPC connection to the given store. The caller
// must hold mgr.grpcClis.mu (hence the Locked suffix; see WithConn and
// ResetBackupClient).
func (mgr *StoreManager) getGrpcConnLocked(ctx context.Context, storeID uint64) (*grpc.ClientConn, error) {
	// Test hook: when enabled, optionally create a signal file so an
	// external shell can observe the dial, then pause for 3s.
	failpoint.Inject("hint-get-backup-client", func(v failpoint.Value) {
		log.Info("failpoint hint-get-backup-client injected, "+
			"process will notify the shell.", zap.Uint64("store", storeID))
		if sigFile, ok := v.(string); ok {
			file, err := os.Create(sigFile)
			if err != nil {
				log.Warn("failed to create file for notifying, skipping notify", zap.Error(err))
			}
			if file != nil {
				file.Close()
			}
		}
		time.Sleep(3 * time.Second)
	})
	// Resolve the store's address through PD.
	store, err := mgr.pdClient.GetStore(ctx, storeID)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Dial insecurely unless a TLS config was provided.
	opt := grpc.WithTransportCredentials(insecure.NewCredentials())
	if mgr.tlsConf != nil {
		opt = grpc.WithTransportCredentials(credentials.NewTLS(mgr.tlsConf))
	}
	ctx, cancel := context.WithTimeout(ctx, dialTimeout)
	bfConf := backoff.DefaultConfig
	bfConf.MaxDelay = time.Second * 3
	// Prefer the peer address; fall back to the store address when unset.
	addr := store.GetPeerAddress()
	if addr == "" {
		addr = store.GetAddress()
	}
	// Blocking dial bounded by dialTimeout via the derived context.
	conn, err := grpc.DialContext(
		ctx,
		addr,
		opt,
		grpc.WithBlock(),
		grpc.WithConnectParams(grpc.ConnectParams{Backoff: bfConf}),
		grpc.WithKeepaliveParams(mgr.keepalive),
	)
	cancel()
	if err != nil {
		return nil, berrors.ErrFailedToConnect.Wrap(err).GenWithStack("failed to make connection to store %d", storeID)
	}
	return conn, nil
}
// WithConn runs f with a gRPC connection to the given store, dialing and
// caching the connection on first use. Note the cache mutex is held for
// the entire call, including while f runs and while dialing.
func (mgr *StoreManager) WithConn(ctx context.Context, storeID uint64, f func(*grpc.ClientConn)) error {
	if ctx.Err() != nil {
		return errors.Trace(ctx.Err())
	}
	mgr.grpcClis.mu.Lock()
	defer mgr.grpcClis.mu.Unlock()
	if conn, ok := mgr.grpcClis.clis[storeID]; ok {
		// Find a cached backup client.
		f(conn)
		return nil
	}
	conn, err := mgr.getGrpcConnLocked(ctx, storeID)
	if err != nil {
		return errors.Trace(err)
	}
	// Cache the conn.
	mgr.grpcClis.clis[storeID] = conn
	f(conn)
	return nil
}
// ResetBackupClient reset the connection for backup client.
// It drops any cached connection for the store, then redials (with up to
// resetRetryTimes attempts and a growing sleep between attempts) and
// returns a fresh BackupClient over the new connection.
func (mgr *StoreManager) ResetBackupClient(ctx context.Context, storeID uint64) (backuppb.BackupClient, error) {
	if ctx.Err() != nil {
		return nil, errors.Trace(ctx.Err())
	}
	mgr.grpcClis.mu.Lock()
	defer mgr.grpcClis.mu.Unlock()
	if conn, ok := mgr.grpcClis.clis[storeID]; ok {
		// Find a cached backup client.
		log.Info("Reset backup client", zap.Uint64("storeID", storeID))
		// Close failures are logged and ignored; the entry is dropped
		// either way so the next dial starts clean.
		err := conn.Close()
		if err != nil {
			log.Warn("close backup connection failed, ignore it", zap.Uint64("storeID", storeID))
		}
		delete(mgr.grpcClis.clis, storeID)
	}
	var (
		conn *grpc.ClientConn
		err  error
	)
	for retry := 0; retry < resetRetryTimes; retry++ {
		conn, err = mgr.getGrpcConnLocked(ctx, storeID)
		if err != nil {
			log.Warn("failed to reset grpc connection, retry it",
				zap.Int("retry time", retry), logutil.ShortError(err))
			// Back off 3s, 4s, 5s, ... before the next attempt.
			time.Sleep(time.Duration(retry+3) * time.Second)
			continue
		}
		mgr.grpcClis.clis[storeID] = conn
		break
	}
	// err is nil here iff some retry succeeded.
	if err != nil {
		return nil, errors.Trace(err)
	}
	return backuppb.NewBackupClient(conn), nil
}
// Close closes all client in Mgr.
func (mgr *StoreManager) Close() {
	if mgr == nil {
		return
	}
	mgr.grpcClis.mu.Lock()
	defer mgr.grpcClis.mu.Unlock()
	for _, conn := range mgr.grpcClis.clis {
		if err := conn.Close(); err != nil {
			log.Error("fail to close Mgr", zap.Error(err))
		}
	}
}
// TLSConfig returns the TLS configuration used for store connections, or
// nil when the manager is nil or TLS was not configured.
func (mgr *StoreManager) TLSConfig() *tls.Config {
	if mgr == nil {
		return nil
	}
	return mgr.tlsConf
}
|
package git
import (
"strings"
"testing"
)
// TestPatch seeds a repo, edits README, diffs the original and updated
// trees, and checks the rendered patch text contains the expected hunk.
func TestPatch(t *testing.T) {
	t.Parallel()
	repo := createTestRepo(t)
	defer cleanupTestRepo(t, repo)
	_, originalTreeId := seedTestRepo(t, repo)
	originalTree, err := repo.LookupTree(originalTreeId)
	checkFatal(t, err)
	_, newTreeId := updateReadme(t, repo, "file changed\n")
	newTree, err := repo.LookupTree(newTreeId)
	checkFatal(t, err)
	opts := &DiffOptions{
		OldPrefix: "a",
		NewPrefix: "b",
	}
	diff, err := repo.DiffTreeToTree(originalTree, newTree, opts)
	checkFatal(t, err)
	patch, err := diff.Patch(0)
	checkFatal(t, err)
	patchStr, err := patch.String()
	checkFatal(t, err)
	// The expected text embeds the exact blob hashes of the seeded README.
	if strings.Index(patchStr, "diff --git a/README b/README\nindex 257cc56..820734a 100644\n--- a/README\n+++ b/README\n@@ -1 +1 @@\n-foo\n+file changed") == -1 {
		t.Fatalf("patch was bad")
	}
}
|
package pythagorean
// Triplet holds the three sides (a, b, c) of a candidate Pythagorean triplet.
type Triplet [3]int
// Range returns every Pythagorean triplet (a, b, c) with a <= b whose
// three sides all lie within [min, max].
func Range(min, max int) []Triplet {
	triplets := []Triplet{}
	for a := min; a <= max; a++ {
		for b := a; b <= max; b++ {
			hypSquared := a*a + b*b
			c := squareRoot(hypSquared)
			if c*c == hypSquared && min <= c && c <= max {
				triplets = append(triplets, Triplet{a, b, c})
			}
		}
	}
	return triplets
}
// isSquare reports whether n is a perfect square (false for n <= 0 except
// none; note n=0 reports false, matching the helper's 1-based search).
func isSquare(n int) bool {
	r := 1
	for r*r < n {
		r++
	}
	return r*r == n
}
// squareRoot returns the smallest integer r >= 1 with r*r >= n, i.e. the
// ceiling of the square root for n >= 1.
func squareRoot(n int) int {
	for r := 1; ; r++ {
		if r*r >= n {
			return r
		}
	}
}
// Sum returns all Pythagorean triplets (a, b, c) with a <= b whose sides
// add up to exactly p.
func Sum(p int) []Triplet {
	triplets := []Triplet{}
	for a := 1; a <= p; a++ {
		for b := a; b <= p-a; b++ {
			c := p - a - b
			if a*a+b*b == c*c {
				triplets = append(triplets, Triplet{a, b, c})
			}
		}
	}
	return triplets
}
|
package MonitorInfo
// InterfacesInfo aggregates bandwidth figures and optional remote IPs.
// NOTE(review): map keys appear to be interface names (cf. UPFMonitorData's
// "interfaceName -> Mbps" note) — confirm against the producer.
type InterfacesInfo struct {
	BandwidthMaps map[string]BandwidthInfo `json:"bandwidthMaps"`
	// RemoteIpMaps is omitted from JSON when empty.
	RemoteIpMaps map[string]string `json:"remoteIpMaps,omitempty"`
}
// BandwidthInfo holds receive (Rx) and transmit (Tx) rates for one link.
// Units are not declared here; UPFMonitorData documents Mbps for its use.
type BandwidthInfo struct {
	Rx float64 `json:"rx"`
	Tx float64 `json:"tx"`
}
// ConnectionInfo describes measured link quality for one connection.
type ConnectionInfo struct {
	PacketLoss float64 `json:"loss"`  // 0~100%
	DelayTime  float64 `json:"delay"` // ms
}
// UPFMonitorData is one monitoring sample reported for a UPF.
type UPFMonitorData struct {
	// CpuUsage float64 `json:"cpuUsage"` // 0~100%
	PacketRates     map[string]BandwidthInfo   `json:"packetRate"`  // interfaceName -> Mbps
	ConnectionInfos map[string]*ConnectionInfo `json:"connections"` // Link's Remote IP -> Traffic info
}
|
package main
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"flag"
"io/ioutil"
"math/big"
"strings"
"time"
)
var (
	// extKeyUsage selects the certificate's extended key usage:
	// "server" (the default) or "client".
	extKeyUsage = flag.String("extKeyUsage", "server", "server or client")
)
// run generates a 2048-bit RSA key and a one-year self-signed certificate
// with CommonName `name`, then writes three files:
//
//	name.key  private key, PKCS#1 DER
//	name.crt  certificate, DER
//	name.pem  PKCS#8 private key + certificate, PEM (for Azure Key Vault)
//
// The -extKeyUsage flag selects client or server authentication usage.
func run(name string) error {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return err
	}
	// Random 128-bit serial number.
	serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
	if err != nil {
		return err
	}
	now := time.Now()
	template := &x509.Certificate{
		SerialNumber:          serialNumber,
		NotBefore:             now,
		NotAfter:              now.AddDate(1, 0, 0),
		Subject:               pkix.Name{CommonName: name},
		BasicConstraintsValid: true,
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
	}
	switch strings.ToLower(*extKeyUsage) {
	case "client":
		template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}
	case "server":
		template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}
	}
	// Self-signed: the template doubles as the issuer certificate.
	cert, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key)
	if err != nil {
		return err
	}
	// key in der format
	err = ioutil.WriteFile(name+".key", x509.MarshalPKCS1PrivateKey(key), 0600)
	if err != nil {
		return err
	}
	// cert in der format
	err = ioutil.WriteFile(name+".crt", cert, 0666)
	if err != nil {
		return err
	}
	buf := &bytes.Buffer{}
	b, err := x509.MarshalPKCS8PrivateKey(key)
	if err != nil {
		return err
	}
	err = pem.Encode(buf, &pem.Block{Type: "PRIVATE KEY", Bytes: b})
	if err != nil {
		return err
	}
	err = pem.Encode(buf, &pem.Block{Type: "CERTIFICATE", Bytes: cert})
	if err != nil {
		return err
	}
	// key and cert in PKCS#8 PEM format for Azure Key Vault.
	return ioutil.WriteFile(name+".pem", buf.Bytes(), 0600)
}
// main generates key/cert files named after the first CLI argument.
func main() {
	flag.Parse()
	err := run(flag.Arg(0))
	if err != nil {
		panic(err)
	}
}
|
package handlers
import (
"encoding/json"
"github.com/roger-king/go-ecommerce/pkg/models"
"github.com/roger-king/go-ecommerce/pkg/utilities"
"net/http"
)
// FindProductsController writes the full product list as JSON, or a 400
// error response when the lookup fails.
func FindProductsController(w http.ResponseWriter, req *http.Request) {
	products, err := models.AllProducts()
	if err != nil {
		utilities.RespondWithError(w, http.StatusBadRequest, "Error getting products")
		// Bug fix: execution previously fell through and also wrote the
		// success payload, emitting two responses for one request.
		return
	}
	// TODO: Handle Errors on Service side.
	utilities.RespondWithJSON(w, http.StatusOK, products)
}
// CreateProductsController decodes a product from the request body,
// persists it, and echoes it back with status 201. Invalid JSON yields 400.
func CreateProductsController(w http.ResponseWriter, req *http.Request) {
	// Bug fix: the defer was previously registered *after* the decode
	// error check, so the body was never closed on the error path.
	defer req.Body.Close()
	var p models.Product
	if err := json.NewDecoder(req.Body).Decode(&p); err != nil {
		utilities.RespondWithError(w, http.StatusBadRequest, "Invalid request payload")
		return
	}
	// NOTE(review): CreateProduct's result (if any) is ignored — verify
	// whether it can fail and should be reported.
	models.CreateProduct(p)
	utilities.RespondWithJSON(w, http.StatusCreated, p)
}
|
package stuff
import (
"fmt"
"log"
"os"
"strings"
"github.com/PuerkitoBio/goquery"
)
// Compile-time check that PhoneScoopScraper satisfies the Scraper contract.
var _ Scraper = PhoneScoopScraper

// PhoneScoopScraper crawls phonescoop.com's all-phones index, visits each
// device page, and returns one Vitalstats per device. Battery text is
// normalized into BatteryRem ("yes"/"no"/"unk"). Network failures are fatal.
var PhoneScoopScraper = func() []Vitalstats {
	allDevices := make([]Vitalstats, 0, 2500)
	doc, err := goquery.NewDocument("http://www.phonescoop.com/phones/index_all.php")
	if err != nil {
		log.Fatal(err)
	}
	devicesSelection := doc.Find("p.phone")
	devicesCount := len(devicesSelection.Nodes)
	devicesSelection.Each(func(i int, s *goquery.Selection) {
		vitals := Vitalstats{}
		vitals.Name = s.Find("a").Text()
		vitals.Link = s.Find("a").AttrOr("href", "")
		doc2, err := goquery.NewDocument("http://www.phonescoop.com/phones/" + vitals.Link)
		if err != nil {
			log.Fatal(err)
		}
		doc2.Find("div#content table.hgrid tr").Each(func(_ int, s *goquery.Selection) {
			// assume two columns; first is labels, second is values.
			rowCells := s.Find("td")
			key := strings.TrimSpace(rowCells.First().Text())
			val := strings.TrimSpace(rowCells.Next().Text())
			// fmt.Fprintf(os.Stderr, ">>> k: %q ;; v: %q\n", key, val)
			switch key {
			case "Battery":
				vitals.Power = val
			}
			// NOT DETECTABLE HERE:
			// - release date
			// - simple "phablet" labels (though as we've discovered, that's pretty crapshoot anyway)
			// - CM version
		})
		// attempt to normalize battery type reports. phonescoop is pretty consistent, but also freetexts it.
		switch {
		case strings.Contains(vitals.Power, "Non-removable"):
			vitals.BatteryRem = "no"
		case strings.Contains(vitals.Power, "Removable"):
			vitals.BatteryRem = "yes"
		default:
			fmt.Fprintf(os.Stderr, "> couldn't detect battery removability from %q\n", vitals.Power)
			vitals.BatteryRem = "unk"
		}
		allDevices = append(allDevices, vitals)
		// Bug fix: i is zero-based, so progress used to read "0/N" for the
		// first device and "N-1/N" for the last; report 1-based counts.
		fmt.Fprintf(os.Stderr, "scanned device %d/%d\n", i+1, devicesCount)
	})
	return allDevices
}
|
package statsd
import (
"testing"
"github.com/atlassian/gostatsd"
"github.com/atlassian/gostatsd/pb"
"github.com/stretchr/testify/require"
)
// TestHttpForwarderV2Translation feeds one metric of each type (gauge,
// counter, timer, set) in both a rate=1 and a rate=0.1 variant through a
// MetricMap and asserts the exact protobuf translation. Rate handling
// differs per type, as the per-metric comments note: ignored for gauges
// and sets, multiplied out for counters, propagated (as SampleCount) for
// timers.
func TestHttpForwarderV2Translation(t *testing.T) {
	t.Parallel()
	metrics := []*gostatsd.Metric{
		{
			Name:     "TestHttpForwarderTranslation.gauge",
			Value:    12345,
			Tags:     gostatsd.Tags{"TestHttpForwarderTranslation.gauge.tag1", "TestHttpForwarderTranslation.gauge.tag2"},
			Hostname: "TestHttpForwarderTranslation.gauge.host",
			Rate:     1,
			Type:     gostatsd.GAUGE,
		},
		{
			Name:     "TestHttpForwarderTranslation.gaugerate",
			Value:    12346,
			Tags:     gostatsd.Tags{"TestHttpForwarderTranslation.gaugerate.tag1", "TestHttpForwarderTranslation.gaugerate.tag2"},
			Hostname: "TestHttpForwarderTranslation.gaugerate.host",
			Rate:     0.1, // ignored
			Type:     gostatsd.GAUGE,
		},
		{
			Name:     "TestHttpForwarderTranslation.counter",
			Value:    12347,
			Tags:     gostatsd.Tags{"TestHttpForwarderTranslation.counter.tag1", "TestHttpForwarderTranslation.counter.tag2"},
			Hostname: "TestHttpForwarderTranslation.counter.host",
			Rate:     1,
			Type:     gostatsd.COUNTER,
		},
		{
			Name:     "TestHttpForwarderTranslation.counterrate",
			Value:    12348,
			Tags:     gostatsd.Tags{"TestHttpForwarderTranslation.counterrate.tag1", "TestHttpForwarderTranslation.counterrate.tag2"},
			Hostname: "TestHttpForwarderTranslation.counterrate.host",
			Rate:     0.1, // multiplied out
			Type:     gostatsd.COUNTER,
		},
		{
			Name:     "TestHttpForwarderTranslation.timer",
			Value:    12349,
			Tags:     gostatsd.Tags{"TestHttpForwarderTranslation.timer.tag1", "TestHttpForwarderTranslation.timer.tag2"},
			Hostname: "TestHttpForwarderTranslation.timer.host",
			Rate:     1,
			Type:     gostatsd.TIMER,
		},
		{
			Name:     "TestHttpForwarderTranslation.timerrate",
			Value:    12350,
			Tags:     gostatsd.Tags{"TestHttpForwarderTranslation.timerrate.tag1", "TestHttpForwarderTranslation.timerrate.tag2"},
			Hostname: "TestHttpForwarderTranslation.timerrate.host",
			Rate:     0.1, // propagated
			Type:     gostatsd.TIMER,
		},
		{
			Name:        "TestHttpForwarderTranslation.set",
			StringValue: "12351",
			Tags:        gostatsd.Tags{"TestHttpForwarderTranslation.set.tag1", "TestHttpForwarderTranslation.set.tag2"},
			Hostname:    "TestHttpForwarderTranslation.set.host",
			Rate:        1,
			Type:        gostatsd.SET,
		},
		{
			Name:        "TestHttpForwarderTranslation.setrate",
			StringValue: "12352",
			Tags:        gostatsd.Tags{"TestHttpForwarderTranslation.setrate.tag1", "TestHttpForwarderTranslation.setrate.tag2"},
			Hostname:    "TestHttpForwarderTranslation.setrate.host",
			Rate:        0.1, // ignored
			Type:        gostatsd.SET,
		},
	}
	// Aggregate the metrics and translate the resulting map.
	mm := gostatsd.NewMetricMap()
	for _, metric := range metrics {
		mm.Receive(metric)
	}
	pbMetrics := translateToProtobufV2(mm)
	// Expected output: TagMap keys are "tag1,tag2,s:hostname".
	expected := &pb.RawMessageV2{
		Gauges: map[string]*pb.GaugeTagV2{
			"TestHttpForwarderTranslation.gauge": {
				TagMap: map[string]*pb.RawGaugeV2{
					"TestHttpForwarderTranslation.gauge.tag1,TestHttpForwarderTranslation.gauge.tag2,s:TestHttpForwarderTranslation.gauge.host": {
						Tags:     []string{"TestHttpForwarderTranslation.gauge.tag1", "TestHttpForwarderTranslation.gauge.tag2"},
						Hostname: "TestHttpForwarderTranslation.gauge.host",
						Value:    12345,
					},
				},
			},
			"TestHttpForwarderTranslation.gaugerate": {
				TagMap: map[string]*pb.RawGaugeV2{
					"TestHttpForwarderTranslation.gaugerate.tag1,TestHttpForwarderTranslation.gaugerate.tag2,s:TestHttpForwarderTranslation.gaugerate.host": {
						Tags:     []string{"TestHttpForwarderTranslation.gaugerate.tag1", "TestHttpForwarderTranslation.gaugerate.tag2"},
						Hostname: "TestHttpForwarderTranslation.gaugerate.host",
						Value:    12346,
					},
				},
			},
		},
		Counters: map[string]*pb.CounterTagV2{
			"TestHttpForwarderTranslation.counter": {
				TagMap: map[string]*pb.RawCounterV2{
					"TestHttpForwarderTranslation.counter.tag1,TestHttpForwarderTranslation.counter.tag2,s:TestHttpForwarderTranslation.counter.host": {
						Tags:     []string{"TestHttpForwarderTranslation.counter.tag1", "TestHttpForwarderTranslation.counter.tag2"},
						Hostname: "TestHttpForwarderTranslation.counter.host",
						Value:    12347,
					},
				},
			},
			"TestHttpForwarderTranslation.counterrate": {
				TagMap: map[string]*pb.RawCounterV2{
					"TestHttpForwarderTranslation.counterrate.tag1,TestHttpForwarderTranslation.counterrate.tag2,s:TestHttpForwarderTranslation.counterrate.host": {
						Tags:     []string{"TestHttpForwarderTranslation.counterrate.tag1", "TestHttpForwarderTranslation.counterrate.tag2"},
						Hostname: "TestHttpForwarderTranslation.counterrate.host",
						Value:    123480, // rate is multipled out
					},
				},
			},
		},
		Timers: map[string]*pb.TimerTagV2{
			"TestHttpForwarderTranslation.timer": {
				TagMap: map[string]*pb.RawTimerV2{
					"TestHttpForwarderTranslation.timer.tag1,TestHttpForwarderTranslation.timer.tag2,s:TestHttpForwarderTranslation.timer.host": {
						Tags:        []string{"TestHttpForwarderTranslation.timer.tag1", "TestHttpForwarderTranslation.timer.tag2"},
						Hostname:    "TestHttpForwarderTranslation.timer.host",
						SampleCount: 1,
						Values:      []float64{12349},
					},
				},
			},
			"TestHttpForwarderTranslation.timerrate": {
				TagMap: map[string]*pb.RawTimerV2{
					"TestHttpForwarderTranslation.timerrate.tag1,TestHttpForwarderTranslation.timerrate.tag2,s:TestHttpForwarderTranslation.timerrate.host": {
						Tags:        []string{"TestHttpForwarderTranslation.timerrate.tag1", "TestHttpForwarderTranslation.timerrate.tag2"},
						Hostname:    "TestHttpForwarderTranslation.timerrate.host",
						SampleCount: 10,
						Values:      []float64{12350},
					},
				},
			},
		},
		Sets: map[string]*pb.SetTagV2{
			"TestHttpForwarderTranslation.set": {
				TagMap: map[string]*pb.RawSetV2{
					"TestHttpForwarderTranslation.set.tag1,TestHttpForwarderTranslation.set.tag2,s:TestHttpForwarderTranslation.set.host": {
						Tags:     []string{"TestHttpForwarderTranslation.set.tag1", "TestHttpForwarderTranslation.set.tag2"},
						Hostname: "TestHttpForwarderTranslation.set.host",
						Values:   []string{"12351"},
					},
				},
			},
			"TestHttpForwarderTranslation.setrate": {
				TagMap: map[string]*pb.RawSetV2{
					"TestHttpForwarderTranslation.setrate.tag1,TestHttpForwarderTranslation.setrate.tag2,s:TestHttpForwarderTranslation.setrate.host": {
						Tags:     []string{"TestHttpForwarderTranslation.setrate.tag1", "TestHttpForwarderTranslation.setrate.tag2"},
						Hostname: "TestHttpForwarderTranslation.setrate.host",
						Values:   []string{"12352"},
					},
				},
			},
		},
	}
	// Compared per metric type (rather than one EqualValues on the whole
	// message) so a mismatch reports a smaller, readable diff.
	//require.EqualValues(t, expected, pbMetrics)
	require.EqualValues(t, expected.Gauges, pbMetrics.Gauges)
	require.EqualValues(t, expected.Counters, pbMetrics.Counters)
	require.EqualValues(t, expected.Timers, pbMetrics.Timers)
	require.EqualValues(t, expected.Sets, pbMetrics.Sets)
}
// BenchmarkHttpForwarderV2TranslateAll measures translating a pre-built
// MetricMap (1000 metrics cycling through all four types) into protobuf.
func BenchmarkHttpForwarderV2TranslateAll(b *testing.B) {
	mm := gostatsd.NewMetricMap()
	for i := 0; i < 1000; i++ {
		mm.Receive(&gostatsd.Metric{
			Name:        "bench.metric",
			Value:       123.456,
			StringValue: "123.456",
			Tags:        gostatsd.Tags{"tag1", "tag2"},
			Hostname:    "hostname",
			SourceIP:    "sourceip",
			Timestamp:   10,
			Type:        1 + gostatsd.MetricType(i%4), // Use all types
		})
	}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		translateToProtobufV2(mm)
	}
}
|
// Copyright (c) 2020 twihike. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package structconv
import (
"errors"
"reflect"
"strings"
)
var (
	// requiredTagValue is the tag option marking a field as mandatory.
	requiredTagValue = "required"
	// convTagValue is the tag option enabling value conversion.
	convTagValue = "conv"
)
// fieldInfo bundles what walkStructFields reports for each settable field.
type fieldInfo struct {
	Meta  reflect.StructField // static field metadata (name, tag, type)
	Value reflect.Value       // the field value itself
	// Child is the struct reached by following the field's pointers,
	// valid only when ChildOK is true.
	Child   reflect.Value
	ChildOK bool
	// Collections is the chain of slice/array types wrapping a struct
	// element type, as computed by followStructCollectionsTypes.
	Collections []reflect.Type
}
// decodeTagInfo is the parsed form of a struct field's decode tag.
type decodeTagInfo struct {
	OK       bool   // tag was present at all
	Key      string // first tag element, unless it was "-"
	Required bool   // "required" option seen
	Omitted  bool   // first element was "-"
	Conv     bool   // "conv" option seen
}
// checkStructPtr verifies that structPtr is a pointer to a struct and
// returns the dereferenced struct value. A nil pointer fails the second
// check because its Elem() is an invalid Value.
func checkStructPtr(structPtr interface{}) (reflect.Value, error) {
	ptr := reflect.ValueOf(structPtr)
	if ptr.Kind() != reflect.Ptr {
		return ptr, errors.New("structconv: structPtr must be a struct pointer")
	}
	elem := ptr.Elem()
	if elem.Kind() != reflect.Struct {
		return elem, errors.New("structconv: structPtr must be a struct pointer")
	}
	return elem, nil
}
// walkStructFields walks the structure tree, calling walkFn for each field
// in the tree, including root. Unexported (non-settable) fields are
// skipped. For each field it also resolves the pointed-to child struct
// (if any) and the slice/array wrapper chain.
func walkStructFields(s reflect.Value, walkFn func(fieldInfo)) {
	sv := s
	st := sv.Type()
	for i := 0; i < sv.NumField(); i++ {
		fv := sv.Field(i)
		fm := st.Field(i)
		if !fv.CanSet() {
			continue
		}
		// Follow pointers without initializing nil ones (init=false).
		child, ok := followStruct(fv, false)
		collections := followStructCollectionsTypes(fv)
		fi := fieldInfo{
			Meta:        fm,
			Value:       fv,
			Child:       child,
			ChildOK:     ok,
			Collections: collections,
		}
		walkFn(fi)
	}
}
// followStruct dereferences fv through any chain of pointers until it
// reaches a struct, reporting success in its second result. When init is
// true, nil pointers to structs are allocated along the way; otherwise a
// nil pointer stops the walk (and the result is not a struct).
func followStruct(fv reflect.Value, init bool) (reflect.Value, bool) {
	// Follow the pointer.
	v := fv
	for v.Kind() == reflect.Ptr {
		if v.IsNil() {
			if !init {
				break
			}
			if v.Type().Elem().Kind() != reflect.Struct {
				break
			}
			// Initialize struct pointer.
			v.Set(reflect.New(v.Type().Elem()))
		}
		v = v.Elem()
	}
	if v.Kind() != reflect.Struct {
		// Return an invalid zero Value on failure.
		var v reflect.Value
		return v, false
	}
	return v, true
}
func followStructCollectionsTypes(rv reflect.Value) []reflect.Type {
var collections []reflect.Type
rt := rv.Type()
for {
switch rt.Kind() {
case reflect.Slice, reflect.Array:
collections = append(collections, rt)
case reflect.Ptr:
if rt.Elem().Kind() != reflect.Struct {
return nil
}
case reflect.Struct:
return collections
default:
return nil
}
rt = rt.Elem()
}
}
// initStruct initializes the struct pointer.
func initStruct(structPtr interface{}) error {
	sv, err := checkStructPtr(structPtr)
	if err == nil {
		doInitStruct(sv)
	}
	return err
}
// doInitStruct recursively allocates every reachable nil struct pointer
// among sv's settable fields (followStruct with init=true does the
// allocation), then descends into each child struct.
func doInitStruct(sv reflect.Value) {
	for i := 0; i < sv.NumField(); i++ {
		fv := sv.Field(i)
		if !fv.CanSet() {
			continue
		}
		if cv, ok := followStruct(fv, true); ok {
			doInitStruct(cv)
		}
	}
}
// parseDecodeTag parses the tag for decoding. A missing tag yields a
// zero-valued result (OK=false). The first comma-separated element is the
// key ("-" means omitted); the remaining elements toggle options.
func parseDecodeTag(f reflect.StructField, tagName string) (decodeTagInfo, error) {
	var info decodeTagInfo
	raw, found := f.Tag.Lookup(tagName)
	if !found {
		return info, nil
	}
	info.OK = true
	parts := strings.Split(raw, ",")
	// strings.Split always yields at least one element.
	if parts[0] == "-" {
		info.Omitted = true
	} else {
		info.Key = parts[0]
	}
	for _, opt := range parts[1:] {
		switch opt {
		case requiredTagValue:
			info.Required = true
		case convTagValue:
			info.Conv = true
		}
	}
	return info, nil
}
|
package routers
import (
"quickstart/controllers"
"github.com/astaxie/beego"
"github.com/astaxie/beego/context"
)
// init wires up the beego routes: a root controller, a fixed API route, a
// parameterized route, auto-routing for TestController, and a /v1
// namespace with shop/order/crm sub-namespaces.
func init() {
	beego.Router("/", &controllers.MainController{})
	// "*:List" maps every HTTP method on this path to TestController.List.
	beego.Router("/api/list", &controllers.TestController{}, "*:List")
	beego.Router("/person/:last/:first", &controllers.TestController{})
	beego.AutoRouter(&controllers.TestController{})
	ns :=
		beego.NewNamespace("/v1",
			beego.NSNamespace("/shop",
				beego.NSGet("/:id", func(ctx *context.Context) {
					// Echoes the request's Host header.
					ctx.Output.Body([]byte(ctx.Request.Host))
				}),
			),
			beego.NSNamespace("/order",
				beego.NSGet("/:id", func(ctx *context.Context) {
					ctx.Output.Body([]byte("orderinfo"))
				}),
			),
			beego.NSNamespace("/crm",
				beego.NSGet("/:id", func(ctx *context.Context) {
					ctx.Output.Body([]byte("crminfo"))
				}),
			),
		)
	beego.AddNamespace(ns)
}
|
package main
import (
"fmt"
"time"
)
// Stopwatch measures elapsed time with pause/resume and lap support.
// While stopped, `stop` records the stop instant; while running it is the
// zero time (see Active).
type Stopwatch struct {
	start, stop time.Time     // no need for lap, see mark
	mark        time.Duration // mark is the duration from the start that the most recent lap was started
	laps        []time.Duration // completed lap lengths, in order
}
// New creates a new stopwatch with starting time offset by
// a user defined value. Negative offsets result in a countdown
// prior to the start of the stopwatch.
func New(offset time.Duration, active bool) *Stopwatch {
	sw := &Stopwatch{}
	sw.Reset(offset, active)
	return sw
}
// Reset allows the re-use of a Stopwatch instead of creating
// a new one. The offset shifts the effective start time; when active is
// false the watch is left stopped at "now".
func (s *Stopwatch) Reset(offset time.Duration, active bool) {
	now := time.Now()
	s.start = now.Add(-offset)
	s.stop = now
	if active {
		s.stop = time.Time{} // zero stop time marks a running watch
	}
	s.mark = 0
	s.laps = nil
}
// Active returns true if the stopwatch is active (counting up).
// A zero stop time is the running marker (see Reset/Start).
func (s *Stopwatch) Active() bool {
	return s.stop.IsZero()
}
// Stop makes the stopwatch stop counting up; a no-op when already stopped.
func (s *Stopwatch) Stop() {
	if !s.Active() {
		return
	}
	s.stop = time.Now()
}
// Start initiates, or resumes the counting up process; a no-op when
// already running.
func (s *Stopwatch) Start() {
	if !s.Active() {
		// Shift the start forward by the time spent stopped, so the
		// paused interval is excluded from the elapsed time.
		// (time.Since is the idiomatic form of time.Now().Sub(x).)
		s.start = s.start.Add(time.Since(s.stop))
		s.stop = time.Time{}
	}
}
// ElapsedTime is the total time the stopwatch has been active.
func (s *Stopwatch) ElapsedTime() time.Duration {
	if !s.Active() {
		return s.stop.Sub(s.start)
	}
	return time.Since(s.start)
}
// LapTime is the time since the start of the lap (active time elapsed
// beyond the last mark).
func (s *Stopwatch) LapTime() time.Duration {
	return s.ElapsedTime() - s.mark
}
// Lap starts a new lap, and returns the length of
// the previous one.
func (s *Stopwatch) Lap() time.Duration {
	// Sample the elapsed time once. The previous implementation called
	// ElapsedTime twice (once for the lap length, once for the new mark),
	// so the sliver of time between the two calls belonged to no lap.
	elapsed := s.ElapsedTime()
	lap := elapsed - s.mark
	s.mark = elapsed
	s.laps = append(s.laps, lap)
	return lap
}
// Laps returns a copy of the completed lap times, insulating callers from
// later appends.
func (s *Stopwatch) Laps() []time.Duration {
	snapshot := make([]time.Duration, len(s.laps))
	copy(snapshot, s.laps)
	return snapshot
}
// main demos the stopwatch: created inactive and 5s "behind" (a countdown),
// started by t2 after 2.5s, sampled every second by t1, with a lap ticker
// t3 kicking in once the elapsed time becomes non-negative.
func main() {
	s := New(-5*time.Second, false)
	t1 := time.Tick(time.Second)
	t2 := time.After(time.Second * 25 / 10)
	var t3 <-chan time.Time // nil until the countdown completes
	for i := 0; i < 20; i++ {
		// at first time instance after start, run lap ticker
		if s.ElapsedTime() >= 0 && t3 == nil {
			fmt.Println("Starting lap ticker @", s.ElapsedTime())
			t3 = time.Tick(time.Second * 175 / 100)
		}
		select {
		case <-t1:
			fmt.Printf("Elapsed: %s, Lap: %s, Laps: %s\n", s.ElapsedTime(), s.LapTime(), s.Laps())
		case <-t2:
			fmt.Println("Starting Stopwatch")
			s.Start()
		case <-t3:
			fmt.Printf("Lap complete at %s, Lap was %s\n", s.ElapsedTime(), s.Lap())
		}
	}
}
|
package repositories
import (
"io"
"log"
)
// closeConnection closes conn, logging (rather than crashing on) any error.
// The previous log.Fatalln answered the inline question the wrong way: a
// close failure is rarely actionable, and Fatalln calls os.Exit(1), which
// skips all deferred cleanup elsewhere in the process.
func closeConnection(conn io.Closer) {
	if err := conn.Close(); err != nil {
		log.Println(err)
	}
}
|
package main
import (
"encoding/xml"
"io"
"strconv"
"strings"
"time"
"fmt"
)
// Message is the parsed form of a <message> element: its id/type/timestamp
// attributes plus the name attributes of any child <notify> elements.
type Message struct {
	Id        string
	Type      string
	Timestamp time.Time
	Notify    []string
}
// main token-decodes a WhatsApp-style XML snippet into Message values and
// prints them. Only start elements matter; all other token kinds are
// skipped.
func main() {
	x := `<message from="01234567890@s.whatsapp.net"
		id="1339831077-7"
		type="chat"
		timestamp="1339848755">
	<notify xmlns="urn:xmpp:whatsapp"
		name="Koen" />
	<request xmlns="urn:xmpp:receipts" />
	<body>Hello</body>
	</message>`
	d := xml.NewDecoder(strings.NewReader(x))
	messages := []*Message{}
	var current *Message
	for {
		token, err := d.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		start, ok := token.(xml.StartElement)
		if !ok {
			continue // only start elements carry the attributes we need
		}
		switch start.Name.Local {
		case "message":
			current = &Message{}
			for _, attr := range start.Attr {
				switch attr.Name.Local {
				case "id":
					current.Id = attr.Value
				case "type":
					current.Type = attr.Value
				case "timestamp":
					// Bug fix: the parse error used to be discarded with
					// `i, _ := strconv.Atoi(...)`, silently turning a
					// malformed timestamp into the Unix epoch.
					if secs, err := strconv.ParseInt(attr.Value, 10, 64); err == nil {
						current.Timestamp = time.Unix(secs, 0)
					}
				}
			}
			messages = append(messages, current)
		case "notify":
			// Bug fix: a <notify> arriving before any <message> used to
			// dereference a nil current pointer.
			if current == nil {
				continue
			}
			for _, attr := range start.Attr {
				if attr.Name.Local == "name" {
					current.Notify = append(current.Notify, attr.Value)
				}
			}
		}
	}
	for _, msg := range messages {
		fmt.Println(msg)
	}
}
|
package main
import (
"fmt"
"net/http"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
"github.com/labstack/echo/engine/standard"
"github.com/marmelab/snake-solver-server/lib"
)
// main serves the snake solver over HTTP on :1323: it accepts a board
// state as JSON via POST / and responds with the computed path.
func main() {
	// Data mirrors the JSON payload sent by the snake client.
	type Data struct {
		Width  int      `json:"width"`
		Height int      `json:"height"`
		Snake  [][2]int `json:"snake"`
		Apple  [2]int   `json:"apple"`
	}
	e := echo.New()
	// Allow the local dev frontend origins to call this API with credentials.
	e.Use(middleware.CORSWithConfig(middleware.CORSConfig{
		AllowOrigins:     []string{"http://localhost:9000", "http://0.0.0.0:9000"},
		AllowCredentials: true,
	}))
	e.POST("/", func(c echo.Context) error {
		d := new(Data)
		if err := c.Bind(d); err != nil {
			return err
		}
		path := computer.GetPath(d.Width, d.Height, d.Snake, d.Apple)
		fmt.Println(path)
		return c.JSON(http.StatusOK, path)
	})
	e.Run(standard.New(":1323"))
}
|
package main
import (
bt "../structs/btree"
que "../structs/queue"
"fmt"
)
// PrintTreeLayer prints the tree's node data in level order (BFS via the
// supplied queue). For each visited node it also prints the sum of the
// node's value plus its direct children's values, and returns the largest
// such sum. Node data must hold int values; a failed type assertion
// panics (the caller recovers).
func PrintTreeLayer(root *bt.BTree, queue *que.SliceQueue) int {
	var maxSum, preMaxSum int
	if root != nil {
		queue.Push(root)
	}
	for !queue.IsEmpty() {
		var ldata, rdata int // children's values; zero when a child is absent
		childNode := queue.Pop().(*bt.BTree)
		fmt.Print(childNode.Data, " ")
		if childNode.LeftChild != nil {
			queue.Push(childNode.LeftChild)
			ldata = childNode.LeftChild.Data.(int)
			fmt.Print("left:", ldata, " ")
		}
		if childNode.RightChild != nil {
			queue.Push(childNode.RightChild)
			rdata = childNode.RightChild.Data.(int)
			fmt.Print("right:", rdata, " ")
		}
		// Sum of this node and its immediate children.
		maxSum = childNode.Data.(int) + ldata + rdata
		fmt.Println(maxSum)
		if preMaxSum <= maxSum {
			preMaxSum = maxSum
		}
	}
	return preMaxSum
}
// main builds a binary tree from a ten-element slice and prints the
// maximum parent+children sum found by PrintTreeLayer. The deferred
// recover keeps type-assertion panics from crashing the demo.
func main() {
	defer func() {
		if err := recover(); err != nil {
			fmt.Println(err)
		}
	}()
	queue := &que.SliceQueue{Slice: []interface{}{}}
	data := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	fmt.Println("数组:", data)
	root := bt.ArrayToBTree(data, 0, len(data)-1)
	fmt.Println(PrintTreeLayer(root, queue))
}
|
package database
import (
"log"
"testing"
)
// Test_db checks basic connectivity by counting rows in `bumon` through a
// DataBaseSvc configured with an ODBC-style connection string.
// NOTE(review): credentials are hard-coded in source; prefer an env var.
func Test_db(t *testing.T) {
	dbSvc := new(DataBaseSvc)
	dbSvc.SetConnectionString("driver={SQL Server};server=xa-lsr-jimweiw7\\sqlserver14;database=ACCOUNT_000001_KAIKEI;user id=sa;pwd=xA123456;")
	db := dbSvc.CreateDb()
	var count int
	row := db.QueryRow("select count(*) from bumon")
	if err := row.Scan(&count); err != nil {
		// Bug fix: a failed scan used to be logged with log.Println but
		// never failed the test, so it always passed.
		t.Fatal("fetch the counts fail." + err.Error())
	}
	log.Println("the bumon count is", count)
}
// Test_SqlxDB repeats the Test_db connectivity check.
// NOTE(review): despite the name, it uses the same DataBaseSvc path as
// Test_db with no sqlx involvement visible here — verify intent.
func Test_SqlxDB(t *testing.T) {
	dbSvc := &DataBaseSvc{}
	dbSvc.SetConnectionString("driver={SQL Server};server=xa-lsr-jimweiw7\\sqlserver14;database=ACCOUNT_000001_KAIKEI;user id=sa;pwd=xA123456;")
	db := dbSvc.CreateDb()
	var count int
	row := db.QueryRow("select count(*) from bumon")
	if err := row.Scan(&count); err != nil {
		// Bug fix: a failed scan used to be logged but never failed the test.
		t.Fatal("fetch the counts fail." + err.Error())
	}
	log.Println("the bumon count is", count)
}
|
package main
// main prints the addresses of a two-element array and of its elements
// via the builtin println (which writes to stderr).
func main() {
	a := [...]int{1, 2}
	println(&a, &a[0], &a[1])
}
|
package test
import (
"fmt"
"gengine/builder"
"gengine/context"
"gengine/engine"
"reflect"
"strings"
"testing"
"time"
)
// Container is deliberately field-free: with no shared state, every method
// attached to its pointer is a stateless function, and registering one
// instance injects all of them with minimal wiring.
type Container struct {
}
// LogModel is a placeholder logging hook; currently a no-op.
func (c *Container) LogModel() {
}
// IsNil reports whether a is nil (either a nil interface or a typed nil
// pointer/map/slice/chan/func/interface).
// Bug fix: the previous body returned reflect.ValueOf(a).IsValid(), which
// is the exact inverse — true for every non-nil value, false for nil.
func (c *Container) IsNil(a interface{}) bool {
	if a == nil {
		return true
	}
	v := reflect.ValueOf(a)
	switch v.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		return v.IsNil()
	}
	return false
}
// EqualsIgnoreCase reports whether left and right are equal under Unicode
// case-folding (strings.EqualFold).
func (c *Container) EqualsIgnoreCase(left, right string) bool {
	return strings.EqualFold(left, right)
}
// HasPrefix reports whether str begins with prefix.
func (c *Container) HasPrefix(str, prefix string) bool {
	return strings.HasPrefix(str, prefix)
}
// HasSuffix reports whether str ends with suffix.
func (c *Container) HasSuffix(str, suffix string) bool {
	return strings.HasSuffix(str, suffix)
}
// ContainsForArray reports whether item occurs in arr (== comparison).
func (c *Container) ContainsForArray(arr []interface{}, item interface{}) bool {
	// Ranging over a nil/empty slice simply runs zero iterations, so no
	// explicit length guard is needed.
	for _, elem := range arr {
		if elem == item {
			return true
		}
	}
	return false
}
// ContainsForString reports whether item is a substring of str.
func (c *Container) ContainsForString(str, item string) bool {
	return strings.Contains(str, item)
}
// CurrentTimeOfMs returns the current wall-clock time in milliseconds
// since the Unix epoch.
func (c *Container) CurrentTimeOfMs() int64 {
	return time.Now().UnixNano() / 1e6
}
// getRequest returns a zero-valued Request: a missing-key lookup on an
// (even nil) map yields the element type's zero value.
func getRequest() Request {
	var lookup map[string]Request
	return lookup["x"]
}
// Request is an empty placeholder type used to exercise nil-handling in
// the rule below.
type Request struct {
}
// ruleInitTest is the gengine rule source used by the tests below. The
// comments inside the backquoted string are part of the rule text and are
// intentionally left untouched.
var ruleInitTest = `
rule "rule_init_test" "rule_init" salience 0
begin
println(contextInt["a"])
println(container.CurrentTimeOfMs())
//println(contextInt["b"]) //此处如果是基础类型,如果没有对应的key,也应该返回对应的默认值
println(container.EqualsIgnoreCase("a", "b"))
println(container.IsNil(getRequest()))
end
`
// TestRuleInitTest registers the data context (maps, println, getRequest,
// and the Container helpers), builds the rule from ruleInitTest, and runs
// the engine once. Failures panic rather than using t.
func TestRuleInitTest(t *testing.T) {
	contextInt := make(map[string]int)
	contextString := make(map[string]string)
	contextInt["a"] = 1
	dataContext := context.NewDataContext()
	dataContext.Add("contextInt", &contextInt)
	dataContext.Add("contextString", &contextString)
	dataContext.Add("println", fmt.Println)
	dataContext.Add("getRequest", getRequest)
	// Registering the container injects all of its attached methods at once.
	container := &Container{}
	dataContext.Add("container", container)
	ruleBuilder := builder.NewRuleBuilder(dataContext)
	err := ruleBuilder.BuildRuleFromString(ruleInitTest)
	if err != nil {
		panic(err)
	}
	eng := engine.NewGengine()
	err = eng.Execute(ruleBuilder, true)
	if err != nil {
		panic(err)
	}
}
// TestRuleInitTest_pool runs the same rule through a GenginePool.
// Init and execute are decoupled: the pool (seeded with request-
// independent, stateless APIs) would live in object initialization, while
// per-request data is injected at execution time, isolating request state.
func TestRuleInitTest_pool(t *testing.T) {
	// Request-independent, stateless APIs go into the pool at init time.
	apis := make(map[string]interface{})
	apis["println"] = fmt.Println
	apis["container"] = &Container{}
	apis["getRequest"] = getRequest
	pool, e1 := engine.NewGenginePool(1, 2, 1, ruleInitTest, apis)
	if e1 != nil {
		panic(e1)
	}
	// Request-scoped data is injected per execution, keeping state isolated.
	data := make(map[string]interface{})
	contextInt := make(map[string]int)
	contextInt["a"] = 1
	contextString := make(map[string]string)
	data["contextInt"] = &contextInt
	data["contextString"] = &contextString
	// Call whichever execute variant matches the desired execution model.
	e2, _ := pool.ExecuteRulesWithMultiInputWithSpecifiedEM(data)
	if e2 != nil {
		panic(e2)
	}
}
|
package flags
// Flags holds the parsed command-line options for the tool.
type Flags struct {
	Verbose    bool   // enable verbose output
	InputPath  string // path to read input from
	OutputPath string // path to write output to
	Clean      bool   // remove previous output before running
}
|
// +build !debug
/*
* Copyright (c) 2018 QLC Chain Team
*
* This software is released under the MIT License.
* https://opensource.org/licenses/MIT
*/
package log
import (
"encoding/json"
"fmt"
"path/filepath"
"sync"
"time"
"github.com/qlcchain/go-qlc/common/util"
"github.com/qlcchain/go-qlc/config"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"gopkg.in/natefinch/lumberjack.v2"
)
const (
logfile = "qlc.log"
)
var (
once sync.Once
lumlog lumberjack.Logger
logger, _ = zap.NewDevelopment()
Root *zap.SugaredLogger
)
// init installs a production-configured sugared logger as Root so the
// package is usable before (or without) InitLog being called.
// The construction error is deliberately dropped; zap.NewProduction only
// fails on invalid output paths, which the defaults do not have.
func init() {
	production, _ := zap.NewProduction()
	Root = production.Sugar()
}
// InitLog configures the package logger exactly once (guarded by sync.Once).
// It creates the log directory, points a size/age-rotated lumberjack sink at
// qlc.log, builds a zap JSON logger from an embedded config, and applies the
// level from config.LogLevel. Repeated calls are no-ops and return the error
// (if any) captured on the first call.
func InitLog(config *config.Config) error {
	var initErr error
	once.Do(func() {
		logFolder := config.LogDir()
		err := util.CreateDirIfNotExist(logFolder)
		if err != nil {
			initErr = err
		}
		// NOTE(review): this local shadows the package-level `logfile`
		// constant, and the filepath.Abs error is silently dropped.
		logfile, _ := filepath.Abs(filepath.Join(logFolder, logfile))
		lumlog = lumberjack.Logger{
			Filename:   logfile,
			MaxSize:    10, // megabytes
			MaxBackups: 10,
			MaxAge:     28, // days
			Compress:   true,
			LocalTime:  true,
		}
		// Base zap config; the level below is overridden by config.LogLevel.
		var logCfg zap.Config
		err = json.Unmarshal([]byte(`{
		"level": "error",
		"outputPaths": ["stdout"],
		"errorOutputPaths": ["stderr"],
		"encoding": "json",
		"encoderConfig": {
			"messageKey": "message",
			"levelKey": "level",
			"levelEncoder": "lowercase"
		}
	}`), &logCfg)
		if err != nil {
			initErr = err
			fmt.Println(err)
		}
		err = logCfg.Level.UnmarshalText([]byte(config.LogLevel))
		if err != nil {
			initErr = err
			fmt.Println(err)
		}
		logCfg.EncoderConfig = zap.NewProductionEncoderConfig()
		// Every log entry is mirrored into the rotated file via the hook.
		// NOTE(review): the Build error is dropped; on failure `logger`
		// keeps its previous value.
		logger, _ = logCfg.Build(zap.Hooks(lumberjackZapHook))
	})
	return initErr
}
// NewLogger returns a named sugared logger derived from the package logger.
func NewLogger(name string) *zap.SugaredLogger {
	sugared := logger.Sugar()
	return sugared.Named(name)
}
// lumberjackZapHook mirrors every zap entry into the rotated log file,
// formatted as "time LEVEL [logger] caller message".
func lumberjackZapHook(e zapcore.Entry) error {
	line := fmt.Sprintf("%s %s [%s] %s %s\n",
		e.Time.Format(time.RFC3339Nano),
		e.Level.CapitalString(),
		e.LoggerName,
		e.Caller.TrimmedPath(),
		e.Message)
	_, err := lumlog.Write([]byte(line))
	return err
}
|
package main
import (
"fmt"
)
// arr mixes a string, an int, and nil to demonstrate type assertions
// on heterogeneous slices.
var arr = []interface{}{
	"str", 1, nil,
}
// main demonstrates a checked type assertion on an []interface{} element.
//
// BUG FIX: the original used %s for both an int and a bool, which prints
// "%!s(int=1) %!s(bool=true)". Use %d for the int and %t for the bool.
func main() {
	fl, ok := arr[1].(int)
	fmt.Printf("%d %t", fl, ok)
}
|
package telemetry
import (
"net/http"
"runtime"
"time"
"github.com/pkg/errors"
"github.com/posthog/posthog-go"
uuid "github.com/satori/go.uuid"
"github.com/sirupsen/logrus"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/batchcorp/plumber/options"
)
const (
APIURL = "https://telemetry.streamdal.com"
)
// ITelemetry is the minimal surface for emitting analytics events;
// both Telemetry and NoopTelemetry satisfy it.
type ITelemetry interface {
	Enqueue(c posthog.Capture) error
}
// Config carries everything needed to construct a Telemetry client.
// Token, PlumberID and CLIOptions are required (see validateConfig).
type Config struct {
	Token      string
	PlumberID  string
	CLIOptions *opts.CLIOptions

	// optional
	RoundTripper http.RoundTripper
}
// Telemetry forwards analytics events to PostHog.
type Telemetry struct {
	Client posthog.Client
	cfg    *Config
	log    *logrus.Entry
}

// NoopTelemetry is a drop-in ITelemetry that discards every event.
type NoopTelemetry struct{}
// New validates cfg and builds a Telemetry backed by a PostHog client.
// A nil RoundTripper makes the posthog library fall back to its default
// transport.
func New(cfg *Config) (*Telemetry, error) {
	if err := validateConfig(cfg); err != nil {
		return nil, errors.Wrap(err, "unable to validate config")
	}

	phConfig := posthog.Config{
		Endpoint:  APIURL,
		Transport: cfg.RoundTripper, // posthog lib will instantiate a default roundtripper if nil
		BatchSize: 1,
		Interval:  250 * time.Millisecond,
		Logger:    &NoopLogger{},
	}
	client, err := posthog.NewWithConfig(cfg.Token, phConfig)
	if err != nil {
		return nil, errors.Wrap(err, "unable to create telemetry client")
	}

	t := &Telemetry{
		Client: client,
		cfg:    cfg,
		log:    logrus.WithField("pkg", "telemetry"),
	}
	return t, nil
}
// validateConfig rejects a nil or incomplete telemetry configuration.
func validateConfig(cfg *Config) error {
	switch {
	case cfg == nil:
		return errors.New("config cannot be nil")
	case cfg.Token == "":
		return errors.New("config.Token cannot be empty")
	case cfg.PlumberID == "":
		return errors.New("config.PlumberID cannot be empty")
	case cfg.CLIOptions == nil:
		return errors.New("CLIOptions cannot be nil")
	}
	return nil
}
// Enqueue fills in default properties (debug/quiet flags, version, OS,
// arch, plumber ID) on the capture and hands it to the PostHog client.
// An empty Event name is rejected; a missing DistinctId gets a fresh UUID.
func (t *Telemetry) Enqueue(c posthog.Capture) error {
	if c.Event == "" {
		err := errors.New("Event cannot be empty")
		t.log.Warningf("unable to track analytic event: %s", err)
		return err
	}

	// This should _usually_ be already set to plumber ID
	if c.DistinctId == "" {
		c.DistinctId = uuid.NewV4().String()
	}

	if c.Properties == nil {
		c.Properties = make(map[string]interface{})
	}

	if _, ok := c.Properties["debug"]; !ok {
		c.Properties["debug"] = t.cfg.CLIOptions.Global.Debug
	}

	// BUG FIX: previously the Quiet flag was written into the "debug"
	// key, clobbering it and never populating "quiet".
	if _, ok := c.Properties["quiet"]; !ok {
		c.Properties["quiet"] = t.cfg.CLIOptions.Global.Quiet
	}

	if _, ok := c.Properties["version"]; !ok {
		c.Properties["version"] = options.VERSION
	}

	if _, ok := c.Properties["os"]; !ok {
		c.Properties["os"] = runtime.GOOS
	}

	if _, ok := c.Properties["arch"]; !ok {
		c.Properties["arch"] = runtime.GOARCH
	}

	if _, ok := c.Properties["plumber_id"]; !ok {
		c.Properties["plumber_id"] = t.cfg.PlumberID
	}

	err := t.Client.Enqueue(c)
	if err != nil {
		t.log.Warningf("unable to send telemetry event: %s", err)
		return errors.Wrap(err, "unable to send telemetry event")
	}
	return nil
}
// Enqueue discards the event and always succeeds.
func (t *NoopTelemetry) Enqueue(_ posthog.Capture) error {
	return nil
}
|
package hi
import (
//"../hello"
"fmt"
"math"
"math/rand"
//"os"
)
// print aliases fmt.Println; note it shadows the built-in print function.
var print = fmt.Println
//var printAll = fmt.Println
//
//func test(name string, age int) (helloName string, helloAge int) {
// return name + string(age), age
//}
//
//func returnInt() int {
// return 1
//}
//
//func helloTest(name string, age int)(returnName string, returnInt int) {
// return name, age
//}
//
//func result() (value int) {
// const name = "hello world"
// fmt.Println(name)
// age := 1
// _ = age
// return 1
//}
//
//func init() {
// fmt.Println("init package")
// fmt.Println(returnInt())
//}
//func hi() {
//const (
// a = iota
// b
// c
// d
// e
//)
//
//fmt.Println(result())
//hello.SayHello()
//helloTest("hello", 1)
//printAll("hello world")
//fmt.Println(a)
//
//a, b := 1, "hello"
//fmt.Println(a)
//fmt.Println(b)
//var c int
//c = 1
//fmt.Println(c)
// fmt.Println(os.Getuid())
// fmt.Println(os.Getenv("GOOS"))
// fmt.Printf("%s hhaha %d", "hello", 1)
// test := 1
// fmt.Println(test)
//}
//
//var print = fmt.Println
//var a = "G"
//
//func hi() {
// n()
// m()
// n()
//}
//
//func n() { print(a) }
//
//func m() {
// a := "O"
// print(a)
//}
//
//var a string
//
//func hi() {
// a = "G"
// print(a)
// f1()
//}
//
//func f1() {
// a := "O"
// print(a)
// f2()
//}
//
//func f2() {
// print(a)
//}
// main is a grab-bag demo: comparisons, floats, complex numbers, iota
// constants, random numbers, loops, and defined string types.
func main() {
	x, y := 1, 1
	var f = 2.3
	print(x == y)
	print(f)
	print(math.Floor(f))

	// Complex literal.
	name := 1 + 1i
	print(name)
	print(math.MaxInt8)

	// Powers of 1024 via iota; the first value is discarded with _.
	type ByteSize float64
	const (
		_           = iota
		KB ByteSize = 1 << (10 * iota)
		MB
		GB
		TB
		PB
		EB
		ZB
		YB
	)
	fmt.Println(TB)

	print(rand.Intn(100))
	for i := 0; i < 1000; i++ {
		print(i)
	}

	// A defined string type still supports + and len.
	type Rope string
	var role Rope = "hello world"
	print(role)
	var test Rope = "hi world"
	print(test + role)
	print(len(test))
}
|
package ast
// HasJoin is implemented by AST nodes that can carry join tables;
// AddJoin attaches one more join clause to the node.
type HasJoin interface {
	AddJoin(Join)
}
|
package model
import (
"Seaman/utils"
"time"
)
// TplAppDataRoleT maps the TPL_APP_DATA_ROLE_T table: an application role
// with tenant scoping and audit timestamps. (The comment(...) strings in
// the xorm tags are column comments stored in the database schema.)
type TplAppDataRoleT struct {
	Id             int64     `xorm:"pk autoincr BIGINT(20)"`                           // primary key
	Name           string    `xorm:"not null comment('角色名称') VARCHAR(128)"`            // role name
	Desp           string    `xorm:"not null comment('角色描述') VARCHAR(128)"`            // role description
	Status         int64     `xorm:"not null comment('状态') BIGINT(20)"`                // status flag
	Defaultin      string    `xorm:"not null default '0' comment('系统内置标识(0否1是)') CHAR(1)"` // built-in marker: '0' no, '1' yes
	Code           string    `xorm:"comment('编码') VARCHAR(128)"`                       // role code
	TenantId       string    `xorm:"comment('多租户ID') VARCHAR(32)"`                     // multi-tenant ID
	AppName        string    `xorm:"not null comment('应用名') VARCHAR(32)"`              // application name
	AppScope       string    `xorm:"comment('系统群名') VARCHAR(32)"`                      // system group name
	CreateDate     time.Time `xorm:"comment('创建时间') DATETIME"`                         // creation time
	LastUpdateDate time.Time `xorm:"comment('最后修改时间') DATETIME"`                       // last-modified time
}
// tplAppDataRoleTToRespDesc reshapes a database row into the JSON field
// layout expected by request responses (snake_case keys, formatted dates).
// Note the Status column is intentionally not exposed here.
func (tplAppDataRoleT *TplAppDataRoleT) tplAppDataRoleTToRespDesc() interface{} {
	resp := make(map[string]interface{}, 10)
	resp["id"] = tplAppDataRoleT.Id
	resp["name"] = tplAppDataRoleT.Name
	resp["desp"] = tplAppDataRoleT.Desp
	resp["defaultin"] = tplAppDataRoleT.Defaultin
	resp["code"] = tplAppDataRoleT.Code
	resp["tenant_id"] = tplAppDataRoleT.TenantId
	resp["app_name"] = tplAppDataRoleT.AppName
	resp["app_scope"] = tplAppDataRoleT.AppScope
	resp["create_date"] = utils.FormatDatetime(tplAppDataRoleT.CreateDate)
	resp["last_update_date"] = utils.FormatDatetime(tplAppDataRoleT.LastUpdateDate)
	return resp
}
|
package main
import (
"fmt"
)
// User is a demo entity with an ID, a name, and a location.
type User struct {
	Id             int
	Name, Location string
}

// Greetings renders a one-line greeting from the user's name and location.
func (u *User) Greetings() string {
	greeting := fmt.Sprintf("Hi %s from %s", u.Name, u.Location)
	return greeting
}
// Player embeds User (inheriting its fields and Greetings method)
// and adds a game identifier.
type Player struct {
	User
	GameId int
}
// main builds a Player with an embedded User and prints its fields and
// greeting.
//
// BUG FIX: the Printf previously passed p.Location twice — once in the
// Name slot — so the name was never printed. Pass p.Name for %s "Name".
func main() {
	p := Player{
		User{
			Id:       123213,
			Name:     "Matt",
			Location: "LA",
		},
		123213,
	}
	fmt.Printf("Id: %d, Name: %s, Location: %s, GameId: %d \n", p.Id, p.Name, p.Location, p.GameId)
	fmt.Println(p.Greetings())
}
|
package main
// payloadHandler uploads every payload on its own goroutine.
// This is a deliberate anti-pattern example: the goroutines are unbounded
// and untracked (no WaitGroup, no limit), so a burst of payloads can spawn
// arbitrarily many concurrent uploads.
func payloadHandler(data PayloadCollection) {
	for _, payload := range data.Payloads {
		go payload.UploadToS3() // <----- DON'T DO THIS
	}
}
|
package main
import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)
// recordMetric bumps the demo counter once per second, forever.
func recordMetric() {
	for {
		counterOps.Inc()
		time.Sleep(time.Second)
	}
}
func recordHistogramMetric() {
for {
histogramOps.With(prometheus.Labels{"code": "200"}).Observe(0.2)
time.Sleep(1*time.Second)
}
}
// Demo metrics, auto-registered with the default registry via promauto.
var (
	// counterOps is a monotonically increasing demo counter.
	counterOps = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "try_prometheus",
		Name:      "counter",
		Help:      "just counter metric",
	})
	// histogramOps buckets observations, partitioned by HTTP status code.
	histogramOps = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: "try_prometheus",
		Name:      "histogram",
		Help:      "just histogram",
		Buckets:   prometheus.DefBuckets,
	}, []string{"code"})
	// httpCounterOps counts requests served by handler below.
	httpCounterOps = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "test",
		Name:      "http",
	})
)
// StartPrometheusHandler serves the /metrics endpoint on :2112 in a
// background goroutine.
//
// FIX: the ListenAndServe error was silently discarded; it is now logged
// so a failed bind (e.g. port in use) is visible.
func StartPrometheusHandler() {
	go func() {
		http.Handle("/metrics", promhttp.Handler())
		if err := http.ListenAndServe(":2112", nil); err != nil {
			log.Printf("metrics server stopped: %v", err)
		}
	}()
}
// handler answers with a fixed greeting and counts the request.
func handler(w http.ResponseWriter, r *http.Request) {
	body := []byte("hello world")
	w.Write(body)
	httpCounterOps.Inc()
}
// main starts the metrics endpoint on :2112 and the demo /test endpoint
// on :8080, blocking on the latter.
//
// FIX: the ListenAndServe error was silently discarded; it is now fatal,
// since the process has nothing left to do if the server cannot start.
func main() {
	StartPrometheusHandler()
	http.HandleFunc("/test", handler)
	if err := http.ListenAndServe(":8080", nil); err != nil {
		log.Fatalf("http server stopped: %v", err)
	}
}
|
package miniporte
import (
"log"
"github.com/cenkalti/backoff"
irc "github.com/fluffle/goirc/client"
"github.com/oz/miniporte/epistoli"
link "github.com/oz/miniporte/link"
)
// Bot stores the state of our bot, and its configuration.
type Bot struct {
	Chans  []string      // channels to join on connect
	Config *irc.Config   // connection configuration
	Client *irc.Conn     // underlying IRC connection
	Ctl    (chan string) // control channel driving commandLoop
}
// New initializes a new Bot, ready to connect to IRC over SSL.
// On nick collision the client retries with a trailing underscore.
func New(server, nick, name, ident string, chans []string) *Bot {
	conf := irc.NewConfig(nick)
	conf.Server = server
	conf.SSL = true
	conf.Me.Name = name
	conf.Me.Ident = ident
	conf.NewNick = func(n string) string { return n + "_" }

	bot := &Bot{
		Chans:  chans,
		Config: conf,
		Client: irc.Client(conf),
		Ctl:    make(chan string),
	}
	return bot
}
// joinChannels joins every configured channel on the live connection.
func (b *Bot) joinChannels() {
	log.Println("Joining channels", b.Chans)
	for _, channel := range b.Chans {
		b.Client.Join(channel)
	}
}
// Run wires up the IRC handlers, connects in the background, and blocks
// in the command loop until it returns.
func (b *Bot) Run() {
	b.initializeHandlers()
	go b.Connect()
	b.commandLoop()
	log.Println("Bot quitting...")
}
// onMessage extracts links from a public channel message, tags them with
// the sender's nick and the channel, and saves them asynchronously,
// reporting the outcome back to the channel for public links.
func (b *Bot) onMessage(msg *irc.Line) {
	// Ignore non-public messages
	if !msg.Public() {
		return
	}
	l := link.New(epistoli.New())
	if err := l.MustExtract(msg.Text()); err != nil {
		// No links.
		return
	}
	l.Tags = append(l.Tags, msg.Nick, msg.Target())
	// Save off the IRC handler goroutine so slow storage never blocks it.
	go func() {
		if err := l.Save(); err != nil {
			if l.Pub {
				b.Client.Privmsg(msg.Target(), "Oops! "+err.Error())
			}
			return
		}
		if l.Pub {
			b.Client.Privmsg(msg.Target(), "Saved!")
		}
	}()
}
// initializeHandlers registers the IRC event callbacks: join channels on
// connect, push a reconnect signal on disconnect, and route PRIVMSGs to
// onMessage.
func (b *Bot) initializeHandlers() {
	// Connected
	b.Client.HandleFunc("connected", func(conn *irc.Conn, line *irc.Line) {
		log.Println("Connected!")
		b.joinChannels()
	})

	// Disconnected
	b.Client.HandleFunc("disconnected", func(conn *irc.Conn, line *irc.Line) {
		log.Println("Disconnected")
		b.Ctl <- "disconnected"
	})

	// PRIVMSG
	b.Client.HandleFunc("PRIVMSG", func(conn *irc.Conn, line *irc.Line) {
		b.onMessage(line)
	})
}
// Connect connects the bot to the configured IRC server (single attempt).
func (b *Bot) Connect() error {
	log.Println("Connecting to", b.Config.Server)
	return b.Client.Connect()
}
// connect retries Connect with exponential backoff; if backoff gives up,
// a "connection-error" command is pushed so commandLoop can react.
func (b *Bot) connect() {
	if err := backoff.Retry(b.Connect, backoff.NewExponentialBackOff()); err != nil {
		log.Printf("Connection error: %s\n", err)
		b.Ctl <- "connection-error"
	}
}
// commandLoop consumes control commands from b.Ctl, reconnecting on
// disconnect/connection-error and ignoring anything else. It returns
// when b.Ctl is closed.
//
// FIX: the loop was wrapped in an extra `for {}`; if Ctl were ever
// closed, the inner range would exit and the outer loop would busy-spin
// forever. A single range over the channel has identical behavior while
// the channel is open and terminates cleanly on close.
func (b *Bot) commandLoop() {
	for cmd := range b.Ctl {
		switch cmd {
		case "disconnected", "connection-error":
			log.Println("Disconnected:", cmd)
			go b.connect()
		default:
			log.Println("Ignoring command", cmd)
		}
	}
}
|
/*
* Copyright 2020-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package utils
import (
"context"
"sync"
"testing"
"time"
)
// TestRequestQueueOrdering verifies FIFO behavior: ten goroutines queue
// behind a held lock and must be granted the lock in arrival order.
func TestRequestQueueOrdering(t *testing.T) {
	rq := NewRequestQueue()
	// acquire lock immediately, so our requests will queue up
	if err := rq.WaitForGreenLight(context.Background()); err != nil {
		t.Error(err)
		return
	}

	doneOrder := make([]int, 0, 10)

	wg := sync.WaitGroup{}
	wg.Add(10)

	// queue up 10 requests
	for i := 0; i < 10; i++ {
		go func(i int) {
			if err := rq.WaitForGreenLight(context.Background()); err != nil {
				t.Error(err)
			}
			// Appending without a mutex is safe only because the queue
			// admits one goroutine at a time between green light and
			// RequestComplete.
			doneOrder = append(doneOrder, i)
			rq.RequestComplete()
			wg.Done()
		}(i)
		// ensure that the last request is queued before starting the next one
		time.Sleep(time.Millisecond)
	}

	// complete the first process
	rq.RequestComplete()

	wg.Wait()

	// verify that the processes completed in the correct order
	for i := 0; i < 10; i++ {
		if doneOrder[i] != i {
			t.Errorf("Thread %d executed at time %d, should have been %d", doneOrder[i], i, doneOrder[i])
		}
	}
}
// TestRequestQueueCancellation verifies that queued waiters whose context
// is canceled get context.Canceled, while the rest proceed normally once
// the lock is released.
func TestRequestQueueCancellation(t *testing.T) {
	rq := NewRequestQueue()
	// acquire lock immediately, so our requests will queue up
	if err := rq.WaitForGreenLight(context.Background()); err != nil {
		t.Error(err)
		return
	}

	wg := sync.WaitGroup{}
	wg.Add(10)

	willCancelContext, cancel := context.WithCancel(context.Background())

	// queue up 10 requests
	for i := 0; i < 10; i++ {
		go func(i int) {
			// will cancel processes 0, 1, 4, 5, 8, 9
			willCancel := (i/2)%2 == 0
			ctx := context.Background()
			if willCancel {
				ctx = willCancelContext
			}
			if err := rq.WaitForGreenLight(ctx); err != nil {
				if !willCancel || err != context.Canceled {
					t.Errorf("wait gave unexpected error %s", err)
				} //else cancellation was expected
			} else {
				if willCancel {
					t.Error("this should have been canceled")
				} //else completed as expected
				rq.RequestComplete()
			}
			wg.Done()
		}(i)
	}

	// cancel processes
	cancel()
	// wait a moment for the cancellations to go through
	time.Sleep(time.Millisecond)
	// release the lock, and allow the processes to complete
	rq.RequestComplete()
	// wait for all processes to complete
	wg.Wait()
}
|
package main
import (
"testing"
)
// BenchmarkInsert measures insert throughput.
//
// BUG FIX: the original called b.Run inside a `for i < b.N` loop. b.Run
// starts a sub-benchmark that manages its own iteration count, so nesting
// it under b.N re-ran the entire sub-benchmark b.N times (quadratic work,
// meaningless timings). Run the sub-benchmark once and loop inside it.
func BenchmarkInsert(b *testing.B) {
	b.Run("insert", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			insert()
		}
	})
}
// BenchmarkRead measures read throughput for a fixed key.
//
// BUG FIX: same issue as BenchmarkInsert — b.Run must not be invoked
// inside a b.N loop; the iteration loop belongs inside the sub-benchmark.
func BenchmarkRead(b *testing.B) {
	b.Run("read", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			read("ca5bd769-7b2c-40af-9d0b-5e370091ba8a")
		}
	})
}
// BenchmarkParalelInsert measures insert throughput with GOMAXPROCS
// goroutines driving the workload concurrently.
func BenchmarkParalelInsert(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			insert()
		}
	})
}
// BenchmarkParalelRead measures concurrent read throughput for a fixed key.
func BenchmarkParalelRead(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			read("ca5bd769-7b2c-40af-9d0b-5e370091ba8a")
		}
	})
}
|
package main
import (
"fmt"
"reflect"
"unsafe"
)
// main contrasts the zero-copy (and unsafe) StringToSlice conversion with
// the ordinary copying []byte(str) conversion. The zero-copy slice aliases
// the string's read-only backing array, so writing through it faults.
func main() {
	str := "123455" // string literal data lives in a read-only segment
	fmt.Println("[]byte str:", []byte(str))
	//var sli []byte
	// get a pointer to the string's backing array
	//strPtr := unsafe.Pointer(&(sli[0]))
	// get the Data pointer out of str's header
	//p := (*reflect.StringHeader)(unsafe.Pointer(&str))
	// address of the backing array the string points at
	// strptr := unsafe.Pointer(p.Data)
	// fmt.Printf("p:%p\n", strptr) // backing-array address of str
	//fmt.Println("p Len", *(*string)(strptr))
	//fmt.Printf("&str[0]:%p", &(str[0])) //&(str[0] not allowed
	// point a slice header directly at the string's backing array
	// sli = *(*[]byte)(strptr)
	// fmt.Println("sli", sli)

	b := StringToSlice(str)
	//b[0] = 10 // unexpected fault address 0x4cd587 — read-only segment is not writable
	fmt.Println(b)

	// The conventional conversion copies, so the copy is writable.
	b2 := []byte(str)
	b2[0] = 10
	fmt.Println(b2)
	//b[0] = 10
}
// StringToSlice reinterprets a string as a []byte without copying, by
// aiming the slice header's Data/Len/Cap at the string's backing array.
//
// WARNING: the returned slice aliases immutable string memory — writing
// to it is undefined behavior (typically a segfault for literals), and it
// must not outlive s. NOTE(review): reflect.StringHeader/SliceHeader are
// deprecated in newer Go releases; this pattern is inherently unsafe.
func StringToSlice(s string) (b []byte) {
	pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
	pbytes.Data = pstring.Data // point the slice at the string's bytes directly
	pbytes.Len = pstring.Len
	pbytes.Cap = pstring.Len
	return
}
|
/*
* Simplified interface for cloning an existing server.
*/
package main
import (
"encoding/hex"
"flag"
"fmt"
"log"
"os"
"path"
"strings"
"time"
"github.com/grrtrr/clcv2"
"github.com/grrtrr/clcv2/clcv2cli"
"github.com/grrtrr/exit"
"github.com/olekukonko/tablewriter"
)
// main clones an existing CLC server: it powers the source on if needed,
// copies its credentials and sizing into a CreateServer request (with
// optional CPU/memory/disk/TTL/group/network overrides from flags),
// resolves group and network names to IDs, retries the clone while the
// freshly-booted source is not yet cloneable, and finally reports the new
// server's name and password.
func main() {
	var net = flag.String("net", "", "ID or name of the Network to use (if different from source)")
	var hwGroup = flag.String("g", "", "UUID or name (if unique) of the HW group to clone this server into")
	var primDNS = flag.String("dns1", "8.8.8.8", "Primary DNS to use")
	var secDNS = flag.String("dns2", "8.8.4.4", "Secondary DNS to use")
	var numCpu = flag.Int("cpu", 0, "Number of Cpus to use (if different from source VM)")
	var memGB = flag.Int("memory", 0, "Amount of memory in GB (if different from source VM")
	var seed = flag.String("name", "", "The 4-6 character seed for the name of the cloned server")
	var desc = flag.String("desc", "", "Description of the cloned server")
	var ttl = flag.Duration("ttl", 0, "Time span (counting from time of creation) until server gets deleted")
	var extraDrv = flag.Int("drive", 0, "Extra storage (in GB) to add to server as a raw disk")
	var wasStopped bool
	var maxAttempts = 1
	var url, reqID string

	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "usage: %s [options] <Source-Server-Name>\n", path.Base(os.Args[0]))
		flag.PrintDefaults()
	}
	flag.Parse()
	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(0)
	}

	client, err := clcv2cli.NewCLIClient()
	if err != nil {
		exit.Fatal(err.Error())
	}

	// First get the details of the source server
	log.Printf("Obtaining details of source server %s ...", flag.Arg(0))
	src, err := client.GetServer(flag.Arg(0))
	if err != nil {
		exit.Fatalf("failed to list details of source server %q: %s", flag.Arg(0), err)
	}

	if wasStopped = src.Details.PowerState == "stopped"; wasStopped {
		// The source server must be powered on
		log.Printf("%s is powered-off - powering on ...", src.Name)
		// NOTE(review): this reqID shadows the outer one; it is only used
		// inside this block, so the outer variable stays empty here.
		reqID, err := client.PowerOnServer(src.Name)
		if err != nil {
			exit.Fatalf("failed to power on source server %s: %s", src.Name, err)
		}
		log.Printf("Waiting for %s to power on (status ID: %s) ...", src.Name, reqID)
		if _, err = client.AwaitCompletion(reqID); err != nil {
			exit.Fatalf("failed to await completion of %s: %s", reqID, err)
		}
		// When the server is being powered on, it can take up to 5 minutes until
		// the backend is able to clone it; it requires the server to be fully booted.
		maxAttempts = 9
		time.Sleep(1 * time.Minute)
	}

	// We need the credentials, too
	log.Printf("Obtaining %s credentials ...", src.Name)
	credentials, err := client.GetServerCredentials(src.Name)
	if err != nil {
		exit.Fatalf("failed to obtain the credentials of server %q: %s", src.Name, err)
	}

	req := clcv2.CreateServerReq{
		Name:                 *seed,
		Cpu:                  src.Details.Cpu,
		MemoryGB:             src.Details.MemoryMb >> 10,
		GroupId:              src.GroupId,
		SourceServerId:       src.Name,
		PrimaryDns:           *primDNS,
		SecondaryDns:         *secDNS,
		Password:             credentials.Password,
		SourceServerPassword: credentials.Password,
		Type:                 src.Type,
	}

	if *seed == "" {
		if l := len(src.Name); l > 10 { // use same naming as original by default
			req.Name = strings.TrimRight(src.Name[7:l-1], "0")
		} else {
			req.Name = "CLONE"
		}
	}

	if *numCpu != 0 {
		req.Cpu = *numCpu
	}
	if *memGB != 0 {
		req.MemoryGB = *memGB
	}
	if *desc != "" {
		req.Description = *desc
	} else if src.Description == "" {
		req.Description = fmt.Sprintf("Clone of %s", src.Name)
	} else {
		req.Description = fmt.Sprintf("%s (cloned from %s)", src.Description, src.Name)
	}
	if *extraDrv != 0 {
		req.AdditionalDisks = append(req.AdditionalDisks,
			clcv2.ServerAdditionalDisk{SizeGB: uint32(*extraDrv), Type: "raw"})
	}
	if *ttl != 0 { /* Date/time that the server should be deleted. */
		req.Ttl = new(time.Time)
		*req.Ttl = time.Now().Add(*ttl)
	}

	/* hwGroup may be hex uuid or group name */
	if *hwGroup != "" {
		req.GroupId = *hwGroup

		if _, err := hex.DecodeString(*hwGroup); err != nil {
			log.Printf("Resolving ID of Hardware Group %q in %s ...", *hwGroup, src.LocationId)

			if group, err := client.GetGroupByName(*hwGroup, src.LocationId); err != nil {
				exit.Fatalf("failed to resolve group name %q: %s", *hwGroup, err)
			} else if group == nil {
				exit.Errorf("No group named %q was found in %s", *hwGroup, src.LocationId)
			} else {
				req.GroupId = group.Id
			}
		}
	}

	/* net is supposed to be a (hex) ID, but allow network names, too */
	if *net == "" {
		log.Printf("Determining network ID used by %s ...", src.Name)

		nets, err := client.GetServerNets(src)
		if err != nil {
			exit.Fatalf("failed to query networks of %s: %s", src.Name, err)
		}

		if len(nets) == 0 {
			// No network information found for the server, even though it has an IP.
			// This can happen when the server is owned by a sub-account, and uses a
			// network that is owned by the parent account. In this case, the sub-account
			// is prevented from querying details of the parent account, due to insufficient
			// permission.
			if parentAccount := client.RegisteredAccountAlias(); client.AccountAlias != parentAccount {
				var savedAlias = client.AccountAlias

				log.Printf("Network ID not visible under %q account - trying %q instead ...", savedAlias, parentAccount)
				client.AccountAlias = parentAccount
				if nets, err = client.GetServerNets(src); err != nil {
					exit.Fatalf("failed to query networks of %s using %q account: %s", src.Name, parentAccount, err)
				}
				// Restore Account Alias for remainder of program
				client.AccountAlias = savedAlias
			}

			if len(nets) == 0 {
				log.Printf("Unable to determine Network ID - querying %s deployable networks ...", src.LocationId)

				capa, err := client.GetDeploymentCapabilities(src.LocationId)
				if err != nil {
					exit.Fatalf("failed to determine %s Deployment Capabilities: %s", src.LocationId, err)
				}
				fmt.Println("Please specify the network ID for the clone manually via -net, using this information:")
				table := tablewriter.NewWriter(os.Stdout)
				table.SetAutoFormatHeaders(false)
				table.SetAlignment(tablewriter.ALIGN_LEFT)
				table.SetAutoWrapText(false)

				table.SetHeader([]string{"Name", "Type", "Account", "Network ID"})
				for _, net := range capa.DeployableNetworks {
					table.Append([]string{net.Name, net.Type, net.AccountID, net.NetworkId})
				}

				table.Render()
				os.Exit(0)
			}
		}

		if len(nets) != 1 {
			// FIXME: print server networks
			exit.Errorf("please specify which network to use (%s uses %d)", src.Name, len(nets))
		} else {
			log.Printf("Using %s network %s, with gateway %s", nets[0].Type, nets[0].Cidr, nets[0].Gateway)
			req.NetworkId = nets[0].Id
		}
	} else if _, err := hex.DecodeString(*net); err != nil { // not a HEX ID, treat as group name
		log.Printf("Resolving network ID of %q in %s ...", *net, src.LocationId)

		if netw, err := client.GetNetworkIdByName(*net, src.LocationId); err != nil {
			exit.Fatalf("failed to resolve network name %q: %s", *net, err)
		} else if netw == nil {
			exit.Fatalf("unable to resolve network name %q in %s - maybe use hex ID?", *net, src.LocationId)
		} else {
			req.NetworkId = netw.Id
		}
	} else { // HEX ID, use directly
		req.NetworkId = *net
	}

	log.Printf("Cloning %s ...", src.Name)
	// Retry loop: a freshly-booted source may not be cloneable yet; give
	// up early only on the API's "body.sourceServerId" validation error.
	for i := 1; ; i++ {
		url, reqID, err = client.CreateServer(&req)
		if err == nil || i == maxAttempts || strings.Index(err.Error(), "body.sourceServerId") > 0 {
			break
		}
		log.Printf("attempt %d/%d failed (%s) - retrying ...", i, maxAttempts, strings.TrimSpace(err.Error()))
		time.Sleep(1 * time.Minute)
	}
	if err != nil {
		exit.Fatalf("failed to create server: %s", err)
	}

	status, err := client.PollStatus(reqID, 5*time.Second)
	if err != nil {
		exit.Fatalf("failed to poll %s status: %s", reqID, err)
	}

	server, err := client.GetServerByURI(url)
	if err != nil {
		log.Fatalf("failed to query server details at %s: %s", url, err)
	} else if status == clcv2.Failed {
		exit.Fatalf("failed to clone %s (will show up as 'under construction')", server.Name)
	}
	log.Printf("New server name: %s\n", server.Name)
	log.Printf("Server Password: \"%s\"\n", credentials.Password)
}
|
package mysql
import (
"database/sql"
"github.com/Tanibox/tania-core/src/user/repository"
"github.com/Tanibox/tania-core/src/user/storage"
)
// UserAuthRepositoryMysql persists UserAuth records in MySQL.
type UserAuthRepositoryMysql struct {
	DB *sql.DB
}
// NewUserAuthRepositoryMysql wraps db in a MySQL-backed UserAuthRepository.
func NewUserAuthRepositoryMysql(db *sql.DB) repository.UserAuthRepository {
	return &UserAuthRepositoryMysql{DB: db}
}
// Save upserts the UserAuth row (UPDATE when the USER_UID exists,
// INSERT otherwise) asynchronously. Exactly one value — the error, or
// nil on success — is sent on the returned channel, which is then closed.
//
// BUG FIX: the original did not return after sending an error, so on a
// failed COUNT query it still executed the UPDATE/INSERT, and every error
// path went on to send a spurious trailing nil. Each error now sends once
// and returns; close(result) is deferred so the channel always closes.
func (s *UserAuthRepositoryMysql) Save(userAuth *storage.UserAuth) <-chan error {
	result := make(chan error)

	go func() {
		defer close(result)

		total := 0
		err := s.DB.QueryRow(`SELECT COUNT(USER_UID)
			FROM USER_AUTH WHERE USER_UID = ?`, userAuth.UserUID.Bytes()).Scan(&total)
		if err != nil {
			result <- err
			return
		}

		if total > 0 {
			_, err := s.DB.Exec(`UPDATE USER_AUTH
				SET ACCESS_TOKEN = ?, TOKEN_EXPIRES = ?, CREATED_DATE = ?, LAST_UPDATED = ?
				WHERE USER_UID = ?`,
				userAuth.AccessToken, userAuth.TokenExpires,
				userAuth.CreatedDate, userAuth.LastUpdated,
				userAuth.UserUID.Bytes())
			if err != nil {
				result <- err
				return
			}
		} else {
			_, err := s.DB.Exec(`INSERT INTO USER_AUTH
				(USER_UID, ACCESS_TOKEN, TOKEN_EXPIRES, CREATED_DATE, LAST_UPDATED)
				VALUES (?,?,?,?,?)`,
				userAuth.UserUID.Bytes(), userAuth.AccessToken, userAuth.TokenExpires,
				userAuth.CreatedDate, userAuth.LastUpdated)
			if err != nil {
				result <- err
				return
			}
		}

		result <- nil
	}()

	return result
}
|
// การประกาศตัวแปร
package main
import "fmt"
// main demonstrates the three variable-declaration forms: explicit var
// with a type, := with type inference, and multi-assignment.
//
// FIX: corrected the user-facing typo "Someting" -> "Something" and
// translated the Thai comments to English.
func main() {
	// myAge is declared with an explicit int type.
	var myAge int = 200

	// := declares and infers the type, so no var keyword is needed.
	myAge2 := 500
	var something bool = true

	// age1 = 35 and age2 = 44
	age1, age2 := 35, 44

	fmt.Println("Value Variable = ", myAge)
	fmt.Println("Value Variable = ", myAge2)
	fmt.Println("Something", something)
	fmt.Println(age1, age2)
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validate
import (
"runtime"
"github.com/kubernetes-sigs/cri-tools/pkg/framework"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
)
// Container test constants
// Container test constants
var (
	// Platform-resolved command sets; populated by the BeforeSuite
	// callback below from either the Linux or Windows defaults.
	echoHelloCmd      []string
	sleepCmd          []string
	checkSleepCmd     []string
	shellCmd          []string
	pauseCmd          []string
	logDefaultCmd     []string
	loopLogDefaultCmd []string
	echoHelloOutput   string
	checkPathCmd      func(string) []string

	// Linux defaults
	echoHelloLinuxCmd      = []string{"echo", "-n", "hello"}
	sleepLinuxCmd          = []string{"sleep", "4321"}
	checkSleepLinuxCmd     = []string{"sh", "-c", "pgrep sleep || true"}
	shellLinuxCmd          = []string{"/bin/sh"}
	pauseLinuxCmd          = []string{"sh", "-c", "top"}
	logDefaultLinuxCmd     = []string{"echo", defaultLog}
	loopLogDefaultLinuxCmd = []string{"sh", "-c", "while true; do echo " + defaultLog + "; sleep 1; done"}
	echoHelloLinuxOutput   = "hello"
	checkPathLinuxCmd      = func(path string) []string { return []string{"ls", "-A", path} }

	// Windows defaults
	echoHelloWindowsCmd      = []string{"powershell", "-c", "echo hello"}
	sleepWindowsCmd          = []string{"powershell", "-c", "sleep", "4321"}
	checkSleepWindowsCmd     = []string{"powershell", "-c", "tasklist | findstr sleep; exit 0"}
	shellWindowsCmd          = []string{"cmd", "/Q"}
	pauseWindowsCmd          = []string{"powershell", "-c", "ping -t localhost"}
	logDefaultWindowsCmd     = []string{"powershell", "-c", "echo '" + defaultLog + "'"}
	loopLogDefaultWindowsCmd = []string{"powershell", "-c", "while($true) { echo '" + defaultLog + "'; sleep 1; }"}
	echoHelloWindowsOutput   = "hello\r\n"
	checkPathWindowsCmd      = func(path string) []string { return []string{"powershell", "-c", "ls", path} }
)
// Select the platform command set before the suite runs: Linux commands
// everywhere except native Windows containers (LCOW still uses Linux).
var _ = framework.AddBeforeSuiteCallback(func() {
	if runtime.GOOS != "windows" || framework.TestContext.IsLcow {
		echoHelloCmd = echoHelloLinuxCmd
		sleepCmd = sleepLinuxCmd
		checkSleepCmd = checkSleepLinuxCmd
		shellCmd = shellLinuxCmd
		pauseCmd = pauseLinuxCmd
		logDefaultCmd = logDefaultLinuxCmd
		loopLogDefaultCmd = loopLogDefaultLinuxCmd
		echoHelloOutput = echoHelloLinuxOutput
		checkPathCmd = checkPathLinuxCmd
	} else {
		echoHelloCmd = echoHelloWindowsCmd
		sleepCmd = sleepWindowsCmd
		checkSleepCmd = checkSleepWindowsCmd
		shellCmd = shellWindowsCmd
		pauseCmd = pauseWindowsCmd
		logDefaultCmd = logDefaultWindowsCmd
		loopLogDefaultCmd = loopLogDefaultWindowsCmd
		echoHelloOutput = echoHelloWindowsOutput
		checkPathCmd = checkPathWindowsCmd
	}
})
// Image test constants
// Image test constants
const (
	// Test images exercising the four USER directive forms (uid, username,
	// uid:group, username:group) and their expected values.
	testImageUserUID           = "gcr.io/cri-tools/test-image-user-uid"
	imageUserUID               = int64(1002)
	testImageUserUsername      = "gcr.io/cri-tools/test-image-user-username"
	imageUserUsername          = "www-data"
	testImageUserUIDGroup      = "gcr.io/cri-tools/test-image-user-uid-group"
	imageUserUIDGroup          = int64(1003)
	testImageUserUsernameGroup = "gcr.io/cri-tools/test-image-user-username-group"
	imageUserUsernameGroup     = "www-data"

	// Linux defaults
	testLinuxImageWithoutTag        = "gcr.io/cri-tools/test-image-latest"
	testLinuxImageWithTag           = "gcr.io/cri-tools/test-image-tag:test"
	testLinuxImageWithDigest        = "gcr.io/cri-tools/test-image-digest@sha256:9179135b4b4cc5a8721e09379244807553c318d92fa3111a65133241551ca343"
	testLinuxImageWithAllReferences = "gcr.io/cri-tools/test-image-tag:all"

	// Windows defaults
	testWindowsImageWithoutTag        = "gcr.io/cri-tools/win-test-image-latest"
	testWindowsImageWithTag           = "gcr.io/cri-tools/win-test-image-tag:test"
	testWindowsImageWithDigest        = "gcr.io/cri-tools/win-test-image-digest@sha256:ed127b3a098d6ada53fff1b33ab3ea421dc7ebb06e0c2abded9d3e84bb6842b0"
	testWindowsImageWithAllReferences = "gcr.io/cri-tools/win-test-image-tag:all"
)
// Platform-resolved image references; populated by the BeforeSuite
// callback below from the Linux or Windows defaults.
var (
	// image reference without tag
	testImageWithoutTag string

	// name-tagged reference for test image
	testImageWithTag string

	// digested reference for test image
	testImageWithDigest string

	// image used to test all kinds of references.
	testImageWithAllReferences string

	// image list where different tags refer to different images
	testDifferentTagDifferentImageList []string

	// image list where different tags refer to the same image
	testDifferentTagSameImageList []string

	// pod sandbox to use when pulling images
	testImagePodSandbox *runtimeapi.PodSandboxConfig

	// Linux defaults
	testLinuxDifferentTagDifferentImageList = []string{
		"gcr.io/cri-tools/test-image-1:latest",
		"gcr.io/cri-tools/test-image-2:latest",
		"gcr.io/cri-tools/test-image-3:latest",
	}
	testLinuxDifferentTagSameImageList = []string{
		"gcr.io/cri-tools/test-image-tags:1",
		"gcr.io/cri-tools/test-image-tags:2",
		"gcr.io/cri-tools/test-image-tags:3",
	}

	// Windows defaults
	testWindowsDifferentTagDifferentImageList = []string{
		"gcr.io/cri-tools/win-test-image-1:latest",
		"gcr.io/cri-tools/win-test-image-2:latest",
		"gcr.io/cri-tools/win-test-image-3:latest",
	}
	testWindowsDifferentTagSameImageList = []string{
		"gcr.io/cri-tools/win-test-image-tags:1",
		"gcr.io/cri-tools/win-test-image-tags:2",
		"gcr.io/cri-tools/win-test-image-tags:3",
	}
)
// Populate the OS-neutral image fixtures before the suite runs.
// LCOW (Linux Containers on Windows) runs Linux images, so it takes the
// Linux defaults even though GOOS is windows.
var _ = framework.AddBeforeSuiteCallback(func() {
	if runtime.GOOS == "windows" && !framework.TestContext.IsLcow {
		testImageWithoutTag = testWindowsImageWithoutTag
		testImageWithTag = testWindowsImageWithTag
		testImageWithDigest = testWindowsImageWithDigest
		testImageWithAllReferences = testWindowsImageWithAllReferences
		testDifferentTagDifferentImageList = testWindowsDifferentTagDifferentImageList
		testDifferentTagSameImageList = testWindowsDifferentTagSameImageList
	} else {
		testImageWithoutTag = testLinuxImageWithoutTag
		testImageWithTag = testLinuxImageWithTag
		testImageWithDigest = testLinuxImageWithDigest
		testImageWithAllReferences = testLinuxImageWithAllReferences
		testDifferentTagDifferentImageList = testLinuxDifferentTagDifferentImageList
		testDifferentTagSameImageList = testLinuxDifferentTagSameImageList
	}
	// Minimal sandbox config used by the image-pull tests.
	testImagePodSandbox = &runtimeapi.PodSandboxConfig{
		Labels: framework.DefaultPodLabels,
	}
})
// Networking test constants
const (
// Path of the resolver configuration inside Linux containers.
resolvConfigPath = "/etc/resolv.conf"
// DNS settings injected into the sandbox and asserted on afterwards.
defaultDNSServer string = "10.10.10.10"
defaultDNSSearch string = "google.com"
defaultDNSOption string = "ndots:8"
// Container-side port the web-server images listen on.
webServerContainerPort int32 = 80
// The following host ports must not be in-use when running the test.
webServerHostPortForPortMapping int32 = 12000
webServerHostPortForPortForward int32 = 12001
// NOTE(review): "Froward" is a typo for "Forward"; the identifier is
// package-level and may be referenced elsewhere, so it is only flagged
// here rather than renamed.
webServerHostPortForHostNetPortFroward int32 = 12002
// The port used in hostNetNginxImage (See images/hostnet-nginx/)
webServerHostNetContainerPort int32 = 12003
// Linux defaults
webServerLinuxImage = "nginx"
hostNetWebServerLinuxImage = "gcr.io/cri-tools/hostnet-nginx-" + runtime.GOARCH
// Windows defaults
webServerWindowsImage = "mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2019"
hostNetWebServerWindowsImage = "mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2019"
)
// OS-neutral networking fixtures; the first group is assigned from the
// Linux or Windows defaults by an AddBeforeSuiteCallback elsewhere in
// this file.
var (
// Image used for the plain web-server test pods.
webServerImage string
// Image used for the host-network web-server test pods.
hostNetWebServerImage string
// Command that prints the container's DNS configuration.
getDNSConfigCmd []string
// Substrings expected in that command's output.
getDNSConfigContent []string
// Command that prints the container's hostname.
getHostnameCmd []string
// Linux defaults
getDNSConfigLinuxCmd = []string{"cat", resolvConfigPath}
getDNSConfigLinuxContent = []string{
"nameserver " + defaultDNSServer,
"search " + defaultDNSSearch,
"options " + defaultDNSOption,
}
getHostnameLinuxCmd = []string{"hostname"}
// Windows defaults
// Windows doesn't support ndots options.
// https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#dns-limitations
getDNSConfigWindowsCmd = []string{"powershell", "/c", "ipconfig /all"}
getDNSConfigWindowsContent = []string{
"DNS Servers . . . . . . . . . . . : " + defaultDNSServer,
"DNS Suffix Search List. . . . . . : " + defaultDNSSearch,
}
getHostnameWindowsCmd = []string{"powershell", "/c", "$env:computername"}
)
// Populate the OS-neutral networking fixtures before the suite runs.
// LCOW (Linux Containers on Windows) uses the Linux values even when the
// host GOOS is windows.
var _ = framework.AddBeforeSuiteCallback(func() {
	if runtime.GOOS == "windows" && !framework.TestContext.IsLcow {
		webServerImage = webServerWindowsImage
		hostNetWebServerImage = hostNetWebServerWindowsImage
		getDNSConfigCmd = getDNSConfigWindowsCmd
		getDNSConfigContent = getDNSConfigWindowsContent
		getHostnameCmd = getHostnameWindowsCmd
	} else {
		webServerImage = webServerLinuxImage
		hostNetWebServerImage = hostNetWebServerLinuxImage
		getDNSConfigCmd = getDNSConfigLinuxCmd
		getDNSConfigContent = getDNSConfigLinuxContent
		getHostnameCmd = getHostnameLinuxCmd
	}
})
// Streaming test constants
const (
// Address and scheme of the runtime's streaming (attach/exec/port-forward) server.
defaultStreamServerAddress string = "127.0.0.1:10250"
defaultStreamServerScheme string = "http"
// Linux defaults
attachEchoHelloLinuxOutput = "hello"
// Windows defaults
// cmd.exe echoes a CRLF pair and re-prints the prompt after the output.
attachEchoHelloWindowsOutput = "hello\r\n\r\nC:\\>"
)
// attachEchoHelloOutput is the expected output of echoing "hello" through
// an attach session; it is assigned per-OS in a BeforeSuite callback.
var attachEchoHelloOutput string
// Choose the OS-specific attach-echo expectation before the suite runs.
// LCOW keeps the Linux expectation even though GOOS is windows.
var _ = framework.AddBeforeSuiteCallback(func() {
	attachEchoHelloOutput = attachEchoHelloLinuxOutput
	if runtime.GOOS == "windows" && !framework.TestContext.IsLcow {
		attachEchoHelloOutput = attachEchoHelloWindowsOutput
	}
})
|
/*
* @File: models.movie.go
* @Description: Defines Movie information will be returned to the clients
* @Author: Nguyen Truong Duong (seedotech@gmail.com)
*/
package models
import "gopkg.in/mgo.v2/bson"
// Movie is the movie document as stored in MongoDB and returned to API
// clients; the bson tags map fields to the collection schema and the
// json tags shape the response payload.
type Movie struct {
ID bson.ObjectId `bson:"_id" json:"id"`
Name string `bson:"name" json:"name"`
URL string `bson:"url" json:"url"`
CoverImage string `bson:"coverImage" json:"coverImage"`
Description string `bson:"description" json:"description"`
}
// AddMovie is the client-supplied payload for creating a movie. It has no
// ID field — the server assigns one. The example tags appear to feed
// generated API docs (e.g. swag); verify against the doc tooling in use.
type AddMovie struct {
Name string `json:"name" example:"Movie Name"`
URL string `json:"url" example:"Movie URL"`
CoverImage string `json:"coverImage" example:"Movie Cover Image"`
Description string `json:"description" example:"Movie Description"`
}
|
package server
import (
"net"
"net/http"
"net/http/httputil"
"net/url"
"time"
)
// override is not referenced anywhere in this chunk. From its shape it
// presumably describes a conditional rewrite rule (when request header
// Header matches Match, redirect to Host/Path) — TODO(review): confirm
// against the code that consumes it before relying on field semantics.
type override struct {
Header string
Match string
Host string
Path string
}
// config is not referenced anywhere in this chunk. It looks like a proxy
// route entry (request Path → upstream Host, with an optional Override
// rule) — TODO(review): confirm against the loader/consumer elsewhere.
type config struct {
Path string
Host string
Override override
}
// BindProxy registers a handler on /videoplayback (default mux) that
// reverse-proxies the request over HTTPS to the host named in the "host"
// query parameter, falling back to a googlevideo.com edge node when the
// parameter is absent. CORS headers are added via enableCors before the
// request is forwarded.
func BindProxy() {
	proxy := &httputil.ReverseProxy{
		Director: func(r *http.Request) {
			target := r.URL.Query().Get("host")
			if target == "" {
				target = "r4---sn-8p8v-bg0sl.googlevideo.com"
			}
			originHost := originHostFromParam(target)
			r.Header.Add("X-Forwarded-Host", r.Host)
			r.Header.Add("X-Origin-Host", originHost)
			r.Host = originHost
			r.URL.Host = originHost
			r.URL.Scheme = "https"
		},
		Transport: &http.Transport{
			// DialContext supersedes the deprecated Transport.Dial field.
			DialContext: (&net.Dialer{
				Timeout: 50 * time.Second,
			}).DialContext,
		},
	}
	http.HandleFunc("/videoplayback", func(w http.ResponseWriter, r *http.Request) {
		enableCors(&w, r)
		proxy.ServeHTTP(w, r)
	})
}

// originHostFromParam extracts the host component from the user-supplied
// "host" value. It accepts full URLs ("https://example.com/x"), bare host
// names ("example.com"), and host:port pairs.
//
// The original code did parsedURL, _ := url.Parse(URL) and then read
// parsedURL.Host unconditionally. That panics on a nil *url.URL when the
// (attacker-controlled) value fails to parse, and — because a scheme-less
// string like the default googlevideo.com host parses into Path, not Host —
// it silently rewrote the request host to "" for bare host names.
func originHostFromParam(raw string) string {
	u, err := url.Parse(raw)
	if err != nil || u.Host == "" {
		// Unparseable or scheme-less: treat the raw value as the host.
		return raw
	}
	return u.Host
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.