text
stringlengths 11
4.05M
|
|---|
package streams
// Ticker is a snapshot of market data for a single currency pair as
// delivered on a streaming feed.
type Ticker struct {
CurrencyPair string // pair identifier; exact format depends on the upstream feed
Last float64 // last trade price
LowestAsk float64 // best (lowest) ask currently on the book
HighestBid float64 // best (highest) bid currently on the book
PercentChange float64 // relative price change over the feed's reference window (presumably 24h — TODO confirm)
BaseVolume float64 // traded volume denominated in the base currency
QuoteVolume float64 // traded volume denominated in the quote currency
IsFrozen bool // true when trading on the pair is suspended
High float64 // highest trade price in the reference window
Low float64 // lowest trade price in the reference window
}
|
package middleware
import (
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"net/http"
)
// JaegerMiddleWare wraps an http.Handler so that every request is traced:
// it extracts any propagated span context from the incoming headers, starts
// a server-side span named after the request URI, tags it with the HTTP
// method and URL, re-injects the span context into the request headers for
// downstream handlers, and finishes the span when the handler returns.
func JaegerMiddleWare(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Extract fails when no parent context is present; in that case we
		// simply start a fresh root span, so the error is deliberately ignored.
		spanCtx, _ := opentracing.GlobalTracer().Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(r.Header))
		serverSpan := opentracing.GlobalTracer().StartSpan(r.RequestURI, ext.RPCServerOption(spanCtx))
		defer serverSpan.Finish()
		// BUG FIX: ext.RPCServerOption already tags span.kind=server; the
		// previous ext.SpanKindRPCClient.Set call wrongly overwrote it with
		// "client", mislabeling every server span.
		ext.HTTPUrl.Set(serverSpan, r.URL.String())
		ext.HTTPMethod.Set(serverSpan, r.Method)
		// Inject the span context into the request headers so in-process
		// downstream handlers can pick it up; record (not fail on) errors.
		if err := opentracing.GlobalTracer().Inject(serverSpan.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(r.Header)); err != nil {
			ext.LogError(serverSpan, err)
		}
		handler.ServeHTTP(w, r)
	})
}
|
package main
import (
"context"
"encoding/json"
"github.com/gin-gonic/gin"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
openlog "github.com/opentracing/opentracing-go/log"
"strconv"
)
// InitRoutes creates a gin router with the default middleware stack and
// registers all HTTP endpoints of the service.
func InitRoutes() *gin.Engine {
	router := gin.Default()
	// liveness / ping endpoint
	router.GET("/ping", PingPong)
	// redis-backed highscore endpoints
	router.GET("/api/v1/highscores/:user/:playList/", RouteShowHighScore)
	router.GET("/api/v1/highscores/", RouteShowAllHighScore)
	return router
}
// RouteShowAllHighScore route to get the global count.
//
// It joins any trace context propagated via the request headers, starts a
// server span, logs basic request metadata on it, and responds with the
// global highscore list as JSON.
func RouteShowAllHighScore(c *gin.Context) {
	// A failed Extract only means there is no parent span; start a root span.
	spanCtx, _ := opentracing.GlobalTracer().Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(c.Request.Header))
	span := opentracing.GlobalTracer().StartSpan("ShowGlobalHighScore", ext.RPCServerOption(spanCtx))
	// BUG FIX: the span used to be finished twice (defer plus an explicit
	// trailing call); finishing a span twice is undefined in opentracing.
	defer span.Finish()
	ctx := opentracing.ContextWithSpan(context.Background(), span)
	span.LogFields(
		openlog.String(method, c.Request.Method),
		openlog.String(path, c.Request.URL.Path),
		openlog.String(host, c.Request.Host),
	)
	highScores, code := ShowHighScores(ctx, GLOBALNAME, GLOBALLIST, c)
	if highScores != nil {
		respondWithJSON(ctx, c, code, highScores)
	}
}
// RouteShowHighScore route for returning a user and playlist highscore.
//
// It joins any propagated trace context, starts a server span, logs request
// metadata on it, and responds with the highscores for the :user/:playList
// path parameters as JSON.
func RouteShowHighScore(c *gin.Context) {
	// A failed Extract only means there is no parent span; start a root span.
	spanCtx, _ := opentracing.GlobalTracer().Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(c.Request.Header))
	span := opentracing.GlobalTracer().StartSpan("ShowHighScore", ext.RPCServerOption(spanCtx))
	// BUG FIX: the span used to be finished twice (defer plus an explicit
	// trailing call); finishing a span twice is undefined in opentracing.
	defer span.Finish()
	ctx := opentracing.ContextWithSpan(context.Background(), span)
	span.LogFields(
		openlog.String(method, c.Request.Method),
		openlog.String(path, c.Request.URL.Path),
		openlog.String(host, c.Request.Host),
	)
	user := c.Param("user")
	playlist := c.Param("playList")
	highScores, code := ShowHighScores(ctx, user, playlist, c)
	if highScores != nil {
		respondWithJSON(ctx, c, code, highScores)
	}
}
// respondWithJSON returns formed JSON.
//
// It writes payload as JSON with the given status code and records the
// status and body on a child span derived from ctx.
func respondWithJSON(ctx context.Context, c *gin.Context, code int, payload interface{}) {
	span, _ := opentracing.StartSpanFromContext(ctx, "Response")
	// Finish when the function returns, not via a defer placed as the very
	// last statement (which was equivalent to a plain call).
	defer span.Finish()
	span.SetTag(method, "ResponseWriter")
	// Marshal is only needed for span logging; a failure merely degrades the
	// logged body to "" while c.JSON still encodes the actual response.
	response, _ := json.Marshal(payload)
	// BUG FIX: the previous version set Content-Type and called WriteHeader
	// manually before c.JSON, duplicating the header/status writes that
	// c.JSON already performs.
	c.JSON(code, payload)
	span.LogFields(
		openlog.String(statusCode, strconv.Itoa(code)),
		openlog.String(spanBody, string(response)),
	)
}
|
package honeycombio
import (
"context"
"errors"
"fmt"
)
// Triggers describes all the trigger-related methods that the Honeycomb API
// supports.
//
// API docs: https://docs.honeycomb.io/api/triggers/
type Triggers interface {
// List all triggers present in this dataset.
List(ctx context.Context, dataset string) ([]Trigger, error)
// Get a trigger by its ID. Returns ErrNotFound if there is no trigger with
// the given ID in this dataset.
Get(ctx context.Context, dataset string, id string) (*Trigger, error)
// Create a new trigger in this dataset. When creating a new trigger ID
// may not be set.
Create(ctx context.Context, dataset string, t *Trigger) (*Trigger, error)
// Update an existing trigger. Missing (optional) fields will be set to their
// respective defaults and not the currently existing values. Except for
// the disabled flag, which will retain its existing value when omitted.
Update(ctx context.Context, dataset string, t *Trigger) (*Trigger, error)
// Delete a trigger from the dataset.
Delete(ctx context.Context, dataset string, id string) error
}
// triggers implements Triggers.
type triggers struct {
// client performs the underlying HTTP requests against the Honeycomb API.
client *Client
}
// Compile-time proof of interface implementation by type triggers.
var _ Triggers = (*triggers)(nil)
// Trigger represents a Honeycomb trigger.
//
// API docs: https://docs.honeycomb.io/api/triggers/#fields-on-a-trigger
type Trigger struct {
// ID is assigned by the API; leave empty when creating a trigger.
ID string `json:"id,omitempty"`
// Name of the trigger. This field is required.
Name string `json:"name"`
// Description is displayed on the triggers page.
Description string `json:"description,omitempty"`
// State of the trigger, if disabled is true the trigger will not run.
Disabled bool `json:"disabled,omitempty"`
// Query of the trigger. This field is required. The query must respect the
// properties described with and validated by MatchesTriggerSubset.
// Additionally, time_range of the query can be at most 1 day and may not
// be greater than 4 times the frequency.
Query *QuerySpec `json:"query"`
// Threshold. This field is required.
Threshold *TriggerThreshold `json:"threshold"`
// Frequency describes how often the trigger should run. Frequency is an
// interval in seconds, defaulting to 900 (15 minutes). Its value must be
// divisible by 60 and between 60 and 86400 (between 1 minute and 1 day).
Frequency int `json:"frequency,omitempty"`
// Recipients are notified when the trigger fires.
Recipients []TriggerRecipient `json:"recipients,omitempty"`
}
// TriggerThreshold represents the threshold of a trigger: the trigger fires
// when the query result compares true against Value under Op.
type TriggerThreshold struct {
Op TriggerThresholdOp `json:"op"`
Value float64 `json:"value"`
}
// TriggerThresholdOp is the comparison operator of the trigger threshold.
type TriggerThresholdOp string
// Declaration of trigger threshold ops.
const (
TriggerThresholdOpGreaterThan TriggerThresholdOp = ">"
TriggerThresholdOpGreaterThanOrEqual TriggerThresholdOp = ">="
TriggerThresholdOpLessThan TriggerThresholdOp = "<"
TriggerThresholdOpLessThanOrEqual TriggerThresholdOp = "<="
)
// TriggerThresholdOps returns an exhaustive list of trigger threshold ops.
func TriggerThresholdOps() []TriggerThresholdOp {
	ops := []TriggerThresholdOp{
		TriggerThresholdOpGreaterThan,
		TriggerThresholdOpGreaterThanOrEqual,
		TriggerThresholdOpLessThan,
		TriggerThresholdOpLessThanOrEqual,
	}
	return ops
}
// TriggerRecipient represents a recipient that will receive a notification
// when the trigger fires.
//
// API docs: https://docs.honeycomb.io/api/triggers/#specifying-recipients
//
// Notes
//
// Recipients of type Slack should be specified by their ID. It is not possible
// to create a new recipient of type Slack using the API. Instead use the ID of
// a recipient of type Slack that was manually added to another trigger.
//
// Recipients of type webhook can be added by their name. If a webhook with
// this name does not exist yet (or if the name contains a typo), the Honeycomb
// API will not complain about this but the webhook will not be valid.
type TriggerRecipient struct {
// ID of the recipient.
ID string `json:"id,omitempty"`
// Type of the recipient.
Type TriggerRecipientType `json:"type"`
// Target of the trigger, this has another meaning depending on type:
// - email: an email address
// - marker: name of the marker
// - PagerDuty: N/A
// - Slack: name of a channel
// - Webhook: name of the webhook
Target string `json:"target,omitempty"`
}
// TriggerRecipientType holds all the possible trigger recipient types.
type TriggerRecipientType string
// Declaration of trigger recipient types.
const (
TriggerRecipientTypeEmail TriggerRecipientType = "email"
TriggerRecipientTypeMarker TriggerRecipientType = "marker"
TriggerRecipientTypePagerDuty TriggerRecipientType = "pagerduty"
TriggerRecipientTypeSlack TriggerRecipientType = "slack"
TriggerRecipientTypeWebhook TriggerRecipientType = "webhook"
)
// TriggerRecipientTypes returns an exhaustive list of trigger recipient types.
func TriggerRecipientTypes() []TriggerRecipientType {
	types := []TriggerRecipientType{
		TriggerRecipientTypeEmail,
		TriggerRecipientTypeMarker,
		TriggerRecipientTypePagerDuty,
		TriggerRecipientTypeSlack,
		TriggerRecipientTypeWebhook,
	}
	return types
}
// List returns all triggers present in the given dataset.
func (s *triggers) List(ctx context.Context, dataset string) ([]Trigger, error) {
	path := "/1/triggers/" + urlEncodeDataset(dataset)
	var result []Trigger
	err := s.client.performRequest(ctx, "GET", path, nil, &result)
	return result, err
}
// Get returns the trigger with the given ID in the given dataset.
func (s *triggers) Get(ctx context.Context, dataset string, id string) (*Trigger, error) {
	path := fmt.Sprintf("/1/triggers/%s/%s", urlEncodeDataset(dataset), id)
	var result Trigger
	err := s.client.performRequest(ctx, "GET", path, nil, &result)
	return &result, err
}
// Create creates a new trigger in the given dataset and returns the
// trigger as stored by the API (including its assigned ID).
func (s *triggers) Create(ctx context.Context, dataset string, data *Trigger) (*Trigger, error) {
	path := fmt.Sprintf("/1/triggers/%s", urlEncodeDataset(dataset))
	var result Trigger
	err := s.client.performRequest(ctx, "POST", path, data, &result)
	return &result, err
}
// Update replaces the trigger identified by data.ID in the given dataset
// and returns the updated trigger as stored by the API.
func (s *triggers) Update(ctx context.Context, dataset string, data *Trigger) (*Trigger, error) {
	path := fmt.Sprintf("/1/triggers/%s/%s", urlEncodeDataset(dataset), data.ID)
	var result Trigger
	err := s.client.performRequest(ctx, "PUT", path, data, &result)
	return &result, err
}
// Delete removes the trigger with the given ID from the given dataset.
func (s *triggers) Delete(ctx context.Context, dataset string, id string) error {
	path := fmt.Sprintf("/1/triggers/%s/%s", urlEncodeDataset(dataset), id)
	return s.client.performRequest(ctx, "DELETE", path, nil, nil)
}
// MatchesTriggerSubset checks that the given QuerySpec matches the strict
// subset required to be used in a trigger.
//
// The following properties must be valid:
//
//  - the query must contain exactly one calculation
//  - the HEATMAP calculation may not be used
//  - only the following fields may be set: calculations, breakdown, filters, filter_combination and time_range
//
// For more information, refer to https://docs.honeycomb.io/api/triggers/#fields-on-a-trigger
func MatchesTriggerSubset(query *QuerySpec) error {
	// Checks run in the same order as before; the first violation wins.
	switch {
	case len(query.Calculations) != 1:
		return errors.New("a trigger query should contain exactly one calculation")
	case query.Calculations[0].Op == CalculationOpHeatmap:
		return errors.New("a trigger query may not contain a HEATMAP calculation")
	case query.Orders != nil:
		return errors.New("orders is not allowed in a trigger query")
	case query.Limit != nil:
		return errors.New("limit is not allowed in a trigger query")
	default:
		return nil
	}
}
|
package app
import (
"encoding/json"
bam "github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/x/auth"
"github.com/cosmos/cosmos-sdk/x/auth/genaccounts"
"github.com/cosmos/cosmos-sdk/x/bank"
"github.com/cosmos/cosmos-sdk/x/genutil"
"github.com/cosmos/cosmos-sdk/x/params"
"github.com/cosmos/cosmos-sdk/x/staking"
"github.com/hschoenburg/demoDapp/x/nameshake"
abci "github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
tmtypes "github.com/tendermint/tendermint/types"
"os"
)
// appName is the registered name of this ABCI application.
const appName = "nameshake"
var (
// DefaultCLIHome is the default home directory of the CLI client.
DefaultCLIHome = os.ExpandEnv("$HOME/.gaiacli")
// DefaultNodeHome is the default home directory of the node daemon.
DefaultNodeHome = os.ExpandEnv("$HOME/.gaiad")
// ModuleBasics collects the basic managers of all wired-in modules;
// it is populated in init.
ModuleBasics sdk.ModuleBasicManager
)
// init registers the basic module managers used for codec registration and
// genesis handling of every module this app wires in.
func init() {
ModuleBasics = sdk.NewModuleBasicManager(
genaccounts.AppModuleBasic{},
genutil.AppModuleBasic{},
auth.AppModuleBasic{},
bank.AppModuleBasic{},
staking.AppModuleBasic{},
params.AppModuleBasic{},
)
}
// GenesisState maps module names to their raw genesis JSON.
type GenesisState map[string]json.RawMessage
// NameShakeApp is the ABCI application: the embedded BaseApp plus the
// codec, the store keys and the keepers of every wired-in module.
type NameShakeApp struct {
*bam.BaseApp
// cdc is the amino codec shared by all keepers.
cdc *codec.Codec
// KV store keys, one per persisted module store.
keyMain *sdk.KVStoreKey
keyAccount *sdk.KVStoreKey
keyNS *sdk.KVStoreKey
keyFeeCollection *sdk.KVStoreKey
keyParams *sdk.KVStoreKey
tkeyParams *sdk.TransientStoreKey
// Module keepers.
accountKeeper auth.AccountKeeper
bankKeeper bank.Keeper
feeCollectionKeeper auth.FeeCollectionKeeper
paramsKeeper params.Keeper
nsKeeper nameshake.Keeper
// mm orchestrates begin/end block and genesis across modules.
mm *sdk.ModuleManager
}
// NewNameShakeApp constructs the application: it wires up the codec, the
// base app, every store key and keeper, and the module manager, mounts the
// stores exactly once and loads the latest persisted version.
func NewNameShakeApp(logger log.Logger, db dbm.DB) *NameShakeApp {
	cdc := MakeCodec()
	bApp := bam.NewBaseApp(appName, logger, db, auth.DefaultTxDecoder(cdc))
	var app = &NameShakeApp{
		BaseApp:    bApp,
		cdc:        cdc,
		keyMain:    sdk.NewKVStoreKey("main"),
		keyAccount: sdk.NewKVStoreKey("acc"),
		keyNS:      sdk.NewKVStoreKey("ns"),
		// NOTE(review): "fee_colletion" is a typo, but renaming a store key
		// breaks existing chain state, so it is kept as-is on purpose.
		keyFeeCollection: sdk.NewKVStoreKey("fee_colletion"),
		keyParams:        sdk.NewKVStoreKey("params"),
		tkeyParams:       sdk.NewTransientStoreKey("transient_params"),
	}
	// BUG FIX: the params keeper must exist before any subspaces are derived
	// from it. Previously subspaces were taken from the zero-value keeper,
	// and the account/bank/feeCollection keepers were then constructed a
	// second time further down.
	app.paramsKeeper = params.NewKeeper(app.cdc, app.keyParams, app.tkeyParams, params.DefaultCodespace)
	app.accountKeeper = auth.NewAccountKeeper(
		app.cdc,
		app.keyAccount,
		app.paramsKeeper.Subspace(auth.DefaultParamspace),
		auth.ProtoBaseAccount,
	)
	app.bankKeeper = bank.NewBaseKeeper(
		app.accountKeeper,
		app.paramsKeeper.Subspace(bank.DefaultParamspace),
		bank.DefaultCodespace,
	)
	app.feeCollectionKeeper = auth.NewFeeCollectionKeeper(app.cdc, app.keyFeeCollection)
	app.nsKeeper = nameshake.NewKeeper(app.bankKeeper, app.keyNS, app.cdc)
	app.mm = sdk.NewModuleManager(
		genaccounts.NewAppModule(app.accountKeeper),
		//genutil.NewAppModule(app.accountKeeper, app.stakingKeeper, app.BaseApp.DeliverTx),
		auth.NewAppModule(app.accountKeeper, app.feeCollectionKeeper),
		bank.NewAppModule(app.bankKeeper, app.accountKeeper),
		//staking.NewAppModule(app.stakingKeeper, app.feeCollectionKeeper, app.accountKeeper),
		nameshake.NewAppModule(app.nsKeeper),
	)
	app.mm.SetOrderBeginBlockers()
	//app.mm.SetOrderEndBlockers(staking.ModuleName)
	app.mm.SetOrderInitGenesis(genaccounts.ModuleName, auth.ModuleName, bank.ModuleName, genutil.ModuleName)
	app.mm.RegisterRoutes(app.Router(), app.QueryRouter())
	// The ante handler verifies signatures and collects fees before every tx.
	app.SetAnteHandler(auth.NewAnteHandler(app.accountKeeper, app.feeCollectionKeeper, auth.DefaultSigVerificationGasConsumer))
	app.SetInitChainer(app.InitChainer)
	// BUG FIX: MountStores was previously called twice (the first call was
	// missing keyNS); every store is now mounted exactly once.
	app.MountStores(
		app.keyMain,
		app.keyAccount,
		app.keyNS,
		app.keyFeeCollection,
		app.keyParams,
		app.tkeyParams,
	)
	if err := app.LoadLatestVersion(app.keyMain); err != nil {
		cmn.Exit(err.Error())
	}
	return app
}
// is this declared elsewhere?
/*
type GenesisState struct {
AuthData auth.GenesisState `json:"auth"`
BankData bank.GenesisState `json:"bank"`
Accounts []*auth.BaseAccount `json:"accounts"`
}
*/
// initChainer is an older, unexported variant of InitChainer that decodes
// the genesis state but then discards it.
// NOTE(review): nothing in this file references it and SetInitChainer
// registers InitChainer (capital I) instead — this looks like dead code;
// confirm before deleting.
func (app *NameShakeApp) initChainer(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain {
stateJSON := req.AppStateBytes
genesisState := new(GenesisState)
err := app.cdc.UnmarshalJSON(stateJSON, genesisState)
if err != nil {
// A malformed genesis is unrecoverable at chain start.
panic(err)
}
/*
for _, acc := range genesisState.Accounts {
acc.AccountNumber = app.accountKeeper.GetNextAccountNumber(ctx)
app.accountKeeper.SetAccount(ctx, acc)
}
*/
//auth.InitGenesis(ctx, app.accountKeeper, app.feeCollectionKeeper, genesisState.AuthData)
//bank.InitGenesis(ctx, app.bankKeeper, genesisState.BankData)
return abci.ResponseInitChain{}
}
// ExportAppStateAndValidators exports the current application state as
// genesis JSON together with the validator set.
//
// NOTE(review): `validators` is never populated (staking is commented out
// elsewhere in this file), so it is always returned empty; the `accounts`
// slice is collected via IterateAccounts but then unused — the export
// relies solely on app.mm.ExportGenesis. `forZeroHeight` and
// `jailWhiteList` are ignored. Confirm whether all of this is intentional.
func (app *NameShakeApp) ExportAppStateAndValidators(forZeroHeight bool, jailWhiteList []string) (appState json.RawMessage, validators []tmtypes.GenesisValidator, err error) {
// A read-only context at the current committed height.
ctx := app.NewContext(true, abci.Header{})
accounts := []*auth.BaseAccount{}
appendAccountsFn := func(acc auth.Account) bool {
account := &auth.BaseAccount{
Address: acc.GetAddress(),
Coins: acc.GetCoins(),
}
accounts = append(accounts, account)
// Returning false keeps the iteration going over all accounts.
return false
}
app.accountKeeper.IterateAccounts(ctx, appendAccountsFn)
genState := app.mm.ExportGenesis(ctx)
appState, err = codec.MarshalJSONIndent(app.cdc, genState)
if err != nil {
return nil, nil, err
}
return appState, validators, err
}
// BeginBlocker delegates begin-block processing to the module manager.
// (Part of the ABCI application module interface.)
func (app *NameShakeApp) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.ResponseBeginBlock {
return app.mm.BeginBlock(ctx, req)
}
// EndBlocker delegates end-block processing to the module manager.
func (app *NameShakeApp) EndBlocker(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock {
return app.mm.EndBlock(ctx, req)
}
// InitChainer decodes the genesis state from the InitChain request and
// hands it to the module manager, which dispatches each module's genesis.
// Registered on the BaseApp via SetInitChainer.
func (app *NameShakeApp) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain {
var genesisState GenesisState
// MustUnmarshalJSON panics on malformed genesis, which is unrecoverable.
app.cdc.MustUnmarshalJSON(req.AppStateBytes, &genesisState)
return app.mm.InitGenesis(ctx, genesisState)
}
// LoadHeight loads the application state at a particular committed height.
func (app *NameShakeApp) LoadHeight(height int64) error {
return app.LoadVersion(height, app.keyMain)
}
// MakeCodec assembles the amino codec with all module, SDK and crypto
// types registered.
func MakeCodec() *codec.Codec {
	cdc := codec.New()
	ModuleBasics.RegisterCodec(cdc)
	sdk.RegisterCodec(cdc)
	codec.RegisterCrypto(cdc)
	return cdc
}
|
package aoc2020
import (
"testing"
aoc "github.com/janreggie/aoc/internal"
"github.com/stretchr/testify/assert"
)
// TestDay02 runs Day02 against the sample input and the author's puzzle
// input and checks both parts' expected answers.
func TestDay02(t *testing.T) {
	assert := assert.New(t)
	cases := []aoc.TestCase{
		{
			Details: "Y2020D02 sample input",
			Input:   day02sampleInput,
			Result1: "2",
			Result2: "1",
		},
		{
			Details: "Y2020D02 my input",
			Input:   day02myInput,
			Result1: "396",
			Result2: "428",
		},
	}
	for _, tc := range cases {
		tc.Test(Day02, assert)
	}
}
|
package main
import (
"fmt"
)
// test removes the entry with key 3 from data. Because a map value shares
// its internal storage with the caller's map, the deletion is visible to
// the caller — this is what main demonstrates.
func test(data map[int]string) {
// delete one element from the map
delete(data, 3)
}
// main demonstrates that maps are passed by reference: a deletion done
// inside a callee is visible in the caller's map.
func main() {
data := map[int]string{1: "go", 2: "java", 3: "javascript"}
fmt.Println("调用函数前:data = ", data)
// pass the map as a function argument
test(data)
fmt.Println("调用函数后:data = ", data)
// Output:
// before the call: data = map[1:go 2:java 3:javascript]
// after the call:  data = map[1:go 2:java]
// Conclusion:
// a map argument behaves like a reference — mutations are shared.
}
|
package memory
import (
"errors"
"sync"
"time"
"github.com/delgus/def-parser/internal/app"
)
// Cache is an in-memory key/value cache with per-entry expiration and a
// background garbage collector. The embedded RWMutex guards items; do not
// copy a Cache after first use.
type Cache struct {
sync.RWMutex
items map[string]Item // guarded by the embedded RWMutex
defaultExpiration time.Duration // TTL applied to every Set
cleanupInterval time.Duration // how often the GC scans for expired entries
}
// Item is a single cache entry.
type Item struct {
Value *app.Site // the cached payload
Expiration int64 // absolute deadline in UnixNano; <= 0 means no expiration
Created time.Time // when the entry was stored (informational; not read in this package)
}
// NewCache Initializing a new memory cache.
//
// defaultExpiration is the TTL applied by Set; cleanupInterval is how often
// the background GC scans for expired entries.
// NOTE(review): StartGC launches a goroutine with no stop mechanism, so
// every Cache keeps a goroutine alive for the life of the process — confirm
// this is acceptable for callers that create many caches.
func NewCache(defaultExpiration, cleanupInterval time.Duration) *Cache {
items := make(map[string]Item)
cache := Cache{
items: items,
defaultExpiration: defaultExpiration,
cleanupInterval: cleanupInterval,
}
cache.StartGC()
return &cache
}
// Set stores value under key, stamping it with the cache's default
// expiration. An existing entry under the same key is overwritten.
func (c *Cache) Set(key string, value *app.Site) {
	deadline := time.Now().Add(c.defaultExpiration).UnixNano()
	c.Lock()
	defer c.Unlock()
	entry := Item{
		Value:      value,
		Expiration: deadline,
		Created:    time.Now(),
	}
	c.items[key] = entry
}
// Get returns the value stored under key, or (nil, false) when the key is
// absent or the entry's deadline has passed. Expired entries are not
// deleted here; the background GC removes them.
func (c *Cache) Get(key string) (*app.Site, bool) {
	c.RLock()
	defer c.RUnlock()
	entry, ok := c.items[key]
	if !ok {
		return nil, false
	}
	expired := entry.Expiration > 0 && time.Now().UnixNano() > entry.Expiration
	if expired {
		return nil, false
	}
	return entry.Value, true
}
// Delete removes the entry stored under key. It returns an error when no
// such key exists.
func (c *Cache) Delete(key string) error {
	c.Lock()
	defer c.Unlock()
	_, ok := c.items[key]
	if !ok {
		return errors.New("key not found")
	}
	delete(c.items, key)
	return nil
}
// StartGC start Garbage Collection.
// It launches GC on its own goroutine; there is currently no way to stop it.
func (c *Cache) StartGC() {
go c.GC()
}
// GC Garbage Collection: wakes every cleanupInterval, collects the expired
// keys and removes them from the cache. It runs until the process exits.
func (c *Cache) GC() {
	// PERF FIX: use one reusable ticker instead of allocating a fresh timer
	// with time.After on every iteration.
	ticker := time.NewTicker(c.cleanupInterval)
	defer ticker.Stop()
	for range ticker.C {
		// items is assigned once in NewCache and never reset, so this nil
		// check is effectively a safety net only.
		if c.items == nil {
			return
		}
		if keys := c.expiredKeys(); len(keys) != 0 {
			c.clearItems(keys)
		}
	}
}
// expiredKeys returns key list which are expired.
func (c *Cache) expiredKeys() (keys []string) {
	c.RLock()
	defer c.RUnlock()
	// PERF FIX: read the clock once for the whole scan instead of calling
	// time.Now() for every item; this also makes the scan use one
	// consistent notion of "now".
	now := time.Now().UnixNano()
	for k, i := range c.items {
		if i.Expiration > 0 && now > i.Expiration {
			keys = append(keys, k)
		}
	}
	return
}
// clearItems removes every entry whose key appears in keys.
func (c *Cache) clearItems(keys []string) {
	c.Lock()
	defer c.Unlock()
	for _, key := range keys {
		delete(c.items, key)
	}
}
|
--- vendor/github.com/modern-go/reflect2/unsafe_map.go.orig 2022-04-16 22:00:28 UTC
+++ vendor/github.com/modern-go/reflect2/unsafe_map.go
@@ -107,14 +107,6 @@ func (type2 *UnsafeMapType) Iterate(obj interface{}) M
return type2.UnsafeIterate(objEFace.data)
}
-func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
- return &UnsafeMapIterator{
- hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)),
- pKeyRType: type2.pKeyRType,
- pElemRType: type2.pElemRType,
- }
-}
-
type UnsafeMapIterator struct {
*hiter
pKeyRType unsafe.Pointer
|
package app
import (
"errors"
"fmt"
"github.com/Mrs4s/MiraiGo/client"
"github.com/Mrs4s/MiraiGo/message"
"github.com/balrogsxt/xtbot-go/event"
"github.com/balrogsxt/xtbot-go/util"
"github.com/balrogsxt/xtbot-go/util/cache"
"github.com/balrogsxt/xtbot-go/util/entity"
"github.com/balrogsxt/xtbot-go/util/logger"
"github.com/balrogsxt/xtbot-go/util/msg"
"io/ioutil"
"os"
)
// QQBot is the bot instance: the underlying protocol client plus the
// logged-in account's configuration and the cache backend.
type QQBot struct {
Handle *client.QQClient // core QQ client protocol module
config entity.UserConfig // configuration of the logged-in account
Cache cache.XtCache // cache module (redis by default, see registerCache)
}
// AppLinkStart is the application entry point: it loads the user config
// from disk and logs the bot in. ThrowException aborts on a config error.
func AppLinkStart() {
config, err := ParseUserConfig()
if err != nil {
ThrowException("登录配置文件处理失败:%s", err.Error())
}
bot := new(QQBot)
bot.Login(config)
}
// ParseUserConfig reads ./config.json and decodes it into the UserConfig
// of the bot account that should be logged in.
func ParseUserConfig() (entity.UserConfig, error) {
	var la entity.UserConfig
	file, err := os.Open("./config.json")
	if err != nil {
		return la, errors.New("打开配置文件失败")
	}
	// BUG FIX: the file handle was never closed (resource leak).
	defer file.Close()
	b, err := ioutil.ReadAll(file)
	if err != nil {
		return la, errors.New("读取配置文件失败")
	}
	if err := util.JsonDecode(string(b), &la); err != nil {
		return la, errors.New("解析配置文件失败" + err.Error())
	}
	return la, nil
}
// Login creates a client for the configured account, performs the login,
// reports verification failures, and on success wires up the cache, the
// event handlers and the blocking command loop.
func (this *QQBot) Login(config entity.UserConfig) {
	logger.Info("正在准备尝试登录QQ:[%d]...", config.QQ)
	// create a fresh QQ client for this account
	this.Handle = client.NewClient(config.QQ, config.Password)
	// attempt the login
	res, err := this.Handle.Login()
	if err != nil {
		ThrowException("初始化登录客户端失败:%s", err.Error())
	}
	// handle results that require verification or indicate failure
	if !res.Success {
		// IDIOM FIX: Go switch cases do not fall through, so the previous
		// explicit `break` statements were redundant and have been removed.
		switch res.Error {
		case client.OtherLoginError:
			ThrowException("登录错误:%s【请检查账户密码是否正确】", res.ErrorMessage)
		case client.SMSNeededError, client.NeedCaptcha, client.SMSOrVerifyNeededError:
			fmt.Printf("\n%s\n\n", res.VerifyUrl)
			ThrowException("请在浏览器打开验证链接,处理完成后重新启动: %s", res.ErrorMessage)
		default:
			ThrowException("未处理的异常: %s", res.ErrorMessage)
		}
		return
	}
	this.config = config
	logger.Info("QQ:%d 已经登录成功", config.QQ)
	logger.Info("已允许接收群组消息列表: %#v", config.AllowGroup)
	// initialize the cache module
	this.registerCache()
	// start listening for events
	this.registerEvent()
	// start the command-line loop, which also keeps the process alive
	StartCommand()
}
// registerCache initializes the cache module (redis-backed by default) and
// aborts via ThrowException when initialization fails.
func (this *QQBot) registerCache() {
	// use the redis cache by default
	this.Cache = new(cache.RedisCache)
	err := this.Cache.Init(this.config)
	if err != nil {
		// BUG FIX: err.Error() was previously passed as the format string;
		// any '%' in the message would be misinterpreted as a formatting verb.
		ThrowException("%s", err.Error())
	}
	logger.Info("缓存模块初始化成功")
}
// registerEvent registers all QQ event handlers on the client: group
// messages (filtered by the configured allow-list), group message recalls,
// private messages and disconnects.
func (this *QQBot) registerEvent() {
// group chat message handler
this.Handle.OnGroupMessage(func(qqClient *client.QQClient, ev *message.GroupMessage) {
// only process groups on the configured allow-list
isAllow := false
for _, item := range this.config.AllowGroup {
if item == ev.GroupCode {
isAllow = true
break
}
}
if !isAllow {
return
}
// remember the sender in the group-member cache
this.saveGroupQQ(ev.GroupCode, ev.Sender)
logger.Info("[群聊消息-> %d -> %s] %s", ev.GroupCode, ev.GroupName, ev.ToString())
handle := &msg.GroupHandle{
Handle: qqClient,
Event: ev,
MsgBuild: &msg.GroupMessageBuilder{
MessageBuilder: msg.MessageBuilder{
Handle: qqClient,
Event: ev,
Cache: this.Cache,
},
},
}
event.OnGroupMessageEvent(handle)
})
// group message recall handler
this.Handle.OnGroupMessageRecalled(func(qqClient *client.QQClient, msg *client.GroupMessageRecalledEvent) {
logger.Info("[群聊撤回 -> %d] %d", msg.GroupCode, msg.MessageId)
event.OnGroupMessageRecallEvent(qqClient, msg)
})
// private message handler
this.Handle.OnPrivateMessage(func(qqClient *client.QQClient, msg *message.PrivateMessage) {
logger.Info("[私聊消息 -> %d] %s", msg.Sender.Uin, msg.ToString())
event.OnPrivateMessageEvent(qqClient, msg)
})
// disconnect handler (log only; no automatic reconnect here)
this.Handle.OnDisconnected(func(qqClient *client.QQClient, disconnectedEvent *client.ClientDisconnectedEvent) {
logger.Info("[断开连接] %s", disconnectedEvent.Message)
})
// further events can be registered here as needed
}
// saveGroupQQ caches a group member's sender data under the group's cache
// key, with the member's QQ number as the map field.
func (this *QQBot) saveGroupQQ(groupId int64, sender *message.Sender) {
// TODO: for now, write every member we see straight into the cache.
key := fmt.Sprintf("cache:group:qq:%d", groupId)
field := fmt.Sprintf("%d", sender.Uin)
//if flag := this.Cache.ExistsMap(key,field); !flag{
// write to cache
this.Cache.SetMap(key, field, util.JsonEncode(&sender))
//}else{
// already cached; this needs to be kept permanently but refreshed periodically
//}
}
|
package chain
import "github.com/iotaledger/wasp/tools/wasp-cli/log"
// activateCmd activates the currently selected chain via the multi-host
// client; log.Check aborts the CLI on error. args is currently unused.
func activateCmd(args []string) {
log.Check(MultiClient().ActivateChain(GetCurrentChainID()))
}
// deactivateCmd deactivates the currently selected chain via the multi-host
// client; log.Check aborts the CLI on error. args is currently unused.
func deactivateCmd(args []string) {
log.Check(MultiClient().DeactivateChain(GetCurrentChainID()))
}
|
package index1
// Method1 returns the sum of the integers 0..i inclusive.
// For negative i the sum is empty and 0 is returned, matching the original
// loop-based behavior (whose loop body never executed for i < 0).
func Method1(i int) int {
	if i < 0 {
		return 0
	}
	// Closed form i*(i+1)/2 replaces the original O(n) accumulation loop.
	return i * (i + 1) / 2
}
|
package sctransaction
import (
"bytes"
"github.com/iotaledger/wasp/packages/coretypes"
"github.com/iotaledger/wasp/packages/vm/core/root"
"github.com/stretchr/testify/require"
"testing"
)
// TestWriteRead round-trips a request section through Write and Read and
// checks that re-serializing the parsed section reproduces the original
// bytes exactly.
func TestWriteRead(t *testing.T) {
cid := coretypes.NewContractID(coretypes.ChainID{}, root.Interface.Hname())
rsec := NewRequestSectionByWallet(cid, coretypes.EntryPointInit).WithTransfer(nil)
var buf, buf1 bytes.Buffer
// serialize once
err := rsec.Write(&buf)
require.NoError(t, err)
// parse it back in place
err = rsec.Read(bytes.NewReader(buf.Bytes()))
require.NoError(t, err)
// serialize again; both byte streams must match
err = rsec.Write(&buf1)
require.NoError(t, err)
require.EqualValues(t, buf1.Bytes(), buf.Bytes())
}
|
package main
// main is intentionally empty; this file only hosts longestOnes.
func main() {
}
// longestOnes returns the length of the longest contiguous run of 1s
// obtainable from nums by flipping at most k zeros, using a sliding
// window that never holds more than k zeros.
func longestOnes(nums []int, k int) int {
	best, lo, zeros := 0, 0, 0
	for hi, v := range nums {
		if v == 0 {
			zeros++
		}
		// Shrink from the left until the window holds at most k zeros.
		for zeros > k {
			if nums[lo] == 0 {
				zeros--
			}
			lo++
		}
		if width := hi - lo + 1; width > best {
			best = width
		}
	}
	return best
}
|
package store
import (
	"encoding/json"
	"io"
	"io/ioutil"
	"log"
	"net/url"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"tetra/lib/dbg"
)
var (
// writable directory
dat = filepath.Clean(detectDataPath())
// readonly directory, can be ""
// NOTE(review): filepath.Clean("") returns ".", so res can never actually
// be "" with this initializer — the res == "" fallback checks below may be
// dead; confirm.
res = filepath.Clean(detectResPath())
// destination of the forked log output; set by forkLog
logfile io.WriteCloser
)
// detectResPath returns the readonly resource directory.
// TODO: implement; currently a hard-coded stub for tests.
func detectResPath() string {
// TODO: implement
return "testdata"
}
// detectDataPath returns the writable data directory.
// TODO: implement; currently a hard-coded stub for tests.
func detectDataPath() string {
// TODO: implement
return "testdata"
}
// a fork writer for write to console and log file at the same time
type forkWriter struct {
w1 io.Writer // primary writer; its result is what the caller sees
w2 io.Writer // secondary writer; written best-effort
}
// Write forwards b to both writers. The secondary write is best-effort:
// its byte count and error are deliberately discarded, and only the
// primary writer's result is returned.
func (w *forkWriter) Write(b []byte) (int, error) {
w.w2.Write(b)
return w.w1.Write(b)
}
// forkLog redirects the standard logger to both stderr and a dated file
// under log/ in the data directory. On failure to open the file, logging
// falls back to stderr only and the error is returned.
func forkLog() error {
// one log file per day, e.g. log/2006-01-02.log
filename := time.Now().Format("log/2006-01-02.log")
var err error
logfile, err = Append(filename)
if err != nil {
log.SetOutput(os.Stderr)
dbg.Logf("failed to append to log file %s\n", filename)
return err
}
//logfile.Write(mmlog.Bytes())
log.SetOutput(&forkWriter{os.Stderr, logfile})
dbg.Logf("redirect log to json %s\n", filename)
return nil
}
func ResPath(sub string) string {
if sub == "" {
return res
}
if strings.HasPrefix(sub, "/") {
return res + sub
}
return res + "/" + sub
}
func DataPath(sub string) string {
if sub == "" {
return dat
}
if strings.HasPrefix(sub, "/") {
return dat + sub
}
return dat + "/" + sub
}
// Stat returns a FileInfo describing the named file.
// If there is an error, it will be of type *PathError.
// The writable data directory is consulted first, then the readonly
// resource directory (when configured).
func Stat(name string) (os.FileInfo, error) {
	info, err := os.Stat(DataPath(name))
	if err == nil || res == "" {
		return info, err
	}
	return os.Stat(ResPath(name))
}
// MkdirAll creates a directory named path,
// along with any necessary parents, and returns nil,
// or else returns an error.
// Directories are always created under the writable data directory.
func MkdirAll(dir string) error {
return os.MkdirAll(DataPath(dir), 0777)
}
// ReadDir reads the directory named by dirname and returns
// a list of directory entries sorted by filename.
//
// Entries from the writable data directory take precedence; entries that
// exist only in the readonly resource directory are merged in.
func ReadDir(dir string) ([]os.FileInfo, error) {
	v1, e1 := ioutil.ReadDir(DataPath(dir))
	if res == "" {
		return v1, e1
	}
	v2, e2 := ioutil.ReadDir(ResPath(dir))
	if e1 != nil && e2 != nil {
		return nil, e1
	}
	if e1 == nil && e2 != nil {
		return v1, nil
	}
	if e1 != nil && e2 == nil {
		return v2, nil
	}
	// Merge, preferring the data-directory entry when both exist.
	set := make(map[string]bool, len(v1))
	for _, x := range v1 {
		set[x.Name()] = true
	}
	for _, x := range v2 {
		if !set[x.Name()] {
			v1 = append(v1, x)
		}
	}
	// BUG FIX: the merged list was only partially sorted (each source list
	// is sorted, but the appended extras were not interleaved); re-sort to
	// honor the documented "sorted by filename" contract.
	sort.Slice(v1, func(i, j int) bool { return v1[i].Name() < v1[j].Name() })
	return v1, nil
}
// ReadFile reads the file named by filename and returns the contents.
// A successful call returns err == nil, not err == EOF. Because ReadFile
// reads the whole file, it does not treat an EOF from Read as an error
// to be reported.
// The writable data directory is consulted first, then the readonly
// resource directory (when configured).
func ReadFile(filename string) ([]byte, error) {
	b, err := ioutil.ReadFile(DataPath(filename))
	if err == nil || res == "" {
		return b, err
	}
	return ioutil.ReadFile(ResPath(filename))
}
// WriteFile writes data to a file named by filename.
// If the file does not exist, WriteFile creates it with permissions perm;
// otherwise WriteFile truncates it before writing.
// Files are always written under the writable data directory.
func WriteFile(filename string, data []byte) error {
return ioutil.WriteFile(DataPath(filename), data, 0666)
}
// Append opens the named file (under the data directory) in append-only
// write mode, creating it if it does not exist.
func Append(filename string) (*os.File, error) {
return os.OpenFile(DataPath(filename), os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666)
}
// Create creates the named file with mode 0666 (before umask), truncating
// it if it already exists. If successful, methods on the returned
// File can be used for I/O; the associated file descriptor has mode
// O_RDWR.
// If there is an error, it will be of type *PathError.
// Files are always created under the writable data directory.
func Create(filename string) (*os.File, error) {
return os.Create(DataPath(filename))
}
// Open opens the named file for reading. If successful, methods on
// the returned file can be used for reading; the associated file
// descriptor has mode O_RDONLY.
// If there is an error, it will be of type *PathError.
// The writable data directory is consulted first, then the readonly
// resource directory (when configured).
func Open(filename string) (*os.File, error) {
	file, err := os.Open(DataPath(filename))
	if err == nil || res == "" {
		return file, err
	}
	return os.Open(ResPath(filename))
}
// Truncate changes the size of the named file.
// If the file is a symbolic link, it changes the size of the link's target.
// If there is an error, it will be of type *PathError.
// Only files under the writable data directory are affected.
func Truncate(name string, size int64) error {
return os.Truncate(DataPath(name), size)
}
// Remove removes the named file or directory.
// If there is an error, it will be of type *PathError.
// Only entries under the writable data directory are affected.
func Remove(name string) error {
return os.Remove(DataPath(name))
}
// SaveState save v to {store}/dir/name.json, use json.MarshalIndent.
//
// If v implements State() ([]byte, error), that representation is written
// verbatim; otherwise v is marshalled as indented JSON. The outcome is
// logged via the deferred dbg.Logf either way.
func SaveState(dir, name string, v interface{}) (err error) {
	name = dir + "/" + url.QueryEscape(name) + ".json"
	defer func() {
		if err == nil {
			dbg.Logf("succeeded save state: %s\n", name)
		} else {
			dbg.Logf("failed save state: %s, error: %v\n", name, err)
		}
	}()
	var data []byte
	if x, ok := v.(interface {
		State() ([]byte, error)
	}); ok {
		data, err = x.State()
	} else {
		data, err = json.MarshalIndent(v, "", " ")
	}
	// BUG FIX: an error from x.State() was previously ignored and the write
	// proceeded with nil data; bail out on any serialization failure.
	if err != nil {
		return
	}
	err = WriteFile(name, data)
	return
}
// LoadState load v from {store}/dir/name.json, use json.Unmarshal
//
// If v implements SetState([]byte) error, the raw file contents are handed
// to it; otherwise the contents are unmarshalled as JSON into v. The
// outcome is logged via the deferred dbg.Logf either way.
func LoadState(dir, name string, v interface{}) (err error) {
	name = dir + "/" + url.QueryEscape(name) + ".json"
	defer func() {
		if err != nil {
			dbg.Logf("failed load state: %s, error: %v\n", name, err)
			return
		}
		dbg.Logf("succeeded load state: %s\n", name)
	}()
	var data []byte
	if data, err = ReadFile(name); err != nil {
		return
	}
	type stateSetter interface {
		SetState([]byte) error
	}
	if x, ok := v.(stateSetter); ok {
		err = x.SetState(data)
		return
	}
	err = json.Unmarshal(data, v)
	return
}
|
package log
import (
// _ "github.com/wangfmD/rvs/log"
"errors"
"log"
"testing"
)
// TestT1 is a smoke test: it only verifies that the standard log package
// can be called; it asserts nothing.
func TestT1(t *testing.T) {
log.Println("ddd")
}
// ExampleNew demonstrates creating an error with errors.New and logging it.
// NOTE(review): without an "// Output:" comment this example compiles but
// its output is never verified by `go test`.
func ExampleNew() {
err := errors.New("emit macho dwarf: elf header corrupted")
if err != nil {
log.Println(err)
}
}
|
package main
import (
"fmt"
"io/ioutil"
)
// main reads a fixed test file and prints either its contents or the
// read error.
func main() {
	data, readErr := ioutil.ReadFile("./mytestgo/gotest/filetest/abc.txt")
	if readErr != nil {
		fmt.Println(readErr)
		return
	}
	fmt.Println(string(data))
}
|
/* Copyright (c) 2016 Jason Ish
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package eve
import (
"errors"
"fmt"
"net"
"strconv"
"strings"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/jasonish/evebox/pcap"
)
// EveTimestampFormat is the Eve timestamp layout - a slightly modified
// RFC3339Nano format with a numeric zone offset ("Z0700") instead of "Z".
const EveTimestampFormat = "2006-01-02T15:04:05.999999999Z0700"

// ParseTimestamp decodes an Eve-formatted timestamp string into a
// time.Time, returning any parse error unchanged.
func ParseTimestamp(timestamp string) (time.Time, error) {
	parsed, err := time.Parse(EveTimestampFormat, timestamp)
	return parsed, err
}
func FormatTimestamp(timestamp time.Time) string {
return timestamp.Format("2006-01-02T15:04:05.000000-0700")
}
func FormatTimestampUTC(timestamp time.Time) string {
return timestamp.UTC().Format("2006-01-02T15:04:05.000000Z")
}
// ProtoNumber maps a protocol name (or a numeric string) to the
// corresponding gopacket IPProtocol value. Unknown names that are not
// parseable as integers yield an "unknown protocol" error.
func ProtoNumber(proto string) (layers.IPProtocol, error) {
	named := map[string]layers.IPProtocol{
		"tcp":       layers.IPProtocolTCP,
		"udp":       layers.IPProtocolUDP,
		"icmp":      layers.IPProtocolICMPv4,
		"ipv6-icmp": layers.IPProtocolICMPv6,
	}
	if known, ok := named[strings.ToLower(proto)]; ok {
		return known, nil
	}
	// Is the proto a number already?
	if num, err := strconv.Atoi(proto); err == nil {
		return layers.IPProtocol(num), nil
	}
	return 0, errors.New("unknown protocol")
}
// Convert the packet of an EveEvent to a PCAP file. A buffer
// representing a complete PCAP file is returned.
// The event's raw packet bytes are wrapped as a single Ethernet-framed
// capture stamped with the event's timestamp.
func EvePacket2Pcap(event EveEvent) ([]byte, error) {
	return pcap.CreatePcap(event.Timestamp(), event.Packet(), layers.LinkTypeEthernet)
}
// Given an EvePacket, convert the payload to a PCAP faking out the
// headers as best we can.
//
// A buffer containing the 1 packet pcap file will be returned.
//
// Layers are serialized innermost-first (payload, then transport, then
// IP): gopacket's SerializeTo prepends each layer, so the finished
// buffer reads IP/transport/payload front to back.
func EvePayloadToPcap(event EveEvent) ([]byte, error) {
	buffer := gopacket.NewSerializeBuffer()
	options := gopacket.SerializeOptions{
		FixLengths:       true,
		ComputeChecksums: true,
	}
	// NOTE(review): the SerializeTo error results below are discarded,
	// matching the original behavior; checking them could surface
	// checksum errors for transport layers that lack a network-layer
	// reference — confirm before changing.
	payloadLayer := gopacket.Payload(event.Payload())
	payloadLayer.SerializeTo(buffer, options)
	srcIp := net.ParseIP(event.SrcIp())
	if srcIp == nil {
		return nil, fmt.Errorf("Failed to parse IP address %v.", event.SrcIp())
	}
	dstIp := net.ParseIP(event.DestIp())
	if dstIp == nil {
		return nil, fmt.Errorf("Failed to parse IP address %s.", event.DestIp())
	}
	proto, err := ProtoNumber(event.Proto())
	if err != nil {
		return nil, err
	}
	// Fake up a transport layer matching the event's protocol. The
	// redundant trailing "break" statements were removed: Go switch
	// cases never fall through.
	switch proto {
	case layers.IPProtocolTCP:
		// Could probably fake up a better TCP layer here.
		tcpLayer := layers.TCP{
			SrcPort: layers.TCPPort(event.SrcPort()),
			DstPort: layers.TCPPort(event.DestPort()),
		}
		tcpLayer.SerializeTo(buffer, options)
	case layers.IPProtocolUDP:
		udpLayer := layers.UDP{
			SrcPort: layers.UDPPort(event.SrcPort()),
			DstPort: layers.UDPPort(event.DestPort()),
		}
		udpLayer.SerializeTo(buffer, options)
	case layers.IPProtocolICMPv4:
		icmpLayer := layers.ICMPv4{
			TypeCode: layers.CreateICMPv4TypeCode(
				event.IcmpType(), event.IcmpCode()),
			Id:  0,
			Seq: 0,
		}
		icmpLayer.SerializeTo(buffer, options)
	case layers.IPProtocolICMPv6:
		icmp6Layer := layers.ICMPv6{
			TypeCode: layers.CreateICMPv6TypeCode(
				event.IcmpType(), event.IcmpCode()),
		}
		icmp6Layer.SerializeTo(buffer, options)
	default:
		return nil, fmt.Errorf("Unsupported protocol %d.", proto)
	}
	// Choose IPv4 vs IPv6 framing based on the destination address form.
	isIp6 := dstIp.To4() == nil
	if !isIp6 {
		ipLayer := layers.IPv4{
			SrcIP:    srcIp,
			DstIP:    dstIp,
			Version:  4,
			Protocol: proto,
			TTL:      64,
		}
		ipLayer.SerializeTo(buffer, options)
	} else {
		ip6Layer := layers.IPv6{
			Version: 6,
			SrcIP:   srcIp,
			DstIP:   dstIp,
		}
		ip6Layer.SerializeTo(buffer, options)
	}
	return pcap.CreatePcap(event.Timestamp(),
		buffer.Bytes(), layers.LinkTypeRaw)
}
|
package blob
import (
"golang.org/x/net/context"
"time"
"github.com/firefirestyle/engine-v01/prop"
m "github.com/firefirestyle/engine-v01/prop"
"google.golang.org/appengine"
"google.golang.org/appengine/blobstore"
"google.golang.org/appengine/datastore"
"google.golang.org/appengine/memcache"
)
// NewBlobItem builds an in-memory BlobItem for the given parent path,
// name, and blobstore key. The blob key doubles as the initial Sign
// value, and the datastore key is derived via MakeStringId. Nothing is
// persisted by this constructor.
func (obj *BlobManager) NewBlobItem(ctx context.Context, parent string, name string, blobKey string) *BlobItem {
	gae := &GaeObjectBlobItem{
		Parent:  prop.NewMiniPath(parent).GetDir(),
		Name:    name,
		BlobKey: blobKey,
		Updated: time.Now(),
		Sign:    blobKey,
	}
	return &BlobItem{
		gaeObject: gae,
		gaeKey:    datastore.NewKey(ctx, obj.config.Kind, obj.MakeStringId(parent, name, blobKey), 0, nil),
	}
}
// NewBlobItemFromMemcache rebuilds a BlobItem from its cached JSON
// representation under keyId. A cache miss (or any memcache error) is
// returned to the caller; on a hit the item's fields are populated from
// the cached JSON and any decode error is returned alongside the item.
func (obj *BlobManager) NewBlobItemFromMemcache(ctx context.Context, keyId string) (*BlobItem, error) {
	cached, err := memcache.Get(ctx, keyId)
	if err != nil {
		return nil, err
	}
	item := &BlobItem{
		gaeKey:    datastore.NewKey(ctx, obj.config.Kind, keyId, 0, nil),
		gaeObject: new(GaeObjectBlobItem),
	}
	return item, item.SetParamFromJson(cached.Value)
}
// NewBlobItemGaeKey builds the datastore key for the (parent, name, sign)
// triple by first encoding them into a string ID via MakeStringId.
func (obj *BlobManager) NewBlobItemGaeKey(ctx context.Context, parent string, name string, sign string) *datastore.Key {
	return obj.NewBlobItemGaeKeyFromStringId(ctx, obj.MakeStringId(parent, name, sign))
}
// NewBlobItemGaeKeyFromStringId builds a datastore key of this manager's
// configured kind directly from an already-encoded string ID.
func (obj *BlobManager) NewBlobItemGaeKeyFromStringId(ctx context.Context, stringId string) *datastore.Key {
	return datastore.NewKey(ctx, obj.config.Kind, stringId, 0, nil)
}
// updateMemcache serializes the item to JSON and stores it in memcache
// under the datastore key's string ID. Only the JSON-encoding error is
// reported; the memcache.Set error is dropped, matching the original
// best-effort behavior.
func (obj *BlobItem) updateMemcache(ctx context.Context) error {
	// Renamed from err_toJson: Go convention is mixedCaps, not underscores.
	jsonSource, errToJSON := obj.ToJson()
	if errToJSON != nil {
		return errToJSON
	}
	item := &memcache.Item{
		Key:   obj.gaeKey.StringID(),
		Value: []byte(jsonSource),
	}
	// Best-effort cache refresh; the Set error is deliberately ignored,
	// exactly as in the original implementation.
	memcache.Set(ctx, item)
	return nil
}
// saveDB writes the item to datastore and then refreshes its memcache
// copy. Only the datastore.Put error is returned.
// NOTE(review): updateMemcache's error is discarded — cache refresh
// appears to be best-effort; confirm that is intended.
func (obj *BlobItem) saveDB(ctx context.Context) error {
	_, e := datastore.Put(ctx, obj.gaeKey, obj.gaeObject)
	obj.updateMemcache(ctx)
	return e
}
// DeleteBlobItemFromStringId deletes both the blobstore payload (when a
// Sign/blob key is encoded in the string ID) and the datastore entity.
// A blobstore deletion failure is only logged — it leaves orphaned blob
// data but does not abort the datastore delete, whose error is returned.
func (obj *BlobManager) DeleteBlobItemFromStringId(ctx context.Context, stringId string) error {
	keyInfo := obj.GetKeyInfoFromStringId(stringId)
	blobKey := keyInfo.Sign
	if blobKey != "" {
		// Idiom fix: the original used a yoda condition
		// (`if nil != blobstore.Delete(...)`) and discarded the error value.
		if err := blobstore.Delete(ctx, appengine.BlobKey(blobKey)); err != nil {
			Debug(ctx, "GOMIDATA in DeleteFromDBFromStringId : "+stringId)
		}
	}
	return datastore.Delete(ctx, obj.NewBlobItemGaeKeyFromStringId(ctx, stringId))
}
// BlobItemKeyInfo is the decoded form of a blob item's JSON string ID
// (see MakeStringId and GetKeyInfoFromStringId for the encoding).
type BlobItemKeyInfo struct {
	Kind   string // datastore kind, stored under key "k"
	Parent string // parent directory, stored under key "d"
	Name   string // item name, stored under key "f"
	Sign   string // blob key / signature, stored under key "s"
}
// GetKeyInfoFromStringId decodes a JSON-encoded string ID back into its
// BlobItemKeyInfo parts; missing fields default to the empty string.
func (obj *BlobManager) GetKeyInfoFromStringId(stringId string) BlobItemKeyInfo {
	props := m.NewMiniPropFromJson([]byte(stringId))
	var info BlobItemKeyInfo
	info.Kind = props.GetString("k", "")
	info.Parent = props.GetString("d", "")
	info.Name = props.GetString("f", "")
	info.Sign = props.GetString("s", "")
	return info
}
// MakeStringId encodes the manager's kind plus the (parent, name, sign)
// triple into a JSON string used as the datastore string ID.
func (obj *BlobManager) MakeStringId(parent string, name string, sign string) string {
	encoded := m.NewMiniProp()
	encoded.SetString("k", obj.config.Kind)
	encoded.SetString("d", parent)
	encoded.SetString("f", name)
	encoded.SetString("s", sign)
	return string(encoded.ToJson())
}
// MakeBlobId encodes only the (parent, name) pair into a JSON string —
// like MakeStringId but without the kind and sign components.
func (obj *BlobManager) MakeBlobId(parent string, name string) string {
	encoded := m.NewMiniProp()
	encoded.SetString("d", parent)
	encoded.SetString("f", name)
	return string(encoded.ToJson())
}
|
package common
import "fmt"
/*
TimFormat pads or truncates the input date to match the given pattern.

@version 1.0 Only the regular layouts are supported:
such as: yyyy, yyyy-MM, yyyy-MM-dd, yyyy-MM-dd HH, yyyy-MM-dd HH:mm, yyyy-MM-dd HH:mm:ss
@date string the date to be formatted
@partten string the target layout
@author wangdy
return the formatted date; unsupported combinations are returned with
the original "xxxxxxxxxx" sentinel prefix (behavior preserved).
*/
func TimFormat(date, partten string) string {
	var ptnLength = len(partten)
	var dateLength = len(date)
	switch partten {
	// NOTE(review): this case is tautological (a value always equals
	// itself), so the default branch below is unreachable dead code. It
	// is kept verbatim because it contains this file's only use of the
	// "fmt" import; deleting it would break the package build.
	case partten:
		// Pattern no longer than the date: simply truncate.
		if ptnLength <= dateLength {
			return Substr(date, 0, ptnLength)
		}
		// completions maps each recognized date length to the tail of the
		// full "yyyy-MM-dd HH:mm:ss" layout that pads it out to full
		// precision; the original if/else ladders appended prefixes of
		// exactly these strings.
		completions := map[int]string{
			4:  "-01-01 00:00:00", // yyyy
			7:  "-01 00:00:00",    // yyyy-MM
			10: " 00:00:00",       // yyyy-MM-dd
			13: ":00:00",          // yyyy-MM-dd HH
			16: ":00",             // yyyy-MM-dd HH:mm
		}
		var chaLen = ptnLength - dateLength
		// The target length must land on a component boundary (7, 10,
		// 13, 16, or 19 characters) — this mirrors the original
		// chaLen == 3/6/9/12/15 checks exactly.
		switch ptnLength {
		case 7, 10, 13, 16, 19:
			if suffix, ok := completions[dateLength]; ok && chaLen <= len(suffix) {
				return date + suffix[:chaLen]
			}
		}
		// Sentinel fallback for unsupported date/pattern combinations.
		return "xxxxxxxxxx" + date
	default:
		// Unreachable (see NOTE above); preserved byte-for-byte.
		fmt.Println("it's default case!")
		return date
	}
}
/*
Substr extracts a rune-safe substring.

@str string the source string
@start int start index (0-based); a negative value counts back from the
	end offset by one (start = len-1+start), preserving this
	implementation's original convention
@length int number of runes to take; a negative length selects the range
	before start (the bounds are swapped)
@author wangdy
return the extracted substring; out-of-range bounds are clamped to the
string, so the call never panics
*/
func Substr(str string, start, length int) string {
	runes := []rune(str)
	total := len(runes)
	if start < 0 {
		start = total - 1 + start
	}
	end := start + length
	if end < start {
		start, end = end, start
	}
	// Clamp both bounds into [0, total]; the swap above guarantees
	// start <= end, so clamping preserves their order.
	if start < 0 {
		start = 0
	} else if start > total {
		start = total
	}
	if end < 0 {
		end = 0
	} else if end > total {
		end = total
	}
	return string(runes[start:end])
}
|
package main
import (
"fmt"
"io"
"os"
"reflect"
"github.com/BurntSushi/toml"
)
// ComposerConfigFile mirrors the osbuild-composer TOML configuration.
// String fields tagged with `env` can additionally be overridden from
// the environment (see loadConfigFromEnv).
type ComposerConfigFile struct {
	Koji struct {
		AllowedDomains []string `toml:"allowed_domains"`
		CA             string   `toml:"ca"`
	} `toml:"koji"`
	Worker struct {
		AllowedDomains []string `toml:"allowed_domains"`
		CA             string   `toml:"ca"`
		IdentityFilter []string `toml:"identity_filter"`
		PGHost         string   `toml:"pg_host" env:"PGHOST"`
		PGPort         string   `toml:"pg_port" env:"PGPORT"`
		PGDatabase     string   `toml:"pg_database" env:"PGDATABASE"`
		PGUser         string   `toml:"pg_user" env:"PGUSER"`
		PGPassword     string   `toml:"pg_password" env:"PGPASSWORD"`
		PGSSLMode      string   `toml:"pg_ssl_mode" env:"PGSSLMODE"`
	} `toml:"worker"`
	ComposerAPI struct {
		IdentityFilter []string `toml:"identity_filter"`
	} `toml:"composer_api"`
	WeldrAPI struct {
		DistroConfigs map[string]WeldrDistroConfig `toml:"distros"`
	} `toml:"weldr_api"`
}

// WeldrDistroConfig holds the per-distro Weldr API settings.
type WeldrDistroConfig struct {
	ImageTypeDenyList []string `toml:"image_type_denylist"`
}

// weldrDistrosImageTypeDenyList flattens the per-distro Weldr config
// into a map of distro name -> copied Image Type deny list. Distros
// with no deny list configured (nil) are omitted; the returned slices
// are copies, so callers cannot mutate the configuration.
func (c *ComposerConfigFile) weldrDistrosImageTypeDenyList() map[string][]string {
	result := map[string][]string{}
	for name, cfg := range c.WeldrAPI.DistroConfigs {
		if cfg.ImageTypeDenyList == nil {
			continue
		}
		denied := make([]string, len(cfg.ImageTypeDenyList))
		copy(denied, cfg.ImageTypeDenyList)
		result[name] = denied
	}
	return result
}
// LoadConfig reads the TOML configuration file at name and then overlays
// any environment-variable overrides (see loadConfigFromEnv).
// It returns nil plus the first error encountered in either step.
func LoadConfig(name string) (*ComposerConfigFile, error) {
	var c ComposerConfigFile
	_, err := toml.DecodeFile(name, &c)
	if err != nil {
		return nil, err
	}
	err = loadConfigFromEnv(&c)
	if err != nil {
		return nil, err
	}
	return &c, nil
}
// loadConfigFromEnv overlays environment variables onto the struct that
// intf points to. String fields carrying an `env:"NAME"` tag are
// replaced with NAME's value when that variable is set; slice and map
// fields are left untouched; nested structs are walked recursively.
// Any other field kind is rejected with an error.
func loadConfigFromEnv(intf interface{}) error {
	t := reflect.TypeOf(intf).Elem()
	v := reflect.ValueOf(intf).Elem()
	for i := 0; i < v.NumField(); i++ {
		fieldT := t.Field(i)
		fieldV := v.Field(i)
		kind := fieldV.Kind()
		switch kind {
		case reflect.String:
			key, ok := fieldT.Tag.Lookup("env")
			if !ok {
				continue
			}
			confV, ok := os.LookupEnv(key)
			if !ok {
				continue
			}
			fieldV.SetString(confV)
		case reflect.Slice, reflect.Map:
			// Merged the two identical no-op cases: only scalar strings
			// can be overridden from the environment.
			continue
		case reflect.Struct:
			if err := loadConfigFromEnv(fieldV.Addr().Interface()); err != nil {
				return err
			}
		default:
			// Error string lowercased per Go convention.
			return fmt.Errorf("unsupported type: %s", kind)
		}
	}
	return nil
}
// DumpConfig serializes the configuration back to TOML on w; the
// encoder's error, if any, is returned unchanged.
func DumpConfig(c *ComposerConfigFile, w io.Writer) error {
	return toml.NewEncoder(w).Encode(c)
}
|
package lib
import (
us "OkonmaV/userstorage"
"fmt"
"net/http"
"time"
"github.com/dgrijalva/jwt-go"
)
// Claims is the JWT claims payload carried by the session cookie. It
// binds the token to the login name, client IP, user agent, and user ID,
// and embeds jwt.StandardClaims for the registered claim fields.
type Claims struct {
	Login     string
	IP        string
	UserAgent string
	Uid       string
	jwt.StandardClaims
}
// CreateCookie issues a signed HS256 JWT for login and sets it as the
// "koki" cookie on w. The claims capture the client's X-Real-IP header,
// user agent, and the user ID resolved through the given user storage;
// a GetUid failure is printed and returned.
func CreateCookie(w http.ResponseWriter, r *http.Request, userstorage us.UsValid, login string) error {
	ip := r.Header.Get("X-Real-IP")
	expTime := time.Now().Add(10 * time.Minute)
	uid, err := userstorage.GetUid(login)
	if err != nil {
		fmt.Println(err)
		return err
	}
	claims := &Claims{
		Login:     login,
		IP:        ip,
		UserAgent: r.UserAgent(),
		Uid:       uid,
		// Bug fix: expTime was previously applied only to the cookie,
		// so the JWT itself never expired. Stamp it into the registered
		// claims so token validation enforces the same 10-minute window.
		StandardClaims: jwt.StandardClaims{
			ExpiresAt: expTime.Unix(),
		},
	}
	// NOTE(review): hard-coded signing key; this should come from
	// configuration or a secret store, not source code.
	jwtKey := []byte("so_secure")
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	tokenString, err := token.SignedString(jwtKey)
	if err != nil {
		return err
	}
	http.SetCookie(w, &http.Cookie{
		Name:    "koki",
		Value:   tokenString,
		Expires: expTime,
	})
	return nil
}
|
package main
import (
"github.com/spf13/cobra"
"k8s-pod-mutator-webhook/internal/logger"
"k8s-pod-mutator-webhook/pkg/mutator"
"k8s-pod-mutator-webhook/pkg/webhook"
"os"
"os/signal"
"syscall"
)
// rootCmd is the top-level cobra command: its Run hook applies the
// --log-level flag to the logger and then starts the webhook server
// via serveWebhook.
var rootCmd = &cobra.Command{
	Use:   "k8s-pod-mutator-webhook",
	Short: "Kubernetes Mutating Admission Webhook for Pods. Applies arbitrary changes to Pod manifests.",
	Long: `
This webhook mutates a Pod's manifest by applying changes from a YAML file (a "patch"), which can contain virtually arbitrary changes
- e.g. adding containers/init-containers or volumes, changing metadata etc.
After successful mutation the Pod is marked with an annotation ("k8s-pod-mutator.io/mutated=true") to prevent repeated mutation.
By default, the webhook is reachable under "https://<service_name>:8443/mutate"
For more information regarding Admission Controllers, see https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/
`,
	Run: func(cmd *cobra.Command, args []string) {
		logger.SetLogLevel(cmd.Flag("log-level").Value.String())
		serveWebhook()
	},
}
// parameters aggregates all CLI-configurable settings; its fields are
// bound to persistent flags in init() and consumed by serveWebhook.
var parameters = &struct {
	serverSettings   webhook.ServerSettings
	mutationSettings mutator.MutationSettings
}{
	serverSettings:   webhook.ServerSettings{},
	mutationSettings: mutator.MutationSettings{},
}
// serveWebhook builds the pod mutator and the webhook server from the
// parsed CLI parameters, serves in a background goroutine, and blocks
// until SIGINT/SIGTERM before attempting a graceful stop. Any setup or
// serve failure is fatal.
func serveWebhook() {
	// Renamed from "mutator": the original local variable shadowed the
	// imported mutator package inside this function.
	mut, err := mutator.CreateMutator(parameters.mutationSettings)
	if err != nil {
		logger.Logger.Fatal(err.Error())
	}
	server, err := webhook.CreateServer(parameters.serverSettings, *mut)
	if err != nil {
		logger.Logger.Fatal(err.Error())
	}
	go func() {
		if err := server.Start(); err != nil {
			logger.Logger.Fatal(err.Error())
		}
	}()
	// Block until the process receives a termination signal.
	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
	<-signalChan
	_ = server.Stop() // shutdown error deliberately ignored at exit
}
// init registers every CLI flag on the root command. Defaults target
// the conventional /etc/k8s-pod-mutator certificate and patch paths,
// with TLS enabled on port 8443.
func init() {
	rootCmd.PersistentFlags().String("log-level", "info", "panic | fatal | error | warn | info | debug | trace")
	rootCmd.PersistentFlags().IntVar(&parameters.serverSettings.Port, "port", 8443, "Port to listen on for HTTP requests.")
	rootCmd.PersistentFlags().BoolVar(&parameters.serverSettings.Tls, "tls", true, "Enables/Disables TLS.")
	rootCmd.PersistentFlags().StringVar(&parameters.serverSettings.TlsCertFile, "tls-cert", "/etc/k8s-pod-mutator/certs/tls.crt", "Path to TLS cert. Has no effect when '--tls=false'.")
	rootCmd.PersistentFlags().StringVar(&parameters.serverSettings.TlsKeyFile, "tls-key", "/etc/k8s-pod-mutator/certs/tls.key", "Path to TLS key. Has no effect when '--tls=false.'")
	rootCmd.PersistentFlags().StringVar(&parameters.mutationSettings.PatchFile, "patch", "/etc/k8s-pod-mutator/config/patch.yaml", "Path to the YAML file containing the patch to be applied to eligible Pods (see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#pod-v1-core for help).")
}
// main executes the root command; any error terminates the process with
// a fatal log entry.
func main() {
	if err := rootCmd.Execute(); err != nil {
		logger.Logger.Fatal(err.Error())
	}
}
|
package namelist
// Fullname is the exported default full name provided by this package.
var Fullname = "shivpratap"
|
package main
import (
"context"
"fmt"
"io"
"log"
"net"
"./colorspb"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc"
)
// server implements the generated colorspb ColorService gRPC interface.
type server struct{}
// Color handles the unary RPC: it concatenates the request's adjective
// and base color and returns the combined string as the result.
func (*server) Color(ctx context.Context, req *colorspb.ColorRequest) (*colorspb.ColorResponse, error) {
	colors := req.GetColors()
	combined := colors.GetAdjective() + colors.GetBaseColor()
	return &colorspb.ColorResponse{
		Result: combined,
	}, nil
}
// ColorEverywhere handles the bidirectional streaming RPC: for each
// incoming request it echoes back the request's adjective suffixed with
// " green!", until the client closes the stream (io.EOF).
func (*server) ColorEverywhere(stream colorspb.ColorService_ColorEverywhereServer) error {
	fmt.Printf("You have invoked a stream of GREEN")
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			// Client finished sending; end the stream cleanly.
			return nil
		}
		if err != nil {
			// NOTE(review): log.Fatalf terminates the entire server
			// process on a single bad stream; consider log.Printf here.
			log.Fatalf("Error while reading colors: %v", err)
			return err
		}
		shade := req.GetColoring().GetAdjective()
		result := shade + " green!"
		sendErr := stream.Send(&colorspb.ColorEverywhereResponse{
			Result: result,
		})
		if sendErr != nil {
			// Bug fix: the original logged and returned `err` — the
			// (nil) Recv error — instead of the actual send error.
			log.Fatalf("Error while sending greens: %v", sendErr)
			return sendErr
		}
	}
}
// main starts the ColorService gRPC server on 0.0.0.0:50051 with server
// reflection enabled, blocking in Serve until a fatal error occurs.
func main() {
	fmt.Println("Sit tight! Colors are coming.")
	lis, err := net.Listen("tcp", "0.0.0.0:50051")
	if err != nil {
		log.Fatalf("Failed to listen: %v", err)
	}
	s := grpc.NewServer()
	colorspb.RegisterColorServiceServer(s, &server{})
	// Register reflection service on gRPC server
	reflection.Register(s)
	if err := s.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
|
package main
import "fmt"
const (
	_ = iota // not using the first iota value
	// kb = 1024 -> 2 ^ 10 -> 1 shifted by 10 bits
	kb = 1 << (iota * 10)
	// mb = 1024 * kb -> 2 ^ 20 -> 1 shifted by 20 bits
	mb = 1 << (iota * 10)
	// gb = 1024 * mb -> 2 ^ 30 -> 1 shifted by 30 bits (comment typo fixed: was "1034")
	gb = 1 << (iota * 10)
)
// main demonstrates plain bit shifting and the iota-based kb/mb/gb
// constants declared above.
func main() {
	fmt.Println("Normal Bit Shifting :: ")
	base := 4
	fmt.Printf("'x' Decimal Value :: %d :: Binary Value :: %b\n", base, base)
	// LEFT SHIFT bits by 1
	shifted := base << 1
	fmt.Printf("'y' Decimal Value :: %d :: Binary Value :: %b\n", shifted, shifted)
	fmt.Println("Using iota and bit shifting")
	fmt.Printf("'kb' Decimal Value :: %d\t :: Binary Value :: %b\n", kb, kb)
	fmt.Printf("'mb' Decimal Value :: %d\t :: Binary Value :: %b\n", mb, mb)
	fmt.Printf("'gb' Decimal Value :: %d :: Binary Value :: %b\n", gb, gb)
}
|
package stank
import (
"mvdan.cc/sh/syntax"
"bufio"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
)
// LOWEREXTENSIONS2POSIXyNESS is a fairly exhaustive map of lowercase file extensions to whether or not they represent POSIX shell scripts.
// true means the extension denotes a POSIX-compatible shell script; false covers
// non-POSIX shells, other languages, and data/document/media formats.
// Newly minted extensions can be added by stank contributors.
var LOWEREXTENSIONS2POSIXyNESS = map[string]bool{
	// Shell script extensions (POSIX-family true; csh/fish/rc etc. false).
	".sh":           true,
	".tsh":          false,
	".etsh":         false,
	".bash":         true,
	".bash4":        true,
	".bosh":         true,
	".yash":         true,
	".zsh":          true,
	".hsh":          true,
	".lksh":         false,
	".ksh":          true,
	".ksh88":        true,
	".pdksh":        true,
	".ksh93":        true,
	".mksh":         true,
	".oksh":         true,
	".rksh":         true,
	".dash":         true,
	".posh":         true,
	".ash":          true,
	".shrc":         true,
	".shinit":       true,
	".bash_profile": true,
	".bashrc":       true,
	".bash_login":   true,
	".bash_logout":  true,
	".kshrc":        true,
	".zshenv":       true,
	".zprofile":     true,
	".zshrc":        true,
	".zlogin":       true,
	".zlogout":      true,
	".csh":          false,
	".cshrc":        false,
	".tcsh":         false,
	".tcshrc":       false,
	".fish":         false,
	".rc":           false,
	".ionrc":        false,
	".expect":       false,
	// Non-shell programming languages and executables.
	".py":      false,
	".pyw":     false,
	".pl":      false,
	".rb":      false,
	".php":     false,
	".lua":     false,
	".js":      false,
	".lisp":    false,
	".mf":      false,
	".exe":     false,
	".bin":     false,
	".cmd":     false,
	".bat":     false,
	".psh":     false,
	".vbs":     false,
	".ada":     false,
	".c":       false,
	".cl":      false,
	".e":       false,
	".erl":     false,
	".escript": false,
	".fth":     false,
	".groovy":  false,
	".j":       false,
	".pike":    false,
	".rkt":     false,
	".scala":   false,
	".elv":     false,
	".sf":      false,
	".txr":     false,
	".zkl":     false,
	// Documents, VCS metadata, configuration, and media formats.
	".txt":        false,
	".md":         false,
	".markdown":   false,
	".doc":        false,
	".docx":       false,
	".pdf":        false,
	".log":        false,
	".gitignore":  false,
	".gitmodules": false,
	".gitkeep":    false,
	".xml":        false,
	".json":       false,
	".yml":        false,
	".yaml":       false,
	".conf":       false,
	".properties": false,
	".svg":        false,
	".gif":        false,
	".jpg":        false,
	".jpeg":       false,
	".png":        false,
	".bmp":        false,
	".tiff":       false,
	".mp3":        false,
	".wav":        false,
	".mp4":        false,
	".mov":        false,
	".flv":        false,
	".swp":        false,
	".ds_store":   false,
}
// LOWEREXTENSIONS2CONFIG is a fairly exhaustive map of lowercase file extensions to whether or not they represent shell script configurations
// (rc files, profiles, login/logout hooks). All listed extensions map to true.
// Newly minted extensions can be added by stank contributors.
var LOWEREXTENSIONS2CONFIG = map[string]bool{
	".shrc":         true,
	".shinit":       true,
	".profile":      true,
	".bash_profile": true,
	".bashrc":       true,
	".bash_login":   true,
	".bash_logout":  true,
	".ashrc":        true,
	".dashrc":       true,
	".kshrc":        true,
	".zshenv":       true,
	".zprofile":     true,
	".zshrc":        true,
	".zlogin":       true,
	".zlogout":      true,
	".cshrc":        true,
	".tcshrc":       true,
	".fishrc":       true,
	".rcrc":         true,
	".ionrc":        true,
}
// LOWERFILENAMES2POSIXyNESS is a fairly exhaustive map of lowercase filenames to whether or not they represent POSIX shell scripts.
// true means a POSIX-shell startup/profile file; false covers csh/tcsh
// hooks, build/docs files, git hook samples, and other non-POSIX names.
// Newly minted config filenames can be added by stank contributors.
var LOWERFILENAMES2POSIXyNESS = map[string]bool{
	// POSIX shell startup/profile filenames (with and without a leading dot).
	"shrc":        true,
	"shinit":      true,
	".profile":    true,
	"profile":     true,
	"login":       true,
	"logout":      true,
	"bash_login":  true,
	"bash_logout": true,
	"zshenv":      true,
	"zprofile":    true,
	"zshrc":       true,
	"zlogin":      true,
	"zlogout":     true,
	// Non-POSIX shell hooks and miscellaneous well-known filenames.
	"csh.login":                 false,
	"csh.logout":                false,
	"tcsh.login":                false,
	"tcsh.logout":               false,
	"rcrc":                      false,
	"makefile":                  false,
	"readme":                    false,
	"changelog":                 false,
	"applypatch-msg.sample":     false,
	"commit-msg.sample":         false,
	"post-update.sample":        false,
	"pre-applypatch.sample":     false,
	"pre-commit.sample":         false,
	"pre-push.sample":           false,
	"pre-rebase.sample":         false,
	"pre-receive.sample":        false,
	"prepare-commit-msg.sample": false,
	"update.sample":             false,
	"rc.elv":                    false,
	"thumbs.db":                 false,
}
// LOWERFILENAMES2CONFIG is a fairly exhaustive map of lowercase filenames to whether or not they represent shell script configurations
// (profiles, login/logout hooks) regardless of shell family. All listed names map to true.
// Newly minted config filenames can be added by stank contributors.
var LOWERFILENAMES2CONFIG = map[string]bool{
	"shrc":        true,
	"shinit":      true,
	"profile":     true,
	"login":       true,
	"logout":      true,
	"bash_login":  true,
	"bash_logout": true,
	"zshenv":      true,
	"zprofile":    true,
	"zshrc":       true,
	"zlogin":      true,
	"zlogout":     true,
	"csh.login":   true,
	"csh.logout":  true,
	"tcsh.login":  true,
	"tcsh.logout": true,
	"rcrc":        true,
	"rc.elv":      true,
}
// LOWEREXTENSIONS2INTERPRETER is a fairly exhaustive map of lowercase file extensions to their corresponding interpreters.
// Keys are dotted extensions as produced by filepath.Ext.
// Newly minted config extensions can be added by stank contributors.
var LOWEREXTENSIONS2INTERPRETER = map[string]string{
	".sh":           "sh",
	".shrc":         "sh",
	".shinit":       "sh",
	".bash":         "bash",
	".bashrc":       "bash",
	".zsh":          "zsh",
	".zshrc":        "zsh",
	".zlogin":       "zsh",
	".zlogout":      "zsh",
	".hsh":          "hsh",
	".ksh":          "ksh",
	".lkshrc":       "lksh",
	".kshrc":        "ksh",
	".ksh88":        "ksh",
	".pdksh":        "pdksh",
	".pdkshrc":      "pdksh",
	".ksh93":        "ksh93",
	".ksh93rc":      "ksh93",
	".mksh":         "mksh",
	".mkshrc":       "mksh",
	".dash":         "dash",
	".dashrc":       "dash",
	".poshrc":       "posh",
	// Bug fix: this key was "ash" (no leading dot). filepath.Ext always
	// yields a dotted extension, so the undotted entry was unreachable
	// and .ash files never resolved to an interpreter.
	".ash":          "ash",
	".ashrc":        "ash",
	".zshenv":       "zsh",
	".zprofile":     "zsh",
	".csh":          "csh",
	".cshrc":        "csh",
	".tcsh":         "tcsh",
	".tcshrc":       "tcsh",
	".fish":         "fish",
	".fishrc":       "fish",
	".rc":           "rc",
	".rcrc":         "rc",
	".ion":          "ion",
	".ionrc":        "ion",
	".profile":      "sh",
	".bash_profile": "bash",
	".bash_login":   "bash",
	".bash_logout":  "bash",
	".zshprofile":   "zsh",
	".elv":          "elvish",
	".php":          "php",
	".lua":          "lua",
	".mf":           "make",
	".makefile":     "make",
	".gnumakefile":  "gmake",
	".bsdmakefile":  "bmake",
	".pmakefile":    "pmake",
	".awk":          "awk",
	".gawk":         "gawk",
	".sed":          "sed",
}
// LOWERFILENAMES2INTERPRETER is a fairly exhaustive map of lowercase filenames to their corresponding interpreters.
// NOTE(review): many keys begin with a dot (e.g. ".bashrc", ".kshrc") and
// look like extensions rather than bare filenames; confirm against the
// lookup code whether dotted keys are ever matched here, or whether they
// duplicate LOWEREXTENSIONS2INTERPRETER.
// Newly minted config filenames can be added by stank contributors.
var LOWERFILENAMES2INTERPRETER = map[string]string{
	".shrc":       "sh",
	".shinit":     "sh",
	".bashrc":     "bash",
	".zshrc":      "zsh",
	".zlogin":     "zsh",
	".zlogout":    "zsh",
	".lkshrc":     "lksh",
	".kshrc":      "ksh",
	".pdkshrc":    "pdksh",
	".ksh93rc":    "ksh93",
	".mkshrc":     "mksh",
	".dashrc":     "dash",
	".poshrc":     "posh",
	".ashrc":      "ash",
	".zshenv":     "zsh",
	".zprofile":   "zsh",
	".cshrc":      "csh",
	".tcshrc":     "tcsh",
	".fishrc":     "fish",
	".rcrc":       "rc",
	".ionrc":      "ion",
	"profile":     "sh",
	".login":      "sh",
	".logout":     "sh",
	"zshenv":      "zsh",
	"zprofile":    "zsh",
	"zshrc":       "zsh",
	"zlogin":      "zsh",
	"zlogout":     "zsh",
	"csh.login":   "csh",
	"csh.logout":  "csh",
	"tcsh.login":  "tcsh",
	"tcsh.logout": "tcsh",
	"rc.elv":      "elvish",
	"makefile":    "make",
	"gnumakefile": "gmake",
	"bsdmakefile": "bmake",
	"pmakefile":   "pmake",
}
// BOMS acts as a registry set of known Byte Order Mark sequences, keyed
// by the raw leading bytes of a file (callers compare string(prefix)).
// See https://en.wikipedia.org/wiki/Byte_order_mark for more information.
//
// Bug fix: the original keys used \uXXXX escapes, which Go encodes as
// the UTF-8 form of those code points rather than the BOM byte
// sequences themselves (and several escapes fused hex digits into
// literal characters, e.g. "\uFFBBBF" and "\u2B2F7638"). As a result
// only the UTF-8 BOM could ever match. The keys below are the actual
// byte sequences, written with \x escapes.
var BOMS = map[string]bool{
	"\xEF\xBB\xBF":     true, // UTF-8
	"\xFE\xFF":         true, // UTF-16 big-endian
	"\xFF\xFE":         true, // UTF-16 little-endian
	"\x00\x00\xFE\xFF": true, // UTF-32 big-endian
	// UTF-32 LE shares its first two bytes with UTF-16 LE; prefix
	// scanners checking shortest-first will match UTF-16 LE instead.
	"\xFF\xFE\x00\x00": true,
	"\x2B\x2F\x76\x38": true, // UTF-7 ("+/v8")
	"\x2B\x2F\x76\x39": true, // UTF-7 ("+/v9")
	"\x2B\x2F\x76\x2B": true, // UTF-7 ("+/v+")
	"\x2B\x2F\x76\x2F": true, // UTF-7 ("+/v/")
	// {byte(0x2B), byte(0x2F), byte(0x76), byte(0x38), byte(0x3D)}: true,
	// {byte(0xF7), byte(0x64), byte(0x4C)}: true,
	// {byte(0xDD), byte(0x73), byte(0x66), byte(0x73)}: true,
	// {byte(0x0E), byte(0xFE), byte(0xFF)}: true,
	// {byte(0xFB), byte(0xEE), byte(0x28)}: true,
	// {byte(0x84), byte(0x31), byte(0x95), byte(0x33)}: true,
}
// INTERPRETERS2POSIXyNESS is a fairly exhaustive map of interpreters to whether or not the interpreter is a POSIX compatible shell.
// Keys are bare interpreter command names (as seen in shebangs), not paths.
// Newly minted interpreters can be added by stank contributors.
var INTERPRETERS2POSIXyNESS = map[string]bool{
	"sh":     true,
	"tsh":    false,
	"etsh":   false,
	"bash":   true,
	"bash4":  true,
	"bosh":   true,
	"yash":   true,
	"zsh":    true,
	"hsh":    true,
	"lksh":   false,
	"ksh":    true,
	"ksh88":  true,
	"pdksh":  true,
	"ksh93":  true,
	"mksh":   true,
	"oksh":   true,
	"rksh":   true,
	"dash":   true,
	"posh":   true,
	"ash":    true,
	"csh":    false,
	"tcsh":   false,
	"fish":   false,
	"rc":     false,
	// Non-shell interpreters.
	"python": false,
	"jython": false,
	"perl":   false,
	"perl6":  false,
	"ruby":   false,
	"jruby":  false,
	"php":    false,
	"lua":    false,
	"node":   false,
	"awk":    false,
	"gawk":   false,
	"sed":    false,
	"swift":  false,
	"tclsh":  false,
	"ion":    false,
	"elvish": false,
	"expect": false,
	"stash":  false,
}
// FullBashInterpreters note when a shell has the basic modern bash features,
// as opposed to subsets such as ash, dash, posh, ksh, zsh.
var FullBashInterpreters = map[string]bool{
	"bash":  true,
	"bash4": true,
}
// KshInterpreters note when a shell is a member of the modern ksh family
// (the classic ksh plus its pdksh/ksh93/mksh/oksh/rksh descendants).
var KshInterpreters = map[string]bool{
	"ksh":   true,
	"ksh88": true,
	"pdksh": true,
	"ksh93": true,
	"mksh":  true,
	"oksh":  true,
	"rksh":  true,
}
// SniffConfig bundles together the various options when sniffing files for POSIXyNESS.
type SniffConfig struct {
	EOLCheck bool // when true, end-of-line sequences are examined during sniffing
	CRCheck  bool // when true, carriage returns are examined during sniffing
}
// ALTINTERPRETERS collects some alternative (non-POSIX, but shell-like)
// interpreters, keyed by bare command name.
var ALTINTERPRETERS = map[string]bool{
	"osh":    true,
	"lksh":   true,
	"csh":    true,
	"tcsh":   true,
	"fish":   true,
	"ion":    true,
	"rc":     true,
	"tsh":    true,
	"etsh":   true,
	"elvish": true,
}
// ALTEXTENSIONS collects some alternative (non-POSIX, but shell-like)
// script file extensions. (Fixed comment typo: "collets".)
var ALTEXTENSIONS = map[string]bool{
	".osh":    true,
	".lksh":   true,
	".csh":    true,
	".cshrc":  true,
	".tcsh":   true,
	".tcshrc": true,
	".fish":   true,
	".fishrc": true,
	".ion":    true,
	".ionrc":  true,
	".rc":     true,
	".rcrc":   true,
	".tsh":    true,
	".etsh":   true,
	".elv":    true,
}
// ALTFILENAMES matches some alternative shell script profile filenames
// (csh login/logout hooks and the elvish rc file).
var ALTFILENAMES = map[string]bool{
	"csh.login":  true,
	"csh.logout": true,
	"rc.elv":     true,
}
// IsAltShellScript returns whether a smell represents a non-POSIX, but nonetheless similar kind of lowlevel shell script language,
// judged by its interpreter, extension, or filename appearing in the ALT* registries above.
func IsAltShellScript(smell Smell) bool {
	return ALTINTERPRETERS[smell.Interpreter] || ALTEXTENSIONS[smell.Extension] || ALTFILENAMES[smell.Filename]
}
// POSIXShCheckSyntax validates syntax for strict POSIX sh compliance by
// parsing the file with mvdan.cc/sh's LangPOSIX variant. The parse
// error (or the open error) is returned; nil means the file parsed.
func POSIXShCheckSyntax(smell Smell) error {
	parser := syntax.NewParser(syntax.Variant(syntax.LangPOSIX))
	fd, err := os.Open(smell.Path)
	if err != nil {
		return err
	}
	// Bug fix: the original never closed fd, leaking one file
	// descriptor per checked script.
	defer fd.Close()
	_, err = parser.Parse(bufio.NewReader(fd), smell.Path)
	return err
}
// UnixCheckSyntax validates syntax for the wider UNIX shell family by
// running the script's own interpreter in no-exec mode ("-n"); the
// script is parsed but not executed.
func UnixCheckSyntax(smell Smell) error {
	cmd := exec.Command(smell.Interpreter, "-n", smell.Path)
	return cmd.Run()
}
// PerlishCheckSyntax validates syntax for Perl, Ruby, and Node.js using
// their shared "-c" compile-check flag.
func PerlishCheckSyntax(smell Smell) error {
	cmd := exec.Command(smell.Interpreter, "-c", smell.Path)
	return cmd.Run()
}
// PHPCheckSyntax validates syntax for PHP via its "-l" lint flag.
func PHPCheckSyntax(smell Smell) error {
	cmd := exec.Command(smell.Interpreter, "-l", smell.Path)
	return cmd.Run()
}
// PythonCheckSyntax validates syntax for Python by byte-compiling the
// file with the py_compile module.
func PythonCheckSyntax(smell Smell) error {
	cmd := exec.Command(smell.Interpreter, "-m", "py_compile", smell.Path)
	return cmd.Run()
}
// GoCheckSyntax validates syntax for Go with "gofmt -e" (reports all
// parse errors; it does not type-check).
func GoCheckSyntax(smell Smell) error {
	cmd := exec.Command("gofmt", "-e", smell.Path)
	return cmd.Run()
}
// GNUAwkCheckSyntax validates syntax for GNU awk files via the
// interpreter's --lint mode on the program file ("-f").
func GNUAwkCheckSyntax(smell Smell) error {
	cmd := exec.Command(smell.Interpreter, "--lint", "-f", smell.Path)
	return cmd.Run()
}
// Interpreter2SyntaxValidator provides syntax validator delegates, if one is available,
// keyed by interpreter name. Interpreters without an entry have no known
// syntax-check command.
var Interpreter2SyntaxValidator = map[string]func(Smell) error{
	// POSIX sh gets the strict in-process parser.
	"generic-sh": POSIXShCheckSyntax,
	"sh":         POSIXShCheckSyntax,
	// Shells and make variants support "-n" style no-exec checks.
	"ash":    UnixCheckSyntax,
	"bash":   UnixCheckSyntax,
	"bash4":  UnixCheckSyntax,
	"dash":   UnixCheckSyntax,
	"posh":   UnixCheckSyntax,
	"elvish": UnixCheckSyntax,
	"ksh":    UnixCheckSyntax,
	"ksh88":  UnixCheckSyntax,
	"ksh93":  UnixCheckSyntax,
	"mksh":   UnixCheckSyntax,
	"oksh":   UnixCheckSyntax,
	"pdksh":  UnixCheckSyntax,
	"rksh":   UnixCheckSyntax,
	"lksh":   UnixCheckSyntax,
	"bosh":   UnixCheckSyntax,
	"osh":    UnixCheckSyntax,
	"yash":   UnixCheckSyntax,
	"zsh":    UnixCheckSyntax,
	"csh":    UnixCheckSyntax,
	"tcsh":   UnixCheckSyntax,
	"rc":     UnixCheckSyntax,
	"fish":   UnixCheckSyntax,
	"make":   UnixCheckSyntax,
	"gmake":  UnixCheckSyntax,
	"bmake":  UnixCheckSyntax,
	"pmake":  UnixCheckSyntax,
	// "-c" compile-check family.
	"perl":  PerlishCheckSyntax,
	"perl6": PerlishCheckSyntax,
	"ruby":  PerlishCheckSyntax,
	"node":  PerlishCheckSyntax,
	"iojs":  PerlishCheckSyntax,
	// Language-specific checkers.
	"php":     PHPCheckSyntax,
	"python":  PythonCheckSyntax,
	"python3": PythonCheckSyntax,
	"go":      GoCheckSyntax,
	"gawk":    GNUAwkCheckSyntax,
}
// Sniff analyzes the holistic smell of a given file path,
// returning a Smell record of key indicators tending towards either POSIX compliance or noncompliance,
// including a flag for the final "POSIXy" trace scent of the file.
//
// For performance, if the scent of one or more attributes obviously indicates POSIX or nonPOSIX,
// Sniff() may short-circuit, setting the POSIXy flag and returning a record
// with some attributes set to zero value.
//
// Polyglot and multiline shebangs are technically possible in languages that do not support native POSIX-style shebang comments ( see https://rosettacode.org/wiki/Multiline_shebang ). However, Sniff() can reliably identify only ^#!.+$ POSIX-style shebangs, and will populate the Shebang field accordingly.
//
// If an I/O problem occurs during analysis, an error value will be set.
// Otherwise, the error value will be nil.
func Sniff(pth string, config SniffConfig) (Smell, error) {
	// Attempt to short-circuit for directories
	fi, err := os.Lstat(pth)
	smell := Smell{Path: pth}
	if err != nil {
		return smell, err
	}
	mode := fi.Mode()
	if mode.IsDir() {
		smell.Directory = true
		return smell, nil
	}
	smell.Permissions = mode.Perm()
	// 0100 is the owner-execute permission bit.
	smell.OwnerExecutable = smell.Permissions&0100 != 0
	smell.Filename = path.Base(pth)
	// NOTE(review): Filename is already a base name, so Basename == Filename
	// here — confirm whether one of these fields was meant to keep more of the path.
	smell.Basename = filepath.Base(smell.Filename)
	smell.Extension = filepath.Ext(smell.Filename)
	// Attempt to short-circuit for Emacs swap files
	if strings.HasSuffix(smell.Filename, "~") {
		return smell, nil
	}
	// Extension-based verdict first; a filename-based verdict below overrides it.
	extensionPOSIXy, extensionPOSIXyOK := LOWEREXTENSIONS2POSIXyNESS[strings.ToLower(smell.Extension)]
	if extensionPOSIXyOK {
		smell.POSIXy = extensionPOSIXy
	}
	filenamePOSIXy, filenamePOSIXyOK := LOWERFILENAMES2POSIXyNESS[strings.ToLower(smell.Filename)]
	if filenamePOSIXyOK {
		smell.POSIXy = filenamePOSIXy
	}
	smell.CoreConfiguration = LOWEREXTENSIONS2CONFIG[strings.ToLower(smell.Extension)] ||
		LOWERFILENAMES2CONFIG[strings.ToLower(smell.Filename)]
	// A non-executable file with an extension (or a core configuration file)
	// is treated as a library rather than an entry-point script.
	smell.Library = (smell.CoreConfiguration || smell.Extension != "") && !smell.OwnerExecutable
	// Symlinks are not followed; analysis stops at the link itself.
	smell.Symlink = fi.Mode()&os.ModeSymlink != 0
	if smell.Symlink {
		return smell, nil
	}
	extensionInterpreter, extensionInterpreterOK := LOWEREXTENSIONS2INTERPRETER[strings.ToLower(smell.Extension)]
	if extensionInterpreterOK {
		smell.Interpreter = extensionInterpreter
	}
	fd, err := os.Open(pth)
	if err != nil {
		return smell, err
	}
	defer func() {
		err = fd.Close()
		if err != nil {
			log.Panic(err)
		}
	}()
	//
	// Check for BOMs
	//
	br := bufio.NewReader(fd)
	// BOM signatures are 2-4 bytes long; peek at most 5 bytes
	// (or the whole file, if it is smaller than that).
	maxBOMCheckLength := 5
	if fi.Size() < 5 {
		maxBOMCheckLength = int(fi.Size())
	}
	bs, err := br.Peek(maxBOMCheckLength)
	if err != nil {
		return smell, err
	}
	// Try prefixes of increasing length against the BOM table, discarding the
	// BOM bytes from the reader when one matches.
	for i := 2; i < 6 && i < maxBOMCheckLength; i++ {
		if BOMS[string(bs[:i])] {
			smell.BOM = true
			if _, err = br.Discard(i); err != nil {
				return smell, err
			}
			break
		}
	}
	LF := byte('\n')
	// Attempt to find the first occurrence of a line feed.
	// CR-ended files and binary files will be read in their entirety.
	line, err := br.ReadString(LF)
	if err != nil {
		// NOTE(review): ReadString reports io.EOF for a final line lacking a
		// newline, so a one-line file without a trailing EOL returns early
		// here, despite the comment below listing it as a candidate — confirm.
		return smell, err
	}
	// An error occurred while attempting to find the first occurrence of a line feed in the file.
	// This could mean one of several things:
	//
	// * The connection to the file was lost (network disruption, file movement, file deletion, etc.)
	// * The file is completely empty.
	// * The file is binary.
	// * The file is CR-ended.
	// * The file consists of a single line, without a line ending sequence.
	//
	// Only the cases of an empty file or single line without an ending could reasonably be considered candidates for POSIX shell scripts. The former can only be evidenced as POSIX if a POSIXy extension is present, in which case the previous analysis instructions above would have short-circuited POSIXy: true. So we can now ignore the former and only check the latter.
	//
	// Note that stank currently ignores mixed line ending styles within a file.
	//
	if strings.HasSuffix(line, "\r\n") {
		smell.LineEnding = "\r\n"
	} else if strings.HasSuffix(line, "\n") {
		smell.LineEnding = "\n"
	} else if strings.HasSuffix(line, "\r") {
		smell.LineEnding = "\r"
	}
	//
	// Read the entire script in order to assess the presence/absence of a final POSIX end of line (\n) sequence.
	//
	if config.EOLCheck && fi.Size() > 0 {
		fd2, err := os.Open(pth)
		if err != nil {
			log.Print(err)
			return smell, nil
		}
		defer func() {
			err := fd2.Close()
			if err != nil {
				log.Panic(err)
			}
		}()
		// Inspect only the final one or two bytes of the file.
		maxEOLSequenceLength := int64(2)
		if fi.Size() < 2 {
			maxEOLSequenceLength = 1
		}
		eolBuf := make([]byte, maxEOLSequenceLength)
		if _, err := fd2.ReadAt(eolBuf, fi.Size()-maxEOLSequenceLength); err != nil {
			return smell, err
		}
		// A final LF counts as a proper EOL, but a final CRLF does not.
		if eolBuf[maxEOLSequenceLength-1] == byte('\n') && (maxEOLSequenceLength < 2 || eolBuf[0] != byte('\r')) {
			b := true
			smell.FinalEOL = &b
		}
	}
	// Recognize poorly written shell scripts that feature
	// a POSIXy filename but lack a proper shebang line.
	if !strings.HasPrefix(line, "#!") && !strings.HasPrefix(line, "!#") {
		if smell.POSIXy && !extensionInterpreterOK {
			smell.Interpreter = "generic-sh"
		}
		return smell, nil
	}
	smell.Shebang = strings.TrimRight(line, "\r\n")
	// shebang minus the #! prefix.
	command := strings.TrimSpace(smell.Shebang[2:])
	// At this point, we have a script that is not obviously filenamed either a POSIX shell script file, nor obviously a nonPOSIX file. We have read the first line of the file, and determined that it is some sort of POSIX-style shebang.
	// Example commonly encountered shebang forms:
	//
	// * #!/bin/bash
	// * #!/usr/local/bin/bash
	// * #!/usr/bin/env python
	// * #!/usr/bin/env MathKernel -script
	// * #!/bin/busybox python
	// * #!someapplication
	//
	// Let's break these down.
	//
	// #!/bin/someinterpreter is the idiomatic way to shebang most POSIX shell scripts, especially those depending on very standard, established shells like bash, zsh, ksh, and so on, that are expected to be installed in /bin.
	// #!/usr/local/bin/bash is acceptable for interpreters installed in custom locations, such as macOS users using Homebrew to provide bash v4 in /usr/local/bin.
	// #!/usr/bin/env python is preferred for general purpose scripting languages like Python, Perl, Ruby, and Lua, that are installed somewhere on the system, but not necessarily in /bin on all systems. For example, rvm may place ruby in $HOME/.rvm/rubies/ruby-$RUBY_VERSION/bin. So the /usr/bin/env command prefix helps these languages interoperate with POSIX sh standards, allowing the interpreter to be used in the shebang without hardcoding any particular absolute path to the interpreter; the interpreter simply needs to be available somewhere in $PATH. When identifying the interpreter, We will need to be careful to strip out /usr/bin/env, if present.
	// #!/usr/bin/env MathKernel -script and #!/bin/bash -euo pipefail constitute shebangs with flags to be passed to the interpreters. When identifying the interpreter, We will need to be careful to strip out flags meant for the interpreter, if present.
	//
	// Finally, #!bash, #!fish, #!python, etc. are technically allowed, though some systems may balk on the interpreter being relative to $PATH rather than an absolute file path. This form is no problem for identifying the stinky interpreter for our purposes, but the stank linter may emit a warning to use the more idiomatic shebangs #!/bin/bash, #!/usr/bin/env fish, #!/usr/bin/env python, etc.
	commandParts := strings.Split(command, " ")
	// Strip /usr/bin/env, if present
	if commandParts[0] == "/usr/bin/env" {
		commandParts = commandParts[1:]
	}
	// Strip /bin/busybox, if present
	if commandParts[0] == "/bin/busybox" {
		commandParts = commandParts[1:]
	}
	interpreterPath := commandParts[0]
	// Strip out directory path, if any
	interpreterFilename := filepath.Base(interpreterPath)
	filenameInterpreter, filenameInterpreterOK := LOWERFILENAMES2INTERPRETER[strings.ToLower(interpreterFilename)]
	// Identify the interpreter, or mark as generic, unknown sh interpreter.
	// NOTE(review): filepath.Base never returns an empty string, so this
	// branch appears unreachable; the condition may have been intended to
	// test filenameInterpreterOK instead — confirm before changing.
	if interpreterFilename == "" {
		if filenameInterpreterOK {
			smell.Interpreter = filenameInterpreter
		} else if !extensionInterpreterOK {
			smell.Interpreter = "generic-sh"
		}
	} else {
		smell.Interpreter = interpreterFilename
		smell.InterpreterFlags = commandParts[1:]
	}
	smell.Bash = FullBashInterpreters[smell.Interpreter]
	smell.Ksh = KshInterpreters[smell.Interpreter]
	// Compare interpreter against common POSIX and nonPOSIX names.
	interpreterPOSIXy := INTERPRETERS2POSIXyNESS[interpreterFilename]
	if interpreterPOSIXy && (!extensionPOSIXyOK || extensionPOSIXy) && (!filenamePOSIXyOK || filenamePOSIXy) {
		smell.POSIXy = true
	} else if IsAltShellScript(smell) {
		smell.AltShellScript = true
	}
	// Optionally scan the file for any carriage return byte.
	if (smell.POSIXy || smell.AltShellScript) && config.CRCheck {
		fd3, err := os.Open(pth)
		defer func() {
			err = fd3.Close()
			if err != nil {
				log.Panic(err)
			}
		}()
		if err != nil {
			return smell, err
		}
		br2 := bufio.NewReader(fd3)
		CR := byte('\r')
		_, err = br2.ReadString(CR)
		// ReadString returns nil only when a CR was actually found before EOF.
		smell.ContainsCR = err == nil
	}
	return smell, nil
}
|
package responses
// DNSZoneResponse represents a DNS zone response.
type DNSZoneResponse struct {
	// Name is the domain name of the zone.
	Name string
	// ID is the zone's ID.
	ID string
	// InstanceID is the IBM Cloud Resource ID for the service instance where
	// the DNS zone is managed.
	InstanceID string
	// InstanceCRN is the IBM Cloud Resource CRN for the service instance where
	// the DNS zone is managed.
	InstanceCRN string
	// InstanceName is the display name of the service instance where the DNS zone
	// is managed.
	InstanceName string
	// ResourceGroupID is the resource group ID of the service instance.
	ResourceGroupID string
}
// EncryptionKeyResponse represents an encryption key response.
// It is currently an empty placeholder with no fields.
type EncryptionKeyResponse struct{}
|
package business
import "testing"
// TestStringStack_Len verifies that len reports the number of pushed elements.
func TestStringStack_Len(t *testing.T) {
	s := newStringStack()
	for _, v := range []string{"1", "20", "300"} {
		s.push(v)
	}
	if got, want := s.len(), 3; got != want {
		t.Errorf("expected:%d but got instead:%d", want, got)
	}
}
// TestStringStack_Pop verifies LIFO order and that popping an empty stack errors.
func TestStringStack_Pop(t *testing.T) {
	s := newStringStack()
	s.push("1")
	s.push("20")
	cases := []struct {
		want    string
		wantErr bool
	}{
		{"20", false},
		{"1", false},
		{"", true}, // stack exhausted
	}
	for _, c := range cases {
		got, err := s.pop()
		if c.wantErr {
			if err == nil {
				t.Error("expected error but got nil instead")
			}
		} else if err != nil {
			t.Error(err)
		}
		if c.want != got {
			t.Errorf("expected:%s but got instead:%s", c.want, got)
		}
	}
}
// TestStringStack_Top verifies that top peeks without removing, and tracks pops.
func TestStringStack_Top(t *testing.T) {
	s := newStringStack()
	s.push("1")
	s.push("20")
	if got := s.top(); got != "20" {
		t.Errorf("expected:%s but got instead:%s", "20", got)
	}
	s.pop()
	if got := s.top(); got != "1" {
		t.Errorf("expected:%s but got instead:%s", "1", got)
	}
}
|
package pubsub
import (
	"fmt"
	"strings"

	"cloud.google.com/go/pubsub"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
)
// PubSubInput holds the configuration required to build a Pub/Sub client.
type PubSubInput struct {
	// CredentialsPath is the path to a service-account credentials file.
	CredentialsPath string
	// ProjectID is the GCP project that owns the topics and subscriptions.
	ProjectID string
}

// validate returns an error listing every missing required field,
// or nil when the input is complete.
func (in PubSubInput) validate() error {
	var messages []string
	if in.CredentialsPath == "" {
		messages = append(messages, "CredentialsPath is missing")
	}
	if in.ProjectID == "" {
		messages = append(messages, "ProjectID is missing")
	}
	if len(messages) > 0 {
		// strings.Join replaces the previous hand-rolled concatenation loop,
		// preserving the same " / " separator.
		return fmt.Errorf("validation error; %s", strings.Join(messages, " / "))
	}
	return nil
}
// Message is a transport-agnostic representation of a Pub/Sub message:
// the raw payload plus its string attributes.
type Message struct {
	Data []byte
	Attributes map[string]string
}
// ClientIF abstracts publish/subscribe operations so callers can swap in a
// fake implementation for testing.
type ClientIF interface {
	// Publish sends message to the named topic and returns the server-assigned message ID.
	Publish(topic string, message *Message) (string, error)
	// Subscribe receives messages from the named subscription, passing each to
	// messageProcessor (typo "messageProccesor" fixed); a non-zero return Nacks the message.
	Subscribe(subscriptionName string, messageProcessor func(message *Message) int) error
}
// Client implements ClientIF
type Client struct {
	// client is the underlying Google Cloud Pub/Sub client.
	client *pubsub.Client
	// topics caches Topic handles by name.
	// NOTE(review): never initialized or read by the methods in this file —
	// confirm whether this field is still needed.
	topics map[string]*pubsub.Topic
}
// New validates in, builds a Google Pub/Sub client for the configured project
// using the credentials file, and returns it wrapped as a ClientIF.
func New(in PubSubInput) (ClientIF, error) {
	err := in.validate()
	if err != nil {
		return nil, err
	}
	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, in.ProjectID, option.WithCredentialsFile(in.CredentialsPath))
	if err != nil {
		// %w (instead of %v) keeps the cause unwrappable; message unchanged.
		return nil, fmt.Errorf("Could not create pubsub Client; error: %w", err)
	}
	return &Client{
		client: client,
		// Initialize the cache so future writes to topics cannot panic on a nil map.
		topics: make(map[string]*pubsub.Topic),
	}, nil
}
// Publish sends message to topicName and returns the message ID assigned by
// the server, or an error when the topic handle or the publish itself fails.
func (c *Client) Publish(topicName string, message *Message) (string, error) {
	topic := c.client.Topic(topicName)
	if topic == nil {
		return "", fmt.Errorf("Could not get topic; topic: %s", topicName)
	}
	ctx := context.Background()
	res := topic.Publish(ctx, &pubsub.Message{Data: message.Data, Attributes: message.Attributes})
	// Get blocks until the server acknowledges the message.
	id, err := res.Get(ctx)
	if err != nil {
		return "", err
	}
	return id, nil
}
// Subscribe receives messages from subscriptionName until Receive returns,
// Acking a message when messageProcessor returns 0 and Nacking it otherwise.
func (c *Client) Subscribe(subscriptionName string, messageProcessor func(message *Message) int) error {
	ctx := context.Background()
	handler := func(ctx context.Context, msg *pubsub.Message) {
		m := &Message{Data: msg.Data, Attributes: msg.Attributes}
		if messageProcessor(m) == 0 {
			msg.Ack()
		} else {
			msg.Nack()
		}
	}
	return c.client.Subscription(subscriptionName).Receive(ctx, handler)
}
|
package core
import "strings"
// Filter is an interface to filter SQL
type Filter interface {
	// Do rewrites sql for the given dialect and (possibly nil) table.
	Do(sql string, dialect Dialect, table *Table) string
}
// QuoteFilter filter SQL replace ` to database's own quote character
type QuoteFilter struct {
}

// Do replaces every backtick in sql with the dialect's own quote string.
func (s *QuoteFilter) Do(sql string, dialect Dialect, table *Table) string {
	return strings.Replace(sql, "`", dialect.QuoteStr(), -1)
}
// IdFilter filter SQL replace (id) to primary key column name
type IdFilter struct {
}
// Quoter wraps a Dialect to quote identifiers with its quote string.
type Quoter struct {
	dialect Dialect
}

// NewQuoter returns a Quoter bound to the given dialect.
func NewQuoter(dialect Dialect) *Quoter {
	return &Quoter{dialect}
}

// Quote surrounds content with the dialect's quote string on both sides.
func (q *Quoter) Quote(content string) string {
	return q.dialect.QuoteStr() + content + q.dialect.QuoteStr()
}
// Do rewrites the (id) placeholder — backtick-quoted, dialect-quoted, or bare —
// to the table's single primary-key column. SQL for tables with zero or
// composite primary keys is returned unchanged.
func (i *IdFilter) Do(sql string, dialect Dialect, table *Table) string {
	if table == nil || len(table.PrimaryKeys) != 1 {
		return sql
	}
	quoter := NewQuoter(dialect)
	pk := quoter.Quote(table.PrimaryKeys[0])
	sql = strings.Replace(sql, "`(id)`", pk, -1)
	sql = strings.Replace(sql, quoter.Quote("(id)"), pk, -1)
	return strings.Replace(sql, "(id)", pk, -1)
}
|
package test
import (
. "exchange_websocket/okex_websocket"
"fmt"
"testing"
)
// TestSymbol constructs an OKEx symbol set and dumps it to stdout for inspection.
func TestSymbol(t *testing.T) {
	symbols := NewOkexSymbol()
	fmt.Println(symbols)
}
|
package repository
import (
"fmt"
"github.com/jmoiron/sqlx"
"github.com/rs/zerolog/log"
"sitemap/models/entity"
"time"
)
// NewSQLdbResumeRepo wraps an open sqlx connection in a DbResumeRepo.
func NewSQLdbResumeRepo(Conn *sqlx.DB) *DbResumeRepo {
	repo := &DbResumeRepo{}
	repo.Conn = Conn
	return repo
}
// DbResumeRepo provides resume queries backed by a SQL database.
type DbResumeRepo struct {
	Conn *sqlx.DB
}
// Count returns the total number of rows in the RESUMES table.
func (l *DbResumeRepo) Count() (int, error) {
	t := time.Now()
	query := "SELECT COUNT(1) FROM RESUMES"
	var count int
	err := l.Conn.Get(&count, query)
	// Log the query duration regardless of the outcome.
	// time.Since replaces the non-idiomatic time.Now().Sub(t).
	log.Debug().Msg(fmt.Sprintf("query: %s , %d ms", query, time.Since(t).Milliseconds()))
	if err != nil {
		return 0, err
	}
	return count, nil
}
// ObjectsForSitemap returns one page of resume (id, updated_at) records for
// sitemap generation. page is 1-based; limit is the page size.
func (l *DbResumeRepo) ObjectsForSitemap(page int, limit int) (*[]entity.ResQuery, error) {
	t := time.Now()
	offset := (page - 1) * limit
	// limit/offset are ints formatted with %d, so no SQL-injection risk here.
	query := fmt.Sprintf("select id, updated_at from resumes limit %d OFFSET %d", limit, offset)
	result := &[]entity.ResQuery{}
	err := l.Conn.Select(result, query)
	// time.Since replaces the non-idiomatic time.Now().Sub(t).
	log.Debug().Msg(fmt.Sprintf("query: %s , %d ms", query, time.Since(t).Milliseconds()))
	if err != nil {
		return nil, err
	}
	return result, nil
}
|
package handler
import (
"fmt"
"net/http"
"time"
"github.com/chonla/oddsvr-api/httpclient"
"github.com/labstack/echo"
)
// Gateway completes the Strava OAuth flow: it exchanges the callback "code"
// query parameter for a token, persists the token, issues a JWT, sets the
// "token" and "me" cookies (168h = 7 days), and redirects the browser to the
// front-end VR page. Any failure is returned as a 500 with the error text.
func (h *Handler) Gateway(c echo.Context) error {
	code := c.QueryParam("code")
	token, e := h.strava.ExchangeToken(code)
	if e != nil {
		return c.String(http.StatusInternalServerError, fmt.Sprint(e))
	}
	e = h.vr.SaveToken(token)
	if e != nil {
		return c.String(http.StatusInternalServerError, fmt.Sprint(e))
	}
	jwtToken, e := h.jwt.Generate(token)
	if e != nil {
		return c.String(http.StatusInternalServerError, fmt.Sprint(e))
	}
	// Session cookie carrying the signed JWT.
	jwtAge, _ := time.ParseDuration("168h")
	jwtCookie := httpclient.NewCookie("token", jwtToken, jwtAge, "/")
	c.SetCookie(jwtCookie)
	// Convenience cookie exposing the athlete ID to the front end.
	idAge, _ := time.ParseDuration("168h")
	idCookie := httpclient.NewCookie("me", fmt.Sprintf("%d", token.ID), idAge, "/")
	c.SetCookie(idCookie)
	return c.Redirect(http.StatusTemporaryRedirect, fmt.Sprintf("%s/vr", h.conf.FrontBaseURL))
}
|
package noteset
// Noteset groups a weighted set of notes rooted at a base pitch with an
// associated note pattern.
type Noteset struct {
	id           string
	root         int
	noteWeights  []float64
	patternId    string
	patternNotes []int
}

// New builds a Noteset from its component values.
func New(id string,
	root int,
	noteWeights []float64,
	patternId string,
	patternNotes []int) *Noteset {
	ns := &Noteset{}
	ns.id = id
	ns.root = root
	ns.noteWeights = noteWeights
	ns.patternId = patternId
	ns.patternNotes = patternNotes
	return ns
}
|
package error
// NewAPIError creates a new application error object with the provided message and errors
func NewAPIError(message string, apiErrorBodyList ...*APIErrorBody) *APIError {
	e := &APIError{Message: message}
	e.Body = apiErrorBodyList
	return e
}

// APIError implements error
type APIError struct {
	Message string
	Body    []*APIErrorBody
}

// Error returns the top-level message, satisfying the error interface.
func (ae *APIError) Error() string {
	return ae.Message
}

// APIErrorBody represents json structure of error in API response
type APIErrorBody struct {
	Message string `json:"message,omitempty"`
	Target  string `json:"target,omitempty"`
}

// NewAPIErrorBody returns a new instance of APIErrorBody with the
// message and target.
func NewAPIErrorBody(message string, target string) *APIErrorBody {
	return &APIErrorBody{Message: message, Target: target}
}
|
package main
import (
"fmt"
"log"
"net/http"
"github.com/graphql-go/graphql"
"github.com/graphql-go/handler"
"github.com/svey/skill-tree/gql"
"github.com/svey/skill-tree/postgres"
)
// main builds the GraphQL schema (connecting to Postgres inside
// initializeAPI) and serves it at /graphql with the GraphiQL UI enabled.
func main() {
	schema := initializeAPI()
	h := handler.New(&handler.Config{
		Schema:   &schema,
		Pretty:   true,
		GraphiQL: true,
	})
	http.Handle("/graphql", h)
	// ListenAndServe only returns on failure; previously its error was
	// silently discarded.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
// initializeAPI connects to Postgres, builds the root GraphQL query, and
// returns the compiled schema. Setup errors are fatal: the server cannot
// usefully run with a partially-built schema.
func initializeAPI() graphql.Schema {
	// Create a new connection to our pg database
	db, err := postgres.New(
		postgres.ConnString("localhost", 5432, "postgres", "go_graphql_db"),
	)
	if err != nil {
		log.Fatal(err)
	}
	// Create our root query for graphql
	rootQuery := gql.NewRoot(db)
	// Create a new graphql schema, passing in the root query
	sc, err := graphql.NewSchema(
		graphql.SchemaConfig{Query: rootQuery.Query},
	)
	if err != nil {
		fmt.Println("Error creating schema: ", err)
		// Previously execution continued and returned a zero-value schema;
		// abort instead so we never serve a broken schema.
		log.Fatal(err)
	}
	return sc
}
|
package Person
import "Lab1/internal/pgk/model_of_person"
// ForRepository describes CRUD storage operations for persons.
// NOTE(review): the bare int results look like status codes (presumably
// HTTP-style) — confirm their meaning against the implementations.
type ForRepository interface {
	Get(*model_of_person.PersonRequest) (uint, int)
	Read(uint) (*model_of_person.PersonResponse, int)
	ReadAll() ([]*model_of_person.PersonResponse, int)
	Update(*model_of_person.PersonRequest) int
	Delete(uint) int
}
|
package main
import "fmt"
/*
Longest increasing subsequence: maxV[k] tracks the smallest possible tail
value among increasing subsequences of length k.
*/
// main reads n, then n integers, and prints the LIS length.
func main() {
	var n int
	fmt.Scan(&n)
	nums := make([]int, n)
	for i := range nums {
		fmt.Scan(&nums[i])
	}
	fmt.Println(lis(nums))
}
// lis returns the length of the longest strictly increasing subsequence of l.
// maxV[k] holds the smallest possible tail value of an increasing subsequence
// of length k found so far; lis[i] is the LIS length ending at l[i].
// NOTE(review): correctness of the maxV[0] sentinel depends on min(l)
// returning the true minimum element of l.
func lis(l[]int) (max int) {
	if len(l) == 0 {
		return
	}
	maxV := make([]int, len(l) + 1)
	maxV[1] = l[0]
	maxV[0] = min(l) - 1 // sentinel: strictly below every element
	lis := make([]int ,len(l))
	for i:=0; i<len(lis); i++ {
		lis[i] = 1
	}
	maxL := 1 // length of the longest subsequence found so far; at least 1
	for i:=0; i<len(l); i++ {
		j:=maxL
		// Find the largest j whose best tail is still below l[i].
		for ; j>=0; j-- {
			if l[i]>maxV[j] {
				lis[i] = j+1
				break // j descends, so the first match is the largest; stop here
			}
		}
		if lis[i] > maxL {
			// l[i] extends the longest subsequence by one.
			maxL = lis[i]
			maxV[lis[i]] = l[i]
		} else if maxV[j] < l[i] && l[i] < maxV[j+1] {
			// l[i] is a better (smaller) tail for length j+1.
			maxV[j+1] = l[i]
		}
	}
	max = maxL
	return
}
// min returns the smallest value in l. l must be non-empty.
func min(l []int) (min int) {
	min = l[0]
	// Bug fix: the previous version compared the range INDEX i against min
	// instead of the element l[i], so it effectively returned min(l[0], 0)
	// and broke the lis() sentinel for inputs with values below -1.
	for _, v := range l {
		if v < min {
			min = v
		}
	}
	return
}
|
package fondidocit
import (
"context"
"fmt"
"net/http"
"strings"
"github.com/PuerkitoBio/goquery"
"github.com/mmbros/quote/internal/quotegetter"
"github.com/mmbros/quote/internal/quotegetter/scrapers"
)
// scraper gets stock/fund prices from fondidoc.it
type scraper struct {
	// name identifies this quote source.
	name string
	// client is the HTTP client used for all requests.
	client *http.Client
}
// NewQuoteGetter creates a new QuoteGetter
// that gets stock/fund prices from fondidoc.it
func NewQuoteGetter(name string, client *http.Client) quotegetter.QuoteGetter {
	return scrapers.NewQuoteGetter(&scraper{name, client})
}
// Source returns the name of the scraper, identifying this quote source.
func (s *scraper) Source() string {
	return s.name
}
// Client returns the http.Client of the scraper
func (s *scraper) Client() *http.Client {
	return s.client
}
// GetSearch creates the http.Request to get the search page for the specified `isin`.
// It returns the http.Response or nil if the scraper can build the url of the info page
// directly from the `isin`.
// The response document will be parsed by ParseSearch to extract the info url.
func (s *scraper) GetSearch(ctx context.Context, isin string) (*http.Request, error) {
	// Bug fix: the URL previously ended with a stray "'" after pir=0, which was
	// sent to the server as part of the query string.
	url := fmt.Sprintf("https://www.fondidoc.it/Ricerca/Res?txt=%s&tipi=&societa=&pag=0&sort=&sortDir=&fldis=&nview=20&viewMode=anls&filters=&pir=0", isin)
	return http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
}
// ParseSearch parse the html of the search page to find the URL of the info page.
// `doc` can be nil if the url of the info page can be build directly from the `isin`.
// It returns the url of the info page, or scrapers.ErrNoResultFound when no
// result row matches the requested isin.
func (s *scraper) ParseSearch(doc *goquery.Document, isin string) (string, error) {
	/*
	   <tr>
	     <td>
	         <div style="position:relative;">
	             <button class="btn btn-default btn-xs" data-toggle="dropdown"><i class="glyphicon glyphicon-plus"></i></button>
	             <ul class="dropdown-menu">
	                 <li><a href="/Confronto/Index/PIMDIEHI">Aggiungi a confronto</a></li>
	             </ul>
	         </div>
	     </td>
	     <td>
	         <a fidacode="PIMDIEHI" purl="IE00B4TG9K96_pimco-diversified-income-e-dis-eur-hdg" href="/d/Ana/PIMDIEHI/IE00B4TG9K96_pimco-diversified-income-e-dis-eur-hdg">
	             PIMCO Diversified Income E Dis EUR Hdg
	         </a>
	     </td>
	     <td>
	         IE00B4TG9K96
	     </td>
	   </tr>
	*/
	var url string
	var found bool
	// Walk each result row: cell 1 holds the link to the info page,
	// cell 2 the ISIN. Stop at the first row whose ISIN matches.
	doc.Find("tr").EachWithBreak(func(iTR int, sTR *goquery.Selection) bool {
		sTR.Find("td").EachWithBreak(func(i int, s *goquery.Selection) bool {
			switch i {
			case 1:
				url = s.Find("a").AttrOr("href", "")
			case 2:
				theIsin := strings.TrimSpace(s.Text())
				found = (theIsin == isin) && (url != "")
				// Stop scanning this row's cells after the ISIN cell.
				return false
			}
			return true
		})
		// Keep iterating rows until a match is found.
		return !found
	})
	if !found {
		return "", scrapers.ErrNoResultFound
	}
	return url, nil
}
// GetInfo executes the http GET of the `url` of info page for the specified `isin`.
// `url` and `isin` must be defined.
// The response document will be parsed by ParseInfo to extract the info url.
// Note: isin is not used here; the url produced by ParseSearch is fetched as-is.
func (s *scraper) GetInfo(ctx context.Context, isin, url string) (*http.Request, error) {
	return http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
}
// ParseInfo extracts the quote fields (ISIN, currency, date, price) from the
// fondidoc.it detail page, whose expected layout is shown below. Dates use
// the dd/mm/yyyy layout. It returns scrapers.ErrNoResultFound when neither a
// date nor a price could be extracted.
func (s *scraper) ParseInfo(doc *goquery.Document, isin string) (*scrapers.ParseInfoResult, error) {
	/*
	   <div class="page-header">
	      <a href="/Confronto/Index/PIMDIEHI" style="float:right;margin-top:10px;" class="btn btn-default btn-sm btn-primary" ><i class="glyphicon glyphicon-plus"></i> Confronta</a>
	      <h1>PIMCO Diversified Income E Dis EUR Hdg <small>IE00B4TG9K96</small></h1>
	   </div>

	   div.dett-cont dd
	   [0] Giornaliero
	   [1] Euro
	   [2] 22/09/2020
	   [3] 11,400
	   [4] -0,18%
	*/
	r := new(scrapers.ParseInfoResult)
	r.DateLayout = "02/01/2006"
	r.IsinStr = doc.Find("div.page-header small").Text()
	// The <dd> cells are positional: [1]=currency, [2]=date, [3]=price.
	doc.Find("div.dett-cont dd").EachWithBreak(func(i int, s *goquery.Selection) bool {
		switch i {
		case 1:
			r.CurrencyStr = s.Text()
		case 2:
			r.DateStr = s.Text()
		case 3:
			r.PriceStr = s.Text()
			// Price is the last field of interest; stop iterating.
			return false
		}
		return true
	})
	if r.DateStr == "" && r.PriceStr == "" {
		return r, scrapers.ErrNoResultFound
	}
	return r, nil
}
|
package byte_order
import (
"bytes"
"encoding/binary"
)
//func Int64ToBytes(num int64) []byte {
// buf := make([]byte, 8)
// binary.PutVarint(buf, num)
// return buf
//}
//
//func BytesToInt64(bytes []byte) int64 {
// ans, _ := binary.Varint(bytes)
// return ans
//}
//func Int64ToBytes(i int64) []byte {
// var buf = make([]byte, 8)
// binary.BigEndian.PutUint64(buf, uint64(i))
// return buf
//}
//func BytesToInt64(buf []byte) int64 {
// return int64(binary.BigEndian.Uint64(buf))
//}
// Big-endian byte order.
// Int64ToBytes encodes i as 8 big-endian bytes,
// e.g. 1024 -> [0 0 0 0 0 0 4 0] (hex 0x400, binary 100 0000 0000).
func Int64ToBytes(i int64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(i))
	return b
}
// BytesToInt64 decodes 8 big-endian bytes into an int64,
// e.g. [0 0 0 0 0 0 4 0] (hex 0x400) -> 1024.
func BytesToInt64(b []byte) int64 {
	_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
	var v uint64
	for _, c := range b[:8] {
		v = v<<8 | uint64(c)
	}
	return int64(v)
}
// Int64ToBytes2 encodes i as 8 big-endian bytes.
// Uses binary.BigEndian.PutUint64 directly instead of binary.Write, which
// goes through reflection and an intermediate bytes.Buffer.
func Int64ToBytes2(i int64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(i))
	return buf
}
// BytesToInt642 decodes 8 big-endian bytes into an int64 via binary.Read.
func BytesToInt642(b []byte) int64 {
	var x int64
	// Error deliberately ignored, as before: a short input leaves x at 0.
	_ = binary.Read(bytes.NewBuffer(b), binary.BigEndian, &x)
	return x
}
// Little-endian byte order.
// LInt64ToBytes encodes i as 8 little-endian bytes.
// Uses binary.LittleEndian.PutUint64 directly instead of the
// reflection-based binary.Write plus intermediate bytes.Buffer.
func LInt64ToBytes(i int64) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, uint64(i))
	return buf
}
// LBytesToInt642 decodes 8 little-endian bytes into an int64 via binary.Read.
func LBytesToInt642(b []byte) int64 {
	var x int64
	// Error deliberately ignored, as before: a short input leaves x at 0.
	_ = binary.Read(bytes.NewBuffer(b), binary.LittleEndian, &x)
	return x
}
|
package tools
import (
"io/ioutil"
"os"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common/hexutil"
)
// LoadContract will open and decode a contracts
// Application Blockchain Interface and Binary files.
func LoadContract(abiPath, binPath string) (abi.ABI, []byte, error) {
	// load ABI
	abiFile, err := os.Open(abiPath)
	if err != nil {
		return abi.ABI{}, nil, err
	}
	// Bug fix: close the ABI file; the original leaked this handle.
	defer abiFile.Close()
	abiObject, err := abi.JSON(abiFile)
	if err != nil {
		return abiObject, nil, err
	}
	// load and decode bin
	binRaw, err := ioutil.ReadFile(binPath)
	if err != nil {
		return abiObject, nil, err
	}
	// NOTE(review): assumes the .bin file holds only hex characters with no
	// trailing newline; hexutil.Decode fails otherwise — confirm file format.
	binData, err := hexutil.Decode("0x" + string(binRaw))
	return abiObject, binData, err
}
|
package lmdb
import (
"encoding/binary"
"encoding/hex"
"os"
"github.com/Secured-Finance/dione/blockchain/database"
types2 "github.com/Secured-Finance/dione/blockchain/types"
"github.com/fxamacker/cbor/v2"
"github.com/ledgerwatch/lmdb-go/lmdb"
)
const (
	// DefaultBlockDataPrefix namespaces keys that hold serialized transaction lists.
	DefaultBlockDataPrefix = "blockdata_"
	// DefaultBlockHeaderPrefix namespaces keys that hold serialized block headers.
	DefaultBlockHeaderPrefix = "header_"
	// DefaultMetadataIndexName is the index used for chain-level metadata.
	DefaultMetadataIndexName = "metadata"
	// LatestBlockHeightKey is the metadata key for the current chain tip height.
	LatestBlockHeightKey = "latest_block_height"
)
// Database is an LMDB-backed block store with auxiliary indexes for
// chain metadata and height -> block-hash lookups.
type Database struct {
	dbEnv *lmdb.Env
	db    lmdb.DBI
	// metadataIndex stores chain-level metadata such as the latest height.
	metadataIndex *Index
	// heightIndex maps a little-endian uint64 height to a block hash.
	heightIndex *Index
}
// NewDatabase opens (creating directories as needed) an LMDB environment at
// path, opens the single "blocks" database inside it, and wires up the
// metadata and height indexes. The environment map is sized at 100 GB.
//
// NOTE(review): if a step after env creation fails, the environment is not
// closed before returning — confirm whether env.Close() belongs on the
// error paths.
func NewDatabase(path string) (*Database, error) {
	db := &Database{}

	// configure lmdb env
	env, err := lmdb.NewEnv()
	if err != nil {
		return nil, err
	}

	err = env.SetMaxDBs(1)
	if err != nil {
		return nil, err
	}
	err = env.SetMapSize(100 * 1024 * 1024 * 1024) // 100 GB
	if err != nil {
		return nil, err
	}

	err = os.MkdirAll(path, 0755)
	if err != nil {
		return nil, err
	}
	err = env.Open(path, 0, 0755)
	if err != nil {
		return nil, err
	}
	db.dbEnv = env

	var dbi lmdb.DBI
	err = env.Update(func(txn *lmdb.Txn) error {
		dbi, err = txn.OpenDBI("blocks", lmdb.Create)
		return err
	})
	if err != nil {
		return nil, err
	}
	db.db = dbi

	// create index instances
	metadataIndex := NewIndex(DefaultMetadataIndexName, env, dbi)
	heightIndex := NewIndex("height", env, dbi)
	db.metadataIndex = metadataIndex
	db.heightIndex = heightIndex

	return db, nil
}
// StoreBlock persists block under two keys derived from its header hash —
// one for the CBOR-encoded transaction payload and one for the header —
// then records the height -> hash mapping in the height index.
//
// NOTE(review): the height-index update happens outside the LMDB transaction
// that wrote the block, so a crash in between can leave the index out of
// sync with the store — confirm this is acceptable.
func (d *Database) StoreBlock(block *types2.Block) error {
	err := d.dbEnv.Update(func(txn *lmdb.Txn) error {
		data, err := cbor.Marshal(block.Data)
		if err != nil {
			return err
		}
		headerData, err := cbor.Marshal(block.Header)
		if err != nil {
			return err
		}
		blockHash := hex.EncodeToString(block.Header.Hash)
		err = txn.Put(d.db, []byte(DefaultBlockDataPrefix+blockHash), data, 0)
		if err != nil {
			return err
		}
		err = txn.Put(d.db, []byte(DefaultBlockHeaderPrefix+blockHash), headerData, 0) // store header separately for easy fetching
		return err
	})
	if err != nil {
		return err
	}

	// update index "height -> block hash"
	heightBytes := make([]byte, 8)
	binary.LittleEndian.PutUint64(heightBytes, block.Header.Height)
	err = d.heightIndex.PutBytes(heightBytes, block.Header.Hash)
	if err != nil {
		return err
	}

	return nil
}
// HasBlock reports whether a block header with the given hash is stored.
func (d *Database) HasBlock(blockHash []byte) (bool, error) {
	exists := false
	err := d.dbEnv.View(func(txn *lmdb.Txn) error {
		// Probing for the header is enough to establish presence.
		key := DefaultBlockHeaderPrefix + hex.EncodeToString(blockHash)
		if _, err := txn.Get(d.db, []byte(key)); err != nil {
			if lmdb.IsNotFound(err) {
				return nil
			}
			return err
		}
		exists = true
		return nil
	})
	if err != nil {
		return false, err
	}
	return exists, nil
}
// FetchBlockData loads and CBOR-decodes the transaction list stored for
// blockHash. Returns database.ErrBlockNotFound when no payload exists for
// that hash.
func (d *Database) FetchBlockData(blockHash []byte) ([]*types2.Transaction, error) {
	var data []*types2.Transaction

	err := d.dbEnv.View(func(txn *lmdb.Txn) error {
		h := hex.EncodeToString(blockHash)
		blockData, err := txn.Get(d.db, []byte(DefaultBlockDataPrefix+h))
		if err != nil {
			if lmdb.IsNotFound(err) {
				return database.ErrBlockNotFound
			}
			return err
		}
		err = cbor.Unmarshal(blockData, &data)
		return err
	})
	if err != nil {
		return nil, err
	}

	return data, nil
}
// FetchBlockHeader loads and CBOR-decodes the header stored for blockHash.
// Returns database.ErrBlockNotFound when no header exists for that hash.
func (d *Database) FetchBlockHeader(blockHash []byte) (*types2.BlockHeader, error) {
	var blockHeader types2.BlockHeader

	err := d.dbEnv.View(func(txn *lmdb.Txn) error {
		h := hex.EncodeToString(blockHash)
		data, err := txn.Get(d.db, []byte(DefaultBlockHeaderPrefix+h))
		if err != nil {
			if lmdb.IsNotFound(err) {
				return database.ErrBlockNotFound
			}
			return err
		}
		err = cbor.Unmarshal(data, &blockHeader)
		return err
	})
	if err != nil {
		return nil, err
	}

	return &blockHeader, nil
}
// FetchBlock assembles a full block (header + transactions) for blockHash.
func (d *Database) FetchBlock(blockHash []byte) (*types2.Block, error) {
	header, err := d.FetchBlockHeader(blockHash)
	if err != nil {
		return nil, err
	}
	data, err := d.FetchBlockData(blockHash)
	if err != nil {
		return nil, err
	}
	return &types2.Block{Header: header, Data: data}, nil
}
// FetchBlockByHeight resolves height to a block hash via the height index
// and returns the corresponding block. Returns database.ErrBlockNotFound
// when the height is not indexed.
func (d *Database) FetchBlockByHeight(height uint64) (*types2.Block, error) {
	var heightBytes = make([]byte, 8)
	binary.LittleEndian.PutUint64(heightBytes, height)
	blockHash, err := d.heightIndex.GetBytes(heightBytes)
	if err != nil {
		if err == ErrIndexKeyNotFound {
			return nil, database.ErrBlockNotFound
		}
		// Bug fix: other index errors previously fell through and the lookup
		// proceeded with a nil hash; propagate them instead.
		return nil, err
	}
	block, err := d.FetchBlock(blockHash)
	if err != nil {
		return nil, err
	}
	return block, nil
}
// FetchBlockHeaderByHeight resolves height to a block hash via the height
// index and returns the corresponding header. Returns
// database.ErrBlockNotFound when the height is not indexed.
func (d *Database) FetchBlockHeaderByHeight(height uint64) (*types2.BlockHeader, error) {
	var heightBytes = make([]byte, 8)
	binary.LittleEndian.PutUint64(heightBytes, height)
	blockHash, err := d.heightIndex.GetBytes(heightBytes)
	if err != nil {
		if err == ErrIndexKeyNotFound {
			return nil, database.ErrBlockNotFound
		}
		// Bug fix: other index errors previously fell through and the lookup
		// proceeded with a nil hash; propagate them instead.
		return nil, err
	}
	blockHeader, err := d.FetchBlockHeader(blockHash)
	if err != nil {
		return nil, err
	}
	return blockHeader, nil
}
// GetLatestBlockHeight returns the stored chain tip height, or
// database.ErrLatestHeightNil when it has never been set.
func (d *Database) GetLatestBlockHeight() (uint64, error) {
	height, err := d.metadataIndex.GetUint64([]byte(LatestBlockHeightKey))
	switch {
	case err == ErrIndexKeyNotFound:
		return 0, database.ErrLatestHeightNil
	case err != nil:
		return 0, err
	}
	return height, nil
}
// SetLatestBlockHeight records height as the current chain tip in the
// metadata index.
func (d *Database) SetLatestBlockHeight(height uint64) error {
	return d.metadataIndex.PutUint64([]byte(LatestBlockHeightKey), height)
}
|
/*
Given a string containing digits from 2-9 inclusive, return all possible letter combinations that the number could represent.
A mapping of digit to letters (just like on the telephone buttons) is given below. Note that 1 does not map to any letters.
Note:
Although the above answer is in lexicographical order, your answer could be in any order you want.
*/
package main
import (
"fmt"
"sort"
"strings"
)
// main exercises lcb with a few inputs, including strings containing
// characters that map to no letters.
func main() {
	outputs := []interface{}{
		lcb("23"),
		lcb("111-5555"),
		len(lcb("111-5555")),
		len(lcb("111-2424")),
		len(lcb(" 123456789 ")),
		len(lcb("abc")),
		lcb(""),
	}
	for _, out := range outputs {
		fmt.Println(out)
	}
}
func lcb(d string) []string {
p := &phonemap{}
p.init()
p.gen(d, "")
sort.Strings(p.list)
return p.list
}
type phonemap struct {
tab []string
list []string
}
func (p *phonemap) init() {
p.tab = []string{
"abc", "def", "ghi", "jkl", "mno", "pqrs", "tuv", "wxyz",
}
p.list = p.list[:0]
}
func (p *phonemap) gen(d, c string) {
i := strings.IndexAny(d, "23456789")
if i < 0 {
if c != "" {
p.list = append(p.list, c)
}
return
}
l := p.tab[d[i]-'2']
for _, r := range l {
p.gen(d[i+1:], c+string(r))
}
}
|
package appqos
// AppQoS API Calls + Marshalling
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"k8s.io/apimachinery/pkg/api/errors"
"net/http"
"strconv"
)
const (
	poolsEndpoint         = "/pools"
	appsEndpoint          = "/apps"
	powerProfilesEndpoint = "/power_profiles"
	// SECURITY(review): credentials are hardcoded in source and sent as HTTP
	// basic auth — confirm whether these should come from configuration/secrets.
	username = "admin"
	passwd   = "password"
)
// GetPools GETs /pools from the AppQoS API at address and returns every pool.
func (ac *AppQoSClient) GetPools(address string) ([]Pool, error) {
	httpString := fmt.Sprintf("%s%s", address, poolsEndpoint)
	req, err := http.NewRequest("GET", httpString, nil)
	if err != nil {
		return nil, err
	}
	req.SetBasicAuth(username, passwd)
	resp, err := ac.client.Do(req)
	if err != nil {
		return nil, err
	}
	// Bug fix: close the body on every path; previously it leaked whenever
	// reading or unmarshalling failed.
	defer resp.Body.Close()

	receivedJSON, err := ioutil.ReadAll(resp.Body) //This reads raw request body
	if err != nil {
		return nil, err
	}

	allPools := make([]Pool, 0)
	// receivedJSON is already a []byte; the old []byte(...) conversion was redundant.
	err = json.Unmarshal(receivedJSON, &allPools)
	if err != nil {
		return nil, err
	}
	return allPools, nil
}
// GetPool GETs /pools/{id} from the AppQoS API at address and returns that pool.
func (ac *AppQoSClient) GetPool(address string, id int) (*Pool, error) {
	httpString := fmt.Sprintf("%s%s%s%s", address, poolsEndpoint, "/", strconv.Itoa(id))
	pool := &Pool{}
	req, err := http.NewRequest("GET", httpString, nil)
	if err != nil {
		return pool, err
	}
	req.SetBasicAuth(username, passwd)
	resp, err := ac.client.Do(req)
	if err != nil {
		return pool, err
	}
	// Bug fix: close the body on every path; previously it leaked whenever
	// reading or unmarshalling failed.
	defer resp.Body.Close()

	receivedJSON, err := ioutil.ReadAll(resp.Body) //This reads raw request body
	if err != nil {
		return pool, err
	}

	// receivedJSON is already a []byte; the old []byte(...) conversion was redundant.
	err = json.Unmarshal(receivedJSON, pool)
	if err != nil {
		return pool, err
	}
	return pool, nil
}
// PostPool creates a new pool via the AppQoS API (POST /pools). It returns a
// human-readable status string alongside any error.
func (ac *AppQoSClient) PostPool(pool *Pool, address string) (string, error) {
	postFailedErr := errors.NewServiceUnavailable("Response status code error")
	payloadBytes, err := json.Marshal(pool)
	if err != nil {
		return "Failed to marshal payload data", err
	}
	body := bytes.NewReader(payloadBytes)
	httpString := fmt.Sprintf("%s%s", address, poolsEndpoint)
	req, err := http.NewRequest("POST", httpString, body)
	if err != nil {
		return "Failed to create new http post request", err
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth(username, passwd)
	resp, err := ac.client.Do(req)
	if err != nil {
		// Message fix: this is a request failure, not a header failure.
		return "Failed to perform http post request", err
	}
	// Deferred before the status check: the original deferred Close after the
	// early non-201 return, leaking the body on error responses.
	defer resp.Body.Close()
	buf := new(bytes.Buffer)
	buf.ReadFrom(resp.Body)
	respStr := buf.String()
	if resp.StatusCode != 201 {
		errStr := fmt.Sprintf("%s%v", "Fail: ", respStr)
		return errStr, postFailedErr
	}
	successStr := fmt.Sprintf("%s%v", "Success: ", resp.StatusCode)
	return successStr, nil
}
// PutPool updates an existing pool via the AppQoS API (PUT /pools/{id}). It
// returns a human-readable status string alongside any error.
func (ac *AppQoSClient) PutPool(pool *Pool, address string, poolID int) (string, error) {
	putFailedErr := errors.NewServiceUnavailable("Response status code error")
	payloadBytes, err := json.Marshal(pool)
	if err != nil {
		return "Failed to marshal payload data", err
	}
	body := bytes.NewReader(payloadBytes)
	httpString := fmt.Sprintf("%s%s%s%s", address, poolsEndpoint, "/", strconv.Itoa(poolID))
	req, err := http.NewRequest("PUT", httpString, body)
	if err != nil {
		// Message fix: this is a PUT, not a PATCH.
		return "Failed to create new http put request", err
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth(username, passwd)
	resp, err := ac.client.Do(req)
	if err != nil {
		// Message fix: this is a request failure, not a header failure.
		return "Failed to perform http put request", err
	}
	// Deferred before the status check so the body is closed on the non-200
	// path as well (the original leaked it there).
	defer resp.Body.Close()
	buf := new(bytes.Buffer)
	buf.ReadFrom(resp.Body)
	respStr := buf.String()
	if resp.StatusCode != 200 {
		errStr := fmt.Sprintf("%s%v", "Fail: ", respStr)
		return errStr, putFailedErr
	}
	successStr := fmt.Sprintf("%s%v", "Success: ", resp.StatusCode)
	return successStr, nil
}
// DeletePool removes a pool via the AppQoS API (DELETE /pools/{id}).
func (ac *AppQoSClient) DeletePool(address string, poolID int) error {
	httpString := fmt.Sprintf("%s%s%s%s", address, poolsEndpoint, "/", strconv.Itoa(poolID))
	req, err := http.NewRequest("DELETE", httpString, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth(username, passwd)
	resp, err := ac.client.Do(req)
	if err != nil {
		return err
	}
	// Deferred before the status check so the body is also closed on the
	// non-200 path (the original leaked it there).
	defer resp.Body.Close()
	buf := new(bytes.Buffer)
	buf.ReadFrom(resp.Body)
	if resp.StatusCode != 200 {
		return errors.NewServiceUnavailable(buf.String())
	}
	return nil
}
// GetPowerProfiles fetches all power profiles from the AppQoS API
// (GET /power_profiles).
func (ac *AppQoSClient) GetPowerProfiles(address string) ([]PowerProfile, error) {
	httpString := fmt.Sprintf("%s%s", address, powerProfilesEndpoint)
	req, err := http.NewRequest("GET", httpString, nil)
	if err != nil {
		return nil, err
	}
	req.SetBasicAuth(username, passwd)
	resp, err := ac.client.Do(req)
	if err != nil {
		return nil, err
	}
	// Deferred so the body is also closed on the ReadAll/Unmarshal error
	// paths (the original only closed it on full success).
	defer resp.Body.Close()
	receivedJSON, err := ioutil.ReadAll(resp.Body) // read the raw response body
	if err != nil {
		return nil, err
	}
	allPowerProfiles := make([]PowerProfile, 0)
	err = json.Unmarshal(receivedJSON, &allPowerProfiles)
	if err != nil {
		return nil, err
	}
	return allPowerProfiles, nil
}
// GetPowerProfile fetches a single power profile by ID from the AppQoS API
// (GET /power_profiles/{id}).
func (ac *AppQoSClient) GetPowerProfile(address string, id int) (*PowerProfile, error) {
	httpString := fmt.Sprintf("%s%s%s%s", address, powerProfilesEndpoint, "/", strconv.Itoa(id))
	powerProfile := &PowerProfile{}
	req, err := http.NewRequest("GET", httpString, nil)
	if err != nil {
		return powerProfile, err
	}
	req.SetBasicAuth(username, passwd)
	resp, err := ac.client.Do(req)
	if err != nil {
		return powerProfile, err
	}
	// Deferred so the body is also closed on the ReadAll/Unmarshal error
	// paths (the original only closed it on full success).
	defer resp.Body.Close()
	receivedJSON, err := ioutil.ReadAll(resp.Body) // read the raw response body
	if err != nil {
		return powerProfile, err
	}
	err = json.Unmarshal(receivedJSON, powerProfile)
	if err != nil {
		return powerProfile, err
	}
	return powerProfile, nil
}
// PostPowerProfile creates a new power profile via the AppQoS API
// (POST /power_profiles). It returns a human-readable status string
// alongside any error.
func (ac *AppQoSClient) PostPowerProfile(powerProfile *PowerProfile, address string) (string, error) {
	postFailedErr := errors.NewServiceUnavailable("Response status code error")
	payloadBytes, err := json.Marshal(powerProfile)
	if err != nil {
		return "Failed to marshal payload data", err
	}
	body := bytes.NewReader(payloadBytes)
	httpString := fmt.Sprintf("%s%s", address, powerProfilesEndpoint)
	req, err := http.NewRequest("POST", httpString, body)
	if err != nil {
		return "Failed to create new http post request", err
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth(username, passwd)
	resp, err := ac.client.Do(req)
	if err != nil {
		// Message fix: this is a request failure, not a header failure.
		return "Failed to perform http post request", err
	}
	// Deferred before the status check: the original deferred Close after the
	// early non-201 return, leaking the body on error responses.
	defer resp.Body.Close()
	buf := new(bytes.Buffer)
	buf.ReadFrom(resp.Body)
	respStr := buf.String()
	if resp.StatusCode != 201 {
		errStr := fmt.Sprintf("%s%v", "Fail: ", respStr)
		return errStr, postFailedErr
	}
	successStr := fmt.Sprintf("%s%v", "Success: ", resp.StatusCode)
	return successStr, nil
}
// PutPowerProfile updates an existing power profile via the AppQoS API
// (PUT /power_profiles/{id}). It returns a human-readable status string
// alongside any error.
func (ac *AppQoSClient) PutPowerProfile(powerProfile *PowerProfile, address string, powerProfileID int) (string, error) {
	putFailedErr := errors.NewServiceUnavailable("Response status code error")
	payloadBytes, err := json.Marshal(powerProfile)
	if err != nil {
		return "Failed to marshal payload data", err
	}
	body := bytes.NewReader(payloadBytes)
	httpString := fmt.Sprintf("%s%s%s%s", address, powerProfilesEndpoint, "/", strconv.Itoa(powerProfileID))
	req, err := http.NewRequest("PUT", httpString, body)
	if err != nil {
		// Message fix: this is a PUT, not a PATCH.
		return "Failed to create new http put request", err
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth(username, passwd)
	resp, err := ac.client.Do(req)
	if err != nil {
		// Message fix: this is a request failure, not a header failure.
		return "Failed to perform http put request", err
	}
	// Deferred before the status check so the body is closed on the non-200
	// path as well (the original leaked it there).
	defer resp.Body.Close()
	buf := new(bytes.Buffer)
	buf.ReadFrom(resp.Body)
	respStr := buf.String()
	if resp.StatusCode != 200 {
		errStr := fmt.Sprintf("%s%v", "Fail: ", respStr)
		return errStr, putFailedErr
	}
	successStr := fmt.Sprintf("%s%v", "Success: ", resp.StatusCode)
	return successStr, nil
}
// DeletePowerProfile removes a power profile via the AppQoS API
// (DELETE /power_profiles/{id}).
func (ac *AppQoSClient) DeletePowerProfile(address string, powerProfileID int) error {
	httpString := fmt.Sprintf("%s%s%s%s", address, powerProfilesEndpoint, "/", strconv.Itoa(powerProfileID))
	req, err := http.NewRequest("DELETE", httpString, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth(username, passwd)
	resp, err := ac.client.Do(req)
	if err != nil {
		return err
	}
	// Deferred before the status check so the body is also closed on the
	// non-200 path (the original leaked it there).
	defer resp.Body.Close()
	buf := new(bytes.Buffer)
	buf.ReadFrom(resp.Body)
	if resp.StatusCode != 200 {
		return errors.NewServiceUnavailable(buf.String())
	}
	return nil
}
|
package database
import (
"context"
"database/sql"
"fmt"
"time"
"boiler/pkg/entity"
"boiler/pkg/store"
)
// AddEmail inserts a new email row and stores the generated ID on email.
// The ID is assigned even when Insert fails (it is zero then), matching the
// previous behavior.
func (s *Database) AddEmail(ctx context.Context, tx *sql.Tx, email *entity.Email) error {
	const query = "INSERT INTO emails (user_id, address, created) VALUES (?, ?, ?)"
	id, err := Insert(ctx, tx, query, email.UserID, email.Address, time.Now())
	email.ID = id
	return err
}
// DeleteEmail removes the email with the given ID from the database.
func (s *Database) DeleteEmail(ctx context.Context, tx *sql.Tx, emailID int64) error {
	const query = "DELETE FROM emails WHERE id = ?"
	return Delete(ctx, tx, query, emailID)
}
// DeleteEmailsByUserID removes every email belonging to the given user.
func (s *Database) DeleteEmailsByUserID(ctx context.Context, tx *sql.Tx, userID int64) error {
	const query = "DELETE FROM emails WHERE user_id = ?"
	return Delete(ctx, tx, query, userID)
}
// FilterEmails loads the emails matching filter into emails. When
// filter.EmailID is positive it takes precedence over filter.UserID.
func (s *Database) FilterEmails(ctx context.Context, filter store.FilterEmails, emails *[]entity.Email) error {
	condition := "user_id = ?"
	params := []interface{}{filter.UserID}
	if filter.EmailID > 0 {
		condition = "id = ?"
		params = []interface{}{filter.EmailID}
	}
	rows, err := Select(ctx, s.sql, scanEmail,
		"SELECT id, user_id, address, created FROM emails WHERE "+condition,
		params...,
	)
	if err != nil {
		return err
	}
	result := make([]entity.Email, 0, len(rows))
	for _, row := range rows {
		result = append(result, *row.(*entity.Email))
	}
	*emails = result
	return nil
}
// scanEmail converts one row of the emails table into an *entity.Email.
func scanEmail(sc func(dest ...interface{}) error) (interface{}, error) {
	var (
		id      int64
		userID  int64
		address string
		created time.Time
	)
	if err := sc(&id, &userID, &address, &created); err != nil {
		return nil, fmt.Errorf("could not scan email; %w", err)
	}
	email := entity.Email{
		ID:      id,
		UserID:  userID,
		Address: address,
		Created: created,
	}
	return &email, nil
}
|
package main
import (
"context"
"flag"
"fmt"
"net/http"
"os"
"regexp"
"strings"
"time"
"github.com/google/go-github/github"
"github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
"golang.org/x/oauth2"
)
// Names of the environment variables release-bot reads at startup.
var (
	webhookSecretEnvVariable = "RELEASE_BOT_WEBHOOK_SECRET" // shared secret for validating webhooks
	githubTokenEnvVariable   = "RELEASE_BOT_GITHUB_TOKEN"   // OAuth token for the GitHub API
	debugModeEnvVariable     = "RELEASE_BOT_DEBUG"          // any non-empty value enables debug logging
)
// githubMonitor handles GitHub webhook deliveries and drives the project
// board automation.
type githubMonitor struct {
	ctx    context.Context // base context for API calls
	secret []byte          // webhook shared secret
	client *github.Client  // authenticated GitHub API client
}
// handleGithubWebhook validates and parses an incoming GitHub webhook, then
// dispatches issue "labeled"/"opened" events to background handlers.
func (mon *githubMonitor) handleGithubWebhook(w http.ResponseWriter, r *http.Request) {
	// Typo fix in the log message: "Recieved" -> "Received".
	log.Debugf("%s Received webhook", r.RequestURI)
	payload, err := github.ValidatePayload(r, mon.secret)
	if err != nil {
		log.Errorf("%s Failed to validate secret, %v", r.RequestURI, err)
		http.Error(w, "Secret did not match", http.StatusUnauthorized)
		return
	}
	event, err := github.ParseWebHook(github.WebHookType(r), payload)
	if err != nil {
		log.Errorf("%s Failed to parse webhook, %v", r.RequestURI, err)
		http.Error(w, "Bad webhook payload", http.StatusBadRequest)
		return
	}
	switch e := event.(type) {
	case *github.IssuesEvent:
		switch *e.Action {
		case "labeled":
			go mon.handleLabelEvent(e, r)
		case "opened":
			go mon.handleIssueOpenedEvent(e, r)
		}
	}
}
// When a user submits an issue to docker/release-tracking we want that issue to
// automagically have a `triage` label for all open projects.
func (mon *githubMonitor) handleIssueOpenedEvent(e *github.IssuesEvent, r *http.Request) {
	ctx, cancel := context.WithTimeout(mon.ctx, 5*time.Minute)
	defer cancel()
	labels, _, err := mon.client.Issues.ListLabels(ctx, *e.Repo.Owner.Login, *e.Repo.Name, nil)
	if err != nil {
		log.Errorf("%q", err)
		return
	}
	appliedLabelsStructs, _, err := mon.client.Issues.ListLabelsByIssue(ctx, *e.Repo.Owner.Login, *e.Repo.Name, *e.Issue.Number, nil)
	if err != nil {
		log.Errorf("%q", err)
		return
	}
	// Set of label names already on the issue.
	appliedLabels := make(map[string]bool)
	for _, labelStruct := range appliedLabelsStructs {
		appliedLabels[*labelStruct.Name] = true
	}
	var labelsToApply []string
	for _, label := range labels {
		matched, err := regexp.MatchString(".*/triage", *label.Name)
		if err != nil {
			log.Errorf("%q", err)
			return
		}
		if !matched {
			continue
		}
		projectPrefix, _, err := splitLabel(*label.Name)
		if err != nil {
			log.Errorf("%q", err)
			return
		}
		// Only apply the label if there's a corresponding open project.
		if _, err := mon.getProject(projectPrefix, e); err != nil {
			continue
		}
		// Idiom fix: was `appliedLabels[...] == false`.
		if !appliedLabels[*label.Name] {
			labelsToApply = append(labelsToApply, *label.Name)
		}
	}
	// We have labels to apply.
	if len(labelsToApply) > 0 {
		log.Infof("%v Adding labels %v to issue #%v", r.RequestURI, labelsToApply, *e.Issue.Number)
		_, _, err = mon.client.Issues.AddLabelsToIssue(
			ctx,
			*e.Repo.Owner.Login,
			*e.Repo.Name,
			*e.Issue.Number,
			labelsToApply,
		)
		if err != nil {
			log.Errorf("%q", err)
			return
		}
	}
}
// When a user adds a label matching {projectPrefix}/{action} it should move the
// issue in the corresponding open project to the correct column.
//
// Defined label -> column map:
// * triage -> Triage
// * cherry-pick -> Cherry Pick
// * cherry-picked -> Cherry Picked
//
// NOTE: This should work even if an issue is not in a specified project board
//
// NOTE: This should work even for labels outside of the defined label map
// For example a mapping of label `17.03.1-ee/bleh` should move that issue
// to the bleh column of the open project of 17.03.1-ee-1-rc1 if that column
// exists
func (mon *githubMonitor) handleLabelEvent(e *github.IssuesEvent, r *http.Request) {
	ctx, cancel := context.WithTimeout(mon.ctx, 5*time.Minute)
	defer cancel()
	var columnID, cardID int
	var sourceColumn, destColumn github.ProjectColumn
	projectPrefix, labelSuffix, err := splitLabel(*e.Label.Name)
	if err != nil {
		log.Errorf("%q", err)
		return
	}
	project, err := mon.getProject(projectPrefix, e)
	if err != nil {
		log.Errorf("%q", err)
		return
	}
	columns, _, err := mon.client.Projects.ListProjectColumns(ctx, *project.ID, nil)
	if err != nil {
		log.Errorf("%q", err)
		return
	}
	// Map well-known label suffixes to their column names; any other suffix
	// is used verbatim as the column name.
	columnName := map[string]string{
		"triage":        "Triage",
		"cherry-pick":   "Cherry Pick",
		"cherry-picked": "Cherry Picked",
	}[labelSuffix]
	if columnName == "" {
		columnName = labelSuffix
	}
	for _, column := range columns {
		// Found our column to move into.
		if *column.Name == columnName {
			destColumn = *column
			columnID = *column.ID
		}
		cards, _, err := mon.client.Projects.ListProjectCards(ctx, *column.ID, nil)
		if err != nil {
			log.Errorf("%q", err)
			return
		}
		for _, card := range cards {
			if *card.ContentURL == *e.Issue.URL {
				sourceColumn = *column
				cardID = *card.ID
			}
		}
	}
	// Destination column doesn't exist: log and bail out.
	// Bug fixes: the format string had three verbs but only two arguments
	// (r.RequestURI was missing), and the original fell through and later
	// dereferenced the nil destColumn.Name.
	if destColumn == (github.ProjectColumn{}) {
		log.Infof(
			"%s Requested destination column '%v' does not exist for project '%v'",
			r.RequestURI,
			columnName,
			*project.Name,
		)
		return
	}
	// Card does not exist yet: create it in the destination column.
	if cardID == 0 {
		contentType := "Issue"
		if e.Issue.PullRequestLinks != nil {
			contentType = "PullRequest"
		}
		log.Infof(
			"%s Creating card for issue #%v in project %v in column '%v'",
			r.RequestURI,
			*e.Issue.Number,
			*project.Name,
			*destColumn.Name,
		)
		_, _, err := mon.client.Projects.CreateProjectCard(
			ctx,
			columnID,
			&github.ProjectCardOptions{
				ContentID:   *e.Issue.ID,
				ContentType: contentType,
			},
		)
		if err != nil {
			log.Errorf(
				"%s Failed creating card for issue #%v in project %v in column '%v':\n%v",
				r.RequestURI,
				*e.Issue.Number,
				*project.Name,
				*destColumn.Name,
				err,
			)
		}
	} else {
		log.Infof(
			"%s Moving issue #%v in project %v from '%v' to '%v'",
			r.RequestURI,
			*e.Issue.Number,
			*project.Name,
			*sourceColumn.Name,
			*destColumn.Name,
		)
		_, err = mon.client.Projects.MoveProjectCard(
			ctx,
			cardID,
			&github.ProjectCardMoveOptions{
				Position: "top",
				ColumnID: columnID,
			},
		)
		if err != nil {
			log.Errorf(
				"%s Move failed for issue #%v in project %v from '%v' to '%v':\n%v",
				r.RequestURI,
				*e.Issue.Number,
				*project.Name,
				*sourceColumn.Name,
				*destColumn.Name,
				err,
			)
		}
	}
}
// splitLabel splits a "{release}/{action}" label into its two parts and
// returns an error for any other shape.
func splitLabel(label string) (string, string, error) {
	parts := strings.Split(label, "/")
	if len(parts) != 2 {
		// Error strings are lowercase and unpunctuated per Go convention.
		return "", "", fmt.Errorf("label does not match pattern {release}/{action}")
	}
	return parts[0], parts[1], nil
}
// getProject returns the first open project on the event's repository whose
// name starts with projectPrefix, or an error if none matches.
func (mon *githubMonitor) getProject(projectPrefix string, e *github.IssuesEvent) (*github.Project, error) {
	ctx, cancel := context.WithTimeout(mon.ctx, 5*time.Minute)
	defer cancel()
	projects, _, err := mon.client.Repositories.ListProjects(
		ctx,
		*e.Repo.Owner.Login,
		*e.Repo.Name,
		&github.ProjectListOptions{State: "open"},
	)
	if err != nil {
		return nil, err
	}
	for _, project := range projects {
		if strings.HasPrefix(*project.Name, projectPrefix) {
			return project, nil
		}
	}
	// Error strings are lowercase per Go convention (was "No project found...").
	return nil, fmt.Errorf("no project found with prefix %s", projectPrefix)
}
// main wires up the GitHub client, the webhook monitor and the HTTP router,
// then blocks serving webhook deliveries.
func main() {
	debug := flag.Bool("debug", false, "Toggle debug mode")
	port := flag.String("port", "8080", "Port to bind release-bot to")
	flag.Parse()
	ctx := context.Background()
	tokenSource := oauth2.StaticTokenSource(
		&oauth2.Token{AccessToken: os.Getenv(githubTokenEnvVariable)},
	)
	if *debug || os.Getenv(debugModeEnvVariable) != "" {
		log.SetLevel(log.DebugLevel)
		log.Debug("Log level set to debug")
	}
	monitor := githubMonitor{
		ctx:    ctx,
		secret: []byte(os.Getenv(webhookSecretEnvVariable)),
		client: github.NewClient(oauth2.NewClient(ctx, tokenSource)),
	}
	router := mux.NewRouter()
	router.Handle("/{user:.*}/{name:.*}", http.HandlerFunc(monitor.handleGithubWebhook)).Methods("POST")
	log.Infof("Starting release-bot on port %s", *port)
	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", *port), router))
}
|
package service
import (
"github.com/piotrpersona/saga/broker"
)
// NewOrderService constructs an OrderService backed by the given broker.
func NewOrderService(b broker.Broker) *OrderService {
	svc := &OrderService{Broker: b}
	return svc
}
|
package Grammar
// Token kinds produced by the expression lexer. Values are unchanged from
// the original individual declarations; the consecutive codes now use iota.
const (
	Number       = iota // 0
	LParentheses        // 1
	RParentheses        // 2
	Plus                // 3
	Minus               // 4
	Multi               // 5
	Divide              // 6
)

// Sentinel markers for the start and end of a token stream.
const (
	BEGIN = 254
	END   = 255
)
|
package filter
import (
"github.com/comdeng/HapGo/hapgo/app"
)
// Execute runs the named filter against the given application.
//
// NOTE(review): currently a no-op stub. The parameter type `*app` refers to
// the imported package name rather than a type in it, which should not
// compile — confirm the intended type (e.g. *app.App). The exported-style
// parameter name WebApp is also unidiomatic for Go.
func Execute(filterName string, WebApp *app) {
}
|
package rest
import (
"encoding/json"
)
// SubscriptionRequest describes a webhook subscription payload.
type SubscriptionRequest struct {
	EventFilters []string          `json:"eventFilters"`
	DeliveryMode map[string]string `json:"deliveryMode"`
}

// String renders the request as indented JSON; marshalling errors yield an
// empty string.
func (resp SubscriptionRequest) String() string {
	pretty, _ := json.MarshalIndent(resp, "", " ")
	return string(pretty)
}
|
package goroutine
import (
"fmt"
"math"
)
// TestGoroutine5 approximates pi via the Leibniz series: it spawns one
// goroutine per term, sums the n results and prints the total.
func TestGoroutine5(n int) {
	ch1 := make(chan float64)
	for k := 0; k < n; k++ {
		go term(ch1, float64(k))
	}
	sum := 0.0
	for k := 0; k < n; k++ {
		sum += <-ch1
	}
	// gofmt: dropped the stray trailing semicolon from the original.
	fmt.Println(sum)
}
// term sends the k-th Leibniz series term, 4*(-1)^k/(2k+1), on ch1.
func term(ch1 chan float64, k float64) {
	value := 4 * ((math.Pow(-1, k)) / (2*k + 1))
	ch1 <- value
}
// TestGoroutine4 produces an endless stream of pseudo-random 0s and 1s: the
// select with two ready send cases picks one at random, while a reader
// goroutine prints each value. Note: this function never returns.
func TestGoroutine4() {
	c1 := make(chan int)
	go func() {
		for {
			fmt.Printf("%d\t", <-c1)
		}
	}()
	for {
		select {
		case c1 <- 0:
		case c1 <- 1:
		}
	}
}
// TestGoroutine3 wires two producer channels into a single consumer and
// returns immediately; the spawned goroutines keep running in the background.
func TestGoroutine3() {
	evens := make(chan int)
	offsets := make(chan int)
	go pump1(evens)
	go pump2(offsets)
	go suck(evens, offsets)
}
// pump1 endlessly sends the even numbers 0, 2, 4, ... on ch.
func pump1(ch chan int) {
	for n := 0; ; n++ {
		ch <- 2 * n
	}
}
// pump2 endlessly sends the integers 6, 7, 8, ... on ch2.
func pump2(ch2 chan int) {
	n := 0
	for {
		ch2 <- n + 6
		n++
	}
}
// suck forever receives from whichever of ch1/ch2 is ready and prints the
// value with its source channel. It never returns.
func suck(ch1, ch2 chan int) {
	for {
		select {
		case n := <-ch1:
			fmt.Printf("Receive on ch1:%d\n", n)
		case n := <-ch2:
			fmt.Printf("Receive on ch2:%d\n", n)
		}
	}
}
// TestGoroutine starts a producer/consumer pair over one channel and returns
// without waiting for either goroutine.
func TestGoroutine() {
	names := make(chan string)
	go sendData(names)
	go getData(names)
}
// sendData pushes three fixed names into ch, blocking until each is received.
func sendData(ch chan string) {
	for _, name := range []string{"shenshuo", "suhanyu", "surui"} {
		ch <- name
	}
}
// getData prints every value received on ch, forever.
func getData(ch chan string) {
	for {
		fmt.Println(<-ch)
	}
}
// TestGoroutine2 demonstrates a single channel handoff: one goroutine sends
// a name, another prints it. Returns without waiting for either.
func TestGoroutine2() {
	ch1 := make(chan string)
	go func() {
		ch1 <- "suhanyu"
	}()
	go func() {
		fmt.Println(<-ch1)
	}()
}
|
package model
// Post - a user-authored post record.
type Post struct {
	ID     int64  // primary key
	UserID int64  // ID of the authoring user
	Title  string // post title
	Body   string // post body text
}
|
package model
// migration runs the schema auto-migration for all models.
func migration() {
	// Auto-migrate mode: create/update tables to match the model structs.
	DB.AutoMigrate(&Route{}, &UpstreamInfo{})
	DB.AutoMigrate(&User{})
	DB.AutoMigrate(&Group{})
	DB.AutoMigrate(&Role{}, &Privilege{})
	//InitData()
	//SetPrivilege()
}
// InitData seeds the database with a demo route and upstream, plus an admin
// user wired through a group and role to a single allow privilege.
func InitData() {
	route := Route{
		Name:     "test",
		Host:     "liya.test.com",
		Path:     "/",
		Upstream: "test_server",
	}
	upstreaminfo := UpstreamInfo{
		Name:         "test_server",
		UpstreamAddr: "https://www.baidu.com",
		Path:         "/",
	}
	privilege := Privilege{
		Ptype:  "allow",
		Host:   "liya.test.com",
		Path:   "/",
		Method: "GET",
	}
	var privle []Privilege
	privle = append(privle, privilege)
	role := Role{
		Name:       "admin",
		Privileges: privle,
	}
	var roles []Role
	roles = append(roles, role)
	group := Group{
		GroupName:   "admin",
		Description: "管理员组",
		Roles:       roles,
	}
	var groups []Group
	groups = append(groups, group)
	user := User{
		UserName: "admin",
		Nickname: "admin",
		Groups:   groups,
	}
	// Demo credentials for the seeded admin account.
	user.SetPassword("test123456")
	DB.Create(&route)
	DB.Create(&upstreaminfo)
	DB.Create(&user)
	// NOTE(review): Commit is called without a matching Begin; on a
	// non-transaction gorm handle this is likely a no-op or error — confirm.
	DB.Commit()
}
|
package object
// Type identifies the kind of an Object.
type Type string

// Object is the internal representation of any type in the doggo language.
type Object interface {
	Type() Type      // Type reports the object's kind.
	Inspect() string // Inspect renders the object for display.
}
|
package controllers
import (
"librarymanager/reviews/middlewares"
"github.com/gin-gonic/gin"
)
// MapUrls registers the review endpoints under /api/reviews and returns the
// route group so callers can attach further routes.
func MapUrls(router *gin.Engine, reviewsController Reviews, middleware middlewares.Middleware) *gin.RouterGroup {
	group := router.Group("/api/reviews")
	group.GET("/books/:id", reviewsController.GetFromBook)
	group.POST("/books/:id", middleware.CheckJWTToken, reviewsController.CreateInBook)
	return group
}
|
package medtronic
import (
"time"
)
const (
	// CarbRatios is the pump command code for reading the carb-ratio schedule.
	CarbRatios Command = 0x8A
)

// Tenths holds a fixed-point scaled integer value (see CarbRatio.CarbRatio).
type Tenths int

// CarbRatio is one entry of the pump's carb-ratio schedule.
type CarbRatio struct {
	Start     TimeOfDay
	CarbRatio Tenths // 10x grams/unit or 100x units/exchange
	Units     CarbUnitsType
}

// CarbRatioSchedule is the pump's list of carb-ratio entries; CarbRatioAt
// relies on entries being in ascending Start order.
type CarbRatioSchedule []CarbRatio
// carbRatioStep returns the per-entry byte width of an encoded carb-ratio
// schedule: newer pump families use 3 bytes per entry, older ones 2.
func carbRatioStep(newerPump bool) int {
	// Idiom fix: no else after a terminating if.
	if newerPump {
		return 3
	}
	return 2
}
// decodeCarbRatioSchedule parses the raw schedule bytes into entries. Each
// entry starts with a start-time byte (in half hours); decoding stops at the
// first zero start time after the first entry. On newer pumps the value is a
// two-byte integer, on older ones a single byte scaled by 10.
func decodeCarbRatioSchedule(data []byte, units CarbUnitsType, newerPump bool) CarbRatioSchedule {
	step := carbRatioStep(newerPump)
	sched := []CarbRatio{}
	for i := 0; i < len(data); i += step {
		start := halfHoursToTimeOfDay(data[i])
		if start == 0 && len(sched) != 0 {
			break
		}
		var value Tenths
		if newerPump {
			value = Tenths(twoByteInt(data[i+1 : i+3]))
		} else {
			value = Tenths(10 * int(data[i+1]))
		}
		entry := CarbRatio{Start: start, CarbRatio: value, Units: units}
		sched = append(sched, entry)
	}
	return sched
}
// CarbRatios reads the pump's carb-ratio schedule, returning an empty
// schedule on any communication or format error.
func (pump *Pump) CarbRatios() CarbRatioSchedule {
	// Format of response depends on the pump family.
	newer := pump.Family() >= 23
	data := pump.Execute(CarbRatios)
	if pump.Error() != nil {
		return CarbRatioSchedule{}
	}
	if len(data) < 2 {
		pump.BadResponse(CarbRatios, data)
		return CarbRatioSchedule{}
	}
	// n is derived from the length byte; the payload must be a whole number
	// of fixed-width entries — presumably per the pump wire format, TODO
	// confirm against protocol docs.
	n := int(data[0]) - 1
	step := carbRatioStep(newer)
	if n%step != 0 {
		pump.BadResponse(CarbRatios, data)
		return CarbRatioSchedule{}
	}
	units := CarbUnitsType(data[1])
	return decodeCarbRatioSchedule(data[step:step+n], units, newer)
}
// CarbRatioAt returns the schedule entry in effect at time t: the last entry
// whose Start is not after t's time of day. An empty schedule yields the
// zero CarbRatio.
func (s CarbRatioSchedule) CarbRatioAt(t time.Time) CarbRatio {
	tod := sinceMidnight(t)
	var current CarbRatio
	for _, entry := range s {
		if entry.Start > tod {
			break
		}
		current = entry
	}
	return current
}
|
package main
import (
"database/sql"
"fmt"
"log"
_ "github.com/ziutek/mymysql/godrv"
)
// Database connection settings for the urlshortner schema.
//
// NOTE(review): ALL_CAPS names are unidiomatic Go (prefer MixedCaps), and
// DB_HOST is not referenced by OpenDB's DSN — confirm whether it is still
// needed.
const (
	DB_HOST = "tcp(127.0.0.1:3306)"
	DB_NAME = "urlshortner"
	DB_USER = "root"
	DB_PASS = ""
)
// OpenDB returns a handle to the MySQL database using the mymysql driver.
// Note that sql.Open does not connect; it only validates its arguments, so a
// panic here indicates a bad DSN or missing driver.
func OpenDB() *sql.DB {
	db, err := sql.Open("mymysql", fmt.Sprintf("%s/%s/%s", DB_NAME, DB_USER, DB_PASS))
	if err != nil {
		// The original followed panic with log.Fatal, which was unreachable;
		// keep the panic (the first action taken) and drop the dead call.
		panic(err)
	}
	return db
}
// DBInsert stores url in the urls table and returns the auto-increment ID of
// the new row, or 0 when the insert (or the ID lookup) fails.
func DBInsert(url string) int64 {
	db := OpenDB()
	defer db.Close()
	result, err := db.Exec("INSERT INTO urls (url) VALUES (?)", url)
	if err != nil {
		fmt.Println("Error: couldn't insert url")
		fmt.Println(err)
		// Bug fix: result is nil when Exec fails, so the original's
		// result.LastInsertId() call below would panic. Bail out instead.
		return 0
	}
	returnValue, err := result.LastInsertId()
	if err != nil {
		fmt.Println("Couldn't get lastIndex")
	}
	return returnValue
}
// DBget looks up the original URL for the given shortened ID, returning the
// empty string when the row is missing or the query fails.
func DBget(id int) string {
	db := OpenDB()
	defer db.Close()
	var link string
	if err := db.QueryRow("SELECT url FROM urls WHERE id = ?", id).Scan(&link); err != nil {
		fmt.Println("Couldn't get link from database")
		fmt.Println(err)
	}
	return link
}
|
package main
import (
"github.com/gorilla/websocket"
"log"
)
// client represents a single chat participant connected over a websocket.
type client struct {
	// socket is the websocket connection for this client
	socket *websocket.Conn
	// send buffers outgoing messages
	send chan []byte
	// room is the chat room this client belongs to
	room *room
}
// read pumps messages received on the client's socket into room.forward
// until a read error occurs, then closes the socket.
func (c *client) read() {
	defer c.socket.Close()
	for {
		_, msg, err := c.socket.ReadMessage()
		if err != nil {
			return
		}
		log.Println("receive: ", string(msg), "from: ", *c)
		c.room.forward <- msg
	}
}
// write forwards messages from the client's send channel to the browser via
// the socket; it stops on the first write error or when the channel closes,
// then closes the socket.
func (c *client) write() {
	defer c.socket.Close()
	for msg := range c.send {
		log.Println("send: ", string(msg), "to: ", *c)
		if err := c.socket.WriteMessage(websocket.TextMessage, msg); err != nil {
			return
		}
	}
}
|
package main
import (
"archive/zip"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"github.com/otiai10/copy"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
)
// postsStruct is one news post.
type postsStruct struct {
	ID          string `json:"id"`
	Date        string `json:"date"`
	Text        string `json:"-"` // body text; deliberately excluded from JSON
	Description string `json:"description"`
}
// files pairs an archive entry with a date string.
//
// NOTE(review): not referenced anywhere in this file; FileName stores a
// zip.File by value, which is unusual — confirm whether *zip.File (or
// removal of the type) was intended.
type files struct {
	FileName zip.File
	Date     string
}
// createZip writes post.Text into ./news/{ID}/data.zip (stored as {ID}.txt)
// and a data.json file with the post's metadata alongside it. Errors are
// logged and abort the function early.
func createZip(post postsStruct) {
	fileTxt, err := os.Create(post.ID + ".txt")
	if err != nil {
		log.Println(err)
		return
	}
	// remove the temporary text file when we're done
	defer os.Remove(fileTxt.Name())
	// and close it
	defer fileTxt.Close()
	_, err = fileTxt.WriteString(post.Text)
	// fmt.Println(post.Text)
	if err != nil {
		log.Println(err)
		return
	}
	// create the per-post directory named after the ID
	err = os.MkdirAll("./news/"+post.ID, os.ModePerm)
	if err != nil {
		fmt.Println(err)
		return
	}
	//// previously the metadata was written with a json.Encoder (kept for reference)
	//filenameJSON := "data.json"
	//fileJSON, _ := os.Create("./news/" + post.ID + "/" + filenameJSON)
	//jsonWriter := io.Writer(fileJSON)
	//enc := json.NewEncoder(jsonWriter).Encode(post)
	//
	//enc.Indent(" ", " ")
	//if err := enc.Encode(post); err != nil {
	//	fmt.Printf("error: %v\n", err)
	//	return
	//}
	fileJSON, _ := json.MarshalIndent(post, "", " ")
	filenameJSON := "data.json"
	// NOTE(review): the WriteFile error is discarded — consider logging it.
	_ = ioutil.WriteFile("./news/"+post.ID+"/"+filenameJSON, fileJSON, 0644)
	// create the zip archive containing the news text
	newZipFile, err := os.Create("./news/" + post.ID + "/data.zip")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer newZipFile.Close()
	zipWriter := zip.NewWriter(newZipFile)
	defer zipWriter.Close()
	info, err := fileTxt.Stat()
	if err != nil {
		log.Println(err)
		return
	}
	header, err := zip.FileInfoHeader(info)
	if err != nil {
		log.Println(err)
		return
	}
	header.Name = fileTxt.Name()
	header.Method = zip.Deflate
	writer, err := zipWriter.CreateHeader(header)
	if err != nil {
		log.Println(err)
		return
	}
	// NOTE(review): the Write error is ignored — confirm that's acceptable.
	writer.Write([]byte(post.Text))
	// _, err = io.Copy(writer, fileTxt)
	//if err != nil {
	//	log.Println(err)
	//	return
	//}
}
// функция генерирует из source каталога/файла zip архив с путём target
func zipit(source, target string) error {
zipfile, err := os.Create(target)
if err != nil {
return err
}
defer zipfile.Close()
archive := zip.NewWriter(zipfile)
defer archive.Close()
info, err := os.Stat(source)
if err != nil {
return nil
}
var baseDir string
if info.IsDir() {
baseDir = filepath.Base(source)
}
filepath.Walk(source, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
header, err := zip.FileInfoHeader(info)
if err != nil {
return err
}
if baseDir != "" {
header.Name = filepath.Join(baseDir, strings.TrimPrefix(path, source))
}
if info.IsDir() {
header.Name += "/"
} else {
header.Method = zip.Deflate
}
writer, err := archive.CreateHeader(header)
if err != nil {
return err
}
if info.IsDir() {
return nil
}
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(writer, file)
return err
})
return err
}
type IDFiles struct {
IDint int
IDstr string
}
// возвращает массив c именами файлов
func GetArrayLastFiles(lastNumPost int) []IDFiles {
files, err := ioutil.ReadDir("./news")
if err != nil {
log.Fatal(err)
}
arrayLastFiles := make([]IDFiles, 0)
for _, file := range files {
num, err := strconv.Atoi(file.Name())
if err != nil {
return []IDFiles{}
}
arrayLastFiles = append(arrayLastFiles, IDFiles{num, file.Name()})
}
sort.Slice(arrayLastFiles, func(i, j int) bool {
return arrayLastFiles[i].IDint < arrayLastFiles[j].IDint
})
return arrayLastFiles[len(arrayLastFiles) -lastNumPost:]
}
// main seeds demo posts, packages each one as ./news/{id}/data.zip, zips the
// whole ./news tree into ./send.zip, then serves an HTML index page and a
// /post API on port 8081.
func main() {
	posts := []postsStruct{
		{"1", "25 Jun 21 19:06 MSK", "Статья 1", "Статья о чём-то 1"},
		{"2", "26 Jun 21 18:01 MSK", "Статья 2", "Статья о чём-то 2"},
		{"3", "27 Jun 21 20:16 MSK", "Статья 3", "Статья о чём-то 3"},
	}
	// Generate additional demo posts 4..19 dated "now".
	for i := 4; i < 20; i++ {
		posts = append(posts, postsStruct{fmt.Sprintf("%v", i),
			time.Now().Format(time.RFC822), fmt.Sprintf("Статья %v", i), fmt.Sprintf("Статья о чём-то %v", i)})
	}
	for i := 0; i < len(posts); i++ {
		createZip(posts[i])
	}
	zipName := "./send.zip"
	err := zipit("./news", zipName)
	if err != nil {
		log.Println(err)
		return
	}
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// serve the plain HTML index page
		fileContents, err := ioutil.ReadFile("index.html")
		if err != nil {
			log.Println(err)
			w.WriteHeader(http.StatusNotFound)
			return
		}
		w.Write(fileContents)
	})
	http.HandleFunc("/post", func(w http.ResponseWriter, r *http.Request) {
		//str := client.Client()
		//fmt.Fprintln(w, str)
		fmt.Println("request: ", r.URL.Path)
		fmt.Println("method request: ", r.Method)
		defer r.Body.Close()
		// Dispatch on the request method
		switch r.Method {
		// GET serves data
		case http.MethodGet:
			// No query parameters: return the full post list as JSON.
			if r.FormValue("NumLastNews") == "" && r.FormValue("ID") == "" && r.FormValue("Hash") == "" && r.FormValue("Archive") == "" {
				productsJson, _ := json.Marshal(posts)
				w.Header().Set("Content-Type", "application/json")
				w.WriteHeader(http.StatusOK)
				w.Write(productsJson)
			} else if r.FormValue("Archive") != "" {
				// Archive=...: stream the prebuilt ./send.zip.
				// defer os.Remove(zipName)
				file, err := os.Open(zipName)
				if err != nil {
					log.Fatal(err)
				}
				defer file.Close()
				mes, _ := ioutil.ReadAll(file)
				w.Header().Set("Content-Type", "application/octet-stream")
				w.Write(mes)
			} else {
				if r.FormValue("NumLastNews") != "" {
					// NumLastNews=n: zip up and stream the n most recent posts.
					lastNumPost, err := strconv.Atoi(r.FormValue("NumLastNews"))
					if err != nil {
						log.Println(err)
						w.WriteHeader(http.StatusBadRequest)
						return
					}
					if lastNumPost < 0 || lastNumPost > len(posts) {
						log.Println(errors.New("number of requested posts is not allowed"))
						w.WriteHeader(http.StatusBadRequest)
						return
					}
					dirNameLastNews := "./lastnews"
					// lastFiles holds the directory names of the last n posts
					lastFiles := GetArrayLastFiles(lastNumPost)
					for _, value := range lastFiles {
						// outputDir := "./newnews/"+file.Name()+"/"
						err = copy.Copy("./news/"+value.IDstr+"/", dirNameLastNews+"/"+value.IDstr)
						if err != nil {
							log.Println(err)
							return
						}
					}
					defer os.RemoveAll(dirNameLastNews)
					zipNameLastNews := "./lastnews.zip"
					err = zipit("./lastnews", zipNameLastNews)
					if err != nil {
						log.Println(err)
						return
					}
					defer os.Remove(zipNameLastNews)
					file, err := os.Open(zipNameLastNews)
					if err != nil {
						log.Fatal(err)
					}
					defer file.Close()
					mes, _ := ioutil.ReadAll(file)
					w.Header().Set("Content-Type", "application/octet-stream")
					w.Header().Add("Content-News", "lastnews")
					w.Write(mes)
					//productsJson, _ := json.Marshal(posts[len(posts)-lastNumPost:])
					//w.Header().Set("Content-Type", "application/json")
					//
					//w.WriteHeader(http.StatusOK)
					//w.Write(productsJson)
				} else if r.FormValue("Hash") != "" {
					// Hash=...: return a checksum of ./send.zip.
					file, _ := os.Open("./send.zip")
					defer file.Close()
					bytesReadZIP, err := ioutil.ReadAll(file)
					if err != nil {
						fmt.Println("Ошибка чтения")
						return
					}
					// NOTE(review): hash.Sum(b) appends the digest of the
					// (empty) hash state to b — it does not hash b. The
					// intended usage is hash.Write(b); hash.Sum(nil).
					// Confirm and fix together with any client verifying it.
					hash := sha256.New()
					hashSum := hash.Sum(bytesReadZIP)
					w.Header().Set("Content-Type", "application/octet-stream")
					w.Header().Add("Content-Hash", "Hash-256")
					w.WriteHeader(http.StatusOK)
					w.Write(hashSum)
				}
			}
		default:
			w.WriteHeader(http.StatusMethodNotAllowed)
		}
	})
	err = http.ListenAndServe(":8081", nil)
	if err != nil {
		fmt.Println(err)
		return
	}
}
|
// package main implements plz_diff_graphs, a small utility to take the JSON representation
// of two build graphs (as output from 'plz query graph') and produce a list of targets
// that have changed between the two.
//
// Note that the 'ordering' of the two graphs matters, hence their labels 'before' and 'after';
// the operation is non-commutative because targets that are added appear and those deleted do not.
//
// It also accepts a list of filenames that have changed and invalidates targets appropriately.
package main
import (
"fmt"
"io/ioutil"
"os"
"strings"
"cli"
"tools/please_diff_graphs/diff"
)
// opts holds the command-line flags for please_diff_graphs; parsed by
// cli.ParseFlagsOrDie in main.
var opts = struct {
	Usage     string
	Verbosity int      `short:"v" long:"verbosity" description:"Verbosity of output (higher number = more output, default 2 -> notice, warnings and errors only)" default:"2"`
	Before    string   `short:"b" long:"before" required:"true" description:"File containing build graph before changes."`
	After     string   `short:"a" long:"after" required:"true" description:"File containing build graph after changes."`
	Include   []string `short:"i" long:"include" description:"Label of targets to include."`
	// NOTE(review): the duplicated default tag gives this list two default
	// values ("manual" and "manual:linux_amd64") — confirm that is intended.
	Exclude      []string `short:"e" long:"exclude" description:"Label of targets to exclude." default:"manual" default:"manual:linux_amd64"`
	NoRecurse    bool     `long:"norecurse" description:"Don't recurse into dependencies of rules to see if they've changed"`
	ChangedFiles struct {
		Files []string `positional-arg-name:"files" description:"Files that have changed. - to read from stdin."`
	} `positional-args:"true"`
}{
	Usage: `
please_diff_graphs is a small utility to calculate differences between two Please build graphs.
Its inputs are two JSON graph files (produced using 'plz query graph') and any files that have changed.
It will output a list of all build targets that have changed between the two.
For example:
please_diff_graphs -b before.json -a after.json src/core/my_file.go
> //src/core:my_target
> //src/elsewhere:some_other_target
Note that the 'ordering' of the two graphs matters, hence their labels 'before' and 'after';
the operation is non-commutative because targets that are added appear and those deleted do not.
please_diff_graphs is mostly useful in conjunction with Please in a CI system; you can use it to
formally determine what set of targets have changed in a diff and run the minimal set of affected tests.
`,
}
// readStdin consumes all of standard input and returns its content as a
// slice of whitespace-trimmed lines. Empty (or whitespace-only) input
// yields an empty, non-nil slice. A read error is fatal.
func readStdin() []string {
	data, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		fmt.Printf("%s\n", err)
		os.Exit(1)
	}
	content := strings.TrimSpace(string(data))
	if content == "" {
		return []string{}
	}
	lines := strings.Split(content, "\n")
	for i := range lines {
		lines[i] = strings.TrimSpace(lines[i])
	}
	return lines
}
// main parses flags, loads the two graph files, and prints one changed
// target label per line.
func main() {
	cli.ParseFlagsOrDie("Please graph differ", "9.1.2", &opts)
	cli.InitLogging(opts.Verbosity)
	graphBefore := diff.ParseGraphOrDie(opts.Before)
	graphAfter := diff.ParseGraphOrDie(opts.After)
	// A single "-" argument means the changed files arrive on stdin.
	files := opts.ChangedFiles.Files
	if len(files) == 1 && files[0] == "-" {
		files = readStdin()
		opts.ChangedFiles.Files = files
	}
	changed := diff.Graphs(graphBefore, graphAfter, files, opts.Include, opts.Exclude, !opts.NoRecurse)
	for _, label := range changed {
		fmt.Printf("%s\n", label)
	}
}
|
package nougat
import (
"fmt"
"net/http"
"reflect"
"testing"
)
// TestDo_onSuccess verifies that Do decodes a 2xx JSON body into the
// success model and reports the response status code.
func TestDo_onSuccess(t *testing.T) {
	const (
		expectedText                = "Some text"
		expectedFavoriteCount int64 = 24
	)
	client, mux, server := testServer()
	defer server.Close()
	mux.HandleFunc("/success", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprintf(w, `{"text": "Some text", "favorite_count": 24}`)
	})
	n := New().Client(client)
	req, _ := http.NewRequest("GET", "http://example.com/success", nil)
	model := new(FakeModel)
	apiError := new(APIError)
	resp, err := n.Do(req, model, apiError)
	if err != nil {
		t.Errorf("expected nil, got %v", err)
	}
	if resp.StatusCode != 200 {
		t.Errorf("expected %d, got %d", 200, resp.StatusCode)
	}
	if model.Text != expectedText {
		t.Errorf("expected %s, got %s", expectedText, model.Text)
	}
	if model.FavoriteCount != expectedFavoriteCount {
		t.Errorf("expected %d, got %d", expectedFavoriteCount, model.FavoriteCount)
	}
}
// TestDo_onSuccessWithNilValue verifies that Do tolerates a nil success
// value and leaves the failure value untouched on a 2xx response.
// Fixes the "exepcted" typo in the failure message.
func TestDo_onSuccessWithNilValue(t *testing.T) {
	client, mux, server := testServer()
	defer server.Close()
	mux.HandleFunc("/success", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprintf(w, `{"text": "Some text", "favorite_count": 24}`)
	})
	Nougat := New().Client(client)
	req, _ := http.NewRequest("GET", "http://example.com/success", nil)
	apiError := new(APIError)
	resp, err := Nougat.Do(req, nil, apiError)
	if err != nil {
		t.Errorf("expected nil, got %v", err)
	}
	if resp.StatusCode != 200 {
		t.Errorf("expected %d, got %d", 200, resp.StatusCode)
	}
	expected := &APIError{}
	if !reflect.DeepEqual(expected, apiError) {
		t.Errorf("failureV should not be populated, expected %v, got %v", expected, apiError)
	}
}
// TestDo_noContent verifies that a 204 response populates neither the
// success model nor the failure value. Fixes two "exepcted" typos in
// the failure messages.
func TestDo_noContent(t *testing.T) {
	client, mux, server := testServer()
	defer server.Close()
	mux.HandleFunc("/nocontent", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(204)
	})
	Nougat := New().Client(client)
	req, _ := http.NewRequest("DELETE", "http://example.com/nocontent", nil)
	model := new(FakeModel)
	apiError := new(APIError)
	resp, err := Nougat.Do(req, model, apiError)
	if err != nil {
		t.Errorf("expected nil, got %v", err)
	}
	if resp.StatusCode != 204 {
		t.Errorf("expected %d, got %d", 204, resp.StatusCode)
	}
	expectedModel := &FakeModel{}
	if !reflect.DeepEqual(expectedModel, model) {
		t.Errorf("successV should not be populated, expected %v, got %v", expectedModel, model)
	}
	expectedAPIError := &APIError{}
	if !reflect.DeepEqual(expectedAPIError, apiError) {
		t.Errorf("failureV should not be populated, expected %v, got %v", expectedAPIError, apiError)
	}
}
// TestDo_onFailure verifies that a non-2xx JSON body is decoded into the
// failure value rather than the success model.
func TestDo_onFailure(t *testing.T) {
	const (
		expectedMessage     = "Invalid argument"
		expectedCode    int = 215
	)
	client, mux, server := testServer()
	defer server.Close()
	mux.HandleFunc("/failure", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(400)
		fmt.Fprintf(w, `{"message": "Invalid argument", "code": 215}`)
	})
	n := New().Client(client)
	req, _ := http.NewRequest("GET", "http://example.com/failure", nil)
	model := new(FakeModel)
	apiError := new(APIError)
	resp, err := n.Do(req, model, apiError)
	if err != nil {
		t.Errorf("expected nil, got %v", err)
	}
	if resp.StatusCode != 400 {
		t.Errorf("expected %d, got %d", 400, resp.StatusCode)
	}
	if apiError.Message != expectedMessage {
		t.Errorf("expected %s, got %s", expectedMessage, apiError.Message)
	}
	if apiError.Code != expectedCode {
		t.Errorf("expected %d, got %d", expectedCode, apiError.Code)
	}
}
// TestDo_onFailureWithNilValue verifies that Do tolerates a nil failure
// value and leaves the success model untouched on a non-2xx response.
// Fixes the "exepcted" typo in the failure message.
func TestDo_onFailureWithNilValue(t *testing.T) {
	client, mux, server := testServer()
	defer server.Close()
	mux.HandleFunc("/failure", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(420)
		fmt.Fprintf(w, `{"message": "Enhance your calm", "code": 88}`)
	})
	Nougat := New().Client(client)
	req, _ := http.NewRequest("GET", "http://example.com/failure", nil)
	model := new(FakeModel)
	resp, err := Nougat.Do(req, model, nil)
	if err != nil {
		t.Errorf("expected nil, got %v", err)
	}
	if resp.StatusCode != 420 {
		t.Errorf("expected %d, got %d", 420, resp.StatusCode)
	}
	expected := &FakeModel{}
	if !reflect.DeepEqual(expected, model) {
		t.Errorf("successV should not be populated, expected %v, got %v", expected, model)
	}
}
|
package parser
import (
"bufio"
"io"
"strings"
)
// Command types recognized by the parser.
const (
	LCommand = "L" // label pseudo-command: (LABEL)
	CCommand = "C" // compute command: dest=comp;jump
	ACommand = "A" // address command: @symbol
)
// Parser is Hack assembly parser. It reads the input one line at a
// time, stripping comments and whitespace.
type Parser struct {
	currentCommand string // last command produced by Advance; "" at end of input
	s *bufio.Scanner // line scanner over the input
	hasMoreCommand bool // false once the scanner is exhausted
}
// New creates a new Hack assembly parser reading from r.
func New(r io.Reader) *Parser {
	return &Parser{
		s:              bufio.NewScanner(r),
		hasMoreCommand: true,
	}
}
// Advance reads input until the next command or EOF, skipping blank
// lines and "//" comments; ln is incremented for every physical line
// consumed. At end of input the current command is cleared and
// HasMoreCommand becomes false. Scanner errors are now returned instead
// of being silently dropped (the previous code returned nil on error).
func (p *Parser) Advance(ln *int) error {
	for {
		if !p.s.Scan() {
			p.currentCommand = ""
			p.hasMoreCommand = false
			// Scan returns false on both EOF and error; Err is nil for
			// plain EOF, so the normal end-of-input path still returns nil.
			return p.s.Err()
		}
		l := p.s.Text()
		if err := p.s.Err(); err != nil {
			return err
		}
		*ln++
		l = strings.TrimSpace(l)
		commentPos := strings.Index(l, "//")
		if commentPos != -1 {
			l = l[:commentPos]
		}
		if len(l) == 0 {
			continue
		}
		p.currentCommand = strings.TrimSpace(l)
		break
	}
	return nil
}
// HasMoreCommand reports whether Advance may still produce a command;
// it becomes false once the underlying input is exhausted.
func (p *Parser) HasMoreCommand() bool {
	return p.hasMoreCommand
}
// CommandType returns the type of the current command: ACommand for
// "@symbol", LCommand for "(LABEL)", CCommand for anything else, and
// "" when there is no current command.
func (p *Parser) CommandType() string {
	switch {
	case p.currentCommand == "":
		return ""
	case strings.HasPrefix(p.currentCommand, "@"):
		return ACommand
	case strings.HasPrefix(p.currentCommand, "("):
		return LCommand
	default:
		return CCommand
	}
}
// Symbol returns the symbol part of the current command: the text after
// "@" for an A-command, or the text between "(" and ")" for an
// L-command. For any other command it returns "".
func (p *Parser) Symbol() string {
	switch p.CommandType() {
	case ACommand:
		// CommandType guarantees the "@" prefix, so slicing is safe.
		return p.currentCommand[1:]
	case LCommand:
		start := strings.Index(p.currentCommand, "(") + 1
		end := strings.Index(p.currentCommand, ")")
		return p.currentCommand[start:end]
	}
	return ""
}
// Dest returns the dest part of the current C-command (the text before
// "="), or "" when the command is not a C-command or has no dest.
func (p *Parser) Dest() string {
	if p.CommandType() != CCommand {
		return ""
	}
	if eq := strings.Index(p.currentCommand, "="); eq >= 0 {
		return p.currentCommand[:eq]
	}
	return ""
}
// Comp returns the comp part of the current C-command, handling the
// three forms "dest=comp;jump", "dest=comp" and "comp;jump". For
// non-C-commands it returns "" — guard added for consistency with Dest
// and Jump, which already check the command type.
func (p *Parser) Comp() string {
	if p.CommandType() != CCommand {
		return ""
	}
	equalPos := strings.Index(p.currentCommand, "=")
	semicolonPos := strings.Index(p.currentCommand, ";")
	// dest=comp;jump
	if equalPos != -1 && semicolonPos != -1 {
		return p.currentCommand[equalPos+1 : semicolonPos]
	}
	// dest=comp
	if equalPos != -1 {
		return p.currentCommand[equalPos+1:]
	}
	// comp;jump
	if semicolonPos != -1 {
		return p.currentCommand[:semicolonPos]
	}
	return ""
}
// Jump returns the jump part of the current C-command (the text after
// ";"), or "" when the command is not a C-command or has no jump.
func (p *Parser) Jump() string {
	if p.CommandType() != CCommand {
		return ""
	}
	if semi := strings.Index(p.currentCommand, ";"); semi >= 0 {
		return p.currentCommand[semi+1:]
	}
	return ""
}
|
import "strconv"
// reversal returns s with its runes in reverse order.
func reversal(s string) string {
	r := []rune(s)
	for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
		r[i], r[j] = r[j], r[i]
	}
	return string(r)
}

// reverse returns the decimal digits of x in reverse order, preserving
// the sign, and returns 0 when the reversed value does not fit in a
// signed 32-bit integer (LeetCode problem 7 semantics).
func reverse(x int) int {
	sign := 1
	if x < 0 {
		sign = -1
	}
	x *= sign
	str := strconv.Itoa(x)
	num, _ := strconv.Atoi(reversal(str))
	// 2147483647 == math.MaxInt32; the previous bound (2147483650)
	// wrongly accepted values just above the int32 range.
	if num > 2147483647 {
		return 0
	}
	return num * sign
}
|
package test
import (
"fmt"
"testing"
"time"
)
// TestGoRoutine spawns 1000 goroutines that each print in an infinite
// loop, then sleeps for a minute before letting the test end.
//
// NOTE(review): this looks like a scheduler/goroutine demo rather than a
// real test — it busy-loops on every core for a full minute and the
// goroutines are never stopped (they die only when the test process
// exits). Confirm it is intended to run only manually.
func TestGoRoutine(t *testing.T) {
	for i := 0; i < 1000; i++ {
		go func(i int) {
			for {
				fmt.Printf("Hello from goroutine %d\n", i)
			}
		}(i)
	}
	time.Sleep(time.Minute)
}
|
// Copyright (c) 2019 Chair of Applied Cryptography, Technische Universität
// Darmstadt, Germany. All rights reserved. This file is part of go-perun. Use
// of this source code is governed by a MIT-style license that can be found in
// the LICENSE file.
// +build race
package test
// Race tells whether the -race build tag is set. This file is compiled
// only under the "race" build constraint; presumably a sibling file
// defines Race = false for regular builds — confirm.
const Race = true
|
package repository
// repoOption is a functional option that mutates a repoOptions value.
type repoOption func(o *repoOptions)

// WithName sets the repository name.
func WithName(name string) repoOption {
	return func(o *repoOptions) {
		o.name = name
	}
}

// WithURL sets the repository URL.
func WithURL(url string) repoOption {
	return func(o *repoOptions) {
		o.url = url
	}
}

// WithUsername sets the username used when the repository requires
// authentication.
func WithUsername(username string) repoOption {
	return func(o *repoOptions) {
		o.username = username
	}
}

// WithPassword sets the password used when the repository requires
// authentication.
func WithPassword(password string) repoOption {
	return func(o *repoOptions) {
		o.password = password
	}
}

// WithRepoFile sets the repo file location.
func WithRepoFile(repoFile string) repoOption {
	return func(o *repoOptions) {
		o.repoFile = repoFile
	}
}

// WithRepoCache sets the repo cache file location.
func WithRepoCache(repoCache string) repoOption {
	return func(o *repoOptions) {
		o.repoCache = repoCache
	}
}

// WithInsecureSkipTLSverify controls whether TLS certificate
// verification is skipped.
func WithInsecureSkipTLSverify(skip bool) repoOption {
	return func(o *repoOptions) {
		o.insecureSkipTLSverify = skip
	}
}
|
package main
import (
"ddns/client"
"ddns/common"
"flag"
"log"
)
// Command-line flags. The user-facing descriptions are in Chinese and
// are left untouched; English summaries are in the trailing comments.
var (
	enforcement = flag.Bool("f", false, "强制检查 DNS 解析记录") // force-check DNS records even if the IP is unchanged
	moreTips = flag.Bool("mt", false, "显示更多的提示") // show extra hints
	version = flag.Bool("version", false, "查看当前版本并检查更新") // print version and check for updates
	initOption = flag.Bool("init", false, "初始化配置文件") // write template config files
	confPath = flag.String("conf_path", "", "手动设置配置文件路径(绝对路径)(有空格用双引号)") // custom absolute config path
)
// main drives one DDNS update cycle: it optionally (re)initializes the
// config files, loads the client configuration, and — when the detected
// IP differs from the recorded one (or -f is given) — updates the
// enabled DNS providers concurrently.
func main() {
	flag.Parse()
	// Load a custom configuration file path, if one was supplied.
	if *confPath != "" {
		client.ConfPath = *confPath
	}
	// -init: write template configuration files and exit.
	if *initOption {
		conf := client.ClientConf{}
		conf.APIUrl = common.DefaultAPIServer
		conf.LatestIP = "0:0:0:0:0:0:0:0"
		conf.IsIPv6 = true
		err := common.MarshalAndSave(conf, client.ConfPath+"/client.json")
		if err != nil {
			log.Fatal(err)
		}
		dpc := client.DNSPodConf{}
		err = common.MarshalAndSave(dpc, client.ConfPath+"/dnspod.json")
		if err != nil {
			log.Fatal(err)
		}
		ayc := client.AliyunConf{}
		err = common.MarshalAndSave(ayc, client.ConfPath+"/aliyun.json")
		if err != nil {
			log.Fatal(err)
		}
		cfc := client.CloudflareConf{}
		err = common.MarshalAndSave(cfc, client.ConfPath+"/cloudflare.json")
		if err != nil {
			log.Fatal(err)
		}
		return
	}
	// Load the client configuration.
	conf := client.ClientConf{}
	err := common.LoadAndUnmarshal(client.ConfPath+"/client.json", &conf)
	if err != nil {
		log.Fatal(err)
	}
	// -version: report the current version and check for updates.
	if *version {
		conf.CheckLatestVersion()
		return
	}
	// At least one DDNS service must be enabled.
	if !conf.Services.DNSPod && !conf.Services.Aliyun && !conf.Services.Cloudflare {
		log.Fatal("请打开客户端配置文件 " + client.ConfPath + "/client.json 启用需要使用的服务并重新启动")
	}
	// Determine the current IP address.
	acquiredIP, isIPv6, err := client.GetOwnIP(conf.APIUrl, conf.EnableNetworkCard, conf.NetworkCard)
	if err != nil {
		log.Fatal(err)
	}
	switch {
	case acquiredIP != conf.LatestIP || *enforcement:
		// Persist the newly observed IP before updating providers.
		if acquiredIP != conf.LatestIP {
			conf.LatestIP = acquiredIP
			conf.IsIPv6 = isIPv6
			err = common.MarshalAndSave(conf, client.ConfPath+"/client.json")
			if err != nil {
				log.Fatal(err)
			}
		}
		// Update each enabled provider concurrently; each channel is
		// signalled when the corresponding goroutine finishes.
		waitDNSPod := make(chan bool)
		waitAliyun := make(chan bool)
		waitCloudflare := make(chan bool)
		if conf.Services.DNSPod {
			go startDNSPod(acquiredIP, waitDNSPod)
		}
		if conf.Services.Aliyun {
			go startAliyun(acquiredIP, waitAliyun)
		}
		if conf.Services.Cloudflare {
			go startCloudflare(acquiredIP, waitCloudflare)
		}
		if conf.Services.DNSPod {
			<-waitDNSPod
		}
		if conf.Services.Aliyun {
			<-waitAliyun
		}
		if conf.Services.Cloudflare {
			<-waitCloudflare
		}
	case *moreTips:
		// IP unchanged: explain why the DNS records were not re-checked.
		log.Println("因为获取的 IP 和当前本地记录的 IP 相同,所以跳过检查解析记录\n" +
			"若需要强制检查 DNS 解析记录,请添加启动参数 -f")
	}
}
// startDNSPod updates the DNSPod record to ipAddr and signals done when
// finished. Errors are logged, not fatal, so the other services still
// complete.
func startDNSPod(ipAddr string, done chan bool) {
	if err := client.DNSPod(ipAddr); err != nil {
		log.Println(err)
	}
	done <- true
}
// startAliyun updates the Aliyun record to ipAddr and signals done when
// finished. Errors are logged, not fatal, so the other services still
// complete.
func startAliyun(ipAddr string, done chan bool) {
	if err := client.Aliyun(ipAddr); err != nil {
		log.Println(err)
	}
	done <- true
}
// startCloudflare updates the Cloudflare record to ipAddr and signals
// done when finished. Errors are logged with log.Println — previously
// this used log.Fatal, which calls os.Exit and would terminate the
// whole program before signalling done, aborting the other providers'
// updates; the sibling startDNSPod/startAliyun already use Println.
func startCloudflare(ipAddr string, done chan bool) {
	err := client.Cloudflare(ipAddr)
	if err != nil {
		log.Println(err)
	}
	done <- true
}
|
// Copyright (c) 2020 Xiaozhe Yao & AICAMP.CO.,LTD
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
package runtime
import (
"bufio"
"io"
"os/exec"
"reflect"
"sync"
"testing"
"time"
)
// The functions below are table-driven test scaffolds (gotests-style)
// whose tables are empty, so the loops currently execute nothing.
//
// NOTE(review): every scaffold copies tt.fields.mutex (a sync.RWMutex)
// by value into the Process literal — `go vet` flags this (copylocks).
// It is presumably benign while the table entries leave the mutex at
// its zero value, but worth confirming before adding cases.

// TestNewProcess is a generated scaffold for NewProcess; add table cases.
func TestNewProcess(t *testing.T) {
	type args struct {
		command string
		envs    string
		args    []string
	}
	tests := []struct {
		name string
		args args
		want *Process
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := NewProcess(tt.args.command, tt.args.envs, tt.args.args...); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("NewProcess() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestProcess_Start is a generated scaffold for Process.Start; add table cases.
func TestProcess_Start(t *testing.T) {
	type fields struct {
		proc               *exec.Cmd
		cancellationSignal chan uint8
		done               chan error
		returnCode         chan error
		started            bool
		stdOutRead         *io.PipeReader
		stdOutWrite        *io.PipeWriter
		inputWriter        *io.PipeWriter
		inputStreamSet     bool
		outputStreamSet    bool
		completed          bool
		timeout            time.Duration
		mutex              sync.RWMutex
	}
	tests := []struct {
		name   string
		fields fields
		want   *Process
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &Process{
				proc:               tt.fields.proc,
				cancellationSignal: tt.fields.cancellationSignal,
				done:               tt.fields.done,
				returnCode:         tt.fields.returnCode,
				started:            tt.fields.started,
				stdOutRead:         tt.fields.stdOutRead,
				stdOutWrite:        tt.fields.stdOutWrite,
				inputWriter:        tt.fields.inputWriter,
				inputStreamSet:     tt.fields.inputStreamSet,
				outputStreamSet:    tt.fields.outputStreamSet,
				completed:          tt.fields.completed,
				timeout:            tt.fields.timeout,
				mutex:              tt.fields.mutex,
			}
			if got := p.Start(); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Process.Start() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestProcess_SetTimeout is a generated scaffold for Process.SetTimeout; add table cases.
func TestProcess_SetTimeout(t *testing.T) {
	type fields struct {
		proc               *exec.Cmd
		cancellationSignal chan uint8
		done               chan error
		returnCode         chan error
		started            bool
		stdOutRead         *io.PipeReader
		stdOutWrite        *io.PipeWriter
		inputWriter        *io.PipeWriter
		inputStreamSet     bool
		outputStreamSet    bool
		completed          bool
		timeout            time.Duration
		mutex              sync.RWMutex
	}
	type args struct {
		d time.Duration
	}
	tests := []struct {
		name   string
		fields fields
		args   args
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &Process{
				proc:               tt.fields.proc,
				cancellationSignal: tt.fields.cancellationSignal,
				done:               tt.fields.done,
				returnCode:         tt.fields.returnCode,
				started:            tt.fields.started,
				stdOutRead:         tt.fields.stdOutRead,
				stdOutWrite:        tt.fields.stdOutWrite,
				inputWriter:        tt.fields.inputWriter,
				inputStreamSet:     tt.fields.inputStreamSet,
				outputStreamSet:    tt.fields.outputStreamSet,
				completed:          tt.fields.completed,
				timeout:            tt.fields.timeout,
				mutex:              tt.fields.mutex,
			}
			p.SetTimeout(tt.args.d)
		})
	}
}

// TestProcess_Wait is a generated scaffold for Process.Wait; add table cases.
func TestProcess_Wait(t *testing.T) {
	type fields struct {
		proc               *exec.Cmd
		cancellationSignal chan uint8
		done               chan error
		returnCode         chan error
		started            bool
		stdOutRead         *io.PipeReader
		stdOutWrite        *io.PipeWriter
		inputWriter        *io.PipeWriter
		inputStreamSet     bool
		outputStreamSet    bool
		completed          bool
		timeout            time.Duration
		mutex              sync.RWMutex
	}
	tests := []struct {
		name    string
		fields  fields
		wantErr bool
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &Process{
				proc:               tt.fields.proc,
				cancellationSignal: tt.fields.cancellationSignal,
				done:               tt.fields.done,
				returnCode:         tt.fields.returnCode,
				started:            tt.fields.started,
				stdOutRead:         tt.fields.stdOutRead,
				stdOutWrite:        tt.fields.stdOutWrite,
				inputWriter:        tt.fields.inputWriter,
				inputStreamSet:     tt.fields.inputStreamSet,
				outputStreamSet:    tt.fields.outputStreamSet,
				completed:          tt.fields.completed,
				timeout:            tt.fields.timeout,
				mutex:              tt.fields.mutex,
			}
			if err := p.Wait(); (err != nil) != tt.wantErr {
				t.Errorf("Process.Wait() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

// TestProcess_awaitOutput is a generated scaffold for Process.awaitOutput; add table cases.
func TestProcess_awaitOutput(t *testing.T) {
	type fields struct {
		proc               *exec.Cmd
		cancellationSignal chan uint8
		done               chan error
		returnCode         chan error
		started            bool
		stdOutRead         *io.PipeReader
		stdOutWrite        *io.PipeWriter
		inputWriter        *io.PipeWriter
		inputStreamSet     bool
		outputStreamSet    bool
		completed          bool
		timeout            time.Duration
		mutex              sync.RWMutex
	}
	tests := []struct {
		name   string
		fields fields
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &Process{
				proc:               tt.fields.proc,
				cancellationSignal: tt.fields.cancellationSignal,
				done:               tt.fields.done,
				returnCode:         tt.fields.returnCode,
				started:            tt.fields.started,
				stdOutRead:         tt.fields.stdOutRead,
				stdOutWrite:        tt.fields.stdOutWrite,
				inputWriter:        tt.fields.inputWriter,
				inputStreamSet:     tt.fields.inputStreamSet,
				outputStreamSet:    tt.fields.outputStreamSet,
				completed:          tt.fields.completed,
				timeout:            tt.fields.timeout,
				mutex:              tt.fields.mutex,
			}
			p.awaitOutput()
		})
	}
}

// TestProcess_Kill is a generated scaffold for Process.Kill; add table cases.
func TestProcess_Kill(t *testing.T) {
	type fields struct {
		proc               *exec.Cmd
		cancellationSignal chan uint8
		done               chan error
		returnCode         chan error
		started            bool
		stdOutRead         *io.PipeReader
		stdOutWrite        *io.PipeWriter
		inputWriter        *io.PipeWriter
		inputStreamSet     bool
		outputStreamSet    bool
		completed          bool
		timeout            time.Duration
		mutex              sync.RWMutex
	}
	tests := []struct {
		name   string
		fields fields
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &Process{
				proc:               tt.fields.proc,
				cancellationSignal: tt.fields.cancellationSignal,
				done:               tt.fields.done,
				returnCode:         tt.fields.returnCode,
				started:            tt.fields.started,
				stdOutRead:         tt.fields.stdOutRead,
				stdOutWrite:        tt.fields.stdOutWrite,
				inputWriter:        tt.fields.inputWriter,
				inputStreamSet:     tt.fields.inputStreamSet,
				outputStreamSet:    tt.fields.outputStreamSet,
				completed:          tt.fields.completed,
				timeout:            tt.fields.timeout,
				mutex:              tt.fields.mutex,
			}
			p.Kill()
		})
	}
}

// TestProcess_OpenInputStream is a generated scaffold for Process.OpenInputStream; add table cases.
func TestProcess_OpenInputStream(t *testing.T) {
	type fields struct {
		proc               *exec.Cmd
		cancellationSignal chan uint8
		done               chan error
		returnCode         chan error
		started            bool
		stdOutRead         *io.PipeReader
		stdOutWrite        *io.PipeWriter
		inputWriter        *io.PipeWriter
		inputStreamSet     bool
		outputStreamSet    bool
		completed          bool
		timeout            time.Duration
		mutex              sync.RWMutex
	}
	tests := []struct {
		name    string
		fields  fields
		want    io.WriteCloser
		wantErr bool
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &Process{
				proc:               tt.fields.proc,
				cancellationSignal: tt.fields.cancellationSignal,
				done:               tt.fields.done,
				returnCode:         tt.fields.returnCode,
				started:            tt.fields.started,
				stdOutRead:         tt.fields.stdOutRead,
				stdOutWrite:        tt.fields.stdOutWrite,
				inputWriter:        tt.fields.inputWriter,
				inputStreamSet:     tt.fields.inputStreamSet,
				outputStreamSet:    tt.fields.outputStreamSet,
				completed:          tt.fields.completed,
				timeout:            tt.fields.timeout,
				mutex:              tt.fields.mutex,
			}
			got, err := p.OpenInputStream()
			if (err != nil) != tt.wantErr {
				t.Errorf("Process.OpenInputStream() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Process.OpenInputStream() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestProcess_StreamOutput is a generated scaffold for Process.StreamOutput; add table cases.
func TestProcess_StreamOutput(t *testing.T) {
	type fields struct {
		proc               *exec.Cmd
		cancellationSignal chan uint8
		done               chan error
		returnCode         chan error
		started            bool
		stdOutRead         *io.PipeReader
		stdOutWrite        *io.PipeWriter
		inputWriter        *io.PipeWriter
		inputStreamSet     bool
		outputStreamSet    bool
		completed          bool
		timeout            time.Duration
		mutex              sync.RWMutex
	}
	tests := []struct {
		name   string
		fields fields
		want   *bufio.Scanner
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &Process{
				proc:               tt.fields.proc,
				cancellationSignal: tt.fields.cancellationSignal,
				done:               tt.fields.done,
				returnCode:         tt.fields.returnCode,
				started:            tt.fields.started,
				stdOutRead:         tt.fields.stdOutRead,
				stdOutWrite:        tt.fields.stdOutWrite,
				inputWriter:        tt.fields.inputWriter,
				inputStreamSet:     tt.fields.inputStreamSet,
				outputStreamSet:    tt.fields.outputStreamSet,
				completed:          tt.fields.completed,
				timeout:            tt.fields.timeout,
				mutex:              tt.fields.mutex,
			}
			if got := p.StreamOutput(); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Process.StreamOutput() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestProcess_finishTimeOutOrDie is a generated scaffold for Process.finishTimeOutOrDie; add table cases.
func TestProcess_finishTimeOutOrDie(t *testing.T) {
	type fields struct {
		proc               *exec.Cmd
		cancellationSignal chan uint8
		done               chan error
		returnCode         chan error
		started            bool
		stdOutRead         *io.PipeReader
		stdOutWrite        *io.PipeWriter
		inputWriter        *io.PipeWriter
		inputStreamSet     bool
		outputStreamSet    bool
		completed          bool
		timeout            time.Duration
		mutex              sync.RWMutex
	}
	tests := []struct {
		name   string
		fields fields
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &Process{
				proc:               tt.fields.proc,
				cancellationSignal: tt.fields.cancellationSignal,
				done:               tt.fields.done,
				returnCode:         tt.fields.returnCode,
				started:            tt.fields.started,
				stdOutRead:         tt.fields.stdOutRead,
				stdOutWrite:        tt.fields.stdOutWrite,
				inputWriter:        tt.fields.inputWriter,
				inputStreamSet:     tt.fields.inputStreamSet,
				outputStreamSet:    tt.fields.outputStreamSet,
				completed:          tt.fields.completed,
				timeout:            tt.fields.timeout,
				mutex:              tt.fields.mutex,
			}
			p.finishTimeOutOrDie()
		})
	}
}

// TestProcess_cleanup is a generated scaffold for Process.cleanup; add table cases.
func TestProcess_cleanup(t *testing.T) {
	type fields struct {
		proc               *exec.Cmd
		cancellationSignal chan uint8
		done               chan error
		returnCode         chan error
		started            bool
		stdOutRead         *io.PipeReader
		stdOutWrite        *io.PipeWriter
		inputWriter        *io.PipeWriter
		inputStreamSet     bool
		outputStreamSet    bool
		completed          bool
		timeout            time.Duration
		mutex              sync.RWMutex
	}
	tests := []struct {
		name   string
		fields fields
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &Process{
				proc:               tt.fields.proc,
				cancellationSignal: tt.fields.cancellationSignal,
				done:               tt.fields.done,
				returnCode:         tt.fields.returnCode,
				started:            tt.fields.started,
				stdOutRead:         tt.fields.stdOutRead,
				stdOutWrite:        tt.fields.stdOutWrite,
				inputWriter:        tt.fields.inputWriter,
				inputStreamSet:     tt.fields.inputStreamSet,
				outputStreamSet:    tt.fields.outputStreamSet,
				completed:          tt.fields.completed,
				timeout:            tt.fields.timeout,
				mutex:              tt.fields.mutex,
			}
			p.cleanup()
		})
	}
}
|
package prettyms
import (
"fmt"
"math"
"strconv"
"strings"
parsems "github.com/fernandoporazzi/parse-ms"
)
// result collects the formatted unit strings (years, days, hours, ...)
// produced while humanizing a duration, largest unit first.
type result struct {
	Values []string
}

// options holds the settings for the current Humanize call.
// NOTE(review): this is package-level mutable state — concurrent
// Humanize calls race on it, and a call without an Options argument
// reuses whatever the previous call set. Confirm this is intended.
var options Options
// newResult returns an empty result ready to collect formatted units.
func newResult() *result {
	r := result{}
	return &r
}
// pluralize appends "s" to the unit label l unless v is exactly 1.
func pluralize(l string, v float64) string {
	if v != 1 {
		return l + "s"
	}
	return l
}
// floorDecimals truncates value to decimalDigits decimal places and
// formats it with exactly that many digits after the point. A tiny
// epsilon compensates for binary floating-point representation error
// before flooring. Removed a redundant float64(value) conversion (value
// is already float64) and a no-op math.Round on an already-integral
// floor result.
func floorDecimals(value float64, decimalDigits int) string {
	const secondRoundingEpsilon = 0.0000001
	pow10 := math.Pow10(decimalDigits)
	floored := math.Floor(value*pow10+secondRoundingEpsilon) / pow10
	return fmt.Sprintf("%.[2]*[1]f", floored, decimalDigits)
}
// Append formats one unit (value plus its long/short label according to
// the package-level options) and pushes it onto r.Values. An optional
// third argument supplies a pre-formatted value string (used by Humanize
// to preserve decimal formatting).
//
// Fix: the caller-supplied value string (args[2]) was previously
// overwritten unconditionally, which made it dead and broke options
// such as KeepDecimalsOnWholeSeconds; the default formatting is now
// applied only when no custom string was given (matching the pretty-ms
// semantics this package ports).
func (r *result) Append(value float64, args ...string) {
	long := args[0]
	short := args[1]
	var valueString string
	if len(args) == 3 {
		valueString = args[2]
	}
	// Skip zero-valued units, except that colon notation always keeps
	// the minutes column and keeps zeros between non-zero columns.
	if (len(r.Values) == 0 || !options.ColonNotation) && value == 0 && !(options.ColonNotation && short == "m") {
		return
	}
	// Only derive a display string when the caller did not supply one.
	if valueString == "" {
		if value > 0 {
			valueString = strconv.FormatFloat(value, 'f', -1, 64)
			if options.Compact && options.Verbose {
				valueString = fmt.Sprintf("%d", int(value))
			}
		} else {
			valueString = "0"
		}
	}
	var prefix, suffix string
	if options.ColonNotation {
		// Colon notation: "h:mm:ss"-style, zero-padded after the first
		// column, no unit suffixes.
		if len(r.Values) > 0 {
			prefix = ":"
		} else {
			prefix = ""
		}
		suffix = ""
		var wholeDigits, minLength int
		if strings.Contains(valueString, ".") {
			s := strings.Split(valueString, ".")[0]
			wholeDigits = len(s)
		} else {
			wholeDigits = len(valueString)
		}
		if len(r.Values) > 0 {
			minLength = 2
		} else {
			minLength = 1
		}
		repeat := int(math.Max(0, float64(minLength-wholeDigits)))
		valueString = strings.Repeat("0", repeat) + valueString
	} else {
		prefix = ""
		if options.Verbose {
			suffix = " " + pluralize(long, value)
		} else {
			suffix = short
		}
	}
	r.Values = append(r.Values, prefix+valueString+suffix)
}
// Humanize takes a float64 as milliseconds and
// returns a human readable string
//
// Ex.: 144000000 -> 1d 16h 0m 0s
//
// NOTE(review): the package-level options variable keeps its value from
// the previous call when no Options argument is supplied — confirm the
// carry-over is intended.
func Humanize(m float64, o ...Options) string {
	r := newResult()
	if len(o) == 1 {
		options = o[0]
	}
	// Colon notation (e.g. 1:23:45) is incompatible with these flags.
	if options.ColonNotation {
		options.Compact = false
		options.FormatSubMilliseconds = false
		options.SeparateMilliseconds = false
		options.Verbose = false
	}
	if options.Compact {
		options.SecondsDecimalDigits = 0
		options.MillisecondsDecimalDigits = 0
	}
	parsed := parsems.Parse(m)
	// Whole units, largest first.
	r.Append(math.Trunc(parsed.Days/365), "year", "y")
	r.Append(float64(int(parsed.Days)%365), "day", "d")
	r.Append(parsed.Hours, "hour", "h")
	r.Append(parsed.Minutes, "minute", "m")
	if options.SeparateMilliseconds || options.FormatSubMilliseconds || m < 1000 {
		r.Append(parsed.Seconds, "second", "s")
		if options.FormatSubMilliseconds {
			r.Append(parsed.Milliseconds, "millisecond", "ms")
			r.Append(parsed.Microseconds, "microsecond", "µs")
			r.Append(parsed.Nanoseconds, "nanosecond", "ns")
		} else {
			// Collapse everything below one second into milliseconds.
			millisecondsAndBelow := parsed.Milliseconds + (parsed.Microseconds / 1000) + (parsed.Nanoseconds / 1000000)
			millisecondsDecimalDigits := options.MillisecondsDecimalDigits
			var roundedMiliseconds float64
			if millisecondsAndBelow >= 1 {
				roundedMiliseconds = math.Round(millisecondsAndBelow)
			} else {
				// Round sub-millisecond values up so they don't vanish.
				roundedMiliseconds = math.Ceil(millisecondsAndBelow)
			}
			var millisecondsString string
			if millisecondsDecimalDigits != 0 {
				millisecondsString = fmt.Sprintf("%.[2]*[1]f", millisecondsAndBelow, millisecondsDecimalDigits)
			} else {
				millisecondsString = fmt.Sprintf("%f", roundedMiliseconds)
			}
			pf, err := strconv.ParseFloat(millisecondsString, 64)
			if err != nil {
				panic(err)
			}
			r.Append(pf, "millisecond", "ms", millisecondsString)
		}
	} else {
		// Seconds with a configurable number of decimal digits.
		seconds := math.Mod(m/1000, 60)
		secondsDecimalDigits := 1
		if options.WithSecondsDecimalDigits {
			secondsDecimalDigits = options.SecondsDecimalDigits
		}
		secondsFixed := floorDecimals(seconds, secondsDecimalDigits)
		f, err := strconv.ParseFloat(secondsFixed, 64)
		if err != nil {
			panic(err)
		}
		secondsString := strconv.FormatFloat(f, 'f', -1, 64)
		if options.KeepDecimalsOnWholeSeconds {
			secondsString = secondsFixed
		}
		f, err = strconv.ParseFloat(secondsString, 64)
		if err != nil {
			panic(err)
		}
		r.Append(f, "second", "s", secondsString)
	}
	if len(r.Values) == 0 {
		if options.Verbose {
			return "0 milliseconds"
		}
		return "0ms"
	}
	if options.Compact {
		return r.Values[0]
	}
	if options.WithUnitCount {
		separator := " "
		if options.ColonNotation {
			separator = ""
		}
		// NOTE(review): this slice panics when UnitCount exceeds the
		// number of collected values — confirm callers bound UnitCount.
		max := int(math.Max(float64(options.UnitCount), 1))
		slice := r.Values[0:max]
		return strings.Join(slice[:], separator)
	}
	separator := " "
	if options.ColonNotation {
		separator = ""
	}
	return strings.Join(r.Values[:], separator)
}
|
package pool
import (
"context"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/mee6aas/kyle/internal/pkg/runtime"
runtimesConnected "github.com/mee6aas/kyle/internal/pkg/var/runtimes/connected"
runtimesPended "github.com/mee6aas/kyle/internal/pkg/var/runtimes/pended"
)
// spawn creates, starts, and registers a new runtime.
//
// The runtime is first placed in the pended collection; once it is
// released (signalled via onReleased) it is moved to the connected
// collection. The named return e lets the deferred cleanup cancel the
// runtime whenever this function exits with an error.
func spawn(ctx context.Context, conf runtime.Config) (e error) {
	r, e := runtime.NewRuntime(conf)
	if e != nil {
		e = errors.Wrap(e, "Failed to create a new runtime")
		return
	}
	// Cancel the runtime on any failure below this point.
	defer func() {
		if e == nil {
			return
		}
		r.Cancel()
	}()
	e = r.Start()
	if e != nil {
		e = errors.Wrap(e, "Failed to start the runtime")
		return
	}
	pid, ok := r.PID()
	if !ok {
		e = errors.New("Failed to get PID from the runtime")
		return
	}
	onReleased, ok := runtimesPended.Add(r)
	if !ok {
		e = errors.New("Failed to add the runtime to the collection for the pended runtimes")
		return
	}
	log.WithField("pid", pid).Debug("Runtime pended")
	// Wait until the runtime is released, or the caller gives up via ctx.
	select {
	case <-ctx.Done():
		e = ctx.Err()
		return
	case <-onReleased:
		if ok := runtimesConnected.Add(r); !ok {
			e = errors.New("Failed to add the runtime to the collection for the allocated runtimes")
			return
		}
		log.WithField("pid", pid).Debug("Runtime connected")
		return
	}
}
|
package gid
import (
"fmt"
"github.com/go-redis/redis"
"ism.com/common/rediscache"
)
// RedisChecker implements GidCheckerInterface using Redis keys with a
// short TTL to detect recently-seen GIDs.
type RedisChecker struct {
	GidCheckerInterface
}
// CheckGID reports whether gid has not been seen recently: on first
// sight it stores "GID:<gid>" in Redis with a TTL of 10 (units per
// rediscache.SetExpire — presumably seconds; confirm) and returns true;
// while the key exists it returns false. Redis errors other than a
// missing key cause a panic. Fixes the "RedidChecker" typo in the
// debug output.
func (gidChecker *RedisChecker) CheckGID(gid string) bool {
	println("RedisChecker ...")
	_, err := rediscache.Get(fmt.Sprint("GID:", gid))
	if err != nil {
		if err == redis.Nil {
			// Key absent: first time this GID is seen; reserve it.
			rediscache.SetExpire(fmt.Sprint("GID:", gid), gid, 10)
			println("return true")
			return true
		}
		panic(err.Error())
	}
	println("return false")
	return false
}
|
package treecmds
import (
"sync"
"github.com/Nv7-Github/Nv7Haven/eod/base"
"github.com/Nv7-Github/Nv7Haven/eod/types"
"github.com/bwmarrin/discordgo"
)
// TreeCmds bundles the dependencies of the tree commands: the shared
// per-server data map, its guarding lock, the shared base helpers, and
// the Discord session.
type TreeCmds struct {
	lock *sync.RWMutex // guards dat
	dat  map[string]types.ServerData
	base *base.Base
	dg   *discordgo.Session
}
// NewTreeCmds wires up a TreeCmds value with its shared data map,
// Discord session, base helpers, and data lock.
func NewTreeCmds(dat map[string]types.ServerData, dg *discordgo.Session, base *base.Base, lock *sync.RWMutex) *TreeCmds {
	cmds := &TreeCmds{
		dg:   dg,
		dat:  dat,
		base: base,
		lock: lock,
	}
	return cmds
}
|
package main
import (
"fmt"
"strconv"
"github.com/Cloud-Foundations/Dominator/lib/log"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
proto "github.com/Cloud-Foundations/Dominator/proto/logger"
)
// setDebugLevelSubcommand parses args[0] as a 16-bit debug level and
// applies it to the first dialled logger client.
func setDebugLevelSubcommand(args []string, logger log.DebugLogger) error {
	rawLevel, err := strconv.ParseInt(args[0], 10, 16)
	if err != nil {
		return fmt.Errorf("error parsing level: %s", err)
	}
	clients, _, err := dial(false)
	if err != nil {
		return err
	}
	err = setDebugLevel(clients[0], int16(rawLevel))
	if err != nil {
		return fmt.Errorf("error setting debug level: %s", err)
	}
	return nil
}
// setDebugLevel issues a Logger.SetDebugLevel RPC for the configured
// logger name (*loggerName) against the given client.
func setDebugLevel(client *srpc.Client, level int16) error {
	var reply proto.SetDebugLevelResponse
	request := proto.SetDebugLevelRequest{
		Name:  *loggerName,
		Level: level,
	}
	return client.RequestReply("Logger.SetDebugLevel", request, &reply)
}
|
// Copyright 2016 Martin Hebnes Pedersen (LA5NTA). All rights reserved.
// Use of this source code is governed by the MIT-license that can be
// found in the LICENSE file.
package cfg
import (
"encoding/json"
"fmt"
"net"
"strconv"
"strings"
"github.com/la5nta/wl2k-go/transport/ardop"
)
const (
	// PlaceholderMycall is the substring in connect aliases that is
	// replaced with the user's callsign.
	PlaceholderMycall = "{mycall}"
)
// AuxAddr is an auxiliary callsign, optionally paired with its own
// password. In JSON it is a single string: "ADDRESS" or
// "ADDRESS:PASSWORD" (see MarshalJSON/UnmarshalJSON).
type AuxAddr struct {
	Address string
	Password *string // nil when no per-address password is set
}
// MarshalJSON encodes the address as a JSON string, appending
// ":PASSWORD" when a password is set.
func (a AuxAddr) MarshalJSON() ([]byte, error) {
	if a.Password != nil {
		return json.Marshal(a.Address + ":" + *a.Password)
	}
	return json.Marshal(a.Address)
}
// UnmarshalJSON decodes a JSON string of the form "ADDRESS" or
// "ADDRESS:PASSWORD"; everything after the first ":" becomes the
// password.
func (a *AuxAddr) UnmarshalJSON(p []byte) error {
	var s string
	if err := json.Unmarshal(p, &s); err != nil {
		return err
	}
	if i := strings.Index(s, ":"); i >= 0 {
		pw := s[i+1:]
		a.Address, a.Password = s[:i], &pw
	} else {
		a.Address = s
	}
	return nil
}
// Config defines the JSON-serialized user configuration.
type Config struct {
	// This station's callsign.
	MyCall string `json:"mycall"`
	// Secure login password used when a secure login challenge is received.
	//
	// The user is prompted if this is undefined.
	SecureLoginPassword string `json:"secure_login_password"`
	// Auxiliary callsigns to fetch email on behalf of.
	//
	// Passwords can optionally be specified by appending :MYPASS (e.g. EMCOMM-1:MyPassw0rd).
	// If no password is specified, the SecureLoginPassword is used.
	AuxAddrs []AuxAddr `json:"auxiliary_addresses"`
	// Maidenhead grid square (e.g. JP20qe).
	Locator string `json:"locator"`
	// List of service codes for rmslist (defaults to PUBLIC)
	ServiceCodes []string `json:"service_codes"`
	// Default HTTP listen address (for web UI).
	//
	// Use ":8080" to listen on any device, port 8080.
	HTTPAddr string `json:"http_addr"`
	// Handshake comment lines sent to remote node on incoming connections.
	//
	// Example: ["QTH: Hagavik, Norway. Operator: Martin", "Rig: FT-897 with Signalink USB"]
	MOTD []string `json:"motd"`
	// Connect aliases
	//
	// Example: {"LA1B-10": "ax25:///LD5GU/LA1B-10", "LA1B": "ardop://LA3F?freq=5350"}
	// Any occurrence of the substring "{mycall}" will be replaced with user's callsign.
	ConnectAliases map[string]string `json:"connect_aliases"`
	// Methods to listen for incoming P2P connections by default.
	//
	// Example: ["ax25", "telnet", "ardop"]
	Listen []string `json:"listen"`
	// Hamlib rigs available (with reference name) for ptt and frequency control.
	HamlibRigs map[string]HamlibConfig `json:"hamlib_rigs"`
	AX25 AX25Config `json:"ax25"` // See AX25Config.
	AX25Linux AX25LinuxConfig `json:"ax25_linux"` // See AX25LinuxConfig.
	AGWPE AGWPEConfig `json:"agwpe"` // See AGWPEConfig.
	SerialTNC SerialTNCConfig `json:"serial-tnc"` // See SerialTNCConfig.
	Ardop ArdopConfig `json:"ardop"` // See ArdopConfig.
	Pactor PactorConfig `json:"pactor"` // See PactorConfig.
	Telnet TelnetConfig `json:"telnet"` // See TelnetConfig.
	VaraHF VaraConfig `json:"varahf"` // See VaraConfig.
	VaraFM VaraConfig `json:"varafm"` // See VaraConfig.
	// See GPSdConfig.
	GPSd GPSdConfig `json:"gpsd"`
	// Legacy support for old config files only. This field is deprecated!
	// Please use "Addr" field in GPSd config struct (GPSd.Addr)
	GPSdAddrLegacy string `json:"gpsd_addr,omitempty"`
	// Command schedule (cron-like syntax).
	//
	// Examples:
	//   # Connect to telnet once every hour
	//   "@hourly": "connect telnet"
	//
	//   # Change ardop listen frequency based on hour of day
	//   "00 10 * * *": "freq ardop:7350.000", # 40m from 10:00
	//   "00 18 * * *": "freq ardop:5347.000", # 60m from 18:00
	//   "00 22 * * *": "freq ardop:3602.000"  # 80m from 22:00
	Schedule map[string]string `json:"schedule"`
	// By default, Pat posts your callsign and running version to the Winlink CMS Web Services
	//
	// Set to true if you don't want your information sent.
	VersionReportingDisabled bool `json:"version_reporting_disabled"`
}
// HamlibConfig describes how to reach one hamlib-controlled rig.
type HamlibConfig struct {
	// The network type ("serial" or "tcp"). Use 'tcp' for rigctld.
	//
	// (For serial support: build with "-tags libhamlib".)
	Network string `json:"network,omitempty"`

	// The rig address.
	//
	// For tcp (rigctld): "address:port" (e.g. localhost:4532).
	// For serial: "/path/to/tty?model=&baudrate=" (e.g. /dev/ttyS0?model=123&baudrate=4800).
	Address string `json:"address,omitempty"`

	// The rig's VFO to control ("A" or "B"). If empty, the current active VFO is used.
	VFO string `json:"VFO"`
}

// ArdopConfig holds settings for the ARDOP TNC transport.
type ArdopConfig struct {
	// Network address of the Ardop TNC (e.g. localhost:8515).
	Addr string `json:"addr"`

	// Default/listen ARQ bandwidth (200/500/1000/2000 MAX/FORCED).
	ARQBandwidth ardop.Bandwidth `json:"arq_bandwidth"`

	// (optional) Reference name to the Hamlib rig to control frequency and ptt.
	Rig string `json:"rig"`

	// Set to true if hamlib should control PTT (SignaLink=false, most rigexpert=true).
	PTTControl bool `json:"ptt_ctrl"`

	// (optional) Send ID frame at a regular interval when the listener is active (unit is seconds)
	BeaconInterval int `json:"beacon_interval"`

	// Send FSK CW ID after an ID frame.
	CWID bool `json:"cwid_enabled"`
}
// VaraConfig holds settings for a VARA (HF or FM) modem.
type VaraConfig struct {
	// Network host of the VARA modem (defaults to localhost:8300).
	Addr string `json:"addr"`

	// Default/listen bandwidth (HF: 500/2300/2750 Hz).
	Bandwidth int `json:"bandwidth"`

	// (optional) Reference name to the Hamlib rig to control frequency and ptt.
	Rig string `json:"rig"`

	// Set to true if hamlib should control PTT (SignaLink=false, most rigexpert=true).
	PTTControl bool `json:"ptt_ctrl"`
}

// UnmarshalJSON implements VaraConfig JSON unmarshalling with support for
// the legacy format (separate "host"/"cmdPort"/"dataPort" fields), which is
// folded into the combined Addr field.
func (v *VaraConfig) UnmarshalJSON(b []byte) error {
	type plain VaraConfig
	aux := struct {
		plain
		Host     string `json:"host"`
		CmdPort  int    `json:"cmdPort"`
		DataPort int    `json:"dataPort"`
	}{}
	if err := json.Unmarshal(b, &aux); err != nil {
		return err
	}
	cfg := VaraConfig(aux.plain)
	if cfg.Addr == "" && aux.Host != "" {
		cfg.Addr = fmt.Sprintf("%s:%d", aux.Host, aux.CmdPort)
	}
	*v = cfg
	if !v.IsZero() && v.CmdPort() <= 0 {
		return fmt.Errorf("invalid addr format")
	}
	return nil
}

// IsZero reports whether the config is completely unset.
func (v VaraConfig) IsZero() bool { return v == (VaraConfig{}) }

// Host returns the host part of Addr ("" if Addr is malformed).
func (v VaraConfig) Host() string {
	h, _, _ := net.SplitHostPort(v.Addr)
	return h
}

// CmdPort returns the command port parsed from Addr (0 if malformed).
func (v VaraConfig) CmdPort() int {
	_, p, _ := net.SplitHostPort(v.Addr)
	n, _ := strconv.Atoi(p)
	return n
}

// DataPort is by VARA convention always the command port plus one.
func (v VaraConfig) DataPort() int { return v.CmdPort() + 1 }
// PactorConfig holds settings for a PACTOR modem connected over serial.
type PactorConfig struct {
	// Path/port to TNC device (e.g. /dev/ttyUSB0 or COM1).
	Path string `json:"path"`

	// Baudrate for the serial port (e.g. 57600).
	Baudrate int `json:"baudrate"`

	// (optional) Reference name to the Hamlib rig for frequency control.
	Rig string `json:"rig"`

	// (optional) Path to custom TNC initialization script.
	InitScript string `json:"custom_init_script"`
}

// TelnetConfig holds settings for the telnet-p2p listener.
type TelnetConfig struct {
	// Network address (and port) to listen for telnet-p2p connections (e.g. :8774).
	ListenAddr string `json:"listen_addr"`

	// Telnet-p2p password.
	Password string `json:"password"`
}

// SerialTNCConfig holds settings for a hardware TNC on a serial port.
type SerialTNCConfig struct {
	// Serial port (e.g. /dev/ttyUSB0 or COM1).
	Path string `json:"path"`

	// SerialBaud is the serial port's baudrate (e.g. 57600).
	SerialBaud int `json:"serial_baud"`

	// HBaud is the the packet connection's baudrate (1200 or 9600).
	HBaud int `json:"hbaud"`

	// Baudrate of the packet connection.
	// Deprecated: Use HBaud instead.
	BaudrateLegacy int `json:"baudrate,omitempty"`

	// Type of TNC (currently only 'kenwood').
	Type string `json:"type"`

	// (optional) Reference name to the Hamlib rig for frequency control.
	Rig string `json:"rig"`
}

// AGWPEConfig holds settings for an AGWPE-compatible TNC server.
type AGWPEConfig struct {
	// The TCP address of the TNC.
	Addr string `json:"addr"`

	// The AGWPE "radio port" (0-3).
	RadioPort int `json:"radio_port"`
}

// AX25Config selects and configures the AX.25 transport engine.
type AX25Config struct {
	// The AX.25 engine to be used.
	//
	// Valid options are:
	//   - linux
	//   - agwpe
	//   - serial-tnc
	Engine AX25Engine `json:"engine"`

	// (optional) Reference name to the Hamlib rig for frequency control.
	Rig string `json:"rig"`

	// DEPRECATED: See AX25Linux.Port.
	AXPort string `json:"port,omitempty"`

	// Optional beacon when listening for incoming packet-p2p connections.
	Beacon BeaconConfig `json:"beacon"`
}

// AX25LinuxConfig holds settings specific to the native Linux AX.25 stack.
type AX25LinuxConfig struct {
	// axport to use (as defined in /etc/ax25/axports). Only applicable to ax25 engine 'linux'.
	Port string `json:"port"`
}

// BeaconConfig describes a periodic AX.25 identification beacon.
type BeaconConfig struct {
	// Beacon interval in seconds (e.g. 3600 for once every 1 hour)
	Every int `json:"every"` // (seconds)

	// Beacon data/message
	Message string `json:"message"`

	// Beacon destination (e.g. IDENT)
	Destination string `json:"destination"`
}

// GPSdConfig describes how (and whether) to use a local GPSd daemon.
type GPSdConfig struct {
	// Enable GPSd proxy for HTTP (web GUI)
	//
	// Caution: Your GPS position will be accessible to any network device able to access Pat's HTTP interface.
	EnableHTTP bool `json:"enable_http"`

	// Allow Winlink forms to use GPSd for aquiring your position.
	//
	// Caution: Your current GPS position will be automatically injected, without your explicit consent, into forms requesting such information.
	AllowForms bool `json:"allow_forms"`

	// Use server time instead of timestamp provided by GPSd (e.g for older GPS device with week roll-over issue).
	UseServerTime bool `json:"use_server_time"`

	// Address and port of GPSd server (e.g. localhost:2947).
	Addr string `json:"addr"`
}
// DefaultConfig holds the defaults applied when settings are absent from
// the user's config file.
var DefaultConfig = Config{
	MOTD:         []string{"Open source Winlink client - getpat.io"},
	AuxAddrs:     []AuxAddr{},
	ServiceCodes: []string{"PUBLIC"},
	ConnectAliases: map[string]string{
		"telnet": "telnet://{mycall}:CMSTelnet@cms.winlink.org:8772/wl2k",
	},
	Listen:   []string{},
	HTTPAddr: "localhost:8080",
	AX25: AX25Config{
		Engine: DefaultAX25Engine(),
		Beacon: BeaconConfig{
			Every:       3600,
			Message:     "Winlink P2P",
			Destination: "IDENT",
		},
	},
	AX25Linux: AX25LinuxConfig{
		Port: "wl2k",
	},
	SerialTNC: SerialTNCConfig{
		Path:       "/dev/ttyUSB0",
		SerialBaud: 9600,
		HBaud:      1200,
		Type:       "Kenwood",
	},
	AGWPE: AGWPEConfig{
		Addr:      "localhost:8000",
		RadioPort: 0,
	},
	Ardop: ArdopConfig{
		Addr:         "localhost:8515",
		ARQBandwidth: ardop.Bandwidth500Max,
		CWID:         true,
	},
	Pactor: PactorConfig{
		Path:     "/dev/ttyUSB0",
		Baudrate: 57600,
	},
	Telnet: TelnetConfig{
		ListenAddr: ":8774",
		Password:   "",
	},
	VaraHF: VaraConfig{
		Addr:      "localhost:8300",
		Bandwidth: 2300,
	},
	VaraFM: VaraConfig{
		Addr: "localhost:8300",
	},
	GPSd: GPSdConfig{
		EnableHTTP:    false, // Default to false to help protect privacy of unknowing users (see github.com//issues/146)
		AllowForms:    false, // Default to false to help protect location privacy of unknowing users
		UseServerTime: false,
		Addr:          "localhost:2947", // Default listen address for GPSd
	},
	GPSdAddrLegacy: "",
	Schedule:       map[string]string{},
	HamlibRigs:     map[string]HamlibConfig{},
}
|
package main
import (
"fmt"
"golang.org/x/tour/wc"
"strings"
)
// Vertex is a latitude/longitude pair.
type Vertex struct {
	Lat, Lon float64
}

// m is a package-level name→Vertex map, allocated in main.
var m map[string]Vertex

func main() {
	// Maps must be made before use; writing to a nil map panics.
	m = make(map[string]Vertex)
	m["anitha"] = Vertex{45.34534, 56.4435}
	fmt.Println(m["anitha"])

	// Composite-literal initialization (element type may be elided).
	mm := map[string]Vertex{
		"abc": {45.3, 56.7},
		"def": {22.2, 9.3453456},
	}
	fmt.Println(mm)

	wc.Test(WordCount)
}
// WordCount returns how many times each whitespace-separated word occurs
// in s. An empty (or all-whitespace) input yields an empty map.
func WordCount(s string) map[string]int {
	counts := make(map[string]int)
	// range over Fields replaces the old C-style index loop.
	for _, w := range strings.Fields(s) {
		counts[w]++
	}
	return counts
}
|
package sheet_logic
import (
"hub/framework"
"hub/sheet_logic/sheet_logic_types"
)
// IntLesser is an IntComparator specialized to "<" on int64.
type IntLesser IntComparator

// NewIntLesser returns a named IntLesser node.
func NewIntLesser(name string) *IntLesser {
	cmp := NewIntComparator(
		name,
		sheet_logic_types.IntLesser,
		func(a, b int64) bool { return a < b })
	return (*IntLesser)(cmp)
}

// FloatLesser is a FloatComparator specialized to "<" on floats.
type FloatLesser FloatComparator

// NewFloatLesser returns a named FloatLesser node backed by framework.FloatLs.
func NewFloatLesser(name string) *FloatLesser {
	cmp := NewFloatComparator(
		name,
		sheet_logic_types.FloatLesser,
		framework.FloatLs)
	return (*FloatLesser)(cmp)
}

// StringLesser is a StringComparator specialized to lexicographic "<".
type StringLesser StringComparator

// NewStringLesser returns a named StringLesser node.
func NewStringLesser(name string) *StringLesser {
	cmp := NewStringComparator(
		name,
		sheet_logic_types.StringLesser,
		func(a, b string) bool { return a < b })
	return (*StringLesser)(cmp)
}
|
package blog
import (
"fmt"
"strconv"
"time"
"mingchuan.me/util"
"github.com/jinzhu/gorm"
"mingchuan.me/app/errors"
)
// CreatePost - create a new post
//
// Validates the input, then inserts the article together with an
// ArticleEventLog entry in a single transaction; on any failure the
// transaction is rolled back and a wrapped error is returned.
func (blog *BlogService) CreatePost(
	title string,
	content string,
	initialStatus ArticleStatus,
	initialPermission ArticlePermission) (Article, *errors.Error) {
	var newPost Article
	var err error

	// check validation
	if errV := blog.validateNewPost(title, content, initialStatus); errV != nil {
		return newPost, errV
	}

	// create transaction; roll back unless we reach the final commit.
	finish := false
	tx := blog.DB.Begin()
	defer func() {
		if !finish {
			tx.Rollback()
		}
	}()
	// error checking (Begin may have failed)
	if tx.Error != nil {
		return newPost, errors.SQLExecutionError(tx.Error)
	}

	// 1. create post
	newPost = Article{
		Title:      title,
		Content:    content,
		Status:     initialStatus,
		Permission: initialPermission,
	}
	if err = tx.Create(&newPost).Error; err != nil {
		return newPost, errors.SQLExecutionError(err)
	}

	// 2. create event log (publish event for Published, otherwise a draft save)
	var articleEvent ArticleEvent
	switch initialStatus {
	case Published:
		articleEvent = PublishPost
	default:
		articleEvent = SaveDraft
	}
	newEvent := ArticleEventLog{
		ArticleID:     newPost.ID,
		ArticleEvent:  articleEvent,
		NewStatus:     initialStatus,
		NewPermission: initialPermission,
		CreatedAt:     time.Now(),
	}
	if err = tx.Create(&newEvent).Error; err != nil {
		return newPost, errors.SQLExecutionError(err)
	}

	if err = tx.Commit().Error; err != nil {
		return newPost, errors.SQLExecutionError(err)
	}
	finish = true
	return newPost, nil
}
// UpdatePost - update content of a post
//
// Loads the article with the given ID, overwrites only the fields that are
// present (non-nil) in payload, and saves the result.
// NOTE(review): the same chained *gorm.DB value (carrying the id condition)
// is reused for Save — presumably fine with this gorm version, but verify
// the condition is not applied twice.
func (blog *BlogService) UpdatePost(ID uint32, payload *ArticleUpdatePayload) (*Article, *errors.Error) {
	// TODO
	db := blog.DB
	var article Article
	// Any First() failure (including genuine SQL errors) is reported as
	// not-found here.
	db = db.Where("id = ?", ID).First(&article)
	if db.Error != nil {
		return nil, errors.ArticleIDNotFoundError(ID)
	}
	// Partial update: nil pointer means "leave this field untouched".
	if payload.Content != nil {
		article.Content = *(payload.Content)
	}
	if payload.Title != nil {
		article.Title = *(payload.Title)
	}
	if payload.Status != nil {
		article.Status = *(payload.Status)
	}
	if payload.Permission != nil {
		article.Permission = *(payload.Permission)
	}
	db = db.Save(&article)
	if db.Error != nil {
		return nil, errors.SQLExecutionError(db.Error)
	}
	return &article, nil
}
// DeletePost - delete a post
// Notice, we will not delete a post row: the article is only marked
// Removed (soft delete), and an event log entry records the deletion.
func (blog *BlogService) DeletePost(ID uint32) *errors.Error {
	// validate if ID exists
	var article Article
	var err error
	// not found
	if err = blog.DB.Where("id = ?", ID).First(&article).Error; err != nil {
		return errors.ArticleIDNotFoundError(ID)
	}

	// Soft-delete inside a transaction; roll back unless commit is reached.
	finish := false
	tx := blog.DB.Begin()
	if tx.Error != nil {
		return errors.SQLExecutionError(tx.Error)
	}
	defer func() {
		if !finish {
			tx.Rollback()
		}
	}()

	// 1. Update post to DELETED
	if err = tx.Model(&article).Update("status", Removed).Error; err != nil {
		// Fix: wrap the actual update error (err), not tx.Error.
		return errors.SQLExecutionError(err)
	}

	// 2. and register event log
	newEvent := ArticleEventLog{
		ArticleID:     article.ID,
		ArticleEvent:  DeletePost,
		NewStatus:     Removed,
		NewPermission: article.Permission,
		CreatedAt:     time.Now(),
	}
	if err = tx.Create(&newEvent).Error; err != nil {
		return errors.SQLExecutionError(err)
	}

	// 3. commit
	if err = tx.Commit().Error; err != nil {
		return errors.SQLExecutionError(err)
	}
	finish = true
	return nil
}
// PublishPost - publish a drafted post
// NOTICE: only status = DRAFTED could do this operation
func (blog *BlogService) PublishPost(ID uint32) (*Article, *errors.Error) {
	// 0. validate if it exists and status = DRAFTED
	var article Article
	var err error
	// NOTE(review): a missing row is reported as SQLExecutionError here,
	// while DeletePost/UpdatePost use ArticleIDNotFoundError — confirm
	// whether callers depend on this before unifying.
	if err = blog.DB.Where("id = ?", ID).First(&article).Error; err != nil {
		return nil, errors.SQLExecutionError(err)
	}
	// status != DRAFTED
	if article.Status != Drafted {
		return nil, errors.NotDraftedPostError()
	}

	// 1. update status inside a transaction; roll back unless committed.
	finish := false
	tx := blog.DB.Begin()
	if tx.Error != nil {
		return nil, errors.SQLExecutionError(tx.Error)
	}
	defer func() {
		if !finish {
			tx.Rollback()
		}
	}()
	if err = tx.Model(&article).Update("status", Published).Error; err != nil {
		return nil, errors.SQLExecutionError(err)
	}

	// 2. and register event log
	newEvent := ArticleEventLog{
		ArticleID:     article.ID,
		ArticleEvent:  PublishPost,
		NewStatus:     Published,
		NewPermission: article.Permission,
		CreatedAt:     time.Now(),
	}
	if err = tx.Create(&newEvent).Error; err != nil {
		return nil, errors.SQLExecutionError(err)
	}

	// 3. commit
	if err = tx.Commit().Error; err != nil {
		return nil, errors.SQLExecutionError(err)
	}
	finish = true
	return &article, nil
}
// GetOnePost - get one post
//
// Looks the article up by primary key; any lookup failure is surfaced as a
// not-found error for the given ID.
func (blog *BlogService) GetOnePost(ID uint32) (Article, *errors.Error) {
	var article Article
	err := blog.DB.Find(&article, "id = ?", ID).Error
	if err != nil {
		return article, errors.ArticleIDNotFoundError(ID)
	}
	return article, nil
}
// GetOnePublicPost - get one post (public to be seen)
//
// Like GetOnePost, but additionally requires the article to be Public and
// Published; otherwise an access error is returned.
func (blog *BlogService) GetOnePublicPost(ID uint32) (*Article, *errors.Error) {
	var article Article
	if err := blog.DB.Find(&article, "id = ?", ID).Error; err != nil {
		return nil, errors.ArticleIDNotFoundError(ID)
	}
	// Visibility check: must be both public and published.
	visible := article.Permission == Public && article.Status == Published
	if !visible {
		return nil, errors.NoAccessToArticleIDError(ID)
	}
	return &article, nil
}
// ListAllPostsByPage - list all posts for admin panel, including DRAFTED, PUBLISHED, REMOVED
// This is usually used in admin panel
// returns: page (the effective page number, int64), articles, error
func (blog *BlogService) ListAllPostsByPage(page *int64, limit *int64) (int64, []Article, *errors.Error) {
	// TODO add sorting
	var articles []Article
	var err error
	db := blog.DB

	// Effective limit: caller value capped at DefaultLimitValue; default
	// when absent.
	var aLimit int64
	if limit != nil {
		aLimit = *limit
		if aLimit > DefaultLimitValue {
			aLimit = DefaultLimitValue
		}
	} else {
		aLimit = DefaultLimitValue
	}

	// Effective page: clamp to >= 1 so the offset can never go negative
	// (the old code produced a negative OFFSET for page <= 0).
	var aPage int64 = 1
	if page != nil && *page > 0 {
		aPage = *page
	}
	aOffset := (aPage - 1) * aLimit

	pageLimit := &ArticlePageLimit{
		Limit:  aLimit,
		Offset: aOffset,
	}
	// newest first
	odesc := "desc"
	articleOrder := &ArticleOrder{
		UpdatedAt: &odesc,
	}

	articles, err = listPostsWithFilters(db, nil, pageLimit, articleOrder)
	if err != nil {
		return 0, articles, errors.SQLExecutionError(err)
	}
	return aPage, articles, nil
}
// ListPublicPostsByCursor - list posts by cursor
// used in blog main page to show all blog posts
//
// Returns (hasMore, nextCursor, articles, error). One extra row is fetched
// to determine whether more pages exist.
func (blog *BlogService) ListPublicPostsByCursor(cursor *string, limit *int64) (bool, string, []Article, *errors.Error) {
	db := blog.DB
	var articles []Article

	// ensure only published & public posts will be shown
	filter := &ArticleFilter{
		Status:     util.String(Published),
		Permission: util.String(Public),
	}

	// get actual limit
	var aLimit int64
	if limit != nil {
		aLimit = *limit
		if aLimit > DefaultLimitValue {
			aLimit = DefaultLimitValue
		}
	} else {
		aLimit = DefaultLimitValue
	}

	// get cursor (exclusive upper bound on id)
	if cursor != nil {
		db = db.Where("id < ?", *cursor)
	}
	// order id = desc
	db = db.Order("id desc")
	// fetch one extra row to decide whether hasMore is true
	db = db.Limit(aLimit + 1)

	articles, err := listPostsWithFilters(db, filter, nil, nil)
	if err != nil {
		return false, "", articles, errors.SQLExecutionError(err)
	}

	// hasMore iff the sentinel extra row was actually returned
	hasMore := len(articles) >= int(aLimit+1)
	var fCursor string
	if hasMore {
		// drop the sentinel row
		articles = articles[:len(articles)-1]
		// Guard: with aLimit == 0 the trimmed slice is empty; the old code
		// would panic indexing articles[-1] here.
		if n := len(articles); n > 0 {
			fCursor = strconv.Itoa(int(articles[n-1].ID))
		}
	}
	return hasMore, fCursor, articles, nil
}
// common functions

/**
Rule #1: len(title) < maxTitleLength
Rule #1.1: len(title) > 0
Rule #2: len(content) < maxContentLength
Rule #3: state in [Published, Drafted]
*/
func (blog *BlogService) validateNewPost(
	title string,
	content string,
	status ArticleStatus) *errors.Error {
	// Rule #1: title length, counted in runes, must stay below the cap.
	maxTitleChars := blog.Validations.MaxTitleChars
	if uint32(len([]rune(title))) >= maxTitleChars {
		return errors.NewPostValidationError(
			fmt.Sprintf("The length of title has exceed limit:%d chars", maxTitleChars),
		)
	}
	// Rule #1.1: title must not be empty.
	// (go vet: Sprintf with no args removed — pass the constant directly.)
	if len([]rune(title)) == 0 {
		return errors.NewPostValidationError("Title cannot be null")
	}
	// Rule #2: content length, counted in runes, must stay below the cap.
	maxContentChars := blog.Validations.MaxArticleChars
	if uint32(len([]rune(content))) >= maxContentChars {
		return errors.NewPostValidationError(
			fmt.Sprintf("The length of content has exceed limit:%d chars", maxContentChars),
		)
	}
	// Rule #3: a new post may only start as Published or Drafted.
	valid := false
	for _, s := range []ArticleStatus{Published, Drafted} {
		if s == status {
			valid = true
			break
		}
	}
	if !valid {
		return errors.NewPostValidationError(
			fmt.Sprintf("Status %s not available", status),
		)
	}
	// all passed
	return nil
}
/**
List Posts with filters
We only support two kinds of filters: status fitler and page filter
1) status filter: PUBLISHED | DRAFTED | REMOVED
2) page filter: page, limit
*/
func listPostsWithFilters(
	// db - gorm database connection.
	// for parsing more conditions, add `Where()` clause on this method
	// before passing into the function.
	db *gorm.DB,
	// filter - article status and permission filter (nil = no filtering)
	filter *ArticleFilter,
	// pageLimit - optional LIMIT/OFFSET pair (nil = unbounded)
	pageLimit *ArticlePageLimit,
	// order - optional created_at/updated_at sort directions (nil = default)
	order *ArticleOrder,
) (articles []Article, err error) {
	// add status filter; nil sub-fields mean "don't constrain this column".
	if filter != nil {
		if filter.Status != nil {
			db = db.Where("status = ?", *(filter.Status))
		}
		if filter.Permission != nil {
			db = db.Where("permission = ?", *(filter.Permission))
		}
	}
	// add page filter
	if pageLimit != nil {
		db = db.Limit(pageLimit.Limit).Offset(pageLimit.Offset)
	}
	// Order values are raw SQL direction strings ("asc"/"desc") interpolated
	// into the ORDER BY clause.
	if order != nil {
		if order.CreatedAt != nil {
			db = db.Order(fmt.Sprintf("created_at %s", *order.CreatedAt))
		}
		if order.UpdatedAt != nil {
			db = db.Order(fmt.Sprintf("updated_at %s", *order.UpdatedAt))
		}
	}
	// do search; on failure return a nil slice alongside the error.
	if err = db.Find(&articles).Error; err != nil {
		articles = nil
	}
	return
}
|
package main
import (
"fmt"
"bufio"
"log"
"net"
)
// main listens on :8080 and spawns one handler goroutine per accepted
// connection. Fix: the locals were named `error`, shadowing the predeclared
// error type — renamed to the idiomatic `err`.
func main() {
	listener, err := net.Listen("tcp", ":8080")
	if err != nil {
		log.Fatalln(err)
	}
	defer listener.Close()
	fmt.Println("Awaiting request...")
	for {
		conn, err := listener.Accept()
		if err != nil {
			// A failed accept is not fatal; log and keep serving.
			log.Println(err)
			continue
		}
		go handle(conn)
	}
}
func handle(connection net.Conn) {
scanner := bufio.NewScanner(connection)
for scanner.Scan() {
line := scanner.Text()
fmt.Println(line)
}
defer connection.Close()
fmt.Println("Code made it here...")
}
|
package s3urlupload
import (
"errors"
"io"
"net/http"
"strings"
"sync"
"github.com/rlmcpherson/s3gof3r"
)
// Config carries the AWS credentials, target bucket, and tuning knobs for
// the uploader.
type Config struct {
	AwsAccessKey  string
	AwsSecretKey  string
	AwsS3Endpoint string
	AwsS3Bucket   string
	// Workers is the number of concurrent upload workers (0 means 1).
	Workers uint
	// GetFilePath maps a source URL to the S3 object key. If nil, the last
	// path segment of the URL is used.
	GetFilePath func(string) string
}
// Init builds an S3UrlUpload from c, applying defaults: at least one
// worker, and an object-key function that takes the last path segment of
// the source URL.
func Init(c Config) *S3UrlUpload {
	if c.Workers == 0 {
		c.Workers = 1
	}
	if c.GetFilePath == nil {
		c.GetFilePath = func(url string) string {
			// Everything after the last "/" (the whole URL if none).
			return url[strings.LastIndex(url, "/")+1:]
		}
	}
	keys := s3gof3r.Keys{
		AccessKey: c.AwsAccessKey,
		SecretKey: c.AwsSecretKey,
	}
	bucket := s3gof3r.New(c.AwsS3Endpoint, keys).Bucket(c.AwsS3Bucket)
	return &S3UrlUpload{
		config: &c,
		bucket: bucket,
	}
}
// download is the intermediate result of fetching one URL: either an open
// Body to stream to S3, or an Error explaining why the fetch failed.
type download struct {
	Body  io.ReadCloser
	URL   string
	Name  string
	Error error
}

// Result reports the outcome of uploading one URL (Error is nil on success).
type Result struct {
	URL   string
	Error error
}

// S3UrlUpload is the uploader handle returned by Init.
type S3UrlUpload struct {
	config *Config
	bucket *s3gof3r.Bucket
}
// Run uploads the given file URLs to S3 using a bounded pool of workers
// and returns a channel yielding exactly one Result per input URL.
func (s3uu *S3UrlUpload) Run(files ...string) <-chan Result {
	n := len(files)
	jobs := make(chan string, n)
	results := make(chan Result, n)

	workerCount := int(s3uu.config.Workers)
	var wg sync.WaitGroup
	wg.Add(workerCount)
	for i := 0; i < workerCount; i++ {
		go s3uu.worker(jobs, results, &wg)
	}

	// The jobs channel is buffered for all files, so this never blocks.
	for _, file := range files {
		jobs <- file
	}
	close(jobs)

	// Close results once every worker has drained the job queue.
	go func() {
		wg.Wait()
		close(results)
	}()
	return results
}
// download fetches url asynchronously and emits a single download value on
// the returned channel; on failure the value carries the error instead of
// a body. Fixes: compare the numeric StatusCode (the Status text differs
// between HTTP/1.x and HTTP/2), and close the response body on the non-200
// path so the connection is not leaked.
func (s3uu *S3UrlUpload) download(url string) <-chan download {
	out := make(chan download)
	go func() {
		defer close(out)
		d := download{
			URL:  url,
			Name: s3uu.config.GetFilePath(url),
		}
		resp, err := http.Get(url)
		if err != nil {
			d.Error = err
			out <- d
			return
		}
		if resp.StatusCode != http.StatusOK {
			resp.Body.Close() // avoid leaking the connection on error
			d.Error = errors.New("Status was not OK")
			out <- d
			return
		}
		// Ownership of resp.Body passes to the consumer (upload closes it).
		d.Body = resp.Body
		out <- d
	}()
	return out
}
// upload consumes downloads from in and streams each body to S3, emitting
// one Result per input. Fix: the PutWriter's Close error is now returned —
// with s3gof3r, Close is what completes the multipart upload, so the old
// `defer w.Close()` silently reported success on a failed upload.
func (s3uu *S3UrlUpload) upload(in <-chan download) <-chan Result {
	var doUpload = func(d download) error {
		defer d.Body.Close()
		w, err := s3uu.bucket.PutWriter(d.Name, nil, nil)
		if err != nil {
			return err
		}
		if _, err = io.Copy(w, d.Body); err != nil {
			w.Close() // best-effort cleanup; the Copy error takes precedence
			return err
		}
		return w.Close()
	}
	out := make(chan Result)
	go func() {
		defer close(out)
		for d := range in {
			result := Result{URL: d.URL}
			if d.Error != nil {
				// The download already failed; pass its error through.
				result.Error = d.Error
				out <- result
				continue
			}
			result.Error = doUpload(d)
			out <- result
		}
	}()
	return out
}
// worker consumes URLs from jobs, runs the download→upload pipeline for
// each one, and forwards the outcome to results. It signals wg once jobs
// is closed and drained.
func (s3uu *S3UrlUpload) worker(jobs <-chan string, results chan<- Result, wg *sync.WaitGroup) {
	for j := range jobs {
		// Each pipeline stage emits exactly one value, so this receive
		// always completes.
		result := <-s3uu.upload(s3uu.download(j))
		results <- result
	}
	wg.Done()
}
|
package tx
import (
"testing"
)
//var gateway *EtherGateway

// makeGateway constructs a fresh EtherGateway for each test case.
func makeGateway() *EtherGateway {
	return NewEtherGateway()
}

// Test_loadConfigs only checks that gateway construction does not panic.
func Test_loadConfigs(t *testing.T) {
	makeGateway()
}

// Test_NetworkType expects a non-empty network id from the node.
func Test_NetworkType(t *testing.T) {
	g := makeGateway()
	if networkID := g.GetCurrentNetworkType(); networkID == "" {
		t.Errorf("Empty network id!!")
	}
}

// Test_GasPrice expects a non-zero gas price from the node.
func Test_GasPrice(t *testing.T) {
	g := makeGateway()
	if gasPrice := g.GetCurrentGasPrice(); gasPrice == 0 {
		t.Errorf("Invalid GasPrice")
	}
}
// Test_TxEther is disabled: the transfer call is commented out so tests do
// not spend real ether. Fix: binding the gateway to `g` while the only use
// is commented out is a "declared and not used" compile error — call
// makeGateway without binding the result.
func Test_TxEther(t *testing.T) {
	makeGateway()
	//g.txEther(0.004, "0x7aaf4fcB8AB215f719E6DaBb7f0a192b7024dD21")
}
|
package tal
import (
"github.com/cpusoft/goutil/belogs"
"github.com/cpusoft/goutil/ginserver"
"github.com/cpusoft/goutil/jsonutil"
"github.com/gin-gonic/gin"
model "rpstir2-model"
)
// GetTals is the HTTP handler that returns all configured TALs as JSON,
// or a failure response when they cannot be loaded.
func GetTals(c *gin.Context) {
	belogs.Info("GetTals")

	talModels, err := getTals()
	if err != nil {
		belogs.Error("GetTals(): getTals fail:", err)
		ginserver.ResponseFail(c, err, "")
		return
	}
	belogs.Debug("GetTals(): getTals, talModels:", jsonutil.MarshalJson(talModels))

	ginserver.ResponseOk(c, model.TalModelsResponse{TalModels: talModels})
}
|
// _Channels_ are the pipes that connect concurrent goroutines. You can send
// values into a channel from one goroutine and receive them in another.
package main
import "fmt"
func main() {
	// make(chan T) builds a new, unbuffered channel carrying values of
	// type T — here, strings.
	messages := make(chan string)

	// Send with the `channel <-` syntax: a fresh goroutine pushes "ping"
	// into the messages channel created above.
	go func() { messages <- "ping" }()

	// Receive with the `<-channel` syntax. Sends and receives block until
	// both sides are ready, so this line waits for the "ping" message —
	// no extra synchronization is needed before printing it.
	fmt.Println(<-messages)
}
|
package request
// LevelPrice is the request payload binding a rate card to per-level prices.
type LevelPrice struct {
	// RateCardID identifies the rate card being priced.
	RateCardID int `json:"rate_card_id"`
	// Prices lists the price assigned to each level.
	Prices []struct {
		LevelID int     `json:"level_id"`
		Price   float64 `json:"price"`
	} `json:"prices"`
}
|
package cgroups
import (
"bufio"
"fmt"
"os"
"path/filepath"
"strings"
rspec "github.com/opencontainers/runtime-spec/specs-go"
)
var (
	// AbsCgroupPath is absolute path for container's cgroup mount
	AbsCgroupPath = "/cgrouptest"
	// RelCgroupPath is relative path for container's cgroup mount
	RelCgroupPath = "testdir/cgrouptest/container"
)

// Cgroup represents interfaces for cgroup validation. Each method reads
// the named resource-controller settings for the given pid under cgPath.
type Cgroup interface {
	GetBlockIOData(pid int, cgPath string) (*rspec.LinuxBlockIO, error)
	GetCPUData(pid int, cgPath string) (*rspec.LinuxCPU, error)
	GetDevicesData(pid int, cgPath string) ([]rspec.LinuxDeviceCgroup, error)
	GetHugepageLimitData(pid int, cgPath string) ([]rspec.LinuxHugepageLimit, error)
	GetMemoryData(pid int, cgPath string) (*rspec.LinuxMemory, error)
	GetNetworkData(pid int, cgPath string) (*rspec.LinuxNetwork, error)
	GetPidsData(pid int, cgPath string) (*rspec.LinuxPids, error)
}
// FindCgroup gets cgroup root mountpoint
//
// It scans /proc/self/mountinfo for a cgroup (v1) mount and returns a
// CgroupV1 rooted at the parent of that mountpoint; cgroup2 is detected
// but not yet supported. Fix: guard against malformed lines — the old code
// would slice with index -1 (no " - " separator) or read fields[4] out of
// range.
func FindCgroup() (Cgroup, error) {
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return nil, err
	}
	defer f.Close()

	cgroupv2 := false
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		text := scanner.Text()
		fields := strings.Split(text, " ")
		// Safe as mountinfo encodes mountpoints with spaces as \040.
		index := strings.Index(text, " - ")
		// Skip malformed lines lacking the optional-fields separator or
		// the mountpoint field (fields[4]).
		if index < 0 || len(fields) < 5 {
			continue
		}
		postSeparatorFields := strings.Split(text[index+3:], " ")
		// This is an error as we can't detect if the mount is for "cgroup"
		if len(postSeparatorFields) == 0 {
			return nil, fmt.Errorf("Found no fields post '-' in %q", text)
		}
		switch postSeparatorFields[0] {
		case "cgroup":
			// No need to parse the rest of the postSeparatorFields
			return &CgroupV1{MountPath: filepath.Dir(fields[4])}, nil
		case "cgroup2":
			//TODO cgroupv2 unimplemented
			cgroupv2 = true
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	if cgroupv2 {
		return nil, fmt.Errorf("cgroupv2 is not supported yet")
	}
	return nil, fmt.Errorf("cgroup is not found")
}
// GetSubsystemPath gets path of subsystem
func GetSubsystemPath(pid int, subsystem string) (string, error) {
contents, err := os.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid))
if err != nil {
return "", err
}
parts := strings.Split(strings.TrimSpace(string(contents)), "\n")
for _, part := range parts {
elem := strings.SplitN(part, ":", 3)
if len(elem) < 3 {
continue
}
subelems := strings.Split(elem[1], ",")
for _, subelem := range subelems {
if subelem == subsystem {
return elem[2], nil
}
}
}
return "", fmt.Errorf("subsystem %s not found", subsystem)
}
|
package report
import (
"github.com/gin-gonic/gin"
"github.com/naggie/dsnet"
)
// conf holds the dsnet configuration shared with the report handlers.
// NOTE(review): package-level mutable state, set once via Routes.
var conf *dsnet.DsnetConfig

// Routes sets up endpoints for peers.
func Routes(router *gin.RouterGroup, dsConf *dsnet.DsnetConfig) {
	conf = dsConf
	router.GET("", handleGetReport)
}

// handleGetReport responds with the current report as JSON (HTTP 200).
func handleGetReport(c *gin.Context) {
	newReport := getReport()
	c.JSON(200, newReport)
}
|
package main
import (
"fmt"
"sort"
)
// main reads the budget and the keyboard/USB price lists from stdin and
// prints the most expensive affordable keyboard+USB combination (-1 when
// none fits). The search is extracted into maxSpend so it is testable
// separately from the I/O.
func main() {
	capital, keyboards, usbs, price := 0, 0, 0, 0
	var kBrands, uBrands []int
	fmt.Scanf("%d", &capital)
	fmt.Scanf("%d", &keyboards)
	fmt.Scanf("%d", &usbs)
	for i := 0; i < keyboards; i++ {
		fmt.Scanf("%d", &price)
		kBrands = append(kBrands, price)
	}
	for i := 0; i < usbs; i++ {
		fmt.Scanf("%d", &price)
		uBrands = append(uBrands, price)
	}
	fmt.Println(maxSpend(capital, kBrands, uBrands))
}

// maxSpend returns the highest keyboard+USB price sum that does not exceed
// capital, or -1 when no pair is affordable. The inputs are not mutated.
func maxSpend(capital int, keyboards, usbs []int) int {
	kb := append([]int(nil), keyboards...)
	ub := append([]int(nil), usbs...)
	// Two-pointer sweep: keyboards descend while USBs ascend, so as the
	// keyboard gets cheaper the affordable USB window (index j) only grows.
	sort.Sort(sort.Reverse(sort.IntSlice(kb)))
	sort.Ints(ub)
	best := -1
	j := 0
	for i := 0; i < len(kb); i++ {
		for ; j < len(ub); j++ {
			sum := kb[i] + ub[j]
			if sum > capital {
				break
			}
			if sum > best {
				best = sum
			}
		}
	}
	return best
}
|
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sqlite3
import (
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/keyserver/storage/shared"
)
// NewDatabase opens the SQLite key server database at dataSourceName and
// wires up its one-time-key, device-key, and key-change tables.
func NewDatabase(dataSourceName string) (*shared.Database, error) {
	cs, err := sqlutil.ParseFileURI(dataSourceName)
	if err != nil {
		return nil, err
	}
	db, err := sqlutil.Open(sqlutil.SQLiteDriverName(), cs, nil)
	if err != nil {
		return nil, err
	}
	otk, err := NewSqliteOneTimeKeysTable(db)
	if err != nil {
		return nil, err
	}
	dk, err := NewSqliteDeviceKeysTable(db)
	if err != nil {
		return nil, err
	}
	kc, err := NewSqliteKeyChangesTable(db)
	if err != nil {
		return nil, err
	}
	d := &shared.Database{
		DB:               db,
		OneTimeKeysTable: otk,
		DeviceKeysTable:  dk,
		KeyChangesTable:  kc,
	}
	return d, nil
}
|
package command
import (
"jabrok.com/global"
"jabrok.com/service"
"log"
)
// GetCommand dispatches the first CLI argument to its registered command;
// when no argument is given it logs the list of available commands.
func GetCommand() {
	var cmdstring string
	args := global.GetArgs()
	if len(args) > 0 {
		cmdstring = args[0]
		cmd, ok := commandMap()[cmdstring]
		if !ok {
			// Unknown command: abort immediately.
			log.Fatal("No command")
		}
		cmd()
	}
	if cmdstring == "" {
		var listOfCommands string
		for name := range commandMap() {
			listOfCommands += name + "\n"
		}
		log.Println("No command found")
		log.Print("Commands: " + listOfCommands)
	}
}
// commandMap returns the registry of CLI command names to their handlers.
func commandMap() map[string]func() {
	return map[string]func(){
		"start": commandStart,
	}
}

// commandStart launches the service in a background goroutine and flags
// the server as started.
func commandStart() {
	go service.Start()
	global.IsServerStarted = true
}
|
package testcontainers
import (
"context"
"fmt"
"net/http"
"testing"
"time"
"database/sql"
// Import mysql into the scope of this package (required)
_ "github.com/go-sql-driver/mysql"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
"github.com/testcontainers/testcontainers-go/wait"
)
// TestContainerRemoving starts an nginx container with a known name,
// verifies the provider sees it, removes it, and verifies it is gone.
// Fixes: typo in the final failure message ("should nod exist"), and the
// local that shadowed the imported `client` package is renamed.
func TestContainerRemoving(t *testing.T) {
	ctx := context.Background()
	dockerClient, err := client.NewEnvClient()
	if err != nil {
		t.Fatal(err)
	}
	dockerClient.NegotiateAPIVersion(ctx)

	// Unique name so runs do not collide with leftover containers.
	creationName := fmt.Sprintf("%s_%d", "test_container", time.Now().Unix())
	nginxA, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image: "nginx",
			ExposedPorts: []string{
				"80/tcp",
			},
			SkipReaper: true,
			Name:       creationName,
		},
		Started: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	dockerProvider, err := ProviderDocker.GetProvider()
	if err != nil {
		t.Fatal(err)
	}
	containerExists, err := dockerProvider.ContainerExists(ctx, creationName)
	if err != nil {
		t.Fatal(err)
	}
	if !containerExists {
		t.Fatalf("Container '%s' should exist", creationName)
	}
	err = nginxA.Remove(ctx, true)
	if err != nil {
		t.Fatal(err)
	}
	containerExists, err = dockerProvider.ContainerExists(ctx, creationName)
	if err != nil {
		t.Fatal(err)
	}
	if containerExists {
		t.Fatalf("Container '%s' should not exist", creationName)
	}
}
// also slightly checks IsRunning() method (that uses State() method inside)
func TestContainerStopping(t *testing.T) {
	ctx := context.Background()
	client, err := client.NewEnvClient()
	if err != nil {
		t.Fatal(err)
	}
	client.NegotiateAPIVersion(ctx)
	// Start an nginx container outside the reaper's control, confirm it
	// reports running, stop it, and confirm it reports stopped.
	nginxA, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image: "nginx",
			ExposedPorts: []string{
				"80/tcp",
			},
			SkipReaper: true,
		},
		Started: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	isRunning, err := nginxA.IsRunning(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if !isRunning {
		t.Fatal("The container should be in running state")
	}
	err = nginxA.Stop(ctx)
	if err != nil {
		t.Fatal(err)
	}
	isRunning, err = nginxA.IsRunning(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if isRunning {
		t.Fatal("The container should not be in running state")
	}
}

// TestContainerReturnItsContainerID checks that a created (not started)
// container still exposes a non-empty container ID.
func TestContainerReturnItsContainerID(t *testing.T) {
	ctx := context.Background()
	nginxA, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image: "nginx",
			ExposedPorts: []string{
				"80/tcp",
			},
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	defer nginxA.Terminate(ctx)
	if nginxA.GetContainerID() == "" {
		t.Errorf("expected a containerID but we got an empty string.")
	}
}
// TestContainerStartsWithoutTheReaper starts a container with SkipReaper set
// and asserts that no reaper container is running afterwards.
func TestContainerStartsWithoutTheReaper(t *testing.T) {
	t.Skip("need to use the sessionID")
	ctx := context.Background()

	cli, err := client.NewEnvClient()
	if err != nil {
		t.Fatal(err)
	}
	cli.NegotiateAPIVersion(ctx)

	_, err = GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image:        "nginx",
			ExposedPorts: []string{"80/tcp"},
			SkipReaper:   true,
		},
		Started: true,
	})
	if err != nil {
		t.Fatal(err)
	}

	// List containers carrying the reaper label; there must be none.
	f, err := filters.FromJSON(fmt.Sprintf(`{"label":{"%s":true}}`, TestcontainerLabelIsReaper))
	if err != nil {
		t.Fatal(err)
	}
	reapers, err := cli.ContainerList(ctx, types.ContainerListOptions{Filters: f})
	if err != nil {
		t.Fatal(err)
	}
	if len(reapers) != 0 {
		t.Fatal("expected zero reaper running.")
	}
}
// TestContainerStartsWithTheReaper starts a container with the default
// settings and asserts that a reaper container is running alongside it.
func TestContainerStartsWithTheReaper(t *testing.T) {
	ctx := context.Background()

	cli, err := client.NewEnvClient()
	if err != nil {
		t.Fatal(err)
	}
	cli.NegotiateAPIVersion(ctx)

	_, err = GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image:        "nginx",
			ExposedPorts: []string{"80/tcp"},
		},
		Started: true,
	})
	if err != nil {
		t.Fatal(err)
	}

	// List containers carrying the reaper label; at least one must exist.
	f, err := filters.FromJSON(fmt.Sprintf(`{"label":{"%s":true}}`, TestcontainerLabelIsReaper))
	if err != nil {
		t.Fatal(err)
	}
	reapers, err := cli.ContainerList(ctx, types.ContainerListOptions{Filters: f})
	if err != nil {
		t.Fatal(err)
	}
	if len(reapers) == 0 {
		t.Fatal("expected at least one reaper to be running.")
	}
}
// TestContainerTerminationWithReaper checks that a container started with the
// reaper enabled (the default) cannot be inspected after Terminate.
func TestContainerTerminationWithReaper(t *testing.T) {
	ctx := context.Background()
	client, err := client.NewEnvClient()
	if err != nil {
		t.Fatal(err)
	}
	client.NegotiateAPIVersion(ctx)
	nginxA, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image: "nginx",
			ExposedPorts: []string{
				"80/tcp",
			},
		},
		Started: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	containerID := nginxA.GetContainerID()
	resp, err := client.ContainerInspect(ctx, containerID)
	if err != nil {
		t.Fatal(err)
	}
	// Fixed: `!= true` comparison and "shoud" typo in the failure message.
	if !resp.State.Running {
		t.Fatal("The container should be in running state")
	}
	err = nginxA.Terminate(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// After termination the daemon must no longer know the container.
	_, err = client.ContainerInspect(ctx, containerID)
	if err == nil {
		t.Fatal("expected error from container inspect.")
	}
}
// TestContainerTerminationWithoutReaper checks that a container started with
// SkipReaper cannot be inspected after Terminate.
func TestContainerTerminationWithoutReaper(t *testing.T) {
	ctx := context.Background()
	client, err := client.NewEnvClient()
	if err != nil {
		t.Fatal(err)
	}
	client.NegotiateAPIVersion(ctx)
	nginxA, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image: "nginx",
			ExposedPorts: []string{
				"80/tcp",
			},
			SkipReaper: true,
		},
		Started: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	containerID := nginxA.GetContainerID()
	resp, err := client.ContainerInspect(ctx, containerID)
	if err != nil {
		t.Fatal(err)
	}
	// Fixed: `!= true` comparison and "shoud" typo in the failure message.
	if !resp.State.Running {
		t.Fatal("The container should be in running state")
	}
	err = nginxA.Terminate(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// After termination the daemon must no longer know the container.
	_, err = client.ContainerInspect(ctx, containerID)
	if err == nil {
		t.Fatal("expected error from container inspect.")
	}
}
// TestTwoContainersExposingTheSamePort starts two nginx containers that both
// expose container port 80 and checks each answers HTTP on its mapped endpoint.
func TestTwoContainersExposingTheSamePort(t *testing.T) {
	ctx := context.Background()

	first, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image:        "nginx",
			ExposedPorts: []string{"80/tcp"},
		},
		Started: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := first.Terminate(ctx); err != nil {
			t.Fatal(err)
		}
	}()

	second, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image:        "nginx",
			ExposedPorts: []string{"80/tcp"},
		},
		Started: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := second.Terminate(ctx); err != nil {
			t.Fatal(err)
		}
	}()

	hostA, err := first.Host(ctx)
	if err != nil {
		t.Fatal(err)
	}
	portA, err := first.MappedPort(ctx, "80/tcp")
	if err != nil {
		t.Fatal(err)
	}
	resp, err := http.Get(fmt.Sprintf("http://%s:%s", hostA, portA.Port()))
	if err != nil {
		t.Fatal(err)
	}
	if resp.StatusCode != http.StatusOK {
		t.Errorf("Expected status code %d. Got %d.", http.StatusOK, resp.StatusCode)
	}

	hostB, err := second.Host(ctx)
	if err != nil {
		t.Fatal(err)
	}
	portB, err := second.MappedPort(ctx, "80")
	if err != nil {
		t.Fatal(err)
	}
	resp, err = http.Get(fmt.Sprintf("http://%s:%s", hostB, portB.Port()))
	if err != nil {
		t.Fatal(err)
	}
	if resp.StatusCode != http.StatusOK {
		t.Errorf("Expected status code %d. Got %d.", http.StatusOK, resp.StatusCode)
	}
}
// TestContainerCreation starts an nginx container and checks its mapped HTTP
// endpoint answers 200 OK.
func TestContainerCreation(t *testing.T) {
	ctx := context.Background()
	nginxPort := "80/tcp"
	expectedImageName := "nginx"

	container, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image:        expectedImageName,
			ExposedPorts: []string{nginxPort},
		},
		Started: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := container.Terminate(ctx); err != nil {
			t.Fatal(err)
		}
	}()

	host, err := container.Host(ctx)
	if err != nil {
		t.Fatal(err)
	}
	mappedPort, err := container.MappedPort(ctx, "80")
	if err != nil {
		t.Fatal(err)
	}
	resp, err := http.Get(fmt.Sprintf("http://%s:%s", host, mappedPort.Port()))
	if err != nil {
		t.Fatal(err)
	}
	if resp.StatusCode != http.StatusOK {
		t.Errorf("Expected status code %d. Got %d.", http.StatusOK, resp.StatusCode)
	}
}
// TestContainerCreationWithName starts an nginx container under an explicit
// name and checks both the reported name and the HTTP endpoint.
func TestContainerCreationWithName(t *testing.T) {
	ctx := context.Background()
	creationName := fmt.Sprintf("%s_%d", "test_container", time.Now().Unix())
	expectedName := "/" + creationName // inspect adds '/' in the beginning
	nginxPort := "80/tcp"

	container, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image:        "nginx",
			ExposedPorts: []string{nginxPort},
			Name:         creationName,
		},
		Started: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := container.Terminate(ctx); err != nil {
			t.Fatal(err)
		}
	}()

	actualName, err := container.Name(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if actualName != expectedName {
		t.Errorf("Expected container name '%s'. Got '%s'.", expectedName, actualName)
	}

	host, err := container.Host(ctx)
	if err != nil {
		t.Fatal(err)
	}
	mappedPort, err := container.MappedPort(ctx, "80")
	if err != nil {
		t.Fatal(err)
	}
	resp, err := http.Get(fmt.Sprintf("http://%s:%s", host, mappedPort.Port()))
	if err != nil {
		t.Fatal(err)
	}
	if resp.StatusCode != http.StatusOK {
		t.Errorf("Expected status code %d. Got %d.", http.StatusOK, resp.StatusCode)
	}
}
// TestContainerCreationAndWaitForListeningPortLongEnough verifies the
// listening-port wait strategy against an image that delays opening its port.
func TestContainerCreationAndWaitForListeningPortLongEnough(t *testing.T) {
	t.Skip("Wait needs to be fixed")
	ctx := context.Background()
	nginxPort := "80/tcp"

	// delayed-nginx will wait 2s before opening port
	container, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image:        "menedev/delayed-nginx:1.15.2",
			ExposedPorts: []string{nginxPort},
			WaitingFor:   wait.ForListeningPort("80"), // default startupTimeout is 60s
		},
		Started: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := container.Terminate(ctx); err != nil {
			t.Fatal(err)
		}
	}()

	origin, err := container.PortEndpoint(ctx, nat.Port(nginxPort), "http")
	if err != nil {
		t.Fatal(err)
	}
	resp, err := http.Get(origin)
	if err != nil {
		t.Fatal(err)
	}
	if resp.StatusCode != http.StatusOK {
		t.Errorf("Expected status code %d. Got %d.", http.StatusOK, resp.StatusCode)
	}
}
// TestContainerCreationTimesOut expects startup to fail when the wait
// strategy's timeout is shorter than the image's startup delay.
func TestContainerCreationTimesOut(t *testing.T) {
	t.Skip("Wait needs to be fixed")
	ctx := context.Background()

	// delayed-nginx will wait 2s before opening port
	container, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image:        "menedev/delayed-nginx:1.15.2",
			ExposedPorts: []string{"80/tcp"},
			WaitingFor:   wait.ForListeningPort("80").WithStartupTimeout(1 * time.Second),
		},
		Started: true,
	})
	if err == nil {
		t.Error("Expected timeout")
		if terr := container.Terminate(ctx); terr != nil {
			t.Fatal(terr)
		}
	}
}
// TestContainerRespondsWithHttp200ForIndex waits for HTTP readiness and then
// asserts that "/" answers 200.
func TestContainerRespondsWithHttp200ForIndex(t *testing.T) {
	t.Skip("Wait needs to be fixed")
	ctx := context.Background()
	nginxPort := "80/tcp"
	// delayed-nginx will wait 2s before opening port
	nginxC, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image: "nginx",
			ExposedPorts: []string{
				nginxPort,
			},
			WaitingFor: wait.ForHTTP("/"),
		},
		Started: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err := nginxC.Terminate(ctx)
		if err != nil {
			t.Fatal(err)
		}
	}()
	origin, err := nginxC.PortEndpoint(ctx, nat.Port(nginxPort), "http")
	if err != nil {
		t.Fatal(err)
	}
	resp, err := http.Get(origin)
	if err != nil {
		// Fixed: was t.Error, which fell through and dereferenced a nil resp.
		t.Fatal(err)
	}
	if resp.StatusCode != http.StatusOK {
		t.Errorf("Expected status code %d. Got %d.", http.StatusOK, resp.StatusCode)
	}
}
// TestContainerRespondsWithHttp404ForNonExistingPage waits on a custom status
// matcher and asserts a missing page answers 404.
func TestContainerRespondsWithHttp404ForNonExistingPage(t *testing.T) {
	t.Skip("Wait needs to be fixed")
	ctx := context.Background()
	nginxPort := "80/tcp"
	// delayed-nginx will wait 2s before opening port
	nginxC, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image: "nginx",
			ExposedPorts: []string{
				nginxPort,
			},
			WaitingFor: wait.ForHTTP("/nonExistingPage").WithStatusCodeMatcher(func(status int) bool {
				return status == http.StatusNotFound
			}),
		},
		Started: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	rC, err := RunContainer(ctx, "nginx", RequestContainer{
		ExportedPort: []string{
			nginxPort,
		},
		WaitingFor: wait.ForHTTP("/nonExistingPage").WithStatusCodeMatcher(func(status int) bool {
			return status == http.StatusNotFound
		}),
	})
	if rC != nil {
		t.Fatal(rC)
	}
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err := nginxC.Terminate(ctx)
		if err != nil {
			t.Fatal(err)
		}
	}()
	origin, err := nginxC.PortEndpoint(ctx, nat.Port(nginxPort), "http")
	if err != nil {
		t.Fatal(err)
	}
	resp, err := http.Get(origin + "/nonExistingPage")
	if err != nil {
		// Fixed: was t.Error, which fell through and dereferenced a nil resp.
		t.Fatal(err)
	}
	if resp.StatusCode != http.StatusNotFound {
		t.Errorf("Expected status code %d. Got %d.", http.StatusNotFound, resp.StatusCode)
	}
}
// TestContainerCreationTimesOutWithHttp expects the HTTP wait strategy to
// time out before the delayed image opens its port.
func TestContainerCreationTimesOutWithHttp(t *testing.T) {
	t.Skip("Wait needs to be fixed")
	ctx := context.Background()
	// delayed-nginx will wait 2s before opening port
	nginxC, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: ContainerRequest{
			Image: "menedev/delayed-nginx:1.15.2",
			ExposedPorts: []string{
				"80/tcp",
			},
			WaitingFor: wait.ForHTTP("/").WithStartupTimeout(1 * time.Second),
		},
		Started: true,
	})
	defer func() {
		// Fixed: on the expected timeout the container may be nil, and the
		// unconditional Terminate panicked instead of letting the test pass.
		if nginxC == nil {
			return
		}
		err := nginxC.Terminate(ctx)
		if err != nil {
			t.Fatal(err)
		}
	}()
	if err == nil {
		t.Error("Expected timeout")
	}
}
// TestContainerCreationWaitsForLogContextTimeout expects startup to fail when
// the awaited log line does not appear within the 1s timeout.
func TestContainerCreationWaitsForLogContextTimeout(t *testing.T) {
	ctx := context.Background()
	req := ContainerRequest{
		Image:        "mysql:latest",
		ExposedPorts: []string{"3306/tcp", "33060/tcp"},
		Env: map[string]string{
			"MYSQL_ROOT_PASSWORD": "password",
			"MYSQL_DATABASE":      "database",
		},
		WaitingFor: wait.ForLog("test context timeout").WithStartupTimeout(1 * time.Second),
	}
	if _, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	}); err == nil {
		t.Error("Expected timeout")
	}
}
// TestContainerCreationWaitsForLog waits for MySQL's ready log line, then
// connects and runs a DDL statement against the container.
func TestContainerCreationWaitsForLog(t *testing.T) {
	ctx := context.Background()
	req := ContainerRequest{
		Image:        "mysql:latest",
		ExposedPorts: []string{"3306/tcp", "33060/tcp"},
		Env: map[string]string{
			"MYSQL_ROOT_PASSWORD": "password",
			"MYSQL_DATABASE":      "database",
		},
		WaitingFor: wait.ForLog("port: 3306 MySQL Community Server - GPL"),
	}
	// Fixed: the creation error was discarded, so a failed start dereferenced
	// a nil container below.
	mysqlC, err := GenericContainer(ctx, GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		t.Log("terminating container")
		if err := mysqlC.Terminate(ctx); err != nil {
			t.Fatal(err)
		}
	}()
	host, _ := mysqlC.Host(ctx)
	p, _ := mysqlC.MappedPort(ctx, "3306/tcp")
	port := p.Int()
	connectionString := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=skip-verify",
		"root", "password", host, port, "database")
	db, err := sql.Open("mysql", connectionString)
	if err != nil {
		// Fixed: the error was unchecked and db.Close was deferred before the
		// check, risking a Close on an invalid handle.
		t.Fatalf("error opening db connection: %+v\n", err)
	}
	defer db.Close()
	if err = db.Ping(); err != nil {
		t.Errorf("error pinging db: %+v\n", err)
	}
	_, err = db.Exec("CREATE TABLE IF NOT EXISTS a_table ( \n" +
		" `col_1` VARCHAR(128) NOT NULL, \n" +
		" `col_2` VARCHAR(128) NOT NULL, \n" +
		" PRIMARY KEY (`col_1`, `col_2`) \n" +
		")")
	if err != nil {
		t.Errorf("error creating table: %+v\n", err)
	}
}
|
package main
import (
"github.com/lxn/walk"
"sort"
)
// Condom is one row of the client table displayed in the UI.
type Condom struct {
	Machineid string // unique identifier of the client machine (translated from the original Chinese comment)
	IP        string
	Name      string
	Whoami    string
	Remark    string
	Terrace   string
	Time      string
	checked   bool // whether this row's checkbox is ticked
}
// CondomModel adapts a slice of *Condom rows to walk's table model, adding
// checkbox state and column sorting.
type CondomModel struct {
	walk.TableModelBase
	walk.SorterBase
	sortColumn int            // column currently sorted on
	sortOrder  walk.SortOrder // ascending or descending
	items      []*Condom
}
// RowCount returns the number of rows in the table.
func (m *CondomModel) RowCount() int {
	return len(m.items)
}
// Value returns the cell value for the given row and column. Column order is:
// Terrace, IP, Remark, Whoami, Name, Time, Machineid.
func (m *CondomModel) Value(row, col int) interface{} {
	item := m.items[row]
	switch col {
	case 0:
		return item.Terrace
	case 1:
		return item.IP
	case 2:
		return item.Remark
	case 3:
		return item.Whoami
	case 4:
		return item.Name
	case 5:
		return item.Time
	case 6:
		return item.Machineid
	}
	// A column outside 0..6 is a programming error.
	panic("unexpected col")
}
// Checked reports whether the given row's checkbox is ticked.
func (m *CondomModel) Checked(row int) bool {
	return m.items[row].checked
}
// SetChecked updates the given row's checkbox state; it never fails.
func (m *CondomModel) SetChecked(row int, checked bool) error {
	m.items[row].checked = checked
	return nil
}
// Sort stably re-sorts the items on the given column and order, then
// delegates to SorterBase.Sort.
func (m *CondomModel) Sort(col int, order walk.SortOrder) error {
	m.sortColumn, m.sortOrder = col, order
	sort.Stable(m)
	return m.SorterBase.Sort(col, order)
}
// Len implements sort.Interface.
func (m *CondomModel) Len() int {
	return len(m.items)
}
// Less implements sort.Interface, comparing two rows on the current sort
// column and inverting the result for descending order.
func (m *CondomModel) Less(i, j int) bool {
	a, b := m.items[i], m.items[j]
	// c applies the configured sort direction to an ascending comparison.
	c := func(ls bool) bool {
		if m.sortOrder == walk.SortAscending {
			return ls
		}
		return !ls
	}
	switch m.sortColumn {
	case 0:
		return c(a.Terrace < b.Terrace)
	case 1:
		return c(a.IP < b.IP)
	case 2:
		return c(a.Remark < b.Remark)
	case 3:
		return c(a.Whoami < b.Whoami)
	case 4:
		return c(a.Name < b.Name)
	case 5:
		return c(a.Time < b.Time)
	case 6:
		return c(a.Machineid < b.Machineid)
	}
	// sortColumn is only ever set to a valid column index via Sort.
	panic("unreachable")
}
// Swap implements sort.Interface.
func (m *CondomModel) Swap(i, j int) {
	m.items[i], m.items[j] = m.items[j], m.items[i]
}
// NewCondomModel returns an empty, ready-to-use model.
func NewCondomModel() *CondomModel {
	return &CondomModel{}
}
|
package src
import (
"net/url"
)
// Mkdir will make specified folder on Yandex Disk
func (c *Client) Mkdir(remotePath string) (int, string, error) {
	// Only the single target folder is created, not every element of the path.
	query := url.Values{}
	query.Add("path", remotePath)
	resource := "/v1/disk/resources?" + query.Encode()

	// Join RootAddr and the resource path with exactly one slash between them.
	sep := ""
	if resource[:1] != "/" {
		sep = "/"
	}
	return c.PerformMkdir(RootAddr + sep + resource)
}
|
package main
import "math"
// Controller types selectable via Tuner.ControlType.
const (
	controlPI  = 0 // proportional-integral controller
	controlPID = 1 // proportional-integral-derivative controller
)
// Tuner holds the state of a PID auto-tuning run. ku and pu feed the gain
// formulas in GetKp/GetKi/GetKd; nLookback and sampleTime are configured via
// SetLookbackSec. NOTE(review): the remaining fields are not used within this
// file, so their exact semantics are inferred from their names — confirm
// against the full implementation.
type Tuner struct {
	input, output, outputStart, NoiseBand, OStep, lastTime, refVal, absMin, absMax, kp, ki, kd, ku, pu float64
	ControlType, lookbackSec, nLookback, sampleTime                                                    int
	running                                                                                            bool
}
// Cancel stops the tuning run by clearing the running flag.
func (t *Tuner) Cancel() {
	t.running = false
}
// Finish restores the output to its starting value at the end of a run.
func (t *Tuner) Finish() {
	t.output = t.outputStart
	// NOTE(review): this expression is computed and discarded. It looks like a
	// relay-tuning ultimate-gain calculation whose result was meant to be
	// stored (e.g. in t.ku) — confirm against the reference implementation.
	_ = 4 * (2 * t.OStep) / ((t.absMax - t.absMin) * math.Pi)
}
// GetKp returns the proportional gain derived from the ultimate gain ku,
// using a larger factor for PID control than for PI control.
func (t *Tuner) GetKp() float64 {
	if t.ControlType != controlPID {
		return 0.4 * t.ku
	}
	return 0.6 * t.ku
}
// GetKi returns the integral gain derived from ku and the ultimate period pu,
// using a larger factor for PID control than for PI control.
func (t *Tuner) GetKi() float64 {
	if t.ControlType != controlPID {
		return 0.48 * t.ku / t.pu
	}
	return 1.2 * t.ku / t.pu
}
// GetKd returns the derivative gain; it is zero for anything but PID control.
func (t *Tuner) GetKd() float64 {
	if t.ControlType != controlPID {
		return 0
	}
	return 0.075 * t.ku * t.pu
}
// SetLookbackSec configures the peak lookback window from a duration in
// seconds (clamped to at least 1). Short windows use a 250ms sample time with
// 4 samples per second; longer windows cap the sample count at 100 and
// stretch the sample time instead.
func (t *Tuner) SetLookbackSec(value int) {
	if value < 1 {
		value = 1
	}
	if value >= 25 {
		t.nLookback = 100
		t.sampleTime = 10 * value
		return
	}
	t.nLookback = 4 * value
	t.sampleTime = 250
}
// GetLoockBackSec returns the configured lookback window in seconds, derived
// from the sample count and the per-sample time in milliseconds.
// NOTE(review): the name is misspelled ("Loock"); it is kept as-is because
// renaming an exported method would break callers.
func (t *Tuner) GetLoockBackSec() int {
	return t.nLookback * t.sampleTime / 1000
}
|
package dto
// MeditationExerciseStarted specializes ExerciseStarted for meditation
// exercises; it adds no fields of its own.
type MeditationExerciseStarted struct {
	ExerciseStarted
}
|
// Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"log"
"os"
"github.com/google/slothfs/gitiles"
"github.com/google/slothfs/populate"
)
// main fetches a repo manifest from a Gitiles server, filters and
// dereferences it, and writes the result as XML to stdout.
func main() {
	gitilesOptions := gitiles.DefineFlags()
	branch := flag.String("branch", "master", "Specify branch of the manifest repository to use.")
	repo := flag.String("repo", "platform/manifest", "Set repository name holding manifest file.")
	flag.Parse()

	service, err := gitiles.NewService(*gitilesOptions)
	if err != nil {
		log.Fatalf("NewService: %v", err)
	}

	manifest, err := populate.FetchManifest(service, *repo, *branch)
	if err != nil {
		log.Fatalf("FetchManifest: %v", err)
	}
	manifest.Filter()

	if err := populate.DerefManifest(service, manifest); err != nil {
		log.Fatalf("DerefManifest: %v", err)
	}

	out, err := manifest.MarshalXML()
	if err != nil {
		log.Fatalf("MarshalXML: %v", err)
	}
	os.Stdout.Write(out)
}
|
package main
import ("log"
// "net"
// "github.com/grpc-go-course/hello/hellopb"
"../grpcpb"
"google.golang.org/grpc"
"context"
)
// main dials the local gRPC server, sends a single greeting request and logs
// the salutation from the response.
func main() {
	opts := grpc.WithInsecure()
	cc, err := grpc.Dial("localhost:50051", opts)
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
	grpcClient := grpcpb.NewSatuationGreetClient(cc)
	req := &grpcpb.Request{Name: "pabby ji"}
	resp, err := grpcClient.GreetSal(context.Background(), req)
	if err != nil {
		// Fixed: "recieving" typo, and the underlying error was dropped.
		log.Fatal("error receiving the response: ", err)
	}
	// Fixed: the previous log prefix was keyboard noise ("ddrrreeecccttt...").
	log.Println("grpc response:", resp.Salutaion)
}
|
package cliutil
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"fmt"
"os"
"github.com/koinos/koinos-proto-golang/koinos/protocol"
util "github.com/koinos/koinos-util-golang"
"github.com/minio/sio"
)
const (
	// Version is the CLI version number (this should probably not live here).
	Version = "v2.0.0"
)
// Hardcoded Koin contract constants
const (
	KoinSymbol         = "KOIN"
	ManaSymbol         = "mana"
	KoinPrecision      = 8 // number of decimal places in KOIN amounts (see SatoshiToDecimal usage)
	KoinContractID     = "15DJN4a8SgrbGhhGksSBASiSYjGnMU8dGL"
	KoinBalanceOfEntry = uint32(0x5c721497) // contract entry point id for balance_of
	KoinTransferEntry  = uint32(0x27f576ca) // contract entry point id for transfer
)
// Hardcoded Multihash constants: multicodec codes for the RIPEMD hash family.
const (
	RIPEMD128 = 0x1052
	RIPEMD160 = 0x1053
	RIPEMD256 = 0x1054
	RIPEMD320 = 0x1055
)
// TransactionReceiptToString creates a human-readable summary of a transaction
// receipt: id, operation count, revert status, mana cost and any logs.
func TransactionReceiptToString(receipt *protocol.TransactionReceipt, operations int) string {
	s := fmt.Sprintf("Transaction with ID 0x%s containing %d operations", hex.EncodeToString(receipt.Id), operations)

	if receipt.Reverted {
		s += " reverted."
	} else {
		s += " submitted."
	}

	// Build the mana result
	manaDec, err := util.SatoshiToDecimal(receipt.RcUsed, KoinPrecision)
	if err != nil {
		s += "\n" + err.Error()
		return s
	}
	s += fmt.Sprintf("\nMana cost: %v (Disk: %d, Network: %d, Compute: %d)", manaDec, receipt.DiskStorageUsed, receipt.NetworkBandwidthUsed, receipt.ComputeBandwidthUsed)

	// Show logs if available (len of a nil slice is 0, so no nil check needed)
	if len(receipt.Logs) > 0 {
		s += "\nLogs:"
		for _, log := range receipt.Logs {
			s += "\n" + log
		}
	}

	return s
}
// walletConfig builds the sio encryption configuration used for wallet files:
// version 2.0 only, AES-256-GCM or ChaCha20-Poly1305, with the given key.
func walletConfig(password []byte) sio.Config {
	cfg := sio.Config{
		MinVersion:     sio.Version20,
		MaxVersion:     sio.Version20,
		CipherSuites:   []byte{sio.AES_256_GCM, sio.CHACHA20_POLY1305},
		Key:            password,
		SequenceNumber: uint32(0),
	}
	return cfg
}
// CreateWalletFile creates a new wallet file on disk, encrypting privateKey
// with a key derived from passphrase.
func CreateWalletFile(file *os.File, passphrase string, privateKey []byte) error {
	// Derive a 32-byte key from the passphrase with SHA-256; an empty
	// passphrase (zero bytes hashed) is rejected.
	digest := sha256.New()
	n, err := digest.Write([]byte(passphrase))
	if err != nil {
		return err
	}
	if n <= 0 {
		return ErrEmptyPassphrase
	}
	key := digest.Sum(nil)
	if len(key) != 32 {
		return ErrUnexpectedHashLength
	}

	// Stream-encrypt the private key into the wallet file.
	_, err = sio.Encrypt(file, bytes.NewReader(privateKey), walletConfig(key))
	return err
}
// ReadWalletFile extracts the private key from the provided wallet file by
// decrypting it with a key derived from passphrase.
func ReadWalletFile(file *os.File, passphrase string) ([]byte, error) {
	// Derive the same 32-byte SHA-256 key used when the wallet was written.
	digest := sha256.New()
	n, err := digest.Write([]byte(passphrase))
	if err != nil {
		return nil, err
	}
	if n <= 0 {
		return nil, ErrEmptyPassphrase
	}
	key := digest.Sum(nil)
	if len(key) != 32 {
		return nil, ErrUnexpectedHashLength
	}

	// Decrypt the file contents into memory and return the plaintext.
	var plain bytes.Buffer
	_, err = sio.Decrypt(&plain, file, walletConfig(key))
	return plain.Bytes(), err
}
// GetPassword takes the password input from a command, and returns the string
// password which should be used. A nil password falls back to the WALLET_PASS
// environment variable; a blank result is always an error.
func GetPassword(password *string) (string, error) {
	if password != nil {
		if *password == "" {
			return "", fmt.Errorf("%w: password cannot be empty", ErrBlankPassword)
		}
		return *password, nil
	}

	// No password given on the command line; consult the environment and
	// advise about the variable when it is unset.
	fromEnv := os.Getenv("WALLET_PASS")
	if fromEnv == "" {
		return fromEnv, fmt.Errorf("%w: no password was provided and env variable WALLET_PASS is empty", ErrBlankPassword)
	}
	return fromEnv, nil
}
|
package git
/*
#include <git2.h>
extern void _go_git_populate_apply_callbacks(git_apply_options *options);
extern int _go_git_diff_foreach(git_diff *diff, int eachFile, int eachHunk, int eachLine, void *payload);
extern void _go_git_setup_diff_notify_callbacks(git_diff_options* opts);
extern int _go_git_diff_blobs(git_blob *old, const char *old_path, git_blob *new, const char *new_path, git_diff_options *opts, int eachFile, int eachHunk, int eachLine, void *payload);
*/
import "C"
import (
"errors"
"runtime"
"unsafe"
)
// DiffFlag wraps git_diff_flag_t, describing properties of a diff file or delta.
type DiffFlag uint32

const (
	DiffFlagBinary    DiffFlag = C.GIT_DIFF_FLAG_BINARY
	DiffFlagNotBinary DiffFlag = C.GIT_DIFF_FLAG_NOT_BINARY
	DiffFlagValidOid  DiffFlag = C.GIT_DIFF_FLAG_VALID_ID
	DiffFlagExists    DiffFlag = C.GIT_DIFF_FLAG_EXISTS
)
// Delta wraps git_delta_t, the change type of a diff delta.
type Delta int

const (
	DeltaUnmodified Delta = C.GIT_DELTA_UNMODIFIED
	DeltaAdded      Delta = C.GIT_DELTA_ADDED
	DeltaDeleted    Delta = C.GIT_DELTA_DELETED
	DeltaModified   Delta = C.GIT_DELTA_MODIFIED
	DeltaRenamed    Delta = C.GIT_DELTA_RENAMED
	DeltaCopied     Delta = C.GIT_DELTA_COPIED
	DeltaIgnored    Delta = C.GIT_DELTA_IGNORED
	DeltaUntracked  Delta = C.GIT_DELTA_UNTRACKED
	DeltaTypeChange Delta = C.GIT_DELTA_TYPECHANGE
	DeltaUnreadable Delta = C.GIT_DELTA_UNREADABLE
	DeltaConflicted Delta = C.GIT_DELTA_CONFLICTED
)

//go:generate stringer -type Delta -trimprefix Delta -tags static
// DiffLineType wraps git_diff_line_t, the origin/kind of a single diff line.
type DiffLineType int

const (
	DiffLineContext      DiffLineType = C.GIT_DIFF_LINE_CONTEXT
	DiffLineAddition     DiffLineType = C.GIT_DIFF_LINE_ADDITION
	DiffLineDeletion     DiffLineType = C.GIT_DIFF_LINE_DELETION
	DiffLineContextEOFNL DiffLineType = C.GIT_DIFF_LINE_CONTEXT_EOFNL
	DiffLineAddEOFNL     DiffLineType = C.GIT_DIFF_LINE_ADD_EOFNL
	DiffLineDelEOFNL     DiffLineType = C.GIT_DIFF_LINE_DEL_EOFNL
	DiffLineFileHdr      DiffLineType = C.GIT_DIFF_LINE_FILE_HDR
	DiffLineHunkHdr      DiffLineType = C.GIT_DIFF_LINE_HUNK_HDR
	DiffLineBinary       DiffLineType = C.GIT_DIFF_LINE_BINARY
)

//go:generate stringer -type DiffLineType -trimprefix DiffLine -tags static
// DiffFile describes one side of a delta: path, blob id, size, flags and mode.
type DiffFile struct {
	Path  string
	Oid   *Oid
	Size  int
	Flags DiffFlag
	Mode  uint16
}
// diffFileFromC copies a C git_diff_file into a Go DiffFile.
func diffFileFromC(file *C.git_diff_file) DiffFile {
	return DiffFile{
		Path:  C.GoString(file.path),
		Oid:   newOidFromC(&file.id),
		Size:  int(file.size),
		Flags: DiffFlag(file.flags),
		Mode:  uint16(file.mode),
	}
}
// DiffDelta describes one changed file in a diff: the change type, flags,
// rename/copy similarity and both sides of the file.
type DiffDelta struct {
	Status     Delta
	Flags      DiffFlag
	Similarity uint16
	OldFile    DiffFile
	NewFile    DiffFile
}
// diffDeltaFromC copies a C git_diff_delta into a Go DiffDelta.
func diffDeltaFromC(delta *C.git_diff_delta) DiffDelta {
	return DiffDelta{
		Status:     Delta(delta.status),
		Flags:      DiffFlag(delta.flags),
		Similarity: uint16(delta.similarity),
		OldFile:    diffFileFromC(&delta.old_file),
		NewFile:    diffFileFromC(&delta.new_file),
	}
}
// DiffHunk describes one hunk of a delta: old/new line ranges and the header.
type DiffHunk struct {
	OldStart int
	OldLines int
	NewStart int
	NewLines int
	Header   string
}
// diffHunkFromC copies a C git_diff_hunk into a Go DiffHunk.
func diffHunkFromC(hunk *C.git_diff_hunk) DiffHunk {
	return DiffHunk{
		OldStart: int(hunk.old_start),
		OldLines: int(hunk.old_lines),
		NewStart: int(hunk.new_start),
		NewLines: int(hunk.new_lines),
		// The header is a fixed-size C array, not NUL-terminated; copy
		// exactly header_len bytes.
		Header: C.GoStringN(&hunk.header[0], C.int(hunk.header_len)),
	}
}
// DiffLine describes a single line of a hunk: its origin type, line numbers
// on each side and the raw content.
type DiffLine struct {
	Origin    DiffLineType
	OldLineno int
	NewLineno int
	NumLines  int
	Content   string
}
// diffLineFromC copies a C git_diff_line into a Go DiffLine.
func diffLineFromC(line *C.git_diff_line) DiffLine {
	return DiffLine{
		Origin:    DiffLineType(line.origin),
		OldLineno: int(line.old_lineno),
		NewLineno: int(line.new_lineno),
		NumLines:  int(line.num_lines),
		// content is length-delimited, not NUL-terminated.
		Content: C.GoStringN(line.content, C.int(line.content_len)),
	}
}
// Diff wraps a libgit2 git_diff. runFinalizer is false for diffs handed to a
// DiffNotifyCallback, whose underlying memory is owned by libgit2 (see Free).
type Diff struct {
	doNotCompare
	ptr          *C.git_diff
	repo         *Repository
	runFinalizer bool
}
// NumDeltas returns the number of deltas in the diff, or ErrInvalid if the
// diff has already been freed.
func (diff *Diff) NumDeltas() (int, error) {
	if diff.ptr == nil {
		return -1, ErrInvalid
	}
	ret := int(C.git_diff_num_deltas(diff.ptr))
	runtime.KeepAlive(diff)
	return ret, nil
}
// Delta returns the delta at the given index, or ErrInvalid if the diff has
// already been freed.
func (diff *Diff) Delta(index int) (DiffDelta, error) {
	if diff.ptr == nil {
		return DiffDelta{}, ErrInvalid
	}
	ptr := C.git_diff_get_delta(diff.ptr, C.size_t(index))
	ret := diffDeltaFromC(ptr)
	runtime.KeepAlive(diff)
	return ret, nil
}
// GetDelta returns the delta at the given index.
//
// Deprecated: Use Diff.Delta instead.
func (diff *Diff) GetDelta(index int) (DiffDelta, error) {
	return diff.Delta(index)
}
// newDiffFromC wraps a C diff pointer in a Diff whose memory is released by a
// finalizer; it returns nil for a nil pointer.
func newDiffFromC(ptr *C.git_diff, repo *Repository) *Diff {
	if ptr == nil {
		return nil
	}

	diff := &Diff{
		ptr:          ptr,
		repo:         repo,
		runFinalizer: true,
	}

	runtime.SetFinalizer(diff, (*Diff).Free)
	return diff
}
// Free releases the underlying C diff and clears the finalizer. When
// runFinalizer is false only the pointer is cleared, because libgit2 owns
// that memory.
func (diff *Diff) Free() error {
	if diff.ptr == nil {
		return ErrInvalid
	}
	if !diff.runFinalizer {
		// This is the case with the Diff objects that are involved in the DiffNotifyCallback.
		diff.ptr = nil
		return nil
	}
	runtime.SetFinalizer(diff, nil)
	C.git_diff_free(diff.ptr)
	diff.ptr = nil
	return nil
}
// FindSimilar transforms the diff in place, detecting renames and copies
// according to opts (wraps git_diff_find_similar). A nil opts passes NULL and
// lets libgit2 use its defaults.
func (diff *Diff) FindSimilar(opts *DiffFindOptions) error {
	var copts *C.git_diff_find_options
	if opts != nil {
		copts = &C.git_diff_find_options{
			version:                       C.GIT_DIFF_FIND_OPTIONS_VERSION,
			flags:                         C.uint32_t(opts.Flags),
			rename_threshold:              C.uint16_t(opts.RenameThreshold),
			copy_threshold:                C.uint16_t(opts.CopyThreshold),
			rename_from_rewrite_threshold: C.uint16_t(opts.RenameFromRewriteThreshold),
			break_rewrite_threshold:       C.uint16_t(opts.BreakRewriteThreshold),
			rename_limit:                  C.size_t(opts.RenameLimit),
		}
	}

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ecode := C.git_diff_find_similar(diff.ptr, copts)
	runtime.KeepAlive(diff)
	if ecode < 0 {
		return MakeGitError(ecode)
	}

	return nil
}
// DiffStats wraps git_diff_stats, aggregate counts for a whole diff.
type DiffStats struct {
	doNotCompare
	ptr *C.git_diff_stats
}
// Free releases the underlying C stats object and clears the finalizer.
func (stats *DiffStats) Free() error {
	if stats.ptr == nil {
		return ErrInvalid
	}
	runtime.SetFinalizer(stats, nil)
	C.git_diff_stats_free(stats.ptr)
	stats.ptr = nil
	return nil
}
// Insertions returns the total number of inserted lines in the diff.
func (stats *DiffStats) Insertions() int {
	ret := int(C.git_diff_stats_insertions(stats.ptr))
	runtime.KeepAlive(stats)
	return ret
}
// Deletions returns the total number of deleted lines in the diff.
func (stats *DiffStats) Deletions() int {
	ret := int(C.git_diff_stats_deletions(stats.ptr))
	runtime.KeepAlive(stats)
	return ret
}
// FilesChanged returns the number of files changed in the diff.
func (stats *DiffStats) FilesChanged() int {
	ret := int(C.git_diff_stats_files_changed(stats.ptr))
	runtime.KeepAlive(stats)
	return ret
}
// DiffStatsFormat wraps git_diff_stats_format_t, selecting the output layout
// used by DiffStats.String.
type DiffStatsFormat int

const (
	DiffStatsNone           DiffStatsFormat = C.GIT_DIFF_STATS_NONE
	DiffStatsFull           DiffStatsFormat = C.GIT_DIFF_STATS_FULL
	DiffStatsShort          DiffStatsFormat = C.GIT_DIFF_STATS_SHORT
	DiffStatsNumber         DiffStatsFormat = C.GIT_DIFF_STATS_NUMBER
	DiffStatsIncludeSummary DiffStatsFormat = C.GIT_DIFF_STATS_INCLUDE_SUMMARY
)
// String renders the stats in the given format at the given width (wraps
// git_diff_stats_to_buf).
func (stats *DiffStats) String(format DiffStatsFormat,
	width uint) (string, error) {
	buf := C.git_buf{}
	defer C.git_buf_dispose(&buf)

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C.git_diff_stats_to_buf(&buf,
		stats.ptr, C.git_diff_stats_format_t(format), C.size_t(width))
	runtime.KeepAlive(stats)
	if ret < 0 {
		return "", MakeGitError(ret)
	}

	return C.GoString(buf.ptr), nil
}
// Stats computes insertion/deletion statistics for the diff; the returned
// object is freed by a finalizer (or an explicit Free).
func (diff *Diff) Stats() (*DiffStats, error) {
	stats := new(DiffStats)

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ecode := C.git_diff_get_stats(&stats.ptr, diff.ptr)
	runtime.KeepAlive(diff)
	if ecode < 0 {
		return nil, MakeGitError(ecode)
	}
	runtime.SetFinalizer(stats, (*DiffStats).Free)

	return stats, nil
}
// diffForEachCallbackData carries the Go callbacks, and a slot for the error
// they raise, across the C boundary during ForEach. hunkCallback and
// lineCallback are (re)assigned as the file/hunk callbacks return them.
type diffForEachCallbackData struct {
	fileCallback DiffForEachFileCallback
	hunkCallback DiffForEachHunkCallback
	lineCallback DiffForEachLineCallback
	errorTarget  *error
}
// DiffForEachFileCallback is invoked once per file in the diff; the hunk
// callback it returns (possibly nil) is used for that file's hunks.
type DiffForEachFileCallback func(delta DiffDelta, progress float64) (DiffForEachHunkCallback, error)

// DiffDetail selects how deep ForEach descends: files, hunks or lines.
type DiffDetail int

const (
	DiffDetailFiles DiffDetail = iota
	DiffDetailHunks
	DiffDetailLines
)
// ForEach iterates the diff, calling cbFile for every file and — depending on
// detail — the callbacks it returns for every hunk and line. An error
// returned from a callback aborts the walk and is returned here.
func (diff *Diff) ForEach(cbFile DiffForEachFileCallback, detail DiffDetail) error {
	if diff.ptr == nil {
		return ErrInvalid
	}

	// Tell the C shim which event kinds (hunks/lines) are wanted.
	intHunks := C.int(0)
	if detail >= DiffDetailHunks {
		intHunks = C.int(1)
	}

	intLines := C.int(0)
	if detail >= DiffDetailLines {
		intLines = C.int(1)
	}

	var err error
	data := &diffForEachCallbackData{
		fileCallback: cbFile,
		errorTarget:  &err,
	}
	// Pin the callback data so C code can refer to it via an opaque handle.
	handle := pointerHandles.Track(data)
	defer pointerHandles.Untrack(handle)

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C._go_git_diff_foreach(diff.ptr, 1, intHunks, intLines, handle)
	runtime.KeepAlive(diff)
	if ret == C.int(ErrorCodeUser) && err != nil {
		return err
	}
	if ret < 0 {
		return MakeGitError(ret)
	}

	return nil
}
// diffForEachFileCallback bridges C per-file events to the Go file callback.

//export diffForEachFileCallback
func diffForEachFileCallback(delta *C.git_diff_delta, progress C.float, handle unsafe.Pointer) C.int {
	payload := pointerHandles.Get(handle)
	data, ok := payload.(*diffForEachCallbackData)
	if !ok {
		panic("could not retrieve data for handle")
	}

	// Reset the hunk callback: the file callback decides per file whether
	// hunks should be visited.
	data.hunkCallback = nil
	if data.fileCallback != nil {
		cb, err := data.fileCallback(diffDeltaFromC(delta), float64(progress))
		if err != nil {
			*data.errorTarget = err
			return C.int(ErrorCodeUser)
		}
		data.hunkCallback = cb
	}

	return C.int(ErrorCodeOK)
}
// DiffForEachHunkCallback is invoked once per hunk; the line callback it
// returns (possibly nil) is used for that hunk's lines.
type DiffForEachHunkCallback func(DiffHunk) (DiffForEachLineCallback, error)

// diffForEachHunkCallback bridges C per-hunk events to the Go hunk callback.

//export diffForEachHunkCallback
func diffForEachHunkCallback(delta *C.git_diff_delta, hunk *C.git_diff_hunk, handle unsafe.Pointer) C.int {
	payload := pointerHandles.Get(handle)
	data, ok := payload.(*diffForEachCallbackData)
	if !ok {
		panic("could not retrieve data for handle")
	}

	// Reset the line callback: the hunk callback decides per hunk whether
	// lines should be visited.
	data.lineCallback = nil
	if data.hunkCallback != nil {
		cb, err := data.hunkCallback(diffHunkFromC(hunk))
		if err != nil {
			*data.errorTarget = err
			return C.int(ErrorCodeUser)
		}
		data.lineCallback = cb
	}

	return C.int(ErrorCodeOK)
}
// DiffForEachLineCallback is invoked once per line of a hunk.
type DiffForEachLineCallback func(DiffLine) error

// diffForEachLineCallback bridges C per-line events to the Go line callback.

//export diffForEachLineCallback
func diffForEachLineCallback(delta *C.git_diff_delta, hunk *C.git_diff_hunk, line *C.git_diff_line, handle unsafe.Pointer) C.int {
	payload := pointerHandles.Get(handle)
	data, ok := payload.(*diffForEachCallbackData)
	if !ok {
		panic("could not retrieve data for handle")
	}

	// NOTE(review): unlike the file/hunk callbacks there is no nil check on
	// lineCallback here; this relies on line events only being emitted when a
	// line callback was installed — confirm against the C shim.
	err := data.lineCallback(diffLineFromC(line))
	if err != nil {
		*data.errorTarget = err
		return C.int(ErrorCodeUser)
	}

	return C.int(ErrorCodeOK)
}
// Patch produces the patch object for the delta at deltaIndex (wraps
// git_patch_from_diff), or ErrInvalid if the diff has been freed.
func (diff *Diff) Patch(deltaIndex int) (*Patch, error) {
	if diff.ptr == nil {
		return nil, ErrInvalid
	}
	var patchPtr *C.git_patch

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ecode := C.git_patch_from_diff(&patchPtr, diff.ptr, C.size_t(deltaIndex))
	runtime.KeepAlive(diff)
	if ecode < 0 {
		return nil, MakeGitError(ecode)
	}

	return newPatchFromC(patchPtr), nil
}
// DiffFormat wraps git_diff_format_t, selecting the textual rendering used by
// Diff.ToBuf.
type DiffFormat int

const (
	DiffFormatPatch       DiffFormat = C.GIT_DIFF_FORMAT_PATCH
	DiffFormatPatchHeader DiffFormat = C.GIT_DIFF_FORMAT_PATCH_HEADER
	DiffFormatRaw         DiffFormat = C.GIT_DIFF_FORMAT_RAW
	DiffFormatNameOnly    DiffFormat = C.GIT_DIFF_FORMAT_NAME_ONLY
	DiffFormatNameStatus  DiffFormat = C.GIT_DIFF_FORMAT_NAME_STATUS
)
// ToBuf renders the whole diff in the given format and returns the bytes
// (wraps git_diff_to_buf).
func (diff *Diff) ToBuf(format DiffFormat) ([]byte, error) {
	if diff.ptr == nil {
		return nil, ErrInvalid
	}

	diffBuf := C.git_buf{}

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ecode := C.git_diff_to_buf(&diffBuf, diff.ptr, C.git_diff_format_t(format))
	runtime.KeepAlive(diff)
	if ecode < 0 {
		return nil, MakeGitError(ecode)
	}
	defer C.git_buf_dispose(&diffBuf)

	return C.GoBytes(unsafe.Pointer(diffBuf.ptr), C.int(diffBuf.size)), nil
}
// DiffOptionsFlag wraps git_diff_option_t, controlling how a diff is
// generated (inclusion rules, whitespace handling, algorithm choice, ...).
type DiffOptionsFlag int

const (
	DiffNormal                  DiffOptionsFlag = C.GIT_DIFF_NORMAL
	DiffReverse                 DiffOptionsFlag = C.GIT_DIFF_REVERSE
	DiffIncludeIgnored          DiffOptionsFlag = C.GIT_DIFF_INCLUDE_IGNORED
	DiffRecurseIgnoredDirs      DiffOptionsFlag = C.GIT_DIFF_RECURSE_IGNORED_DIRS
	DiffIncludeUntracked        DiffOptionsFlag = C.GIT_DIFF_INCLUDE_UNTRACKED
	DiffRecurseUntracked        DiffOptionsFlag = C.GIT_DIFF_RECURSE_UNTRACKED_DIRS
	DiffIncludeUnmodified       DiffOptionsFlag = C.GIT_DIFF_INCLUDE_UNMODIFIED
	DiffIncludeTypeChange       DiffOptionsFlag = C.GIT_DIFF_INCLUDE_TYPECHANGE
	DiffIncludeTypeChangeTrees  DiffOptionsFlag = C.GIT_DIFF_INCLUDE_TYPECHANGE_TREES
	DiffIgnoreFilemode          DiffOptionsFlag = C.GIT_DIFF_IGNORE_FILEMODE
	DiffIgnoreSubmodules        DiffOptionsFlag = C.GIT_DIFF_IGNORE_SUBMODULES
	DiffIgnoreCase              DiffOptionsFlag = C.GIT_DIFF_IGNORE_CASE
	DiffIncludeCaseChange       DiffOptionsFlag = C.GIT_DIFF_INCLUDE_CASECHANGE
	DiffDisablePathspecMatch    DiffOptionsFlag = C.GIT_DIFF_DISABLE_PATHSPEC_MATCH
	DiffSkipBinaryCheck         DiffOptionsFlag = C.GIT_DIFF_SKIP_BINARY_CHECK
	DiffEnableFastUntrackedDirs DiffOptionsFlag = C.GIT_DIFF_ENABLE_FAST_UNTRACKED_DIRS
	DiffForceText               DiffOptionsFlag = C.GIT_DIFF_FORCE_TEXT
	DiffForceBinary             DiffOptionsFlag = C.GIT_DIFF_FORCE_BINARY
	DiffIgnoreWhitespace        DiffOptionsFlag = C.GIT_DIFF_IGNORE_WHITESPACE
	DiffIgnoreWhitespaceChange  DiffOptionsFlag = C.GIT_DIFF_IGNORE_WHITESPACE_CHANGE
	DiffIgnoreWhitespaceEOL     DiffOptionsFlag = C.GIT_DIFF_IGNORE_WHITESPACE_EOL
	DiffShowUntrackedContent    DiffOptionsFlag = C.GIT_DIFF_SHOW_UNTRACKED_CONTENT
	DiffShowUnmodified          DiffOptionsFlag = C.GIT_DIFF_SHOW_UNMODIFIED
	DiffPatience                DiffOptionsFlag = C.GIT_DIFF_PATIENCE
	DiffMinimal                 DiffOptionsFlag = C.GIT_DIFF_MINIMAL
	DiffShowBinary              DiffOptionsFlag = C.GIT_DIFF_SHOW_BINARY
	DiffIndentHeuristic         DiffOptionsFlag = C.GIT_DIFF_INDENT_HEURISTIC
)
// DiffNotifyCallback is invoked for each delta just before it is added to the
// diff. Returning ErrDeltaSkip skips that delta; any other non-nil error
// aborts the diff operation and is returned to the caller.
type DiffNotifyCallback func(diffSoFar *Diff, deltaToAdd DiffDelta, matchedPathspec string) error
// DiffOptions configures diff generation; obtain sensible defaults from
// DefaultDiffOptions and then adjust the fields as needed.
type DiffOptions struct {
	Flags DiffOptionsFlag
	IgnoreSubmodules SubmoduleIgnore
	// Pathspec limits the diff to the given paths/patterns; empty means all.
	Pathspec []string
	NotifyCallback DiffNotifyCallback
	ContextLines uint32
	InterhunkLines uint32
	IdAbbrev uint16
	MaxSize int
	// OldPrefix and NewPrefix are the virtual directory prefixes shown in
	// patch output (conventionally "a" and "b").
	OldPrefix string
	NewPrefix string
}
// DefaultDiffOptions returns a DiffOptions populated from libgit2's defaults.
// OldPrefix/NewPrefix are explicitly set to the conventional "a"/"b" rather
// than taken from the C struct.
func DefaultDiffOptions() (DiffOptions, error) {
	opts := C.git_diff_options{}
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	ecode := C.git_diff_options_init(&opts, C.GIT_DIFF_OPTIONS_VERSION)
	if ecode < 0 {
		return DiffOptions{}, MakeGitError(ecode)
	}
	return DiffOptions{
		Flags: DiffOptionsFlag(opts.flags),
		IgnoreSubmodules: SubmoduleIgnore(opts.ignore_submodules),
		Pathspec: makeStringsFromCStrings(opts.pathspec.strings, int(opts.pathspec.count)),
		ContextLines: uint32(opts.context_lines),
		InterhunkLines: uint32(opts.interhunk_lines),
		IdAbbrev: uint16(opts.id_abbrev),
		MaxSize: int(opts.max_size),
		OldPrefix: "a",
		NewPrefix: "b",
	}, nil
}
// DiffFindOptionsFlag controls rename/copy detection (Diff.FindSimilar); the
// values mirror libgit2's GIT_DIFF_FIND_* flags and may be OR'd together.
type DiffFindOptionsFlag int
const (
	DiffFindByConfig DiffFindOptionsFlag = C.GIT_DIFF_FIND_BY_CONFIG
	DiffFindRenames DiffFindOptionsFlag = C.GIT_DIFF_FIND_RENAMES
	DiffFindRenamesFromRewrites DiffFindOptionsFlag = C.GIT_DIFF_FIND_RENAMES_FROM_REWRITES
	DiffFindCopies DiffFindOptionsFlag = C.GIT_DIFF_FIND_COPIES
	DiffFindCopiesFromUnmodified DiffFindOptionsFlag = C.GIT_DIFF_FIND_COPIES_FROM_UNMODIFIED
	DiffFindRewrites DiffFindOptionsFlag = C.GIT_DIFF_FIND_REWRITES
	DiffFindBreakRewrites DiffFindOptionsFlag = C.GIT_DIFF_BREAK_REWRITES
	DiffFindAndBreakRewrites DiffFindOptionsFlag = C.GIT_DIFF_FIND_AND_BREAK_REWRITES
	DiffFindForUntracked DiffFindOptionsFlag = C.GIT_DIFF_FIND_FOR_UNTRACKED
	DiffFindAll DiffFindOptionsFlag = C.GIT_DIFF_FIND_ALL
	DiffFindIgnoreLeadingWhitespace DiffFindOptionsFlag = C.GIT_DIFF_FIND_IGNORE_LEADING_WHITESPACE
	DiffFindIgnoreWhitespace DiffFindOptionsFlag = C.GIT_DIFF_FIND_IGNORE_WHITESPACE
	DiffFindDontIgnoreWhitespace DiffFindOptionsFlag = C.GIT_DIFF_FIND_DONT_IGNORE_WHITESPACE
	DiffFindExactMatchOnly DiffFindOptionsFlag = C.GIT_DIFF_FIND_EXACT_MATCH_ONLY
	DiffFindBreakRewritesForRenamesOnly DiffFindOptionsFlag = C.GIT_DIFF_BREAK_REWRITES_FOR_RENAMES_ONLY
	DiffFindRemoveUnmodified DiffFindOptionsFlag = C.GIT_DIFF_FIND_REMOVE_UNMODIFIED
)
// TODO implement git_diff_similarity_metric
// DiffFindOptions configures similarity detection; thresholds are percentages
// in the range libgit2 defines for the corresponding C fields.
type DiffFindOptions struct {
	Flags DiffFindOptionsFlag
	RenameThreshold uint16
	CopyThreshold uint16
	RenameFromRewriteThreshold uint16
	BreakRewriteThreshold uint16
	RenameLimit uint
}
// DefaultDiffFindOptions returns a DiffFindOptions populated from libgit2's
// default find options.
func DefaultDiffFindOptions() (DiffFindOptions, error) {
	opts := C.git_diff_find_options{}
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	ecode := C.git_diff_find_options_init(&opts, C.GIT_DIFF_FIND_OPTIONS_VERSION)
	if ecode < 0 {
		return DiffFindOptions{}, MakeGitError(ecode)
	}
	return DiffFindOptions{
		Flags: DiffFindOptionsFlag(opts.flags),
		RenameThreshold: uint16(opts.rename_threshold),
		CopyThreshold: uint16(opts.copy_threshold),
		RenameFromRewriteThreshold: uint16(opts.rename_from_rewrite_threshold),
		BreakRewriteThreshold: uint16(opts.break_rewrite_threshold),
		RenameLimit: uint(opts.rename_limit),
	}, nil
}
var (
	// ErrDeltaSkip can be returned from a DiffNotifyCallback to skip the
	// current delta without aborting the whole diff operation.
	ErrDeltaSkip = errors.New("Skip delta")
)
// diffNotifyCallbackData carries the Go notify callback, its repository, and
// the error sink across the cgo boundary.
type diffNotifyCallbackData struct {
	callback DiffNotifyCallback
	repository *Repository
	errorTarget *error
}
// diffNotifyCallback is the C-to-Go trampoline for diff notify callbacks. It
// wraps the in-progress diff in a transient, non-owning *Diff, invokes the
// user callback, and translates its result into a libgit2 return code
// (ErrDeltaSkip maps to the positive "skip this delta" code 1).
//
//export diffNotifyCallback
func diffNotifyCallback(_diff_so_far unsafe.Pointer, delta_to_add *C.git_diff_delta, matched_pathspec *C.char, handle unsafe.Pointer) C.int {
	diff_so_far := (*C.git_diff)(_diff_so_far)
	payload := pointerHandles.Get(handle)
	data, ok := payload.(*diffNotifyCallbackData)
	if !ok {
		panic("could not retrieve data for handle")
	}
	if data == nil {
		return C.int(ErrorCodeOK)
	}
	// We are not taking ownership of this diff pointer, so no finalizer is set.
	diff := &Diff{
		ptr: diff_so_far,
		repo: data.repository,
		runFinalizer: false,
	}
	err := data.callback(diff, diffDeltaFromC(delta_to_add), C.GoString(matched_pathspec))
	// Since the callback could theoretically keep a reference to the diff
	// (which could be freed by libgit2 if an error occurs later during the
	// diffing process), this converts a use-after-free (terrible!) into a nil
	// dereference ("just" pretty bad).
	diff.ptr = nil
	if err == ErrDeltaSkip {
		return 1
	}
	if err != nil {
		*data.errorTarget = err
		return C.int(ErrorCodeUser)
	}
	return C.int(ErrorCodeOK)
}
// populateDiffOptions copies opts into the C options struct copts. It returns
// copts, or nil when opts is nil (after still initializing copts), so callers
// can pass the result directly to libgit2 (nil means C-side defaults). The
// returned struct owns C allocations and a tracked callback handle; callers
// must release them with freeDiffOptions. A callback error is written to
// *errorTarget.
func populateDiffOptions(copts *C.git_diff_options, opts *DiffOptions, repo *Repository, errorTarget *error) *C.git_diff_options {
	C.git_diff_options_init(copts, C.GIT_DIFF_OPTIONS_VERSION)
	if opts == nil {
		return nil
	}
	copts.flags = C.uint32_t(opts.Flags)
	copts.ignore_submodules = C.git_submodule_ignore_t(opts.IgnoreSubmodules)
	if len(opts.Pathspec) > 0 {
		copts.pathspec.count = C.size_t(len(opts.Pathspec))
		copts.pathspec.strings = makeCStringsFromStrings(opts.Pathspec)
	}
	copts.context_lines = C.uint32_t(opts.ContextLines)
	copts.interhunk_lines = C.uint32_t(opts.InterhunkLines)
	copts.id_abbrev = C.uint16_t(opts.IdAbbrev)
	copts.max_size = C.git_off_t(opts.MaxSize)
	copts.old_prefix = C.CString(opts.OldPrefix)
	copts.new_prefix = C.CString(opts.NewPrefix)
	if opts.NotifyCallback != nil {
		notifyData := &diffNotifyCallbackData{
			callback: opts.NotifyCallback,
			repository: repo,
			errorTarget: errorTarget,
		}
		// Install the C-side notify trampoline and track the Go payload so it
		// survives until freeDiffOptions untracks it.
		C._go_git_setup_diff_notify_callbacks(copts)
		copts.payload = pointerHandles.Track(notifyData)
	}
	return copts
}
// freeDiffOptions releases the C allocations made by populateDiffOptions:
// the pathspec string array, the prefix strings, and the tracked callback
// handle. A nil copts is a no-op.
func freeDiffOptions(copts *C.git_diff_options) {
	if copts == nil {
		return
	}
	freeStrarray(&copts.pathspec)
	C.free(unsafe.Pointer(copts.old_prefix))
	C.free(unsafe.Pointer(copts.new_prefix))
	if copts.payload != nil {
		pointerHandles.Untrack(copts.payload)
	}
}
// DiffTreeToTree creates a diff between two tree objects. A nil tree is
// passed through as NULL (which libgit2 documents as "empty tree"); opts may
// be nil for default behavior.
func (v *Repository) DiffTreeToTree(oldTree, newTree *Tree, opts *DiffOptions) (*Diff, error) {
	var diffPtr *C.git_diff
	var oldPtr, newPtr *C.git_tree
	if oldTree != nil {
		oldPtr = oldTree.cast_ptr
	}
	if newTree != nil {
		newPtr = newTree.cast_ptr
	}
	var err error
	copts := populateDiffOptions(&C.git_diff_options{}, opts, v, &err)
	defer freeDiffOptions(copts)
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	ret := C.git_diff_tree_to_tree(&diffPtr, v.ptr, oldPtr, newPtr, copts)
	runtime.KeepAlive(oldTree)
	runtime.KeepAlive(newTree)
	// A Go-side error from the notify callback takes precedence over the
	// generic libgit2 error for the same return code.
	if ret == C.int(ErrorCodeUser) && err != nil {
		return nil, err
	}
	if ret < 0 {
		return nil, MakeGitError(ret)
	}
	return newDiffFromC(diffPtr, v), nil
}
// DiffTreeToWorkdir creates a diff between a tree and the working directory.
// A nil oldTree is passed through as NULL; opts may be nil for defaults.
func (v *Repository) DiffTreeToWorkdir(oldTree *Tree, opts *DiffOptions) (*Diff, error) {
	var diffPtr *C.git_diff
	var oldPtr *C.git_tree
	if oldTree != nil {
		oldPtr = oldTree.cast_ptr
	}
	var err error
	copts := populateDiffOptions(&C.git_diff_options{}, opts, v, &err)
	defer freeDiffOptions(copts)
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	ret := C.git_diff_tree_to_workdir(&diffPtr, v.ptr, oldPtr, copts)
	runtime.KeepAlive(oldTree)
	// Prefer the Go-side callback error over the generic libgit2 error.
	if ret == C.int(ErrorCodeUser) && err != nil {
		return nil, err
	}
	if ret < 0 {
		return nil, MakeGitError(ret)
	}
	return newDiffFromC(diffPtr, v), nil
}
// DiffTreeToIndex creates a diff between a tree and an index. Nil arguments
// are passed through as NULL (libgit2 then uses the repository index for a
// nil index); opts may be nil for defaults.
func (v *Repository) DiffTreeToIndex(oldTree *Tree, index *Index, opts *DiffOptions) (*Diff, error) {
	var diffPtr *C.git_diff
	var oldPtr *C.git_tree
	var indexPtr *C.git_index
	if oldTree != nil {
		oldPtr = oldTree.cast_ptr
	}
	if index != nil {
		indexPtr = index.ptr
	}
	var err error
	copts := populateDiffOptions(&C.git_diff_options{}, opts, v, &err)
	defer freeDiffOptions(copts)
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	ret := C.git_diff_tree_to_index(&diffPtr, v.ptr, oldPtr, indexPtr, copts)
	runtime.KeepAlive(oldTree)
	runtime.KeepAlive(index)
	// Prefer the Go-side callback error over the generic libgit2 error.
	if ret == C.int(ErrorCodeUser) && err != nil {
		return nil, err
	}
	if ret < 0 {
		return nil, MakeGitError(ret)
	}
	return newDiffFromC(diffPtr, v), nil
}
// DiffTreeToWorkdirWithIndex creates a diff between a tree and the working
// directory, taking staged (index) changes into account. A nil oldTree is
// passed through as NULL; opts may be nil for defaults.
func (v *Repository) DiffTreeToWorkdirWithIndex(oldTree *Tree, opts *DiffOptions) (*Diff, error) {
	var diffPtr *C.git_diff
	var oldPtr *C.git_tree
	if oldTree != nil {
		oldPtr = oldTree.cast_ptr
	}
	var err error
	copts := populateDiffOptions(&C.git_diff_options{}, opts, v, &err)
	defer freeDiffOptions(copts)
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	ret := C.git_diff_tree_to_workdir_with_index(&diffPtr, v.ptr, oldPtr, copts)
	runtime.KeepAlive(oldTree)
	// Prefer the Go-side callback error over the generic libgit2 error.
	if ret == C.int(ErrorCodeUser) && err != nil {
		return nil, err
	}
	if ret < 0 {
		return nil, MakeGitError(ret)
	}
	return newDiffFromC(diffPtr, v), nil
}
// DiffIndexToWorkdir creates a diff between an index and the working
// directory. A nil index is passed through as NULL (libgit2 then uses the
// repository index); opts may be nil for defaults.
func (v *Repository) DiffIndexToWorkdir(index *Index, opts *DiffOptions) (*Diff, error) {
	var diffPtr *C.git_diff
	var indexPtr *C.git_index
	if index != nil {
		indexPtr = index.ptr
	}
	var err error
	copts := populateDiffOptions(&C.git_diff_options{}, opts, v, &err)
	defer freeDiffOptions(copts)
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	ret := C.git_diff_index_to_workdir(&diffPtr, v.ptr, indexPtr, copts)
	runtime.KeepAlive(index)
	// Prefer the Go-side callback error over the generic libgit2 error.
	if ret == C.int(ErrorCodeUser) && err != nil {
		return nil, err
	}
	if ret < 0 {
		return nil, MakeGitError(ret)
	}
	return newDiffFromC(diffPtr, v), nil
}
// DiffBlobs performs a diff between two arbitrary blobs. You can pass
// whatever file names you'd like for them to appear as in the diff.
//
// A nil blob is passed through as NULL. The repository used for the diff
// options is taken from whichever blob is non-nil (the new blob wins when
// both are set). The callbacks invoked depend on detail: fileCallback always,
// hunk callbacks for DiffDetailHunks and above, line callbacks for
// DiffDetailLines and above.
func DiffBlobs(oldBlob *Blob, oldAsPath string, newBlob *Blob, newAsPath string, opts *DiffOptions, fileCallback DiffForEachFileCallback, detail DiffDetail) error {
	var err error
	data := &diffForEachCallbackData{
		fileCallback: fileCallback,
		errorTarget: &err,
	}
	intHunks := C.int(0)
	if detail >= DiffDetailHunks {
		intHunks = C.int(1)
	}
	intLines := C.int(0)
	if detail >= DiffDetailLines {
		intLines = C.int(1)
	}
	handle := pointerHandles.Track(data)
	defer pointerHandles.Untrack(handle)
	var repo *Repository
	var oldBlobPtr, newBlobPtr *C.git_blob
	if oldBlob != nil {
		oldBlobPtr = oldBlob.cast_ptr
		repo = oldBlob.repo
	}
	if newBlob != nil {
		newBlobPtr = newBlob.cast_ptr
		repo = newBlob.repo
	}
	oldBlobPath := C.CString(oldAsPath)
	defer C.free(unsafe.Pointer(oldBlobPath))
	newBlobPath := C.CString(newAsPath)
	defer C.free(unsafe.Pointer(newBlobPath))
	copts := populateDiffOptions(&C.git_diff_options{}, opts, repo, &err)
	defer freeDiffOptions(copts)
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	ret := C._go_git_diff_blobs(oldBlobPtr, oldBlobPath, newBlobPtr, newBlobPath, copts, 1, intHunks, intLines, handle)
	runtime.KeepAlive(oldBlob)
	runtime.KeepAlive(newBlob)
	// Prefer the Go-side callback error over the generic libgit2 error.
	if ret == C.int(ErrorCodeUser) && err != nil {
		return err
	}
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}
// ApplyHunkCallback is a callback that will be made per hunk when applying a patch.
type ApplyHunkCallback func(*DiffHunk) (apply bool, err error)
// ApplyDeltaCallback is a callback that will be made per delta (file) when applying a patch.
type ApplyDeltaCallback func(*DiffDelta) (apply bool, err error)
// ApplyOptions has 2 callbacks that are called for hunks or deltas
// If these functions return an error, abort the apply process immediately.
// If the first return value is true, the delta/hunk will be applied. If it is false, the delta/hunk will not be applied. In either case, the rest of the apply process will continue.
type ApplyOptions struct {
	ApplyHunkCallback ApplyHunkCallback
	ApplyDeltaCallback ApplyDeltaCallback
	Flags uint
}
// applyCallbackData carries the apply options and the error sink across the
// cgo boundary for the hunk/delta apply callbacks.
type applyCallbackData struct {
	options *ApplyOptions
	errorTarget *error
}
// hunkApplyCallback is the C-to-Go trampoline for per-hunk apply callbacks.
// Returning 1 tells libgit2 to skip the hunk without aborting the apply.
//
//export hunkApplyCallback
func hunkApplyCallback(_hunk *C.git_diff_hunk, _payload unsafe.Pointer) C.int {
	data, ok := pointerHandles.Get(_payload).(*applyCallbackData)
	if !ok {
		panic("invalid apply options payload")
	}
	if data.options.ApplyHunkCallback == nil {
		return C.int(ErrorCodeOK)
	}
	hunk := diffHunkFromC(_hunk)
	apply, err := data.options.ApplyHunkCallback(&hunk)
	if err != nil {
		// Surface the Go error through errorTarget and abort the apply.
		*data.errorTarget = err
		return C.int(ErrorCodeUser)
	}
	if !apply {
		return 1
	}
	return C.int(ErrorCodeOK)
}
// deltaApplyCallback is the C-to-Go trampoline for per-delta (per-file) apply
// callbacks. Returning 1 tells libgit2 to skip the delta without aborting.
//
//export deltaApplyCallback
func deltaApplyCallback(_delta *C.git_diff_delta, _payload unsafe.Pointer) C.int {
	data, ok := pointerHandles.Get(_payload).(*applyCallbackData)
	if !ok {
		panic("invalid apply options payload")
	}
	if data.options.ApplyDeltaCallback == nil {
		return C.int(ErrorCodeOK)
	}
	delta := diffDeltaFromC(_delta)
	apply, err := data.options.ApplyDeltaCallback(&delta)
	if err != nil {
		// Surface the Go error through errorTarget and abort the apply.
		*data.errorTarget = err
		return C.int(ErrorCodeUser)
	}
	if !apply {
		return 1
	}
	return C.int(ErrorCodeOK)
}
// DefaultApplyOptions returns default options for applying diffs or patches.
func DefaultApplyOptions() (*ApplyOptions, error) {
	opts := C.git_apply_options{}
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	ecode := C.git_apply_options_init(&opts, C.GIT_APPLY_OPTIONS_VERSION)
	// libgit2 signals failure with a negative code; check it the same way as
	// every other *_options_init wrapper in this file (MakeGitError expects a
	// negative code).
	if ecode < 0 {
		return nil, MakeGitError(ecode)
	}
	return applyOptionsFromC(&opts), nil
}
// populateApplyOptions copies opts into the C options struct copts, returning
// copts or nil when opts is nil (libgit2 then uses its defaults). When either
// callback is set, the Go payload is tracked and the C trampolines installed;
// release with freeApplyOptions. Callback errors are written to *errorTarget.
func populateApplyOptions(copts *C.git_apply_options, opts *ApplyOptions, errorTarget *error) *C.git_apply_options {
	C.git_apply_options_init(copts, C.GIT_APPLY_OPTIONS_VERSION)
	if opts == nil {
		return nil
	}
	copts.flags = C.uint(opts.Flags)
	if opts.ApplyDeltaCallback != nil || opts.ApplyHunkCallback != nil {
		data := &applyCallbackData{
			options: opts,
			errorTarget: errorTarget,
		}
		C._go_git_populate_apply_callbacks(copts)
		copts.payload = pointerHandles.Track(data)
	}
	return copts
}
// freeApplyOptions releases the tracked callback handle installed by
// populateApplyOptions. A nil copts is a no-op; the struct itself is not
// freed (it is caller-allocated).
func freeApplyOptions(copts *C.git_apply_options) {
	if copts == nil {
		return
	}
	if copts.payload != nil {
		pointerHandles.Untrack(copts.payload)
	}
}
// applyOptionsFromC converts the C options back to Go. Only the flags are
// carried over; callbacks cannot be recovered from the C struct.
func applyOptionsFromC(copts *C.git_apply_options) *ApplyOptions {
	return &ApplyOptions{
		Flags: uint(copts.flags),
	}
}
// ApplyLocation represents the possible application locations for applying
// diffs. The values mirror libgit2's git_apply_location_t.
type ApplyLocation int
const (
	// ApplyLocationWorkdir applies the patch to the workdir, leaving the
	// index untouched. This is the equivalent of `git apply` with no location
	// argument.
	ApplyLocationWorkdir ApplyLocation = C.GIT_APPLY_LOCATION_WORKDIR
	// ApplyLocationIndex applies the patch to the index, leaving the working
	// directory untouched. This is the equivalent of `git apply --cached`.
	ApplyLocationIndex ApplyLocation = C.GIT_APPLY_LOCATION_INDEX
	// ApplyLocationBoth applies the patch to both the working directory and
	// the index. This is the equivalent of `git apply --index`.
	ApplyLocationBoth ApplyLocation = C.GIT_APPLY_LOCATION_BOTH
)
// ApplyDiff applies a Diff to the given repository, making changes directly
// in the working directory, the index, or both. opts may be nil for default
// apply behavior.
func (v *Repository) ApplyDiff(diff *Diff, location ApplyLocation, opts *ApplyOptions) error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	var err error
	cOpts := populateApplyOptions(&C.git_apply_options{}, opts, &err)
	defer freeApplyOptions(cOpts)
	ret := C.git_apply(v.ptr, diff.ptr, C.git_apply_location_t(location), cOpts)
	runtime.KeepAlive(v)
	runtime.KeepAlive(diff)
	runtime.KeepAlive(cOpts)
	// Prefer the Go-side callback error over the generic libgit2 error.
	if ret == C.int(ErrorCodeUser) && err != nil {
		return err
	}
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}
// ApplyToTree applies a Diff to a Tree and returns the resulting image as an
// Index, without touching the working directory or the repository index.
// opts may be nil for default apply behavior.
func (v *Repository) ApplyToTree(diff *Diff, tree *Tree, opts *ApplyOptions) (*Index, error) {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	var err error
	cOpts := populateApplyOptions(&C.git_apply_options{}, opts, &err)
	defer freeApplyOptions(cOpts)
	var indexPtr *C.git_index
	ret := C.git_apply_to_tree(&indexPtr, v.ptr, tree.cast_ptr, diff.ptr, cOpts)
	runtime.KeepAlive(diff)
	runtime.KeepAlive(tree)
	runtime.KeepAlive(cOpts)
	// Prefer the Go-side callback error over the generic libgit2 error.
	if ret == C.int(ErrorCodeUser) && err != nil {
		return nil, err
	}
	if ret < 0 {
		return nil, MakeGitError(ret)
	}
	return newIndexFromC(indexPtr, v), nil
}
// DiffFromBuffer reads the contents of a git patch file into a Diff object.
//
// The diff object produced is similar to the one that would be produced if you
// actually produced it computationally by comparing two trees, however there
// may be subtle differences. For example, a patch file likely contains
// abbreviated object IDs, so the object IDs in a git_diff_delta produced by
// this function will also be abbreviated.
//
// This function will only read patch files created by a git implementation, it
// will not read unified diffs produced by the diff program, nor any other
// types of patch files.
func DiffFromBuffer(buffer []byte, repo *Repository) (*Diff, error) {
	var diff *C.git_diff
	cBuffer := C.CBytes(buffer)
	defer C.free(unsafe.Pointer(cBuffer))
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	ecode := C.git_diff_from_buffer(&diff, (*C.char)(cBuffer), C.size_t(len(buffer)))
	if ecode < 0 {
		return nil, MakeGitError(ecode)
	}
	// NOTE(review): KeepAlive on a C pointer local has no effect; it is
	// harmless here since no Go object needs pinning past the cgo call.
	runtime.KeepAlive(diff)
	return newDiffFromC(diff, repo), nil
}
|
package dcmdata
import "testing"
// TestNewDcmList verifies that NewDcmList returns a zero-initialized list.
func TestNewDcmList(t *testing.T) {
	wants := []DcmList{
		{nil, nil, nil, 0},
	}
	for _, want := range wants {
		got := NewDcmList()
		if *got != want {
			t.Errorf("NewDcmList(), want '%v' got '%v'", want, got)
		}
	}
}
// TestNewDcmListNode verifies that a new node wraps the given object with no
// neighbors.
func TestNewDcmListNode(t *testing.T) {
	tests := []struct {
		obj      *DcmObject
		expected *DcmListNode
	}{
		{nil, &DcmListNode{nil, nil, nil}},
	}
	for _, tt := range tests {
		got := NewDcmListNode(tt.obj)
		if *got != *tt.expected {
			t.Errorf("NewDcmListNode(), want '%v' got '%v'", tt.expected, got)
		}
	}
}
// TestDcmListNodeValue verifies that Value on an empty node returns nil.
func TestDcmListNodeValue(t *testing.T) {
	tests := []struct {
		node     *DcmListNode
		expected *DcmObject
	}{
		{&DcmListNode{}, nil},
	}
	for _, tt := range tests {
		if got := tt.node.Value(); got != tt.expected {
			t.Errorf("Value(), want '%v' got '%v'", tt.expected, got)
		}
	}
}
// TestEmpty verifies that a zero-value list reports itself as empty.
func TestEmpty(t *testing.T) {
	tests := []struct {
		list     DcmList
		expected bool
	}{
		{DcmList{}, true},
	}
	for _, tt := range tests {
		if got := tt.list.Empty(); got != tt.expected {
			t.Errorf("Empty(), want '%v' got '%v'", tt.expected, got)
		}
	}
}
// TestDcmListValid verifies that a zero-value list has no valid current node.
func TestDcmListValid(t *testing.T) {
	tests := []struct {
		list     DcmList
		expected bool
	}{
		{DcmList{}, false},
	}
	for _, tt := range tests {
		if got := tt.list.Valid(); got != tt.expected {
			t.Errorf("Valid(), want '%v' got '%v'", tt.expected, got)
		}
	}
}
// TestDcmListCard verifies that a zero-value list has cardinality zero.
func TestDcmListCard(t *testing.T) {
	tests := []struct {
		list     DcmList
		expected uint32
	}{
		{DcmList{}, 0},
	}
	for _, tt := range tests {
		if got := tt.list.Card(); got != tt.expected {
			t.Errorf("Card(), want '%v' got '%v'", tt.expected, got)
		}
	}
}
// TestDcmListAppend verifies Append's return value and the resulting
// cardinality when appending a nil object.
func TestDcmListAppend(t *testing.T) {
	tests := []struct {
		list     *DcmList
		obj      *DcmObject
		wantObj  *DcmObject
		wantCard uint32
	}{
		{&DcmList{}, nil, nil, 0},
	}
	for _, tt := range tests {
		gotObj := tt.list.Append(tt.obj)
		gotCard := tt.list.Card()
		if gotObj != tt.wantObj || gotCard != tt.wantCard {
			t.Errorf("%v Append(%v), want '%v' | %v got '%v' | %v ", tt.list, tt.obj, tt.wantObj, tt.wantCard, gotObj, gotCard)
		}
	}
}
// TestDcmListPrepend verifies Prepend's return value and the resulting
// cardinality when prepending a nil object.
func TestDcmListPrepend(t *testing.T) {
	tests := []struct {
		list     *DcmList
		obj      *DcmObject
		wantObj  *DcmObject
		wantCard uint32
	}{
		{&DcmList{}, nil, nil, 0},
	}
	for _, tt := range tests {
		gotObj := tt.list.Prepend(tt.obj)
		gotCard := tt.list.Card()
		if gotObj != tt.wantObj || gotCard != tt.wantCard {
			t.Errorf("%v Prepend(%v), want '%v' | %v got '%v' | %v ", tt.list, tt.obj, tt.wantObj, tt.wantCard, gotObj, gotCard)
		}
	}
}
// TestDcmListInsert verifies Insert's return value and the resulting
// cardinality when inserting a nil object at the current position.
func TestDcmListInsert(t *testing.T) {
	tests := []struct {
		list     *DcmList
		obj      *DcmObject
		pos      E_ListPos
		wantObj  *DcmObject
		wantCard uint32
	}{
		{&DcmList{}, nil, ELP_atpos, nil, 0},
	}
	for _, tt := range tests {
		gotObj := tt.list.Insert(tt.obj, tt.pos)
		gotCard := tt.list.Card()
		if gotObj != tt.wantObj || gotCard != tt.wantCard {
			t.Errorf("%v Insert(%v,%v), want '%v' | %v got '%v' | %v ", tt.list, tt.obj, tt.pos, tt.wantObj, tt.wantCard, gotObj, gotCard)
		}
	}
}
// TestDcmListRemove verifies that removing from an empty list returns nil and
// leaves the cardinality at zero.
func TestDcmListRemove(t *testing.T) {
	tests := []struct {
		list     *DcmList
		wantObj  *DcmObject
		wantCard uint32
	}{
		{&DcmList{}, nil, 0},
	}
	for _, tt := range tests {
		gotObj := tt.list.Remove()
		gotCard := tt.list.Card()
		if gotObj != tt.wantObj || gotCard != tt.wantCard {
			t.Errorf("%v Remove(), want '%v' | %v got '%v' | %v ", tt.list, tt.wantObj, tt.wantCard, gotObj, gotCard)
		}
	}
}
// TestDcmListGet verifies that Get on an empty list returns nil.
func TestDcmListGet(t *testing.T) {
	tests := []struct {
		list     *DcmList
		pos      E_ListPos
		expected *DcmObject
	}{
		{&DcmList{}, ELP_atpos, nil},
	}
	for _, tt := range tests {
		if got := tt.list.Get(tt.pos); got != tt.expected {
			t.Errorf("%v Get(%v), want '%v' got '%v' ", tt.list, tt.pos, tt.expected, got)
		}
	}
}
// TestDcmListSeek verifies that seeking in an empty list returns nil for
// every supported position.
func TestDcmListSeek(t *testing.T) {
	tests := []struct {
		list     *DcmList
		pos      E_ListPos
		expected *DcmObject
	}{
		{&DcmList{}, ELP_first, nil},
		{&DcmList{}, ELP_last, nil},
		{&DcmList{}, ELP_prev, nil},
		{&DcmList{}, ELP_next, nil},
	}
	for _, tt := range tests {
		if got := tt.list.Seek(tt.pos); got != tt.expected {
			t.Errorf("%v Seek(%v), want '%v' got '%v' ", tt.list, tt.pos, tt.expected, got)
		}
	}
}
// TestDcmListSeekTo verifies that seeking to an index in an empty list
// returns nil.
func TestDcmListSeekTo(t *testing.T) {
	tests := []struct {
		list     *DcmList
		idx      uint32
		expected *DcmObject
	}{
		{&DcmList{}, 0, nil},
	}
	for _, tt := range tests {
		if got := tt.list.Seek_to(tt.idx); got != tt.expected {
			t.Errorf("%v Seek_to(%v), want '%v' got '%v' ", tt.list, tt.idx, tt.expected, got)
		}
	}
}
// TestDcmListDeleteAllElements verifies that clearing a fresh list leaves it
// in the zero state.
func TestDcmListDeleteAllElements(t *testing.T) {
	tests := []struct {
		list     *DcmList
		expected *DcmList
	}{
		{NewDcmList(), &DcmList{nil, nil, nil, 0}},
	}
	for _, tt := range tests {
		tt.list.DeleteAllElements()
		got := tt.list
		if *got != *tt.expected {
			t.Errorf("%v DeleteAllElements(), want '%v' got '%v' ", tt.list, tt.expected, got)
		}
	}
}
|
package proof
import (
// "incognito-chain/common"
"incognito-chain/privacy/coin"
errhandler "incognito-chain/privacy/errorhandler"
// "incognito-chain/privacy/key"
"incognito-chain/privacy/proof/agg_interface"
)
// Proof is the common interface implemented by every payment-proof version.
// It covers (de)serialization to raw bytes and JSON, and access to the coins
// and aggregated range proof carried by a transaction.
type Proof interface {
	// GetVersion returns the proof format version.
	GetVersion() uint8
	Init()
	GetInputCoins() []coin.PlainCoin
	GetOutputCoins() []coin.Coin
	GetAggregatedRangeProof() agg_interface.AggregatedRangeProof
	SetInputCoins([]coin.PlainCoin) error
	SetOutputCoins([]coin.Coin) error
	Bytes() []byte
	SetBytes(proofBytes []byte) *errhandler.PrivacyError
	MarshalJSON() ([]byte, error)
	UnmarshalJSON([]byte) error
	// IsPrivacy reports whether this is a privacy (confidential) proof.
	IsPrivacy() bool
	// ValidateSanity(interface{}) (bool, error)
	// Verify(boolParams map[string]bool, pubKey key.PublicKey, fee uint64, shardID byte, tokenID *common.Hash, additionalData interface{}) (bool, error)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.