text stringlengths 11 4.05M |
|---|
package ecc
import (
"fmt"
"math/big"
)
// var Curves = map[string] Curve{
// GetCurve loads the named SEC 2 (secp*) domain parameters into C and
// returns C. For every curve the hex strings are passed in the order:
// p (field prime), a, b (Weierstrass coefficients), Gx, Gy (base-point
// coordinates), n (base-point order), h (cofactor).
// If name matches no known curve, a diagnostic is printed and C is
// returned unchanged.
func (C *Curve) GetCurve(name string) *Curve {
	switch name {
	case "secp112r1":
		return C.load_curve_hex("secp112r1",
			"DB7C2ABF62E35E668076BEAD208B",
			"DB7C2ABF62E35E668076BEAD2088",
			"659EF8BA043916EEDE8911702B22",
			"09487239995A5EE76B55F9C2F098",
			"A89CE5AF8724C0A23E0E0FF77500",
			"DB7C2ABF62E35E7628DFAC6561C5",
			"01")
	case "secp112r2":
		return C.load_curve_hex("secp112r2",
			"DB7C2ABF62E35E668076BEAD208B",
			"6127C24C05F38A0AAAF65C0EF02C",
			"51DEF1815DB5ED74FCC34C85D709",
			"4BA30AB5E892B4E1649DD0928643",
			"ADCD46F5882E3747DEF36E956E97",
			"36DF0AAFD8B8D7597CA10520D04B",
			"04")
	case "secp128r1":
		return C.load_curve_hex("secp128r1",
			"FFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFF",
			"FFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFC",
			"E87579C11079F43DD824993C2CEE5ED3",
			"161FF7528B899B2D0C28607CA52C5B86",
			"CF5AC8395BAFEB13C02DA292DDED7A83",
			"FFFFFFFE0000000075A30D1B9038A115",
			"01")
	case "secp128r2":
		return C.load_curve_hex("secp128r2",
			"FFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFF",
			"D6031998D1B3BBFEBF59CC9BBFF9AEE1",
			"5EEEFCA380D02919DC2C6558BB6D8A5D",
			"7B6AA5D85E572983E6FB32A7CDEBC140",
			"27B6916A894D3AEE7106FE805FC34B44",
			"3FFFFFFF7FFFFFFFBE0024720613B5A3",
			"04")
	case "secp160k1":
		return C.load_curve_hex("secp160k1",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73",
			"0000000000000000000000000000000000000000",
			"0000000000000000000000000000000000000007",
			"3B4C382CE37AA192A4019E763036F4F5DD4D7EBB",
			"938CF935318FDCED6BC28286531733C3F03C4FEE",
			"0100000000000000000001B8FA16DFAB9ACA16B6B3",
			"01")
	case "secp160r1":
		return C.load_curve_hex("secp160r1",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFF",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFC",
			"1C97BEFC54BD7A8B65ACF89F81D4D4ADC565FA45",
			"4A96B5688EF573284664698968C38BB913CBFC82",
			"23A628553168947D59DCC912042351377AC5FB32",
			"0100000000000000000001F4C8F927AED3CA752257",
			"01")
	case "secp160r2":
		return C.load_curve_hex("secp160r2",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC70",
			"B4E134D3FB59EB8BAB57274904664D5AF50388BA",
			"52DCB034293A117E1F4FF11B30F7199D3144CE6D",
			"FEAFFEF2E331F296E071FA0DF9982CFEA7D43F2E",
			"0100000000000000000000351EE786A818F3A1A16B",
			"01")
	case "secp192k1":
		return C.load_curve_hex("secp192k1",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37",
			"000000000000000000000000000000000000000000000000",
			"000000000000000000000000000000000000000000000003",
			"DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D",
			"9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D",
			"FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D",
			"01")
	case "secp192r1":
		return C.load_curve_hex("secp192r1",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC",
			"64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1",
			"188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012",
			"07192B95FFC8DA78631011ED6B24CDD573F977A11E794811",
			"FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831",
			"01")
	case "secp224k1":
		return C.load_curve_hex("secp224k1",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D",
			"00000000000000000000000000000000000000000000000000000000",
			"00000000000000000000000000000000000000000000000000000005",
			"A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C",
			"7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5",
			"010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7",
			"01")
	case "secp224r1":
		return C.load_curve_hex("secp224r1",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000001",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFE",
			"B4050A850C04B3ABF54132565044B0B7D7BFD8BA270B39432355FFB4",
			"B70E0CBD6BB4BF7F321390B94A03C1D356C21122343280D6115C1D21",
			"BD376388B5F723FB4C22DFE6CD4375A05A07476444D5819985007E34",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFF16A2E0B8F03E13DD29455C5C2A3D",
			"01")
	case "secp256k1":
		return C.load_curve_hex("secp256k1",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F",
			"0000000000000000000000000000000000000000000000000000000000000000",
			"0000000000000000000000000000000000000000000000000000000000000007",
			"79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
			"483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
			"01")
	case "secp256r1":
		return C.load_curve_hex("secp256r1",
			"FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF",
			"FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC",
			"5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B",
			"6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296",
			"4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5",
			"FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551",
			"01")
	case "secp384r1":
		return C.load_curve_hex("secp384r1",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFF0000000000000000FFFFFFFF",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFF0000000000000000FFFFFFFC",
			"B3312FA7E23EE7E4988E056BE3F82D19181D9C6EFE8141120314088F5013875AC656398D8A2ED19D2A85C8EDD3EC2AEF",
			"AA87CA22BE8B05378EB1C71EF320AD746E1D3B628BA79B9859F741E082542A385502F25DBF55296C3A545E3872760AB7",
			"3617DE4A96262C6F5D9E98BF9292DC29F8F41DBD289A147CE9DA3113B5F0B8C00A60B1CE1D7E819D7A431D7C90EA0E5F",
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC7634D81F4372DDF581A0DB248B0A77AECEC196ACCC52973",
			"01")
	case "secp521r1":
		return C.load_curve_hex("secp521r1",
			"01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
			"01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC",
			"0051953EB9618E1C9A1F929A21A0B68540EEA2DA725B99B315F3B8B489918EF109E156193951EC7E937B1652C0BD3BB1BF073573DF883D2C34F1EF451FD46B503F00",
			"00C6858E06B70404E9CD9E3ECB662395B4429C648139053FB521F828AF606B4D3DBAA14B5E77EFE75928FE1DC127A2FFA8DE3348B3C1856A429BF97E7E31C2E5BD66",
			"011839296A789A3BC0045C8A5FB42C7D1BD998F54449579B446817AFBD17273E662C97EE72995EF42640C550B9013FAD0761353C7086A272C24088BE94769FD16650",
			"01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFA51868783BF2F966B7FCC0148F709A5D03BB5C9B8899C47AEBB6FB71E91386409",
			"01")
	}
	// NOTE(review): builtin print is implementation-specific and not meant
	// for production diagnostics; consider returning an error (or nil)
	// for unknown curve names instead of the unmodified receiver.
	print("this hits if no name fits\n")
	return C
}
// load_curve_hex parses the base-16 encoded curve parameters
// p, a, b, gx, gy, n and h into C and returns C for chaining.
func (C *Curve) load_curve_hex(name, p, a, b, gx, gy, n, h string) *Curve {
	const hexBase = 16
	return C.load_curve(name, p, a, b, gx, gy, n, h, hexBase)
}
// load_curve fills every field of C from the textual curve parameters,
// interpreting each numeric string in the given base, and returns C.
// Parse failures leave the corresponding field untouched, matching the
// behavior of big.Int.SetString.
func (C *Curve) load_curve(name, p, a, b, gx, gy, n, h string, base int) *Curve {
	C.name = name
	fields := []struct {
		dst *big.Int
		src string
	}{
		{C.p, p},
		{C.a, a},
		{C.b, b},
		{C.n, n},
		{C.h, h},
	}
	for _, f := range fields {
		f.dst.SetString(f.src, base)
	}
	// The generator point parses both coordinates at once.
	C.G.SetString(gx, gy, base)
	return C
}
// Print writes the curve's multi-line description to stdout.
func (C *Curve) Print() {
	fmt.Println(C.String())
}
// String returns a multi-line description of the curve: its name, the
// parameters p, a, b, n and h, followed by the generator point's own
// string form (which supplies its leading separator, if any).
func (C *Curve) String() string {
	// Fixed: the original initialized s to "" and immediately overwrote
	// it (dead store, staticcheck SA4006).
	s := "Curve: " + C.name + "\n"
	s += "p: " + C.p.String() + "\n"
	s += "a: " + C.a.String() + "\n"
	s += "b: " + C.b.String() + "\n"
	s += "n: " + C.n.String() + "\n"
	s += "h: " + C.h.String()
	s += C.G.String()
	return s
}
// Test is a quick smoke check: it loads the secp112r1 parameters and
// prints the resulting curve.
func (C *Curve) Test() {
	C.GetCurve("secp112r1").Print()
}
// NewCurve allocates a Curve whose big.Int fields and generator point
// are initialized and ready to be filled by load_curve.
func NewCurve() *Curve {
	return &Curve{
		G: NewPoint(),
		p: new(big.Int),
		a: new(big.Int),
		b: new(big.Int),
		n: new(big.Int),
		h: new(big.Int),
	}
}
// func (C *Curve)GetCurve(name string) *Curve{
// return Curves[name]
// }
// Curve holds the domain parameters of an elliptic curve in short
// Weierstrass form y^2 = x^3 + a*x + b over the prime field F_p,
// as loaded from the SEC 2 parameter sets in GetCurve.
type Curve struct {
	name string   // curve identifier, e.g. "secp256k1"
	p    *big.Int //Prime modulus of the underlying field
	a    *big.Int //'a' parameter of the elliptic curve
	b    *big.Int //'b' parameter of the elliptic curve
	G    *Point   //Generator point of the curve, also known as base point.
	n    *big.Int // order of G per the SEC 2 parameter sets
	h    *big.Int // cofactor per the SEC 2 parameter sets
}
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"fmt"
"unsafe"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/collate"
)
// newUnsignedTinyConst builds a Constant of type TINYINT UNSIGNED with
// flen 1 and no decimals holding the given value. The unsigned flag
// shrinks the range to avoid integral promotion. Factored out because
// NewOne and NewZero previously duplicated this body verbatim.
func newUnsignedTinyConst(value int64) *Constant {
	retT := types.NewFieldType(mysql.TypeTiny)
	retT.AddFlag(mysql.UnsignedFlag) // shrink range to avoid integral promotion
	retT.SetFlen(1)
	retT.SetDecimal(0)
	return &Constant{
		Value:   types.NewDatum(value),
		RetType: retT,
	}
}

// NewOne stands for a number 1.
func NewOne() *Constant {
	return newUnsignedTinyConst(1)
}

// NewZero stands for a number 0.
func NewZero() *Constant {
	return newUnsignedTinyConst(0)
}
// NewUInt64Const stands for constant of a given number.
func NewUInt64Const(num int) *Constant {
	tp := types.NewFieldType(mysql.TypeLonglong)
	tp.AddFlag(mysql.UnsignedFlag) // shrink range to avoid integral promotion
	tp.SetFlen(mysql.MaxIntWidth)
	tp.SetDecimal(0)
	return &Constant{Value: types.NewDatum(num), RetType: tp}
}
// NewUInt64ConstWithFieldType stands for constant of a given number with specified fieldType.
func NewUInt64ConstWithFieldType(num uint64, fieldType *types.FieldType) *Constant {
	return &Constant{Value: types.NewDatum(num), RetType: fieldType}
}

// NewInt64Const stands for constant of a given number.
func NewInt64Const(num int64) *Constant {
	tp := types.NewFieldType(mysql.TypeLonglong)
	tp.SetFlen(mysql.MaxIntWidth)
	tp.SetDecimal(0)
	return &Constant{Value: types.NewDatum(num), RetType: tp}
}

// NewNull stands for null constant.
func NewNull() *Constant {
	tp := types.NewFieldType(mysql.TypeTiny)
	tp.SetFlen(1)
	tp.SetDecimal(0)
	return &Constant{Value: types.NewDatum(nil), RetType: tp}
}

// NewNullWithFieldType stands for null constant with specified fieldType.
func NewNullWithFieldType(fieldType *types.FieldType) *Constant {
	return &Constant{Value: types.NewDatum(nil), RetType: fieldType}
}
// Constant stands for a constant value.
type Constant struct {
	// Value is the materialized datum this constant evaluates to
	// (refreshed lazily for ParamMarker/DeferredExpr constants).
	Value types.Datum
	// RetType is the declared field type of the constant.
	RetType *types.FieldType
	// DeferredExpr holds deferred function in PlanCache cached plan.
	// it's only used to represent non-deterministic functions(see expression.DeferredFunctions)
	// in PlanCache cached plan, so let them can be evaluated until cached item be used.
	DeferredExpr Expression
	// ParamMarker holds param index inside sessionVars.PreparedParams.
	// It's only used to reference a user variable provided in the `EXECUTE` statement or `COM_EXECUTE` binary protocol.
	ParamMarker *ParamMarker
	// hashcode caches the result of HashCode.
	hashcode []byte
	collationInfo
}
// ParamMarker indicates param provided by COM_STMT_EXECUTE.
type ParamMarker struct {
	ctx   sessionctx.Context // session that holds the bound parameter values
	order int                // index into the session's plan-cache parameter list
}
// GetUserVar returns the corresponding user variable presented in the `EXECUTE` statement or `COM_EXECUTE` command.
func (d *ParamMarker) GetUserVar() types.Datum {
	return d.ctx.GetSessionVars().PlanCacheParams.GetParamValue(d.order)
}
// String implements fmt.Stringer interface.
func (c *Constant) String() string {
	switch {
	case c.ParamMarker != nil:
		// Refresh the cached value from the bound EXECUTE parameter
		// before formatting it.
		dt := c.ParamMarker.GetUserVar()
		c.Value.SetValue(dt.GetValue(), c.RetType)
	case c.DeferredExpr != nil:
		return c.DeferredExpr.String()
	}
	return fmt.Sprintf("%v", c.Value.GetValue())
}
// MarshalJSON implements json.Marshaler interface.
func (c *Constant) MarshalJSON() ([]byte, error) {
	quoted := fmt.Sprintf("%q", c)
	return []byte(quoted), nil
}
// Clone implements Expression interface.
// The clone is a shallow copy of the constant.
func (c *Constant) Clone() Expression {
	clone := *c
	return &clone
}
// GetType implements Expression interface.
// For EXECUTE parameters the type is inferred from the currently bound
// datum rather than taken from RetType.
func (c *Constant) GetType() *types.FieldType {
	if c.ParamMarker != nil {
		// GetType() may be called in multi-threaded context, e.g, in building inner executors of IndexJoin,
		// so it should avoid data race. We achieve this by returning different FieldType pointer for each call.
		tp := types.NewFieldType(mysql.TypeUnspecified)
		dt := c.ParamMarker.GetUserVar()
		types.InferParamTypeFromDatum(&dt, tp)
		return tp
	}
	return c.RetType
}
// VecEvalInt evaluates this expression in a vectorized manner.
// Deferred expressions delegate; plain constants broadcast their value.
func (c *Constant) VecEvalInt(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error {
	if c.DeferredExpr != nil {
		return c.DeferredExpr.VecEvalInt(ctx, input, result)
	}
	return genVecFromConstExpr(ctx, c, types.ETInt, input, result)
}

// VecEvalReal evaluates this expression in a vectorized manner.
func (c *Constant) VecEvalReal(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error {
	if c.DeferredExpr != nil {
		return c.DeferredExpr.VecEvalReal(ctx, input, result)
	}
	return genVecFromConstExpr(ctx, c, types.ETReal, input, result)
}

// VecEvalString evaluates this expression in a vectorized manner.
func (c *Constant) VecEvalString(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error {
	if c.DeferredExpr != nil {
		return c.DeferredExpr.VecEvalString(ctx, input, result)
	}
	return genVecFromConstExpr(ctx, c, types.ETString, input, result)
}

// VecEvalDecimal evaluates this expression in a vectorized manner.
func (c *Constant) VecEvalDecimal(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error {
	if c.DeferredExpr != nil {
		return c.DeferredExpr.VecEvalDecimal(ctx, input, result)
	}
	return genVecFromConstExpr(ctx, c, types.ETDecimal, input, result)
}

// VecEvalTime evaluates this expression in a vectorized manner.
func (c *Constant) VecEvalTime(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error {
	if c.DeferredExpr != nil {
		return c.DeferredExpr.VecEvalTime(ctx, input, result)
	}
	return genVecFromConstExpr(ctx, c, types.ETTimestamp, input, result)
}

// VecEvalDuration evaluates this expression in a vectorized manner.
func (c *Constant) VecEvalDuration(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error {
	if c.DeferredExpr != nil {
		return c.DeferredExpr.VecEvalDuration(ctx, input, result)
	}
	return genVecFromConstExpr(ctx, c, types.ETDuration, input, result)
}

// VecEvalJSON evaluates this expression in a vectorized manner.
func (c *Constant) VecEvalJSON(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error {
	if c.DeferredExpr != nil {
		return c.DeferredExpr.VecEvalJSON(ctx, input, result)
	}
	return genVecFromConstExpr(ctx, c, types.ETJson, input, result)
}
// getLazyDatum returns the datum behind a lazy constant (an EXECUTE
// parameter or a deferred expression). isLazy reports whether the
// caller should use dt instead of c.Value.
func (c *Constant) getLazyDatum(row chunk.Row) (dt types.Datum, isLazy bool, err error) {
	switch {
	case c.ParamMarker != nil:
		return c.ParamMarker.GetUserVar(), true, nil
	case c.DeferredExpr != nil:
		dt, err = c.DeferredExpr.Eval(row)
		return dt, true, err
	default:
		return types.Datum{}, false, nil
	}
}
// Traverse implements the TraverseDown interface.
// Constants are leaves, so the action is applied to the node itself.
func (c *Constant) Traverse(action TraverseAction) Expression {
	return action.Transform(c)
}
// Eval implements Expression interface.
// Plain constants return c.Value directly. Lazy constants (EXECUTE
// parameters or deferred expressions) are materialized first: NULL is
// cached back into c.Value, and when the deferred expression is a
// ScalarFunction the result is converted to RetType (decimals are
// rounded in place instead of converted).
func (c *Constant) Eval(row chunk.Row) (types.Datum, error) {
	if dt, lazy, err := c.getLazyDatum(row); lazy {
		if err != nil {
			return c.Value, err
		}
		if dt.IsNull() {
			// Cache NULL so non-lazy readers of c.Value observe it too.
			c.Value.SetNull()
			return c.Value, nil
		}
		if c.DeferredExpr != nil {
			sf, sfOk := c.DeferredExpr.(*ScalarFunction)
			if sfOk {
				if dt.Kind() != types.KindMysqlDecimal {
					// Coerce the lazily-evaluated datum to the declared type.
					val, err := dt.ConvertTo(sf.GetCtx().GetSessionVars().StmtCtx, c.RetType)
					if err != nil {
						return dt, err
					}
					return val, nil
				}
				// Decimals keep their value but are rounded to RetType's scale.
				if err := c.adjustDecimal(dt.GetMysqlDecimal()); err != nil {
					return dt, err
				}
			}
		}
		return dt, nil
	}
	return c.Value, nil
}
// EvalInt returns int representation of Constant.
func (c *Constant) EvalInt(ctx sessionctx.Context, row chunk.Row) (int64, bool, error) {
	dt, lazy, err := c.getLazyDatum(row)
	if err != nil {
		return 0, false, err
	}
	if !lazy {
		dt = c.Value
	}
	if c.GetType().GetType() == mysql.TypeNull || dt.IsNull() {
		return 0, true, nil
	}
	switch {
	case dt.Kind() == types.KindBinaryLiteral:
		val, convErr := dt.GetBinaryLiteral().ToInt(ctx.GetSessionVars().StmtCtx)
		return int64(val), convErr != nil, convErr
	case c.GetType().Hybrid() || dt.Kind() == types.KindString:
		res, convErr := dt.ToInt64(ctx.GetSessionVars().StmtCtx)
		return res, false, convErr
	case dt.Kind() == types.KindMysqlBit:
		bits, convErr := dt.GetBinaryLiteral().ToInt(ctx.GetSessionVars().StmtCtx)
		return int64(bits), false, convErr
	}
	return dt.GetInt64(), false, nil
}
// EvalReal returns real representation of Constant.
func (c *Constant) EvalReal(ctx sessionctx.Context, row chunk.Row) (float64, bool, error) {
	dt, lazy, err := c.getLazyDatum(row)
	switch {
	case err != nil:
		return 0, false, err
	case !lazy:
		dt = c.Value
	}
	if c.GetType().GetType() == mysql.TypeNull || dt.IsNull() {
		return 0, true, nil
	}
	if c.GetType().Hybrid() || dt.Kind() == types.KindBinaryLiteral || dt.Kind() == types.KindString {
		res, convErr := dt.ToFloat64(ctx.GetSessionVars().StmtCtx)
		return res, false, convErr
	}
	return dt.GetFloat64(), false, nil
}

// EvalString returns string representation of Constant.
func (c *Constant) EvalString(ctx sessionctx.Context, row chunk.Row) (string, bool, error) {
	dt, lazy, err := c.getLazyDatum(row)
	switch {
	case err != nil:
		return "", false, err
	case !lazy:
		dt = c.Value
	}
	if c.GetType().GetType() == mysql.TypeNull || dt.IsNull() {
		return "", true, nil
	}
	str, convErr := dt.ToString()
	return str, false, convErr
}

// EvalDecimal returns decimal representation of Constant.
func (c *Constant) EvalDecimal(ctx sessionctx.Context, row chunk.Row) (*types.MyDecimal, bool, error) {
	dt, lazy, err := c.getLazyDatum(row)
	switch {
	case err != nil:
		return nil, false, err
	case !lazy:
		dt = c.Value
	}
	if c.GetType().GetType() == mysql.TypeNull || dt.IsNull() {
		return nil, true, nil
	}
	dec, convErr := dt.ToDecimal(ctx.GetSessionVars().StmtCtx)
	if convErr != nil {
		return nil, false, convErr
	}
	if roundErr := c.adjustDecimal(dec); roundErr != nil {
		return nil, false, roundErr
	}
	return dec, false, nil
}

// adjustDecimal rounds d in place to the constant's declared scale
// when d currently carries fewer fractional digits.
func (c *Constant) adjustDecimal(d *types.MyDecimal) error {
	// Decimal Value's precision and frac may be modified during plan building.
	_, frac := d.PrecisionAndFrac()
	target := c.GetType().GetDecimal()
	if frac >= target {
		return nil
	}
	return d.Round(d, target, types.ModeHalfUp)
}
// EvalTime returns DATE/DATETIME/TIMESTAMP representation of Constant.
func (c *Constant) EvalTime(ctx sessionctx.Context, row chunk.Row) (val types.Time, isNull bool, err error) {
	dt, lazy, err := c.getLazyDatum(row)
	switch {
	case err != nil:
		return types.ZeroTime, false, err
	case !lazy:
		dt = c.Value
	}
	if c.GetType().GetType() == mysql.TypeNull || dt.IsNull() {
		return types.ZeroTime, true, nil
	}
	return dt.GetMysqlTime(), false, nil
}

// EvalDuration returns Duration representation of Constant.
func (c *Constant) EvalDuration(ctx sessionctx.Context, row chunk.Row) (val types.Duration, isNull bool, err error) {
	dt, lazy, err := c.getLazyDatum(row)
	switch {
	case err != nil:
		return types.Duration{}, false, err
	case !lazy:
		dt = c.Value
	}
	if c.GetType().GetType() == mysql.TypeNull || dt.IsNull() {
		return types.Duration{}, true, nil
	}
	return dt.GetMysqlDuration(), false, nil
}

// EvalJSON returns JSON representation of Constant.
func (c *Constant) EvalJSON(ctx sessionctx.Context, row chunk.Row) (types.BinaryJSON, bool, error) {
	dt, lazy, err := c.getLazyDatum(row)
	switch {
	case err != nil:
		return types.BinaryJSON{}, false, err
	case !lazy:
		dt = c.Value
	}
	if c.GetType().GetType() == mysql.TypeNull || dt.IsNull() {
		return types.BinaryJSON{}, true, nil
	}
	return dt.GetMysqlJSON(), false, nil
}
// Equal implements Expression interface.
// Two constants are equal when both materialize without error and
// their values compare equal under the binary collator.
func (c *Constant) Equal(ctx sessionctx.Context, b Expression) bool {
	other, ok := b.(*Constant)
	if !ok {
		return false
	}
	// Force lazy constants to materialize their Value fields first.
	if _, err := other.Eval(chunk.Row{}); err != nil {
		return false
	}
	if _, err := c.Eval(chunk.Row{}); err != nil {
		return false
	}
	cmp, err := c.Value.Compare(ctx.GetSessionVars().StmtCtx, &other.Value, collate.GetBinaryCollator())
	return err == nil && cmp == 0
}
// IsCorrelated implements Expression interface.
// A constant never references an outer query's column.
func (c *Constant) IsCorrelated() bool { return false }

// ConstItem implements Expression interface.
// When the plan cache is active, lazy constants (deferred expressions
// and EXECUTE parameters) are not constant items.
func (c *Constant) ConstItem(sc *stmtctx.StatementContext) bool {
	if !sc.UseCache {
		return true
	}
	return c.DeferredExpr == nil && c.ParamMarker == nil
}

// Decorrelate implements Expression interface.
// Constants have nothing to decorrelate.
func (c *Constant) Decorrelate(_ *Schema) Expression { return c }
// HashCode implements Expression interface.
// The code is computed once and cached in c.hashcode. Deferred
// expressions delegate to the wrapped expression; EXECUTE parameters
// hash their parameter index (so codes are stable across bindings);
// plain constants hash the materialized value.
func (c *Constant) HashCode(sc *stmtctx.StatementContext) []byte {
	if len(c.hashcode) > 0 {
		return c.hashcode
	}
	if c.DeferredExpr != nil {
		c.hashcode = c.DeferredExpr.HashCode(sc)
		return c.hashcode
	}
	if c.ParamMarker != nil {
		c.hashcode = append(c.hashcode, parameterFlag)
		c.hashcode = codec.EncodeInt(c.hashcode, int64(c.ParamMarker.order))
		return c.hashcode
	}
	// Materialize any lazy value into c.Value before hashing; the error
	// is only logged because HashCode has no error return.
	_, err := c.Eval(chunk.Row{})
	if err != nil {
		terror.Log(err)
	}
	c.hashcode = append(c.hashcode, constantFlag)
	c.hashcode = codec.HashCode(c.hashcode, c.Value)
	return c.hashcode
}
// ResolveIndices implements Expression interface.
// Constants carry no column references, so there is nothing to resolve.
func (c *Constant) ResolveIndices(_ *Schema) (Expression, error) { return c, nil }

func (c *Constant) resolveIndices(_ *Schema) error { return nil }

// ResolveIndicesByVirtualExpr implements Expression interface.
func (c *Constant) ResolveIndicesByVirtualExpr(_ *Schema) (Expression, bool) { return c, true }

func (c *Constant) resolveIndicesByVirtualExpr(_ *Schema) bool { return true }

// RemapColumn remaps columns with provided mapping and returns new expression
func (c *Constant) RemapColumn(_ map[int64]*Column) (Expression, error) { return c, nil }
// Vectorized returns if this expression supports vectorized evaluation.
func (c *Constant) Vectorized() bool {
	if c.DeferredExpr == nil {
		return true
	}
	return c.DeferredExpr.Vectorized()
}

// SupportReverseEval checks whether the builtinFunc support reverse evaluation.
func (c *Constant) SupportReverseEval() bool {
	if c.DeferredExpr == nil {
		return true
	}
	return c.DeferredExpr.SupportReverseEval()
}
// ReverseEval evaluates the only one column value with given function result.
// A constant's value is independent of the function result, so res and
// rType are intentionally unused and c.Value is returned as-is.
func (c *Constant) ReverseEval(sc *stmtctx.StatementContext, res types.Datum, rType types.RoundingType) (val types.Datum, err error) {
	return c.Value, nil
}

// Coercibility returns the coercibility value which is used to check collations.
// It is derived lazily on first use and cached in collationInfo.
func (c *Constant) Coercibility() Coercibility {
	if !c.HasCoercibility() {
		c.SetCoercibility(deriveCoercibilityForConstant(c))
	}
	return c.collationInfo.Coercibility()
}
// emptyConstantSize is the fixed footprint of a Constant struct itself.
const emptyConstantSize = int64(unsafe.Sizeof(Constant{}))

// MemoryUsage return the memory usage of Constant
func (c *Constant) MemoryUsage() (sum int64) {
	if c == nil {
		return 0
	}
	sum = emptyConstantSize + c.Value.MemUsage() + int64(cap(c.hashcode))
	if c.RetType != nil {
		sum += c.RetType.MemoryUsage()
	}
	return sum
}
|
package humanize
import (
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// st is a minimal Go source fixture: a named pointer type plus two
// *int variables, used to exercise pointer ("star") type parsing.
var st = `
package test
type STAR *int
var x *int
var y *int
`

// TestStartType checks that pointer types are parsed correctly: the
// named type STAR resolves to a StarType wrapping ident "int", and two
// variables of the same pointer type compare equal after Bind.
func TestStartType(t *testing.T) {
	Convey("Star test", t, func() {
		var p = &Package{}
		f, err := ParseFile(st, p)
		So(err, ShouldBeNil)
		p.Files = append(p.Files, f)
		Convey("Normal define", func() {
			i, err := p.FindType("STAR")
			So(err, ShouldBeNil)
			So(i.Name, ShouldEqual, "STAR")
			So(i.Type, ShouldHaveSameTypeAs, &StarType{})
			So(i.Type.(*StarType).Target.(*IdentType).Ident, ShouldEqual, "int")
			So(i.Type.Package(), ShouldEqual, p)
		})
		Convey("Equality", func() {
			So(p.Bind(), ShouldBeNil)
			x, err := p.FindVariable("x")
			So(err, ShouldBeNil)
			So(x.Type, ShouldHaveSameTypeAs, &StarType{})
			y, err := p.FindVariable("y")
			So(err, ShouldBeNil)
			So(x.Type.Equal(y.Type), ShouldBeTrue)
		})
	})
}
|
// Copyright 2020 Kuei-chun Chen. All rights reserved.
package mdb
import (
"bytes"
"context"
"errors"
"fmt"
"net/url"
"os"
"strings"
"sync"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
"github.com/simagix/gox"
"go.mongodb.org/mongo-driver/mongo"
)
// Sharded cluster
const Sharded = "sharded"
// Replica set
const Replica = "replica"
// Standalone server
const Standalone = "standalone"
// ClusterStats keeps slow ops struct
type ClusterStats struct {
	BuildInfo        BuildInfo        `bson:"buildInfo"`
	CmdLineOpts      CmdLineOpts      `bson:"getCmdLineOpts"`
	Cluster          string           `bson:"cluster"` // Sharded, Replica, or Standalone
	Databases        *[]Database      `bson:"databases"`
	Host             string           `bson:"host"`
	HostInfo         HostInfo         `bson:"hostInfo"`
	Logger           *gox.Logger      `bson:"keyhole"`
	OplogStats       OplogStats       `bson:"oplog"`
	Process          string           `bson:"process"` // e.g. "mongod" or "mongos"
	ReplSetGetStatus ReplSetGetStatus `bson:"replSetGetStatus"`
	ServerStatus     ServerStatus     `bson:"serverStatus"`
	Shards           []Shard          `bson:"shards"`
	Version          string           `bson:"version"`
	dbNames          []string // databases to collect stats from; empty means all
	fastMode         bool     // passed to DatabaseStats; presumably skips expensive scans — TODO confirm
	redact           bool     // redact sensitive values in the collected output
	signature        string   // app name handed to gox.GetLogger
	verbose          bool     // verbose collection/logging
}
// NewClusterStats returns *ClusterStats
func NewClusterStats(signature string) *ClusterStats {
	return &ClusterStats{signature: signature}
}
// SetDBNames sets the list of database names to collect stats from.
// (The previous comment wrongly said "sets redact" — copy-paste error.)
func (p *ClusterStats) SetDBNames(dbNames []string) {
	p.dbNames = dbNames
}

// SetFastMode sets fastMode mode
func (p *ClusterStats) SetFastMode(fastMode bool) {
	p.fastMode = fastMode
}

// SetRedaction sets redact
func (p *ClusterStats) SetRedaction(redact bool) {
	p.redact = redact
}

// SetVerbose sets verbose mode
func (p *ClusterStats) SetVerbose(verbose bool) {
	p.verbose = verbose
}
// GetClusterStats collects cluster stats
// It first gathers the summary for the connected server, then fans out
// to every shard (sharded cluster) or every replica-set member
// (replica set) before collecting per-database stats. Sub-collection
// failures are logged rather than returned; only the initial summary
// failure aborts.
func (p *ClusterStats) GetClusterStats(client *mongo.Client, connString connstring.ConnString) error {
	var err error
	p.Logger = gox.GetLogger(p.signature)
	p.Logger.Info("GetClusterStats() begins")
	if err = p.GetClusterStatsSummary(client); err != nil {
		return err
	}
	if p.CmdLineOpts, err = GetCmdLineOpts(client); err != nil {
		p.Logger.Info(fmt.Sprintf(`GetCmdLineOpts(): %v`, err))
	}
	if p.Cluster == Sharded { //collects from the primary of each shard
		if p.Shards, err = GetShards(client); err != nil {
			p.Logger.Infof(`GetShards(): %v`, err)
		}
		p.Logger.Infof("%v shards detected, collecting from all servers", len(p.Shards))
		if p.Shards, err = p.GetServersStatsSummary(p.Shards, connString); err != nil {
			p.Logger.Error(err)
		}
		p.Logger.Info("end collecting from all servers")
	} else if p.Cluster == Replica && p.Process == "mongod" { //collects replica info
		message := "replica detected, collecting from all servers"
		p.Logger.Info(message)
		if p.ReplSetGetStatus, err = GetReplSetGetStatus(client); err != nil {
			p.Logger.Info(fmt.Sprintf(`GetReplSetGetStatus(): %v`, err))
		}
		// Build a single pseudo-shard "setName/host1,host2,..." so the
		// replica set reuses the sharded collection path.
		setName := p.ServerStatus.Repl.SetName
		hosts := []string{}
		for _, member := range p.ReplSetGetStatus.Members {
			hosts = append(hosts, member.Name)
		}
		s := fmt.Sprintf(`%v/%v`, setName, strings.Join(hosts, ","))
		oneShard := []Shard{{ID: setName, State: 1, Host: s}}
		if p.Shards, err = p.GetServersStatsSummary(oneShard, connString); err != nil {
			p.Logger.Error(err)
		}
		p.Logger.Info("end collecting from all servers")
	}
	db := NewDatabaseStats(p.Logger.AppName)
	db.SetNumberShards(len(p.Shards))
	db.SetRedaction(p.redact)
	db.SetVerbose(p.verbose)
	db.SetFastMode(p.fastMode)
	var databases []Database
	if databases, err = db.GetAllDatabasesStats(client, p.dbNames); err != nil {
		p.Logger.Info(fmt.Sprintf(`GetAllDatabasesStats(): %v`, err))
	}
	p.Databases = &databases
	return nil
}
// GetClusterStatsSummary collects cluster stats
// It fills build info, host info and server status for the connected
// server, derives the cluster type, and then — depending on topology —
// oplog/replica-set status (replica member) or the shard list (mongos).
// Unlike GetClusterStats, every failure here is returned immediately.
func (p *ClusterStats) GetClusterStatsSummary(client *mongo.Client) error {
	var err error
	p.Logger = gox.GetLogger(p.signature)
	if p.BuildInfo, err = GetBuildInfo(client); err != nil {
		return err
	}
	p.Version = p.BuildInfo.Version
	if p.HostInfo, err = GetHostInfo(client); err != nil {
		return err
	}
	if p.ServerStatus, err = GetServerStatus(client); err != nil {
		return err
	}
	p.Host = p.ServerStatus.Host
	p.Process = p.ServerStatus.Process
	p.Cluster = GetClusterType(p.ServerStatus)
	if p.Cluster == Replica && p.Process == "mongod" { //collects replica info
		if p.OplogStats, err = GetOplogStats(client); err != nil {
			return err
		}
		if p.ReplSetGetStatus, err = GetReplSetGetStatus(client); err != nil {
			return err
		}
	} else if p.Cluster == Sharded {
		if p.Shards, err = GetShards(client); err != nil {
			return err
		}
	}
	return nil
}
// GetServersStatsSummary returns cluster stats from all shards
// It connects to every member URI concurrently (at most 6 in flight),
// collects a per-server summary, and groups the results back under
// their replica-set name. On any member failure the last error seen is
// returned together with the shard list rebuilt from whatever was
// collected so far.
func (p *ClusterStats) GetServersStatsSummary(shards []Shard, connString connstring.ConnString) ([]Shard, error) {
	var err error
	var uris []string
	var smap = map[string]Shard{}
	p.Logger = gox.GetLogger(p.signature)
	for _, v := range shards {
		v.Servers = []ClusterStats{}
		smap[v.ID] = v
	}
	if uris, err = GetAllServerURIs(shards, connString); err != nil {
		return shards, err
	}
	wg := gox.NewWaitGroup(6) // cap concurrent member connections at 6
	var mu sync.Mutex         // guards smap and e below
	var e error               // last error from any worker goroutine
	for i, uri := range uris {
		// Mask the password before logging the connection string.
		s := uri
		cs, _ := connstring.Parse(s)
		if cs.Password != "" {
			s = strings.Replace(s, url.QueryEscape(cs.Password), "xxxxxx", 1)
		}
		p.Logger.Infof(`[t-%d] collect from %v`, i, s)
		wg.Add(1)
		go func(uri string, n int, logger *gox.Logger) {
			defer wg.Done()
			var sclient *mongo.Client
			var err error
			if sclient, err = NewMongoClient(uri); err != nil {
				logger.Errorf(`[t-%d] error: %v`, n, err)
				mu.Lock()
				e = err
				mu.Unlock()
				return
			}
			tm := time.Now()
			// NOTE(review): the Ping error is ignored; the call is only
			// used to time round-trip latency for the log line below.
			sclient.Ping(context.Background(), nil)
			logger.Infof(`[t-%d] ping: %v`, n, time.Since(tm))
			defer sclient.Disconnect(context.Background())
			server := NewClusterStats(p.Logger.AppName)
			if err = server.GetClusterStatsSummary(sclient); err != nil {
				logger.Errorf(`[t-%d] error: %v`, n, err)
				mu.Lock()
				e = err
				mu.Unlock()
				return
			}
			// File this server's stats under its replica-set name.
			mu.Lock()
			node := smap[server.ServerStatus.Repl.SetName]
			node.Servers = append(node.Servers, *server)
			smap[server.ServerStatus.Repl.SetName] = node
			mu.Unlock()
			logger.Infof(`[t-%d] completed`, n)
		}(uri, i, p.Logger)
	}
	wg.Wait()
	if e != nil {
		return shards, e
	}
	// Rebuild the shard slice from the map; order follows map iteration
	// and is therefore unspecified.
	shards = []Shard{}
	for _, v := range smap {
		shards = append(shards, v)
	}
	return shards, nil
}
// GetClusterShortSummary returns one line summary
// On collection failure, the error text itself is returned.
func (p *ClusterStats) GetClusterShortSummary(client *mongo.Client) string {
	if err := p.GetClusterStatsSummary(client); err != nil {
		return err.Error()
	}
	return p.GetShortSummary()
}
// GetShortSummary returns a short summary
// covering version, edition, host, OS, process, topology and sizing.
func (p *ClusterStats) GetShortSummary() string {
	edition := "community"
	if mods := p.BuildInfo.Modules; len(mods) > 0 {
		edition = mods[0]
	}
	shardCount := ""
	if p.Cluster == Sharded {
		shardCount = fmt.Sprintf(`(%v)`, len(p.Shards))
	}
	return fmt.Sprintf(`MongoDB v%v %v %v (%v) %v %v%v %v cores %v mem`,
		p.BuildInfo.Version, edition, p.HostInfo.System.Hostname, p.HostInfo.OS.Name,
		p.ServerStatus.Process, p.Cluster, shardCount, p.HostInfo.System.NumCores, p.HostInfo.System.MemSizeMB)
}
// Print prints a cluster short summary
func (p *ClusterStats) Print() {
	summary := p.GetShortSummary()
	fmt.Println(summary)
}
// OutputBSON writes bson data to a file
// The output is a gzipped stream of BSON documents under outdir: first
// the cluster document (with database details reduced to summaries),
// then one document per collection. It returns the chosen file name,
// the bytes written, and any error.
func (p *ClusterStats) OutputBSON() (string, []byte, error) {
	var err error
	var data []byte
	var ofile string
	if p.HostInfo.System.Hostname == "" {
		result := `roles 'clusterMonitor' and 'readAnyDatabase' are required`
		return ofile, data, errors.New(result)
	}
	// Fixed: the original dereferenced p.Databases unconditionally and
	// panicked when stats had not been collected yet.
	if p.Databases == nil {
		return ofile, data, errors.New(`no database stats collected; run GetClusterStats first`)
	}
	// Best-effort: the directory may already exist; a real permission
	// problem will surface when the file is written below.
	os.Mkdir(outdir, 0755)
	basename := strings.ReplaceAll(p.HostInfo.System.Hostname, ":", "_")
	ofile = fmt.Sprintf(`%v/%v-stats.bson.gz`, outdir, basename)
	for i := 1; DoesFileExist(ofile); i++ {
		ofile = fmt.Sprintf(`%v/%v.%d-stats.bson.gz`, outdir, basename, i)
	}
	// Swap full database details for per-database summaries so the
	// top-level document stays small; collection documents follow.
	databases := p.Databases
	p.Databases = nil
	var summaries []Database
	for _, db := range *databases {
		summaries = append(summaries, Database{
			Name:       db.Name,
			SizeOnDisk: db.SizeOnDisk,
			Empty:      db.Empty,
			Shards:     db.Shards,
			Stats:      db.Stats})
	}
	p.Databases = &summaries
	var buffer bytes.Buffer
	if data, err = bson.Marshal(p); err != nil {
		return ofile, data, err
	}
	// bytes.Buffer.Write never returns a short count or an error, so the
	// original partial-write retry loops were dead code.
	buffer.Write(data)
	for _, db := range *databases {
		for _, coll := range db.Collections {
			if data, err = bson.Marshal(coll); err != nil {
				return ofile, data, err
			}
			buffer.Write(data)
		}
	}
	if err = gox.OutputGzipped(buffer.Bytes(), ofile); err != nil {
		return ofile, data, err
	}
	fmt.Printf("bson data written to %v\n", ofile)
	return ofile, buffer.Bytes(), nil
}
|
package domain
// BasketItems is a collection of basket line items.
type BasketItems []BasketItem
// CountCodes returns how many times each code occurs in the given slice.
// Note: only the codes argument is counted; the receiver's items are not
// consulted.
func (this BasketItems) CountCodes(codes []string) map[string]int {
	result := make(map[string]int)
	for _, code := range codes {
		// Indexing a map yields the zero value for absent keys, so the
		// previous explicit presence check was redundant.
		result[code]++
	}
	return result
}
// DistinctProducts returns each product appearing in the basket, in
// first-seen order, de-duplicated by product code.
func (this BasketItems) DistinctProducts() Products {
	seen := make(map[string]struct{})
	var products Products
	for _, item := range this {
		code := item.Product.Code
		if _, dup := seen[code]; dup {
			continue
		}
		seen[code] = struct{}{}
		products = append(products, *item.Product)
	}
	return products
}
// Group merges items sharing a product code, summing their quantities.
// The order of the returned items is unspecified (map iteration order).
func (this BasketItems) Group() BasketItems {
	grouped := make(map[string]*BasketItem)
	for _, item := range this {
		entry, exists := grouped[item.Product.Code]
		if !exists {
			entry = &BasketItem{Product: item.Product}
			grouped[item.Product.Code] = entry
		}
		entry.Quantity += item.Quantity
	}
	var result BasketItems
	for _, entry := range grouped {
		result = append(result, *entry)
	}
	return result
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
cli "gopkg.in/urfave/cli.v2"
"github.com/johnwyles/vrddt-reboot/pkg/config"
"github.com/johnwyles/vrddt-reboot/pkg/reddit"
)
// InsertJSONToQueueCommand will take whatever garbage or valid URLs you throw in a
// JSON file formatted with unmarshaled Reddit Video structs and insert them
// to the Queue
func InsertJSONToQueueCommand(cfg *config.Config) *cli.Command {
	return &cli.Command{
		Action: insertJSONToQueue,
		Before: beforeInsertJSONToQueue,
		Flags: []cli.Flag{
			&cli.StringFlag{
				Aliases: []string{"f"},
				EnvVars: []string{"VRDDT_ADMIN_INSERT_JSON_TO_QUEUE_FILE"},
				Name:    "json-file",
				Usage:   "Specifies the JSON file to load Reddit URLs from",
				Value:   "",
			},
		},
		Name: "insert-json-to-queue",
		// Typo fixed in the user-visible help text: "instert" -> "insert".
		Usage: "Blindly insert a JSON file of Reddit data to the Queue",
	}
}
// beforeInsertJSONToQueue will validate that we have set a JSON file,
// printing the command help when the flag is missing.
func beforeInsertJSONToQueue(cliContext *cli.Context) (err error) {
	if !cliContext.IsSet("json-file") {
		cli.ShowCommandHelp(cliContext, cliContext.Command.Name)
		// Error strings are lowercase and unpunctuated per Go convention.
		err = fmt.Errorf("a JSON file was not supplied")
	}
	return
}
// insertJSONToQueue will throw whatever "json-file" argument as unmarshaled
// RedditVideo structs into the Queue. Returns an error only for file-read,
// top-level JSON-decode, or queue-initialization failures.
func insertJSONToQueue(cliContext *cli.Context) (err error) {
	getLogger("insertJSONToQueue").Info().Msg("insertJSONToQueue()")
	// Read and decode the entire JSON file up front; failure here aborts.
	data, err := ioutil.ReadFile(cliContext.String("json-file"))
	if err != nil {
		return
	}
	var redditVideos []reddit.Video
	if err = json.Unmarshal(data, &redditVideos); err != nil {
		return
	}
	if err = services.Queue.Init(); err != nil {
		return
	}
	if err = services.Queue.MakeClient(); err != nil {
		return
	}
	// NOTE: This does NOT check the DB at all before inserting the video into
	// the Queue so we can test if the API and Web tiers are doing their job as
	// this should never occur
	for _, redditVideo := range redditVideos {
		// NOTE(review): this `err` shadows the named return value, so marshal
		// and push failures below are logged and skipped, never propagated to
		// the caller — presumably intentional best-effort semantics; confirm.
		message, err := json.Marshal(redditVideo)
		if err != nil {
			getLogger("insertJSONToQueue").Warn().Err(err).Msgf("Problem marshaling to JSON: %#v", redditVideo)
			continue
		}
		getLogger("insertJSONToQueue").Debug().Msgf("Enqueuing video: %#v\n", redditVideo)
		if err = services.Queue.Push(message); err != nil {
			getLogger("insertJSONToQueue").Warn().Err(err).Msg("error pushing json onto queue")
		}
	}
	return
}
|
package steam
// Friends related responses

// FriendList wraps the array of friends returned by the Steam Web API.
type FriendList struct {
	Friends []*Friend `json:"friends"`
}
// FriendResponse is the top-level envelope of the friend-list endpoint.
type FriendResponse struct {
	FriendList FriendList `json:"friendslist"`
}
// Friend describes a single entry in a user's friend list.
type Friend struct {
	Steamid string `json:"steamid"`
	Relationship string `json:"relationship"`
	// FriendSince is presumably a Unix timestamp in seconds — confirm
	// against the Steam Web API documentation.
	FriendSince int `json:"friend_since"`
}
// Game stats related responses

// PLayerStatsResponse wraps the player stats payload.
// NOTE(review): the name is misspelled ("PLayer"); it is exported, so
// renaming would break callers — flagged rather than fixed.
type PLayerStatsResponse struct {
	PlayerStats PlayerStats
}
// PlayerStats holds per-game statistics for a single Steam user.
// No json tags are declared, so decoding relies on case-insensitive
// field matching.
type PlayerStats struct {
	SteamId string
	GameName string
	Stats []*Stats
}
// Stats is a single named statistic value.
type Stats struct {
	Name string
	Value int
}
// User info related responses

// UserInfoResponse is the envelope of the player-summary endpoint.
// NOTE(review): the json key is "players" (plural), which in the Steam API
// presumably carries an array — `Players Player` may need to be `[]Player`;
// confirm against the API response before changing.
type UserInfoResponse struct {
	Response struct {
		Players Player `json:"players"`
	} `json:"response"`
}
// Player mirrors a Steam player summary.
// NOTE(review): several field names drift from their json tags
// (PersonName/"personaname", LocCounttryCode/"loccountrycode"); the tags
// are what matter for decoding, so behavior is unaffected, but the
// exported names cannot be fixed without breaking callers.
type Player struct {
	SteamID string `json:"steamid"`
	Communityvisibilitystate int `json:"communityvisibilitystate"`
	Profilestate int `json:"profilestate"`
	PersonName string `json:"personaname"`
	ProfileUrl string `json:"profileurl"`
	Avatar string `json:"avatar"`
	AvatarMedium string `json:"avatarmedium"`
	AvatarFull string `json:"avatarfull"`
	AvatarHash string `json:"avatarhash"`
	PersonState int `json:"personastate"`
	RealName string `json:"realname"`
	Primaryclanid string `json:"primaryclanid"`
	TimeCreated int `json:"timecreated"`
	PersonStateFlag int `json:"personastateflags"`
	LocCounttryCode string `json:"loccountrycode"`
	LocStateCode string `json:"locstatecode"`
	LocCityID int `json:"loccityid"`
}
// GamesResponse is the envelope of the owned/recently-played games endpoint.
type GamesResponse struct {
	Response struct {
		TotalCount int `json:"total_count"`
		Games []Game `json:"games"`
	} `json:"response"`
}
// Game mirrors one game entry; playtime fields are reported by the API in
// minutes — presumably, confirm against the Steam Web API documentation.
type Game struct {
	AppID int `json:"appid"`
	Name string `json:"name"`
	PlayTime2Weeks int `json:"playtime_2weeks"`
	PlayTimeForever int `json:"playtime_forever"`
	ImgIconUrl string `json:"img_icon_url"`
	ImgLogoUrl string `json:"img_logo_url"`
	PlayTimeWIndowsForever int `json:"playtime_windows_forever"`
	PlayTimeMacForever int `json:"playtime_mac_forever"`
	PlayTimeLinuxForever int `json:"playtime_linux_forever"`
}
|
package main
import (
	"crypto/tls"
	"flag"
	"html/template"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"strconv"
	"time"

	"github.com/FrankSantoso/go-hydra-login-consent/internal/cfg"
	"github.com/FrankSantoso/go-hydra-login-consent/internal/errutil"
	"github.com/FrankSantoso/go-hydra-login-consent/internal/log"
	"github.com/FrankSantoso/go-hydra-login-consent/internal/platform/mw"
	"github.com/FrankSantoso/go-hydra-login-consent/internal/platform/resputil"
	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
	"github.com/go-openapi/strfmt"
	"github.com/gorilla/csrf"
	"github.com/ory/hydra-client-go/client"
	"github.com/ory/hydra-client-go/client/admin"
	"github.com/ory/hydra-client-go/models"
)
var (
	// cfgFile is the config file path/name (without extension) consumed by
	// the cfg package.
	cfgFile = flag.String("c", "config",
		"config file containing spec in cfg package")
	// globalTimeout bounds each HTTP request and every hydra admin call.
	globalTimeout = 7 * time.Second
)
// pageTemplates hosts login, logout, and consent templates as for now.
// The logout template is not loaded yet and remains nil.
type pageTemplates struct {
	login *template.Template
	consent *template.Template
	logout *template.Template
}
// srv is our consent/login server
type srv struct {
	// skipSSL, when true, disables TLS certificate verification on calls
	// to the hydra admin API (development only).
	skipSSL bool
	hClient *client.OryHydra
	templates *pageTemplates
	logger *log.Log
}
// main loads configuration, wires the chi router with CSRF, recovery,
// timeout and request-logging middleware, and serves the login/consent
// endpoints.
func main() {
	l := log.NewLogger("LOGIN_CONSENT", "HYDRA-LOGIN-CONSENT")
	flag.Parse()
	if cfgFile == nil {
		l.Logger.Fatal().Msg("Config option must not be empty")
	}
	conf, err := cfg.ReadConfig(*cfgFile)
	if err != nil {
		l.Logger.Fatal().Msgf("Error while parsing config: %v", err)
	}
	r := chi.NewRouter()
	csrfMw := csrf.Protect([]byte(
		conf.CsrfConf.Key),
		csrf.SameSite(csrf.SameSiteLaxMode),
		csrf.Secure(false),
	)
	// for development only
	r.Use(csrfMw)
	r.Use(middleware.Recoverer)
	r.Use(middleware.Timeout(globalTimeout))
	r.Use(mw.ReqLoggerMw(l))
	s, err := newSrv(conf, l)
	if err != nil {
		l.Logger.Fatal().Msgf("Error while creating new server: %v", err)
	}
	r.Get("/login", s.getLogin)
	r.Get("/consent", s.getConsentPage)
	r.Post("/login", s.postLogin)
	r.Post("/consent", s.postConsent)
	// Use a proper format verb instead of concatenating into the format
	// string (go vet flags non-constant format strings).
	l.Logger.Log().Msgf("Serving at: %d", conf.HydraConf.Port)
	// ListenAndServe always returns a non-nil error; it was previously
	// discarded, so bind failures went completely unreported.
	if err := http.ListenAndServe(":"+strconv.Itoa(conf.HydraConf.Port), r); err != nil {
		l.Logger.Fatal().Msgf("Server error: %v", err)
	}
}
// newPageTemplates loads the login and consent templates from the given
// directory. The logout template is not implemented yet and is left nil.
func newPageTemplates(path string) (*pageTemplates, error) {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return nil, err
	}
	// filepath.Join works whether or not path carries a trailing separator;
	// the previous naive concatenation silently produced a wrong filename
	// (e.g. "templateslogin.html") when it did not.
	loginTpl, err := template.ParseFiles(filepath.Join(path, "login.html"))
	if err != nil {
		return nil, err
	}
	consentTpl, err := template.ParseFiles(filepath.Join(path, "consent.html"))
	if err != nil {
		return nil, err
	}
	return &pageTemplates{
		login:   loginTpl,
		consent: consentTpl,
		logout:  nil,
	}, nil
}
// newSrv builds the consent/login server: it parses the hydra admin URL,
// loads the page templates, and configures the hydra admin client.
func newSrv(conf *cfg.Cfg, l *log.Log) (*srv, error) {
	adminURI, err := url.Parse(conf.HydraConf.Admin)
	if err != nil {
		return nil, err
	}
	templates, err := newPageTemplates(conf.TemplateDir.Path)
	if err != nil {
		return nil, err
	}
	transport := &client.TransportConfig{
		Host:     adminURI.Host,
		BasePath: adminURI.Path,
		Schemes:  []string{adminURI.Scheme},
	}
	return &srv{
		hClient:   client.NewHTTPClientWithConfig(nil, transport),
		logger:    l,
		templates: templates,
		skipSSL:   conf.HydraConf.SkipSSL,
	}, nil
}
// parseReqForm parses the request's form data; on failure it logs the
// error, reports a 400 to the client, and returns false.
// NOTE(review): the ResponseWriter w is accepted but never used —
// presumably resputil.RenderErr writes the response by other means;
// confirm against its implementation.
func parseReqForm(w http.ResponseWriter, r *http.Request, l *log.Log) bool {
	err := r.ParseForm()
	if err != nil {
		l.Logger.Err(err).Msg("Error while parsing form")
		resputil.RenderErr(err, http.StatusBadRequest)
		return false
	}
	return true
}
// getLogin renders the login page, passing the CSRF field and the
// login_challenge query parameter through to the template.
func (s *srv) getLogin(w http.ResponseWriter, r *http.Request) {
	data := map[string]interface{}{
		csrf.TemplateTag:  csrf.TemplateField(r),
		"login_challenge": r.URL.Query().Get("login_challenge"),
	}
	if err := s.templates.login.Execute(w, data); err != nil {
		s.logger.Logger.Err(err).Msg("Error executing login template")
		resputil.RenderErr(err, http.StatusInternalServerError)
	}
}
// postLogin handles the login form submission: it validates credentials,
// fetches the login request from the hydra admin API, accepts it, and
// redirects the browser to hydra's redirect URL.
func (s *srv) postLogin(w http.ResponseWriter, r *http.Request) {
	ok := parseReqForm(w, r, s.logger)
	if !ok {
		return
	}
	username, usernameSet := r.Form["email"]
	password, passwordSet := r.Form["password"]
	challenge, challengeSet := r.Form["challenge"]
	if !usernameSet || !passwordSet || !challengeSet || !s.authLogin(username[0], password[0]) {
		w.WriteHeader(http.StatusForbidden)
		return
	}
	httpclient := &http.Client{}
	if s.skipSSL {
		// #nosec
		httpclient.Transport = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	}
	loginParams := admin.NewGetLoginRequestParamsWithHTTPClient(httpclient)
	loginParams.SetTimeout(globalTimeout)
	loginParams.LoginChallenge = challenge[0]
	resp, err := s.hClient.Admin.GetLoginRequest(loginParams)
	if err != nil {
		s.logger.Logger.Err(err).Msg("Error processing login request")
		resputil.RenderErr(err, http.StatusInternalServerError)
		return
	}
	loginOKRequest := admin.NewAcceptLoginRequestParamsWithHTTPClient(httpclient)
	b := &models.AcceptLoginRequest{
		Subject: &username[0],
	}
	loginOKRequest.SetBody(b)
	loginOKRequest.SetTimeout(globalTimeout)
	loginOKRequest.LoginChallenge = resp.Payload.Challenge
	// BUG FIX: the accept request was previously built but never sent, so
	// the handler returned an empty 200 and the login flow stalled. Send it
	// and redirect to hydra's redirect URL, mirroring the consent flow.
	loginOKResponse, err := s.hClient.Admin.AcceptLoginRequest(loginOKRequest)
	if err != nil {
		s.logger.Logger.Err(err).Msg("Error accepting login request")
		resputil.RenderErr(err, http.StatusInternalServerError)
		return
	}
	http.Redirect(w, r, loginOKResponse.Payload.RedirectTo, http.StatusFound)
}
// getConsentPage fetches the consent request identified by the "cc" query
// parameter from the hydra admin API and renders the consent page.
func (s *srv) getConsentPage(w http.ResponseWriter, r *http.Request) {
	httpClient := &http.Client{}
	if s.skipSSL {
		// #nosec
		httpClient.Transport = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	}
	params := admin.NewGetConsentRequestParamsWithHTTPClient(httpClient)
	params.SetTimeout(globalTimeout)
	params.ConsentChallenge = r.URL.Query().Get("cc")
	consentReq, err := s.hClient.Admin.GetConsentRequest(params)
	if err != nil {
		s.logger.Logger.Err(err).Msg("Error while getting consent request")
		resputil.RenderErr(err, http.StatusInternalServerError)
		return
	}
	payload := consentReq.Payload
	data := map[string]interface{}{
		"User":      payload.Subject,
		"Challenge": params.ConsentChallenge,
		"Scope":     payload.RequestedScope,
	}
	if payload.Client != nil {
		data["ClientName"] = payload.Client.ClientName
		data["ClientID"] = payload.Client.ClientID
	}
	if err = s.templates.consent.Execute(w, data); err != nil {
		s.logger.Logger.Err(err).Msg("Error executing consent template")
		resputil.RenderErr(err, http.StatusInternalServerError)
	}
}
// postConsent dispatches the consent form submission to the accept or
// reject flow based on the "submit" form field.
func (s *srv) postConsent(w http.ResponseWriter, r *http.Request) {
	if !parseReqForm(w, r, s.logger) {
		return
	}
	choice, found := r.Form["submit"]
	if !found {
		s.logger.Logger.Error().Msg(errutil.ErrMissingConsent.Error())
		resputil.RenderErr(errutil.ErrMissingConsent, http.StatusBadRequest)
		return
	}
	switch choice[0] {
	case "accept":
		s.acceptConsentRequest(w, r)
	case "reject":
		s.rejectConsentRequest(w, r)
	default:
		s.logger.Logger.Error().Msg(errutil.ErrInvalidRequest.Error())
		resputil.RenderErr(errutil.ErrInvalidRequest, http.StatusBadRequest)
	}
}
// acceptConsentRequest fetches the pending consent request, accepts it with
// the granted scopes (and the optional "remember" flag), then redirects the
// browser to hydra's redirect URL.
func (s *srv) acceptConsentRequest(w http.ResponseWriter, req *http.Request) {
	hc := &http.Client{}
	if s.skipSSL {
		// #nosec
		hc.Transport = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	}
	challenge := req.URL.Query().Get("consent_challenge")
	getParams := admin.NewGetConsentRequestParamsWithHTTPClient(hc)
	getParams.SetTimeout(globalTimeout)
	getParams.ConsentChallenge = challenge
	getResp, err := s.hClient.Admin.GetConsentRequest(getParams)
	if err != nil {
		s.logger.Logger.Err(err).Msg("Error getting consent request")
		resputil.RenderErr(err, http.StatusInternalServerError)
		return
	}
	_, remember := req.Form["remember"]
	body := &models.AcceptConsentRequest{
		GrantScope:               req.Form["grant_scope"],
		GrantAccessTokenAudience: getResp.Payload.RequestedAccessTokenAudience,
		Remember:                 remember,
		HandledAt:                strfmt.DateTime(time.Now()),
	}
	acceptParams := admin.NewAcceptConsentRequestParamsWithHTTPClient(hc)
	acceptParams.SetBody(body)
	acceptParams.SetTimeout(globalTimeout)
	acceptParams.ConsentChallenge = challenge
	acceptResp, err := s.hClient.Admin.AcceptConsentRequest(acceptParams)
	if err != nil {
		s.logger.Logger.Err(err).Msg("Error getting consent response")
		resputil.RenderErr(err, http.StatusInternalServerError)
		return
	}
	http.Redirect(w, req, acceptResp.Payload.RedirectTo, http.StatusFound)
}
// rejectConsentRequest tells the hydra admin API that the resource owner
// denied consent, then redirects the browser to hydra's redirect URL.
func (s *srv) rejectConsentRequest(w http.ResponseWriter, req *http.Request) {
	hc := &http.Client{}
	if s.skipSSL {
		// #nosec
		hc.Transport = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	}
	denyParams := admin.NewRejectConsentRequestParamsWithHTTPClient(hc)
	denyParams.SetBody(&models.RejectRequest{
		Error:            "access_denied",
		ErrorDescription: "The resource owner denied the request",
	})
	denyParams.SetTimeout(globalTimeout)
	denyParams.ConsentChallenge = req.URL.Query().Get("consent_challenge")
	denyResp, err := s.hClient.Admin.RejectConsentRequest(denyParams)
	if err != nil {
		s.logger.Logger.Err(err).Msg("Error getting consent response")
		resputil.RenderErr(err, http.StatusInternalServerError)
		return
	}
	http.Redirect(w, req, denyResp.Payload.RedirectTo, http.StatusFound)
}
// authLogin authenticates user login credentials,
// currently authenticating all users
//
// SECURITY(review): this accepts every username/password combination; it
// MUST be replaced with a real credential check before production use.
func (s *srv) authLogin(usr, pwd string) bool {
	return true
}
|
// Copyright 2021 BoCloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package netconf
import (
"io/ioutil"
"gopkg.in/yaml.v2"
)
// TunnelEndpoint describes one tunnel peer: its identity, public IP, and
// the subnets reachable through it.
type TunnelEndpoint struct {
	ID string `yaml:"id,omitempty"`
	Name string `yaml:"name,omitempty"`
	IP string `yaml:"ip,omitempty"`
	Subnets []string `yaml:"subnets,omitempty"`
}
// NetworkConf is the full network configuration: this node's own endpoint
// (embedded, with its fields inlined at the top level of the YAML document)
// plus the list of remote peers.
// NOTE(review): the tag is `yaml:"-,inline"` — yaml.v2 conventionally uses
// `yaml:",inline"` for inlined embeds; confirm the "-" name is intentional
// and decodes as expected.
type NetworkConf struct {
	TunnelEndpoint `yaml:"-,inline"`
	Peers []TunnelEndpoint `yaml:"peers,omitempty"`
}
// LoadNetworkConf reads the YAML file at path and decodes it into a
// NetworkConf. On failure the zero-value conf is returned with the error.
func LoadNetworkConf(path string) (NetworkConf, error) {
	var conf NetworkConf
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return conf, err
	}
	err = yaml.Unmarshal(data, &conf)
	return conf, err
}
|
package model
import (
"context"
"fmt"
"log"
"time"
"Users/pingjing/docker/goPractice/owning/database"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo/options"
)
// Shopping is the top-level document: a set of user accounts.
type Shopping struct {
	Accounts []User `bson:"accounts" json:"accounts"`
}
// User is an account with credentials and its purchased products.
type User struct {
	Name string `bson:"name" json:"name"`
	Password string `bson:"password" json:"password"`
	Phone string `bson:"phone" json:"phone"`
	Products []Product `bson:"products" json:"products"`
}
// Product describes a purchased item. Fields are exported (uppercase) so
// they are visible to the bson/json encoders.
type Product struct {
	ProductId string `bson:"productId" json:"productId"`
	ProductName string `bson:"productName" json:"productName"`
	Price string `bson:"price" json:"price"`
	Category string `bson:"category" json:"category"`
	PurchaseDate string `bson:"purchaseDate" json:"purchaseDate"`
}
const (
	// collection is the MongoDB collection name all Product methods use.
	collection = "shopping_lists"
)
// FindAll retrieves every product document in the collection.
//
// NOTE(review): decoded documents are collected into a local slice that is
// never returned, so callers cannot observe the results — confirm whether
// this method should populate the receiver or return the list instead.
func (p *Product) FindAll() error {
	var shoppingLists []*Product
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	c := database.Connect(collection)
	// Passing bson.M{} as the filter matches all documents in the collection.
	cur, err := c.Find(ctx, bson.M{})
	if err != nil {
		// Previously log.Fatal terminated the whole process on a query
		// error; log and return the error to the caller instead.
		log.Println("Error on Finding all the documents ", err)
		return err
	}
	// Release the cursor even when decoding fails part-way through.
	defer cur.Close(ctx)
	// Finding multiple documents returns a cursor; iterating through it
	// decodes the documents one at a time.
	for cur.Next(ctx) {
		list := Product{}
		if err := cur.Decode(&list); err != nil {
			log.Println("Error on Decoding the document ", err)
			return err
		}
		shoppingLists = append(shoppingLists, &list)
	}
	return cur.Err()
}
// FindOne fetches a single product matching query and decodes it into the
// receiver.
func (p *Product) FindOne(query bson.M) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	c := database.Connect(collection)
	// Decode directly into the receiver; the previous &p passed a
	// **Product, adding a needless level of indirection for the decoder.
	return c.FindOne(ctx, query).Decode(p)
}
// Insert stores product as a new document and prints its generated ObjectID.
func (p *Product) Insert(product Product) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	c := database.Connect(collection)
	result, err := c.InsertOne(ctx, product)
	if err != nil {
		// Previously log.Fatal terminated the whole process; log and
		// return the error to the caller instead.
		log.Println(err)
		return err
	}
	// Comma-ok form so an unexpected inserted-ID type cannot panic.
	if objectID, ok := result.InsertedID.(primitive.ObjectID); ok {
		fmt.Println(objectID)
	}
	return nil
}
// Update upserts the document whose productId matches product.ProductId,
// overwriting its fields with product's values via $set.
func (p *Product) Update(product Product) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	c := database.Connect(collection)
	opts := options.Update().SetUpsert(true)
	filter := bson.M{"productId": product.ProductId}
	update := bson.M{"$set": product}
	if _, err := c.UpdateOne(ctx, filter, update, opts); err != nil {
		// Previously log.Fatal terminated the whole process; log and
		// return the error to the caller instead.
		log.Println(err)
		return err
	}
	return nil
}
// Delete a product
|
/**
* @Author: korei
* @Description:
* @File: init.go
* @Version: 1.0.0
* @Date: 2020/11/17 下午12:05
*/
package config
import "github.com/BurntSushi/toml"
// GlobalConfig is the process-wide configuration, populated by init from
// ./config/config (TOML).
var GlobalConfig Config
// Config aggregates every configuration section.
type Config struct{
	DBConfig DBConfig
	AdminConfig AdminConfig
	MachineConfig MachineConfig
}
// AdminConfig holds the administrator account credentials.
type AdminConfig struct {
	Pass string
	Name string
}
// MachineConfig describes machine capabilities.
type MachineConfig struct {
	// KindOfDrink — presumably the number of drink kinds the machine
	// supports; confirm against its usage.
	KindOfDrink uint32
}
// DBConfig holds the MySQL connection parameters.
type DBConfig struct{
	User string
	Pass string
	Host string
	Port string
	Name string
}
// init_local fills GlobalConfig with hardcoded local-development defaults
// (database, machine, and admin sections).
// SECURITY(review): credentials are hardcoded; acceptable only for local
// development.
func init_local() {
	GlobalConfig = Config{
		DBConfig: DBConfig{
			User: "root",
			Pass: "test",
			Host: "127.0.0.1",
			Port: "3306",
			Name: "ASMDB",
		},
		MachineConfig: MachineConfig{
			KindOfDrink: 16,
		},
		AdminConfig: AdminConfig{
			Name: "admin",
			Pass: "password",
		},
	}
}
// init loads ./config/config (TOML) into GlobalConfig at program startup.
// NOTE(review): it panics when the file is missing or invalid, and the
// hardcoded fallback init_local is never invoked — confirm whether it was
// meant to be used as a fallback on decode failure.
func init(){
	configPath := "./config/config"
	_,err := toml.DecodeFile(configPath,&GlobalConfig)
	if err!= nil {
		panic(err)
	}
}
package main
import (
"fmt"
)
// main demonstrates select over a channel with buffer size 1.
//
// With a single-slot buffer the two cases are never ready simultaneously:
// when the buffer is empty only the send case is ready, and when it is
// full only the receive case is, so iterations strictly alternate between
// sending i and printing back the value sent on the previous iteration.
func main() {
	ch := make(chan int, 1)
	for i := 0; i < 10; i++ {
		select {
		case x := <-ch:
			fmt.Printf(" ch -> x is %d\n", x)
		case ch <- i:
			fmt.Printf(" i->ch is %d\n", i)
		}
	}
}
|
package unittest
import (
"encoding/json"
g8sv1alpha1 "github.com/giantswarm/apiextensions/pkg/apis/cluster/v1alpha1"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
cmav1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
"github.com/giantswarm/aws-operator/pkg/annotation"
"github.com/giantswarm/aws-operator/pkg/label"
)
// DefaultMachineDeployment returns a MachineDeployment fixture — with its
// AWS provider spec marshaled into the raw provider-spec extension — for
// template-rendering unit tests.
func DefaultMachineDeployment() cmav1alpha1.MachineDeployment {
	providerSpec := g8sv1alpha1.AWSMachineDeploymentSpec{
		NodePool: g8sv1alpha1.AWSMachineDeploymentSpecNodePool{
			Description: "Test node pool for cluster in template rendering unit test.",
			Machine: g8sv1alpha1.AWSMachineDeploymentSpecNodePoolMachine{
				DockerVolumeSizeGB:  100,
				KubeletVolumeSizeGB: 100,
			},
			Scaling: g8sv1alpha1.AWSMachineDeploymentSpecNodePoolScaling{
				Max: 5,
				Min: 3,
			},
		},
		Provider: g8sv1alpha1.AWSMachineDeploymentSpecProvider{
			AvailabilityZones: []string{
				"eu-central-1a",
				"eu-central-1c",
			},
			Worker: g8sv1alpha1.AWSMachineDeploymentSpecProviderWorker{
				InstanceType: "m5.2xlarge",
			},
		},
	}
	md := cmav1alpha1.MachineDeployment{
		ObjectMeta: v1.ObjectMeta{
			Annotations: map[string]string{
				annotation.MachineDeploymentSubnet: "10.100.8.0/24",
			},
			Labels: map[string]string{
				label.Cluster:           "8y5ck",
				label.MachineDeployment: "al9qy",
				label.OperatorVersion:   "7.3.0",
			},
		},
	}
	return mustCMAMachineDeploymentWithG8sProviderSpec(md, providerSpec)
}
// mustCMAMachineDeploymentWithG8sProviderSpec embeds the marshaled AWS
// provider spec into the CR's raw provider-spec extension. It panics on
// marshal failure (fixture helper for tests only).
func mustCMAMachineDeploymentWithG8sProviderSpec(cr cmav1alpha1.MachineDeployment, providerExtension g8sv1alpha1.AWSMachineDeploymentSpec) cmav1alpha1.MachineDeployment {
	if cr.Spec.Template.Spec.ProviderSpec.Value == nil {
		cr.Spec.Template.Spec.ProviderSpec.Value = &runtime.RawExtension{}
	}
	raw, err := json.Marshal(&providerExtension)
	if err != nil {
		panic(err)
	}
	cr.Spec.Template.Spec.ProviderSpec.Value.Raw = raw
	return cr
}
|
package downloader
import (
"io"
)
// Updater will take care about everything related to updates.
// It embeds io.Closer so implementations can release held resources.
type Updater interface {
	io.Closer
	// Update performs an update of the named target.
	Update(target string) error
}
|
package teams
// Conferences maps team abbreviations to conferences
// NOTE(review): "mia" (Inter Miami) and "nsc" (Nashville) exist in Teams
// but appear in neither conference list — confirm whether they should be
// added here.
var Conferences = map[string][]string{
	// gofmt -s: the element type is implied by the map type, so the
	// redundant []string{...} literals were removed.
	"east": {"atl", "chi", "clb", "dc", "fcc", "mtl", "ner", "nyc", "nyrb", "orl", "phi", "tfc"},
	"west": {"dal", "hou", "col", "lag", "lafc", "min", "por", "rsl", "sj", "sea", "kc", "van"},
}
// ConferenceFor returns the conference associated with a team abbreviation,
// or the empty string when the abbreviation is unknown.
func ConferenceFor(abbrv string) string {
	for conference, members := range Conferences {
		for _, member := range members {
			if member == abbrv {
				return conference
			}
		}
	}
	return ""
}
// Name is aliased for unmarshaling
// (see UnmarshalText, which canonicalizes legacy short team names).
type Name string
// Team contains team data
type Team struct {
	// ID arrives as a JSON string and is coerced to int (",string" option).
	ID int `json:"id,string"`
	Name *Name
}
// Teams is the canonical list of team names and abbreviations
// (abbreviation -> full club name).
var Teams = map[string]string{
	"atl": "Atlanta United FC",
	"chi": "Chicago Fire FC",
	"col": "Colorado Rapids",
	"clb": "Columbus Crew SC",
	"dc": "D.C. United",
	"dal": "FC Dallas",
	"fcc": "FC Cincinnati",
	"hou": "Houston Dynamo",
	"mia": "Inter Miami CF",
	"lag": "LA Galaxy",
	"lafc": "Los Angeles FC",
	"min": "Minnesota United FC",
	"mtl": "Montreal Impact",
	"nsc": "Nashville SC",
	"ner": "New England Revolution",
	"nyc": "New York City FC",
	"nyrb": "New York Red Bulls",
	"orl": "Orlando City SC",
	"phi": "Philadelphia Union",
	"por": "Portland Timbers",
	"rsl": "Real Salt Lake",
	"sj": "San Jose Earthquakes",
	"sea": "Seattle Sounders FC",
	"kc": "Sporting Kansas City",
	"tfc": "Toronto FC",
	"van": "Vancouver Whitecaps FC",
}
// UnmarshalText coerces known legacy/short team names to their canonical
// form; any other value must pass NameIsValid or unmarshaling fails.
func (n *Name) UnmarshalText(data []byte) error {
	aliases := map[string]Name{
		"Atlanta United":      "Atlanta United FC",
		"Chicago Fire":        "Chicago Fire FC",
		"Columbus Crew":       "Columbus Crew SC",
		"DC United":           "D.C. United",
		"Minnesota United":    "Minnesota United FC",
		"New England Rev.":    "New England Revolution",
		"Orlando City":        "Orlando City SC",
		"Vancouver Whitecaps": "Vancouver Whitecaps FC",
	}
	s := string(data)
	if canonical, ok := aliases[s]; ok {
		*n = canonical
		return nil
	}
	if err := NameIsValid(s); err != nil {
		return err
	}
	*n = Name(s)
	return nil
}
|
package main
import (
"errors"
"flag"
"fmt"
"io/ioutil"
"os"
"github.com/sirupsen/logrus"
"github.com/ti-community-infra/tichi/internal/pkg/externalplugins"
"sigs.k8s.io/yaml"
)
// options specifies command line parameters.
type options struct {
	// externalPluginConfigPath is the path to external_plugin_config.yaml.
	externalPluginConfigPath string
}
// DefaultAndValidate checks that every required flag was provided.
func (o *options) DefaultAndValidate() error {
	if o.externalPluginConfigPath != "" {
		return nil
	}
	return errors.New("required flag --external-plugin-config-path was unset")
}
// parseOptions is used to parse command line parameters.
func parseOptions() (options, error) {
	var o options
	err := o.gatherOptions(flag.CommandLine, os.Args[1:])
	if err != nil {
		return options{}, err
	}
	return o, nil
}
// gatherOptions registers the command-line flags on the given FlagSet,
// parses args into o, and validates the result.
func (o *options) gatherOptions(flag *flag.FlagSet, args []string) error {
	flag.StringVar(&o.externalPluginConfigPath, "external-plugin-config-path", "",
		"Path to external_plugin_config.yaml.")
	if err := flag.Parse(args); err != nil {
		// %w (instead of %v) keeps the underlying error unwrappable.
		return fmt.Errorf("parse flags: %w", err)
	}
	if err := o.DefaultAndValidate(); err != nil {
		return fmt.Errorf("invalid options: %w", err)
	}
	return nil
}
// main parses the command-line options, validates the external plugin
// configuration file, and logs the outcome (fatally on any failure).
func main() {
	opts, err := parseOptions()
	if err != nil {
		logrus.Fatalf("Error parsing options - %v", err)
	}
	if err := validate(opts); err != nil {
		logrus.WithError(err).Fatal("Validation failed.")
	}
	logrus.Info("checkpluginconfig passes without any error!")
}
// validate loads the external plugin config file, unmarshals the YAML, and
// runs the configuration's own validation.
func validate(o options) error {
	raw, err := ioutil.ReadFile(o.externalPluginConfigPath)
	if err != nil {
		return err
	}
	cfg := &externalplugins.Configuration{}
	if err := yaml.Unmarshal(raw, cfg); err != nil {
		return err
	}
	return cfg.Validate()
}
|
package middleware
import (
"net/http"
"github.com/gorilla/mux"
"github.com/root-gg/plik/server/context"
)
// User middleware for all the /user/{userID} routes.
//
// It requires an authenticated session and a non-empty userID path
// parameter. When the requested userID differs from the session user,
// administrator privileges are required; the target user is then looked up
// and swapped into the context — the original (admin) user is saved first —
// so downstream handlers operate on the target user.
func User(ctx *context.Context, next http.Handler) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		if ctx.GetUser() == nil {
			ctx.Unauthorized("You must be authenticated, please login first")
			return
		}
		// Get the user id from the url params
		vars := mux.Vars(req)
		userID := vars["userID"]
		if userID == "" {
			ctx.MissingParameter("user id")
			return
		}
		if userID != ctx.GetUser().ID {
			// Acting on another user's behalf requires admin rights.
			if !ctx.IsAdmin() {
				ctx.Forbidden("you need administrator privileges")
				return
			}
			// Look the target user up in the metadata backend.
			user, err := ctx.GetMetadataBackend().GetUser(userID)
			if err != nil {
				ctx.InternalServerError("unable to get user", err)
				return
			}
			if user == nil {
				ctx.NotFound("user not found")
				return
			}
			// Remember who the admin is, then impersonate the target user
			// for the remainder of the request.
			ctx.SaveOriginalUser()
			ctx.SetUser(user)
		}
		next.ServeHTTP(resp, req)
	})
}
|
package main
// father holds each element's parent index in the union-find structure.
var father []int

// initUFS resets the union-find structure to `length` singleton sets,
// every element starting as its own root.
func initUFS(length int) {
	father = make([]int, length)
	for i := range father {
		father[i] = i
	}
}
// unionUFS merges the sets containing a and b by attaching a's root to b's.
func unionUFS(a, b int) {
	rootA, rootB := findFather(a), findFather(b)
	father[rootA] = rootB
}
// findFather returns the root of a's set, compressing the path as it goes.
func findFather(a int) int {
	if father[a] != a {
		father[a] = findFather(father[a])
	}
	return father[a]
}
// regionsBySlashes counts the regions an n x n grid of '/', '\' and ' '
// characters is divided into, using a union-find over four triangular
// sub-cells per grid cell, numbered 0=top, 1=right, 2=bottom, 3=left.
func regionsBySlashes(grid []string) int {
	if len(grid) == 0 {
		return 0
	}
	length := len(grid)
	// Four union-find nodes per cell.
	initUFS(length * length * 4)
	for i := 0; i < length; i++ {
		for t := 0; t < length; t++ {
			// start indexes this cell's top triangle (region 0).
			start := 4 * (i*length + t)
			switch grid[i][t] {
			case ' ':
				// Blank cell: all four triangles form one region.
				unionUFS(start, start+1)
				unionUFS(start, start+2)
				unionUFS(start, start+3)
			case '\\':
				// '\' joins top+right and bottom+left.
				unionUFS(start, start+1)
				unionUFS(start+2, start+3)
			case '/':
				// '/' joins top+left and right+bottom.
				unionUFS(start, start+3)
				unionUFS(start+1, start+2)
			}
			if i > 0 {
				// Merge this cell's top triangle with the bottom triangle
				// (region 2) of the cell directly above.
				unionUFS(start, start-length*4+2)
			}
			if t > 0 {
				// Merge the right triangle (region 1, start-4+1) of the cell
				// to the left with this cell's left triangle (region 3).
				unionUFS(start-3, start+3)
			}
		}
	}
	// Each remaining root corresponds to one connected region.
	ans := 0
	for i := 0; i < length*length*4; i++ {
		if i == findFather(i) {
			ans++
		}
	}
	return ans
}
/*
Summary
1. This union-find solution relies on an abstraction: each grid cell is
   split into 4 triangular regions, labeled as shown below.
   3 00000 1
   33 000 11
   333 0 111
   33 2 11
   3 222 1
   Note that the 4 parts are all equal in size!
   For '/', merge regions 1 and 2, and regions 0 and 3.
   For '\', merge 0 with 1, and 2 with 3.
   For ' ', merge all four regions.
   After finishing each cell, merge it with its neighbors.
   Finally, count how many connected components remain.
*/
|
/*
* dagdig
*
* PFN 2019 Internship Challenge
*
* API version: 1.0.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
// Error is the error payload returned by the API.
type Error struct {
	// Error code
	Code int32 `json:"code"`
	// Error message
	Message string `json:"message"`
}
|
package sensorcollection
import (
"testing"
"fmt"
)
// TestConnection exercises GetSensorServiceGetStationsResponse and prints
// each station parameter's devices.
// NOTE(review): this test makes no assertions — it can only fail by
// panicking; consider asserting on the response contents.
func TestConnection(t *testing.T){
	getStationResponse := GetSensorServiceGetStationsResponse()
	for _,s := range getStationResponse.Stations {
		for _, p := range s.Parameters {
			fmt.Printf("%v\n", p.Devices)
		}
	}
}
|
package ventilator
import (
"github.com/Mvilstrup/mosquito/communication/errors"
"github.com/Mvilstrup/mosquito/communication/messages"
zmq "github.com/alecthomas/gozmq"
)
// Ventilator pushes work messages onto a ZeroMQ PUSH socket.
type Ventilator struct {
	// ZeroMQ specific variables
	context *zmq.Context // Context
	sender *zmq.Socket // sender for clients & workers
}
// New creates a Ventilator bound to the given ZeroMQ endpoint. Resources
// created before a failure are released so nothing leaks.
func New(endpoint string) (*Ventilator, error) {
	context, err := zmq.NewContext()
	if err != nil {
		return nil, com_errors.ErrZMQContext
	}
	sender, err := context.NewSocket(zmq.PUSH)
	if err != nil {
		// Don't leak the context when socket creation fails.
		context.Close()
		return nil, com_errors.ErrZMQConnect
	}
	// Bind was previously unchecked; a failed bind produced a
	// half-initialized ventilator that silently dropped messages.
	if err := sender.Bind(endpoint); err != nil {
		sender.Close()
		context.Close()
		return nil, com_errors.ErrZMQConnect
	}
	return &Ventilator{
		context: context,
		sender:  sender,
	}, nil
}
// Send encodes the message and pushes it onto the ventilator's socket.
func (ventilator *Ventilator) Send(message *messages.Message) error {
	msg, err := messages.EncodeMessage(message)
	if err != nil {
		return err
	}
	// The socket send error was previously discarded; propagate it.
	return ventilator.sender.Send(msg, 0)
}
// Close releases the ventilator's socket and its ZeroMQ context.
func (ventilator *Ventilator) Close() {
	ventilator.sender.Close()
	// The context was previously leaked; it is owned by this Ventilator
	// (created in New), so close it here as well.
	ventilator.context.Close()
}
|
package api
import(
"log"
"net/http"
"encoding/json"
//"github.com/gorilla/mux"
"github.com/acmakhoa/smsapp/worker"
"github.com/acmakhoa/smsapp/device"
)
// DeviceAPI groups the HTTP handlers for modem/device management.
type DeviceAPI struct{}
// DeviceSelectRequest is the JSON body of the select endpoint, naming the
// modem port to activate.
type DeviceSelectRequest struct{
	Name string `json:"name"`
}
// ListHandler responds with the names of all currently available modems.
func (_ *DeviceAPI) ListHandler(w http.ResponseWriter, r *http.Request){
	names := worker.GetAvaliableNameModem()
	Response(w, map[string]interface{}{"devices": names})
}
// ReScanHandler rescans USB devices, tries to connect a GSM modem on each
// discovered port at 115200 baud, and responds with the modems that
// connected successfully.
func (_ *DeviceAPI) ReScanHandler(w http.ResponseWriter, r *http.Request){
	var modems []*worker.GSMModem
	for _, port := range device.FindDevices() {
		modem := &worker.GSMModem{Port: port, Baud: 115200, Devid: ""}
		if err := modem.Connect(); err != nil {
			log.Println("rescan: connet error :", err)
			continue
		}
		modems = append(modems, modem)
	}
	Response(w, map[string]interface{}{"modems": modems})
}
// SelectHandler reads a JSON body naming a modem port and, if that port is
// among the available modems, re-initializes the worker with only that
// modem. An empty or missing name is a no-op (empty response).
func (_ *DeviceAPI) SelectHandler(w http.ResponseWriter, r *http.Request){
	w.Header().Set("Content-type", "application/json")
	r.ParseForm()
	t := DeviceSelectRequest{}
	// The decode error was previously discarded, so a malformed body was
	// silently treated as an empty selection; log it so failures are visible.
	if err := json.NewDecoder(r.Body).Decode(&t); err != nil {
		log.Println("select: decode error:", err)
	}
	log.Println("decoder:", t)
	modemSelected := t.Name
	if modemSelected != "" {
		log.Println("Select Handler ::", modemSelected)
		for _, modem := range worker.GetAvaliableModem() {
			log.Println("modem::", modem)
			if modem.Port == modemSelected {
				var aModem []*worker.GSMModem
				aModem = append(aModem, modem)
				worker.InitWorker(aModem, 10, 1, 1, 1, 1)
			}
		}
	}
	Response(w, map[string]interface{}{})
}
|
package loader
import (
"fmt"
"path/filepath"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
"github.com/devspace-cloud/devspace/pkg/devspace/config/versions/latest"
"github.com/devspace-cloud/devspace/pkg/devspace/deploy/deployer/helm/merge"
"github.com/devspace-cloud/devspace/pkg/util/yamlutil"
)
// ValidInitialSyncStrategy checks if strategy is valid; the empty string is
// accepted as "use the default".
func ValidInitialSyncStrategy(strategy latest.InitialSyncStrategy) bool {
	switch strategy {
	case "",
		latest.InitialSyncStrategyMirrorLocal,
		latest.InitialSyncStrategyMirrorRemote,
		latest.InitialSyncStrategyKeepAll,
		latest.InitialSyncStrategyPreferLocal,
		latest.InitialSyncStrategyPreferRemote,
		latest.InitialSyncStrategyPreferNewest:
		return true
	}
	return false
}
// validate checks a parsed config for structural problems before it is used:
// required fields, duplicate image names, and deployment sanity. It returns
// the first problem found as an error, or nil if the config is valid.
func validate(config *latest.Config) error {
	if config.Dev != nil {
		if config.Dev.Ports != nil {
			for index, port := range config.Dev.Ports {
				// A port forward needs something that selects the target pod.
				if port.ImageName == "" && port.LabelSelector == nil {
					return errors.Errorf("Error in config: imageName and label selector are nil in port config at index %d", index)
				}
				if port.PortMappings == nil {
					return errors.Errorf("Error in config: portMappings is empty in port config at index %d", index)
				}
			}
		}
		if config.Dev.Sync != nil {
			for index, sync := range config.Dev.Sync {
				if !ValidInitialSyncStrategy(sync.InitialSync) {
					return errors.Errorf("Error in config: sync.initialSync is not valid '%s' at index %d", sync.InitialSync, index)
				}
			}
		}
		if config.Dev.Interactive != nil {
			for index, imageConf := range config.Dev.Interactive.Images {
				if imageConf.Name == "" {
					return errors.Errorf("Error in config: Unnamed interactive image config at index %d", index)
				}
			}
		}
	}
	if config.Commands != nil {
		for index, command := range config.Commands {
			if command.Name == "" {
				return errors.Errorf("commands[%d].name is required", index)
			}
			if command.Command == "" {
				return errors.Errorf("commands[%d].command is required", index)
			}
		}
	}
	if config.Hooks != nil {
		for index, hookConfig := range config.Hooks {
			if hookConfig.Command == "" {
				return errors.Errorf("hooks[%d].command is required", index)
			}
		}
	}
	if config.Images != nil {
		// images tracks every image name seen so far to reject duplicates.
		images := map[string]bool{}
		for imageConfigName, imageConf := range config.Images {
			if imageConfigName == "" {
				return errors.Errorf("images keys cannot be an empty string")
			}
			if imageConf == nil {
				return errors.Errorf("images.%s is empty and should at least contain an image name", imageConfigName)
			}
			// This check previously appeared twice (the second time via
			// fmt.Errorf); one errors.Errorf check is sufficient and keeps
			// the error style consistent with the rest of this function.
			if imageConf.Image == "" {
				return errors.Errorf("images.%s.image is required", imageConfigName)
			}
			if imageConf.Build != nil && imageConf.Build.Custom != nil && imageConf.Build.Custom.Command == "" {
				return errors.Errorf("images.%s.build.custom.command is required", imageConfigName)
			}
			if images[imageConf.Image] {
				return errors.Errorf("multiple image definitions with the same image name are not allowed")
			}
			images[imageConf.Image] = true
		}
	}
	if config.Deployments != nil {
		for index, deployConfig := range config.Deployments {
			if deployConfig.Name == "" {
				return errors.Errorf("deployments[%d].name is required", index)
			}
			if deployConfig.Helm == nil && deployConfig.Kubectl == nil {
				return errors.Errorf("Please specify either helm or kubectl as deployment type in deployment %s", deployConfig.Name)
			}
			if deployConfig.Helm != nil && (deployConfig.Helm.Chart == nil || deployConfig.Helm.Chart.Name == "") && (deployConfig.Helm.ComponentChart == nil || !*deployConfig.Helm.ComponentChart) {
				return errors.Errorf("deployments[%d].helm.chart and deployments[%d].helm.chart.name or deployments[%d].helm.componentChart is required", index, index, index)
			}
			if deployConfig.Kubectl != nil && deployConfig.Kubectl.Manifests == nil {
				return errors.Errorf("deployments[%d].kubectl.manifests is required", index)
			}
			if deployConfig.Helm != nil && deployConfig.Helm.ComponentChart != nil && *deployConfig.Helm.ComponentChart {
				// For component charts, merge all value sources and make sure
				// the result actually unmarshals into a ComponentConfig.
				overwriteValues := map[interface{}]interface{}{}
				if deployConfig.Helm.ValuesFiles != nil {
					for _, overridePath := range deployConfig.Helm.ValuesFiles {
						overwriteValuesPath, err := filepath.Abs(overridePath)
						if err != nil {
							return errors.Errorf("deployments[%d].helm.valuesFiles: Error retrieving absolute path from %s: %v", index, overridePath, err)
						}
						overwriteValuesFromPath := map[interface{}]interface{}{}
						err = yamlutil.ReadYamlFromFile(overwriteValuesPath, overwriteValuesFromPath)
						if err == nil {
							// Unreadable values files are ignored here (best
							// effort); NOTE(review): confirm they are reported
							// later during the actual deploy.
							merge.Values(overwriteValues).MergeInto(overwriteValuesFromPath)
						}
					}
				}
				// Inline values are merged on top of the values files.
				if deployConfig.Helm.Values != nil {
					merge.Values(overwriteValues).MergeInto(deployConfig.Helm.Values)
				}
				bytes, err := yaml.Marshal(overwriteValues)
				if err != nil {
					return errors.Errorf("deployments[%d].helm: Error marshaling overwrite values: %v", index, err)
				}
				componentValues := &latest.ComponentConfig{}
				err = yaml.UnmarshalStrict(bytes, componentValues)
				if err != nil {
					return errors.Errorf("deployments[%d].helm.componentChart: component values are incorrect: %v", index, err)
				}
			}
		}
	}
	return nil
}
|
package main
import "fmt"
// main runs both two-sum implementations against the same sample input
// and prints their results.
func main() {
	nums := []int{2, 7, 11, 15}
	for _, solve := range []func([]int, int) []int{twoSum, twoSum1} {
		fmt.Println(solve(nums, 9))
	}
}
// twoSum returns the indices (earlier first) of two elements of nums that
// sum to target, or nil when no such pair exists. One pass, O(n) extra space.
func twoSum(nums []int, target int) []int {
	seen := make(map[int]int, len(nums))
	for idx, value := range nums {
		if prev, ok := seen[target-value]; ok {
			return []int{prev, idx}
		}
		seen[value] = idx
	}
	return nil
}
// twoSum1 returns the indices of two elements of nums that sum to target,
// or an empty slice when no such pair exists.
//
// Fixed for consistency with twoSum in this file: indices are now returned
// in ascending order (previously {later, earlier}). The redundant i != j
// guard was dropped — j is always recorded on an earlier iteration, so j < i.
func twoSum1(nums []int, target int) []int {
	m := make(map[int]int, len(nums))
	for i, v := range nums {
		if j, ok := m[target-v]; ok {
			return []int{j, i}
		}
		m[v] = i
	}
	return []int{}
}
/*
Copyright © 2022 SUSE LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package factoryreset
import (
"encoding/json"
"errors"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"syscall"
dockerconfig "github.com/docker/docker/cli/config"
"github.com/rancher-sandbox/rancher-desktop/src/go/rdctl/pkg/autostart"
"github.com/rancher-sandbox/rancher-desktop/src/go/rdctl/pkg/directories"
"github.com/sirupsen/logrus"
)
// DeleteData performs a factory reset for the current platform: it removes
// the autostart configuration (best effort) and then dispatches to the
// OS-specific deletion routine. removeKubernetesCache additionally wipes the
// Kubernetes image cache.
//
// Previously the map lookup was unchecked, so an unsupported GOOS (e.g.
// freebsd) would call a nil function and panic; it now returns an error.
func DeleteData(removeKubernetesCache bool) error {
	if err := autostart.EnsureAutostart(false); err != nil {
		logrus.Errorf("Failed to remove autostart configuration: %s", err)
	}
	deleteFuncs := map[string]func(bool) error{
		"darwin":  deleteDarwinData,
		"linux":   deleteLinuxData,
		"windows": unregisterAndDeleteWindowsData,
	}
	deleteFunc, ok := deleteFuncs[runtime.GOOS]
	if !ok {
		return fmt.Errorf("factory-reset is not supported on %s", runtime.GOOS)
	}
	return deleteFunc(removeKubernetesCache)
}
func getStandardDirs() (string, string, string, error) {
configDir, err := os.UserConfigDir()
if err != nil {
return "", "", "", err
}
cacheDir, err := os.UserCacheDir()
if err != nil {
return "", "", "", err
}
homeDir, err := os.UserHomeDir()
if err != nil {
return "", "", "", err
}
return configDir, cacheDir, homeDir, nil
}
// deleteDarwinData removes Rancher Desktop state on macOS: ~/.rd, the app
// config, logs (RD_LOGS_DIR overrides the default Library/Logs location),
// preferences, the updater cache, and — when requested — the Kubernetes cache.
func deleteDarwinData(removeKubernetesCache bool) error {
	configDir, cacheDir, homeDir, err := getStandardDirs()
	if err != nil {
		return err
	}
	library := path.Join(homeDir, "Library")
	altAppHome := path.Join(homeDir, ".rd")
	logs := os.Getenv("RD_LOGS_DIR")
	if logs == "" {
		logs = path.Join(library, "Logs", "rancher-desktop")
	}
	paths := []string{
		altAppHome,
		path.Join(configDir, "rancher-desktop"),
		logs,
		path.Join(library, "Preferences", "rancher-desktop"),
		path.Join(configDir, "Caches", "rancher-desktop-updater"),
	}
	if removeKubernetesCache {
		paths = append(paths, path.Join(cacheDir, "rancher-desktop"))
	}
	return deleteUnixLikeData(homeDir, altAppHome, path.Join(homeDir, ".config"), paths)
}
// deleteLinuxData removes Rancher Desktop state on Linux following the XDG
// layout: ~/.rd, config dirs (both the app's and Electron's), local state,
// data (or just logs + lima when RD_LOGS_DIR is set), and — when requested —
// the Kubernetes cache.
func deleteLinuxData(removeKubernetesCache bool) error {
	configHome, cacheHome, homeDir, err := getStandardDirs()
	if err != nil {
		return err
	}
	dataHome := os.Getenv("XDG_DATA_HOME")
	if dataHome == "" {
		dataHome = path.Join(homeDir, ".local", "share")
	}
	altAppHome := path.Join(homeDir, ".rd")
	rdDataHome := path.Join(dataHome, "rancher-desktop")
	paths := []string{
		altAppHome,
		path.Join(configHome, "rancher-desktop"),
		path.Join(configHome, "Rancher Desktop"),
		path.Join(homeDir, ".local", "state", "rancher-desktop"),
	}
	// With an explicit log dir, keep the data dir except the lima VM files;
	// otherwise the whole data dir goes.
	if logs := os.Getenv("RD_LOGS_DIR"); logs != "" {
		paths = append(paths, logs, path.Join(rdDataHome, "lima"))
	} else {
		paths = append(paths, rdDataHome)
	}
	if removeKubernetesCache {
		paths = append(paths, path.Join(cacheHome, "rancher-desktop"))
	}
	return deleteUnixLikeData(homeDir, altAppHome, configHome, paths)
}
// unregisterAndDeleteWindowsData performs the Windows factory reset:
// unregister the WSL distributions, delete the app data (keeping the
// Kubernetes image cache unless removeKubernetesCache is set), and clear the
// docker context. Unlike the unix path, any failure aborts immediately.
func unregisterAndDeleteWindowsData(removeKubernetesCache bool) error {
	steps := []struct {
		desc string
		run  func() error
	}{
		{"unregister WSL", unregisterWSL},
		{"delete data", func() error { return deleteWindowsData(!removeKubernetesCache, "rancher-desktop") }},
		{"clear docker context", clearDockerContext},
	}
	for _, step := range steps {
		if err := step.run(); err != nil {
			logrus.Errorf("could not %s: %s", step.desc, err)
			return err
		}
	}
	logrus.Infoln("successfully cleared data.")
	return nil
}
// deleteUnixLikeData is the shared macOS/Linux cleanup: delete the Lima VM,
// remove every path in pathList, clear the docker context and CLI plugins,
// and finally strip the managed PATH block from the usual shell dotfiles.
//
// Most errors here are logged and skipped rather than returned — there is no
// dependency graph, so failing one step should not prevent the others (e.g.
// a stuck Lima VM must not block removing docker files or dotfile edits).
func deleteUnixLikeData(homeDir string, altAppHomePath string, configHomePath string, pathList []string) error {
	if err := deleteLimaVM(); err != nil {
		logrus.Errorf("Error trying to delete the Lima VM: %s\n", err)
	}
	for _, target := range pathList {
		if err := os.RemoveAll(target); err != nil {
			logrus.Errorf("Error trying to remove %s: %s", target, err)
		}
	}
	if err := clearDockerContext(); err != nil {
		logrus.Errorf("Error trying to clear the docker context %s", err)
	}
	if err := removeDockerCliPlugins(altAppHomePath); err != nil {
		logrus.Errorf("Error trying to remove docker plugins %s", err)
	}
	profileNames := []string{
		".bashrc",
		".bash_profile",
		".bash_login",
		".profile",
		".zshrc",
		".cshrc",
		".tcshrc",
	}
	profiles := make([]string, 0, len(profileNames)+1)
	for _, name := range profileNames {
		profiles = append(profiles, path.Join(homeDir, name))
	}
	profiles = append(profiles, path.Join(configHomePath, "fish", "config.fish"))
	return removePathManagement(profiles)
}
// deleteLimaVM force-deletes the "0" Lima VM using the limactl binary that
// ships next to the running executable (../lima/bin/limactl relative to the
// executable's directory, after resolving symlinks).
func deleteLimaVM() error {
	if err := directories.SetupLimaHome(); err != nil {
		return err
	}
	executable, err := os.Executable()
	if err == nil {
		executable, err = filepath.EvalSymlinks(executable)
	}
	if err != nil {
		return err
	}
	installDir := path.Dir(path.Dir(executable))
	limactl := path.Join(installDir, "lima", "bin", "limactl")
	return exec.Command(limactl, "delete", "-f", "0").Run()
}
// removeDockerCliPlugins deletes symlinks in the docker cli-plugins directory
// that point into Rancher Desktop's own bin directory (altAppHomePath/bin).
// Regular files and symlinks owned by anything else are left alone.
func removeDockerCliPlugins(altAppHomePath string) error {
	pluginDir := path.Join(dockerconfig.Dir(), "cli-plugins")
	entries, err := os.ReadDir(pluginDir)
	if errors.Is(err, syscall.ENOENT) {
		// No cli-plugins directory means there is nothing to clean up.
		return nil
	}
	if err != nil {
		return err
	}
	ownPrefix := path.Join(altAppHomePath, "bin") + "/"
	for _, entry := range entries {
		if entry.Type()&os.ModeSymlink == 0 {
			continue
		}
		fullPathName := path.Join(pluginDir, entry.Name())
		target, err := os.Readlink(fullPathName)
		if err != nil {
			logrus.Errorf("Failed to follow the symbolic link for file %s: error: %s\n", fullPathName, err)
			continue
		}
		if strings.HasPrefix(target, ownPrefix) {
			os.Remove(fullPathName)
		}
	}
	return nil
}
// removePathManagement strips the Rancher Desktop-managed block (delimited by
// the START/END marker comments) from each of the given shell dotfiles,
// preserving the surrounding text and spacing. A file that contains nothing
// but the managed block is deleted entirely. Missing files are skipped
// silently; other read/write problems are logged and processing continues.
// Only a failed stat aborts the walk with an error.
func removePathManagement(dotFiles []string) error {
	const startTarget = `### MANAGED BY RANCHER DESKTOP START \(DO NOT EDIT\)`
	const endTarget = `### MANAGED BY RANCHER DESKTOP END \(DO NOT EDIT\)`
	// bash files etc. break if they contain \r's, so don't worry about them
	// The pattern captures the text before/after the managed block and the
	// runs of newlines on each side, so the original spacing can be
	// reconstructed minus the newline the dotfile manager inserted.
	ptn := regexp.MustCompile(fmt.Sprintf(`(?ms)^(?P<preMarkerText>.*?)(?P<preMarkerNewlines>\n*)^%s.*?^%s\s*?$(?P<postMarkerNewlines>\n*)(?P<postMarkerText>.*)$`, startTarget, endTarget))
	for _, dotFile := range dotFiles {
		byteContents, err := os.ReadFile(dotFile)
		if err != nil {
			// A nonexistent dotfile is normal; anything else is logged.
			if !errors.Is(err, syscall.ENOENT) {
				logrus.Errorf("Error trying to read %s: %s\n", dotFile, err)
			}
			continue
		}
		contents := string(byteContents)
		parts := ptn.FindStringSubmatch(contents)
		if len(parts) == 0 {
			// No managed block in this file; leave it untouched.
			continue
		}
		preMarkerTextIndex := ptn.SubexpIndex("preMarkerText")
		preMarkerNewlineIndex := ptn.SubexpIndex("preMarkerNewlines")
		postMarkerNewlineIndex := ptn.SubexpIndex("postMarkerNewlines")
		postMarkerTextIndex := ptn.SubexpIndex("postMarkerText")
		if len(parts[preMarkerTextIndex]) == 0 && len(parts[postMarkerTextIndex]) == 0 {
			// Nothing of interest left in this file, so delete it
			err = os.RemoveAll(dotFile)
			if err != nil {
				// but continue processing the other files
				logrus.Errorf("Failed to delete file %s (error %s)\n", dotFile, err)
			}
			continue
		}
		newParts := []string{parts[preMarkerTextIndex]}
		preMarkerNewlines := parts[preMarkerNewlineIndex]
		postMarkerNewlines := parts[postMarkerNewlineIndex]
		if len(preMarkerNewlines) == 1 {
			newParts = append(newParts, preMarkerNewlines)
		} else if len(preMarkerNewlines) > 1 {
			// One of the newlines was inserted by the dotfile manager, but keep the others
			newParts = append(newParts, preMarkerNewlines[1:])
		}
		if len(parts[postMarkerTextIndex]) > 0 {
			if len(postMarkerNewlines) > 1 {
				// Either there was a newline before the marker block, and we have copied
				// it into the new file,
				// or the marker block was at the start of the file, in which case we can
				// drop one of the post-marker block newlines
				newParts = append(newParts, postMarkerNewlines[1:])
			}
			newParts = append(newParts, parts[postMarkerTextIndex])
		}
		newContents := strings.Join(newParts, "")
		filestat, err := os.Stat(dotFile)
		if err != nil {
			return fmt.Errorf("error trying to stat %s: %w", dotFile, err)
		}
		// Rewrite with the file's existing permission bits preserved.
		if err = os.WriteFile(dotFile, []byte(newContents), filestat.Mode()); err != nil {
			logrus.Errorf("error trying to update %s: %s\n", dotFile, err)
		}
	}
	return nil
}
// dockerConfigType models ~/.docker/config.json as a loose JSON object so
// that unknown keys survive the read-modify-write cycle in clearDockerContext.
type dockerConfigType map[string]interface{}

// PartialMeta captures just the metadata.description field of a docker
// context meta.json file.
// NOTE(review): not referenced in the visible part of this file — confirm it
// is used elsewhere before removing.
type PartialMeta struct {
	Metadata struct {
		Description string
	}
}
// cleanupDockerContextFiles - normally RD will remove any contexts from
// .docker/contexts/meta that it owns. This function checks the dir for any
// contexts that were left behind, and deletes them.
// The long hex name is the fixed directory of the rancher-desktop docker
// context (presumably a hash of the context name — confirm against the code
// that creates it).
func cleanupDockerContextFiles() {
	os.RemoveAll(path.Join(dockerconfig.Dir(), "contexts", "meta", "b547d66a5de60e5f0843aba28283a8875c2ad72e99ba076060ef9ec7c09917c8"))
}
// clearDockerContext removes Rancher Desktop's traces from the docker CLI
// configuration: the plaintext-credentials file, the leftover context meta
// directory, and the "currentContext" entry in config.json when it still
// points at rancher-desktop. config.json is rewritten atomically via a temp
// file + rename.
//
// Fixes over the previous version: the temp file is written through the
// handle CreateTemp already opened (instead of a second os.WriteFile on the
// same name), and it is removed on any write/close/rename failure instead of
// being leaked. CreateTemp creates the file 0600, matching the old
// WriteFile permissions.
func clearDockerContext() error {
	// Ignore failure to delete this next file:
	os.Remove(path.Join(dockerconfig.Dir(), "plaintext-credentials.config.json"))
	cleanupDockerContextFiles()
	configFilePath := path.Join(dockerconfig.Dir(), "config.json")
	dockerConfigContents := make(dockerConfigType)
	contents, err := os.ReadFile(configFilePath)
	if err != nil {
		if errors.Is(err, syscall.ENOENT) {
			// Nothing left to do here, since the file doesn't exist
			return nil
		}
		return fmt.Errorf("factory-reset: error trying to read docker config.json: %w", err)
	}
	if err = json.Unmarshal(contents, &dockerConfigContents); err != nil {
		// If we can't json-unmarshal ~/.docker/config, nothing left to do
		return nil
	}
	currentContextName, ok := dockerConfigContents["currentContext"]
	if !ok || currentContextName != "rancher-desktop" {
		return nil
	}
	delete(dockerConfigContents, "currentContext")
	contents, err = json.MarshalIndent(dockerConfigContents, "", " ")
	if err != nil {
		return err
	}
	scratchFile, err := os.CreateTemp(dockerconfig.Dir(), "tmpconfig.json")
	if err != nil {
		return err
	}
	_, err = scratchFile.Write(contents)
	if closeErr := scratchFile.Close(); err == nil {
		err = closeErr
	}
	if err == nil {
		err = os.Rename(scratchFile.Name(), configFilePath)
	}
	if err != nil {
		// Don't leave the scratch file behind on failure.
		os.Remove(scratchFile.Name())
		return err
	}
	return nil
}
|
package controllers
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/jinzhu/gorm"
"github.com/ranggarifqi/go-ecommerce-api/models"
)
// RoleController bundles the database handle shared by the role CRUD
// endpoints below.
type RoleController struct {
	DB *gorm.DB
}
// GetAll lists every role. On success it responds 200 with the roles and
// their count; on a database error it responds 400 with the error text.
func (rc *RoleController) GetAll(c *gin.Context) {
	var roles []models.Role
	if err := rc.DB.Find(&roles).Error; err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"result": err.Error(), "success": false})
		return
	}
	c.JSON(http.StatusOK, gin.H{"result": roles, "count": len(roles), "success": true})
}
// GetByID returns the single role whose primary key matches the :id path
// parameter, or 400 with the lookup error when it does not exist.
func (rc *RoleController) GetByID(c *gin.Context) {
	var role models.Role
	id := c.Param("id")
	if err := rc.DB.First(&role, id).Error; err != nil {
		c.JSON(http.StatusBadRequest, gin.H{
			"result":  err.Error(),
			"count":   0,
			"success": false,
		})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"result":  role,
		"count":   1,
		"success": true,
	})
}
// Create inserts a new role parsed from the JSON request body. Binding or
// insert failures respond 400 with the error text.
func (rc *RoleController) Create(c *gin.Context) {
	var role models.Role
	if err := c.ShouldBindJSON(&role); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"result": err.Error(), "success": false})
		return
	}
	if err := rc.DB.Create(&role).Error; err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"result": err.Error(), "success": false})
		return
	}
	c.JSON(http.StatusOK, gin.H{"result": role, "success": true})
}
// PatchAttributes loads the role identified by :id, overlays the JSON request
// body onto it, and saves the result. Any failure responds 400 with the
// error text.
func (rc *RoleController) PatchAttributes(c *gin.Context) {
	var role models.Role
	id := c.Param("id")
	if err := rc.DB.First(&role, id).Error; err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"result": err.Error(), "success": false})
		return
	}
	if err := c.ShouldBindJSON(&role); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"result": err.Error(), "success": false})
		return
	}
	if err := rc.DB.Save(&role).Error; err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"result": err.Error(), "success": false})
		return
	}
	c.JSON(http.StatusOK, gin.H{"result": role, "success": true})
}
// DeleteByID removes the role identified by :id. A failed lookup or delete
// responds 400 with the error text.
func (rc *RoleController) DeleteByID(c *gin.Context) {
	var role models.Role
	id := c.Param("id")
	if err := rc.DB.First(&role, id).Error; err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"result": err.Error(), "success": false})
		return
	}
	if err := rc.DB.Delete(&role).Error; err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"result": err.Error(), "success": false})
		return
	}
	c.JSON(http.StatusOK, gin.H{"result": "Data Deleted SuccessFully", "success": true})
}
|
package leetcode
/*Given two binary trees and imagine that when you put one of them to cover the other,
some nodes of the two trees are overlapped while the others are not.
You need to merge them into a new binary tree.
The merge rule is that if two nodes overlap, then sum node values up as the new value of the merged node.
Otherwise, the NOT null node will be used as the node of new tree.
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/merge-two-binary-trees
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// mergeTrees overlays t2 onto t1 and returns the merged tree: overlapping
// nodes sum their values; where only one tree has a node, that node is used
// as-is. Nodes of t1 (and, where t1 is missing, of t2) are reused and
// mutated in place.
//
// Improvement: when one side is nil, the other side already IS the merged
// result — the previous version still recursed through the entire surviving
// subtree doing nothing, an O(n) traversal per one-sided branch.
func mergeTrees(t1 *TreeNode, t2 *TreeNode) *TreeNode {
	if t1 == nil {
		return t2
	}
	if t2 == nil {
		return t1
	}
	t1.Val += t2.Val
	t1.Left = mergeTrees(t1.Left, t2.Left)
	t1.Right = mergeTrees(t1.Right, t2.Right)
	return t1
}
|
package main
import "fmt"
/*
指针作为参数:
引用传递
值传递
其实本质上来说 都是值传递,
传递指针 其实也是传递一个值,只是这个值 是一个地址而已。
数组是值类型 , 直接拷贝一份数据
*/
// main drives the value-vs-pointer parameter-passing demos: fun1/fun3 take
// copies (caller unchanged), fun2/fun4 take pointers (caller mutated).
func main() {
	n := 1
	arr := [4]int{1, 2, 3, 4}
	fun1(n)
	fmt.Println("fun1() 调用后,a=", n)
	fun2(&n)
	fmt.Println("fun2() 调用后,a=", n)
	fun3(arr)
	fmt.Println("fun3() 调用后,arr=", arr)
	fun4(&arr)
	fmt.Println("fun4() 调用后,arr=", arr)
	fmt.Println("s1=", []int{1, 2, 3, 4, 5, 6})
}
// fun4 receives a pointer to the array, so writing through it is visible to
// the caller. The log label previously said "fun3()" (copy-paste slip);
// corrected to "fun4()".
func fun4(p *[4]int) {
	p[0] = 10000
	fmt.Println("fun4() 函数中修改arr[0]:", p[0])
}
// fun3 receives the array by value: the assignment mutates only the local
// copy, never the caller's array.
func fun3(values [4]int) {
	values[0] = 10000
	fmt.Println("fun3() 函数中修改arr[0]:", values[0])
}
// fun1 takes an int by value: reassigning the parameter only changes the
// local copy.
func fun1(n int) {
	fmt.Println("fun1() 函数中num:", n)
	n = 100
	fmt.Println("fun1() 函数 中修改num:", n)
}
// fun2 takes a pointer: writing through it changes the caller's variable.
func fun2(ptr *int) {
	fmt.Println("fun2() 函数中num:", *ptr)
	*ptr = 100
	fmt.Println("fun2()函数中 修改num:", *ptr)
}
|
package flags
import (
"testing"
"github.com/10gen/realm-cli/internal/utils/test/assert"
)
// TestArg verifies the command-line rendering of Arg: a nil value renders as
// just the flag name, a set value renders as "name value" (both with the
// leading " --" prefix).
func TestArg(t *testing.T) {
	t.Run("should print only name when value is nil", func(t *testing.T) {
		arg := Arg{Name: "test"}
		assert.Equal(t, " --test", arg.String())
	})
	t.Run("should print name and value when set", func(t *testing.T) {
		arg := Arg{"test", "value"}
		assert.Equal(t, " --test value", arg.String())
	})
}
|
package user
import (
"context"
"encoding/json"
"net/http"
"github.com/julienschmidt/httprouter"
)
// Controller exposes the HTTP handlers for user management; persistence and
// lookups are delegated to Service.
type Controller struct {
	Service *Service
}
// RegisterUser creates a new user from the JSON request body. The caller
// must authenticate as the admin via HTTP basic auth; the response echoes
// the stored user together with its password.
func (c *Controller) RegisterUser(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	adminName, adminPassword, ok := r.BasicAuth()
	if !ok || !LoginAdmin(adminName, adminPassword) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}
	var user User
	if err := json.NewDecoder(r.Body).Decode(&user); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	user, err := c.Service.RegisterUser(context.Background(), user.Name)
	handle(err)
	response := struct {
		User     User
		Password string
	}{
		User:     user,
		Password: user.Password,
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	handle(json.NewEncoder(w).Encode(&response))
}
// GetUser returns the stored record for the user named in the URL path. The
// request must be basic-auth'd as that same user, and the credentials are
// re-verified via Service.LoginUser before the record is returned.
func (c *Controller) GetUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	username, password, ok := r.BasicAuth()
	if !ok || username != ps.ByName("name") {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}
	user := User{
		Name:     ps.ByName("name"),
		Password: password,
	}
	ctx := context.Background()
	ok, err := c.Service.LoginUser(ctx, user)
	handle(err)
	if !ok {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}
	user, err = c.Service.FindUserByName(ctx, user.Name)
	handle(err)
	w.Header().Set("Content-Type", "application/json")
	// Previously the Encode error was silently dropped; route it through
	// handle like every other handler in this file.
	handle(json.NewEncoder(w).Encode(user))
}
// DeleteUser removes the user named in the URL path. Only the admin (HTTP
// basic auth) may call it; on success it responds 200 with an empty body.
func (c *Controller) DeleteUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	adminName, adminPassword, ok := r.BasicAuth()
	if !ok || !LoginAdmin(adminName, adminPassword) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}
	target := User{Name: ps.ByName("name")}
	handle(c.Service.DeleteUser(context.Background(), target))
	w.WriteHeader(http.StatusOK)
}
var (
	// RequestDayOperation is the patch operation name for requesting a day.
	// NOTE(review): not referenced in the visible part of this file —
	// confirm it is used elsewhere before removing.
	RequestDayOperation = "RequestDay"
)

// patchRequest mirrors a JSON-Patch style request body:
// {"op": ..., "path": ..., "value": ...}. Value is kept raw so each
// operation can unmarshal it into its own concrete type.
type patchRequest struct {
	Operation string          `json:"op"`
	Path      string          `json:"path"`
	Value     json.RawMessage `json:"value"`
}
// PatchUser applies a JSON-Patch style update to the authenticated user.
// Supported operations are "replace" on /WeeklyRequests and /Password;
// anything else responds 400. The updated user is echoed back as JSON.
func (c *Controller) PatchUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	username, password, ok := r.BasicAuth()
	if !ok || username != ps.ByName("name") {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}
	user := User{
		Name:     ps.ByName("name"),
		Password: password,
	}
	ctx := context.Background()
	ok, err := c.Service.LoginUser(ctx, user)
	handle(err)
	if !ok {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}
	user, err = c.Service.FindUserByName(ctx, user.Name)
	handle(err)
	var patch patchRequest
	if err = json.NewDecoder(r.Body).Decode(&patch); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	if patch.Operation != "replace" {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	switch patch.Path {
	case "/WeeklyRequests":
		var weeklyRequests WeeklyRequests
		if err = json.Unmarshal(patch.Value, &weeklyRequests); err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		user.WeeklyRequests = weeklyRequests
	case "/Password":
		var newPassword string
		if err = json.Unmarshal(patch.Value, &newPassword); err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		user.Password = hashAndSalt(newPassword)
	default:
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	user, err = c.Service.UpdateUser(ctx, user)
	handle(err)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	handle(json.NewEncoder(w).Encode(&user))
}
func handle(err error) {
if err != nil {
panic(err)
}
}
|
package day2
import (
"fmt"
"log"
)
// puzzleInput is the Intcode program for the day-2 puzzle, written as groups
// of four integers per instruction — opcode (1 = add, 2 = multiply; see
// calculate), two operand positions, and a result position — terminated by
// opcode 99.
var puzzleInput = []int{
	1, 12, 2, 3,
	1, 1, 2, 3,
	1, 3, 4, 3,
	1, 5, 0, 3,
	2, 10, 1, 19,
	1, 5, 19, 23,
	1, 23, 5, 27,
	2, 27, 10, 31,
	1, 5, 31, 35,
	2, 35, 6, 39,
	1, 6, 39, 43,
	2, 13, 43, 47,
	2, 9, 47, 51,
	1, 6, 51, 55,
	1, 55, 9, 59,
	2, 6, 59, 63,
	1, 5, 63, 67,
	2, 67, 13, 71,
	1, 9, 71, 75,
	1, 75, 9, 79,
	2, 79, 10, 83,
	1, 6, 83, 87,
	1, 5, 87, 91,
	1, 6, 91, 95,
	1, 95, 13, 99,
	1, 10, 99, 103,
	2, 6, 103, 107,
	1, 107, 5, 111,
	1, 111, 13, 115,
	1, 115, 13, 119,
	1, 13, 119, 123,
	2, 123, 13, 127,
	1, 127, 6, 131,
	1, 131, 9, 135,
	1, 5, 135, 139,
	2, 139, 6, 143,
	2, 6, 143, 147,
	1, 5, 147, 151,
	1, 151, 2, 155,
	1, 9, 155, 0,
	99,
	2, 14, 0, 0,
}
func calculate(in []int) (out []int) {
i := 0
out = append(out, in...)
for {
code := out[i+0]
if code == 99 {
break
}
arg1 := out[i+1] // Position of the first parameter
arg2 := out[i+2] // Position of the second parameter
rPos := out[i+3] // Position of the result
var result int
switch code {
case 1:
result = out[arg1] + out[arg2]
case 2:
result = out[arg1] * out[arg2]
default:
log.Fatalf("Incorrect operation code (%d)! Program exiting.\n", code)
}
out[rPos] = result
i += 4
}
return
}
// reverse brute-forces the pair (noun, verb), each in [0, 99], that makes
// the program in produce result at position 0. It aborts the process if no
// pair works. The input slice itself is never modified — the search runs on
// a private copy (and calculate copies again before executing).
func reverse(in []int, result int) (noun, verb int) {
	program := make([]int, len(in))
	copy(program, in)
	for noun = 0; noun <= 99; noun++ {
		for verb = 0; verb <= 99; verb++ {
			program[1], program[2] = noun, verb
			if calculate(program)[0] == result {
				return
			}
		}
	}
	log.Fatalf("Cannot determine the pairs what gives you the result %d\n", result)
	return
}
// Run solves both puzzle parts: part 1 prints the program's final memory,
// part 2 prints 100*noun+verb for the pair producing 19690720.
func Run() {
	fmt.Println(calculate(puzzleInput))
	noun, verb := reverse(puzzleInput, 19690720)
	fmt.Println(100*noun + verb)
}
|
package view
import (
"bytes"
"io/ioutil"
"strings"
"github.com/aimof/yomuRSS/domain"
"github.com/mattn/godown"
"github.com/rivo/tview"
)
// View is the terminal UI contract: feed it articles, then run the event
// loop until the user quits.
type View interface {
	AddArticles(a domain.Articles)
	Run() error
}
// view is the tview-based implementation of View: a list of article titles
// on the left and the selected article's text on the right.
type view struct {
	flex     *tview.Flex        // root layout holding list and textview
	list     *tview.List        // article titles plus a quit entry
	textview *tview.TextView    // renders the selected article
	app      *tview.Application // owns the event loop
	articles domain.Articles    // articles backing the list entries
}
// NewView constructs a view with all tview widgets initialized and no
// articles loaded yet.
func NewView() *view {
	v := &view{}
	v.flex = tview.NewFlex()
	v.list = tview.NewList()
	v.textview = tview.NewTextView()
	v.app = tview.NewApplication()
	return v
}
// AddArticles stores the articles, fills the list widget (a "quit" entry
// first, then one entry per article), and registers the selection handler
// that renders the chosen article as markdown in the text view.
//
// Fixes: the selection handler was re-registered on every loop iteration
// (same closure each time — pure redundant work); it indexed v.articles[i]
// even though the "quit" entry occupies list index 0, which skipped the
// first article and read past the slice end for the last one; and a failed
// ReadAll fell through and overwrote the fallback text.
func (v *view) AddArticles(articles domain.Articles) {
	v.articles = articles
	v.list.AddItem("quit", "Press to quit.", 'q', func() { v.app.Stop() })
	for _, a := range v.articles {
		v.list.AddItem(a.Title, a.PublishedAt, 's', func() {})
	}
	v.list.SetSelectedFunc(func(i int, _ string, _ string, _ rune) {
		// List index 0 is the quit entry, so article n is at list index n+1.
		if i == 0 {
			return
		}
		a := v.articles[i-1]
		v.textview.Clear()
		if len(a.Content) == 0 {
			v.textview.SetText(a.Description)
			return
		}
		buf := bytes.NewBuffer(make([]byte, 0, 10000))
		if err := godown.Convert(buf, strings.NewReader(a.Content), nil); err != nil {
			// Markdown conversion failed — show the raw content instead.
			v.textview.SetText(a.Content)
			return
		}
		md, err := ioutil.ReadAll(buf)
		if err != nil {
			v.textview.SetText(a.Content)
			return
		}
		v.textview.SetText(string(md))
	})
}
// Run lays out the widgets (list takes 2/5 of the width and the focus, text
// view the rest) and blocks in the tview event loop until the app stops.
func (v *view) Run() error {
	v.flex.AddItem(v.list, 0, 2, true).AddItem(v.textview, 0, 3, false)
	v.app.SetRoot(v.flex, true)
	return v.app.Run()
}
|
package main
import "fmt"
//channel
//複数のゴルーチン間でのデータ受け渡しをする為に設計されたデータ構造。
//キュー(先入先出)
//宣言、操作
// main demonstrates channel declaration, buffer capacity, and FIFO
// send/receive order.
func main() {
	// A bidirectional channel can be declared first and initialized later.
	// Direction-restricted variants would be:
	//   var recvOnly <-chan int
	//   var sendOnly chan<- int
	var ch1 chan int
	ch1 = make(chan int)
	ch2 := make(chan int)
	// cap reports the buffer size; unbuffered channels report 0.
	fmt.Println(cap(ch1))
	fmt.Println(cap(ch2))
	// A buffered channel holds up to its capacity without a receiver.
	ch3 := make(chan int, 5)
	fmt.Println(cap(ch3))
	// Sends fill the buffer; len reports how many values are queued.
	ch3 <- 1
	fmt.Println(len(ch3))
	ch3 <- 2
	ch3 <- 3
	fmt.Println("len", len(ch3))
	// Receives drain in FIFO order: 1, then 2, then 3.
	first := <-ch3
	fmt.Println(first)
	fmt.Println("len", len(ch3))
	second := <-ch3
	fmt.Println(second)
	fmt.Println("len", len(ch3))
	fmt.Println(<-ch3)
	fmt.Println("len", len(ch3))
	// Sending more values than the buffer can hold, with no receiver,
	// would deadlock:
	//ch3 <- 1
	//ch3 <- 2
	//ch3 <- 3
	//ch3 <- 4
	//ch3 <- 5
	//ch3 <- 6
}
|
//go:build integration
// +build integration

package imintegration_test
import "testing"
// Test always reports a failure. NOTE(review): judging by the message, this
// exists to prove the "integration" build tag actually gates the file into
// the test run — confirm that intent before relying on it.
func Test(t *testing.T) {
	t.Error("found error in 'Integration Test'")
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
cloudschedulerpb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/cloudscheduler/cloudscheduler_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudscheduler"
)
// JobServer implements the gRPC interface for Job. It is stateless (an empty
// struct); all data flows through the request and response protos.
type JobServer struct{}
// ProtoToJobAppEngineHttpTargetHttpMethodEnum converts a JobAppEngineHttpTargetHttpMethodEnum enum from its proto representation.
// The zero (unspecified) proto value and unknown values map to nil;
// otherwise the proto enum name with its type prefix stripped is returned.
func ProtoToCloudschedulerJobAppEngineHttpTargetHttpMethodEnum(e cloudschedulerpb.CloudschedulerJobAppEngineHttpTargetHttpMethodEnum) *cloudscheduler.JobAppEngineHttpTargetHttpMethodEnum {
	if e == 0 {
		return nil
	}
	if n, ok := cloudschedulerpb.CloudschedulerJobAppEngineHttpTargetHttpMethodEnum_name[int32(e)]; ok {
		e := cloudscheduler.JobAppEngineHttpTargetHttpMethodEnum(n[len("CloudschedulerJobAppEngineHttpTargetHttpMethodEnum"):])
		return &e
	}
	return nil
}
// ProtoToJobHttpTargetHttpMethodEnum converts a JobHttpTargetHttpMethodEnum enum from its proto representation.
// The zero (unspecified) proto value and unknown values map to nil;
// otherwise the proto enum name with its type prefix stripped is returned.
func ProtoToCloudschedulerJobHttpTargetHttpMethodEnum(e cloudschedulerpb.CloudschedulerJobHttpTargetHttpMethodEnum) *cloudscheduler.JobHttpTargetHttpMethodEnum {
	if e == 0 {
		return nil
	}
	if n, ok := cloudschedulerpb.CloudschedulerJobHttpTargetHttpMethodEnum_name[int32(e)]; ok {
		e := cloudscheduler.JobHttpTargetHttpMethodEnum(n[len("CloudschedulerJobHttpTargetHttpMethodEnum"):])
		return &e
	}
	return nil
}
// ProtoToJobStateEnum converts a JobStateEnum enum from its proto representation.
// The zero (unspecified) proto value and unknown values map to nil;
// otherwise the proto enum name with its type prefix stripped is returned.
func ProtoToCloudschedulerJobStateEnum(e cloudschedulerpb.CloudschedulerJobStateEnum) *cloudscheduler.JobStateEnum {
	if e == 0 {
		return nil
	}
	if n, ok := cloudschedulerpb.CloudschedulerJobStateEnum_name[int32(e)]; ok {
		e := cloudscheduler.JobStateEnum(n[len("CloudschedulerJobStateEnum"):])
		return &e
	}
	return nil
}
// ProtoToJobPubsubTarget converts a JobPubsubTarget object from its proto representation.
// Returns nil for a nil input.
func ProtoToCloudschedulerJobPubsubTarget(p *cloudschedulerpb.CloudschedulerJobPubsubTarget) *cloudscheduler.JobPubsubTarget {
	if p == nil {
		return nil
	}
	obj := &cloudscheduler.JobPubsubTarget{
		TopicName: dcl.StringOrNil(p.GetTopicName()),
		Data:      dcl.StringOrNil(p.GetData()),
	}
	return obj
}
// ProtoToJobAppEngineHttpTarget converts a JobAppEngineHttpTarget object from its proto representation.
// Returns nil for a nil input; nested enum and routing fields are converted
// via their respective helpers.
func ProtoToCloudschedulerJobAppEngineHttpTarget(p *cloudschedulerpb.CloudschedulerJobAppEngineHttpTarget) *cloudscheduler.JobAppEngineHttpTarget {
	if p == nil {
		return nil
	}
	obj := &cloudscheduler.JobAppEngineHttpTarget{
		HttpMethod:       ProtoToCloudschedulerJobAppEngineHttpTargetHttpMethodEnum(p.GetHttpMethod()),
		AppEngineRouting: ProtoToCloudschedulerJobAppEngineHttpTargetAppEngineRouting(p.GetAppEngineRouting()),
		RelativeUri:      dcl.StringOrNil(p.GetRelativeUri()),
		Body:             dcl.StringOrNil(p.GetBody()),
	}
	return obj
}
// ProtoToJobAppEngineHttpTargetAppEngineRouting converts a JobAppEngineHttpTargetAppEngineRouting object from its proto representation.
// Returns nil for a nil input.
func ProtoToCloudschedulerJobAppEngineHttpTargetAppEngineRouting(p *cloudschedulerpb.CloudschedulerJobAppEngineHttpTargetAppEngineRouting) *cloudscheduler.JobAppEngineHttpTargetAppEngineRouting {
	if p == nil {
		return nil
	}
	obj := &cloudscheduler.JobAppEngineHttpTargetAppEngineRouting{
		Service:  dcl.StringOrNil(p.GetService()),
		Version:  dcl.StringOrNil(p.GetVersion()),
		Instance: dcl.StringOrNil(p.GetInstance()),
		Host:     dcl.StringOrNil(p.GetHost()),
	}
	return obj
}
// ProtoToJobHttpTarget converts a JobHttpTarget object from its proto representation.
// Returns nil for a nil input; nested enum and token fields are converted
// via their respective helpers.
func ProtoToCloudschedulerJobHttpTarget(p *cloudschedulerpb.CloudschedulerJobHttpTarget) *cloudscheduler.JobHttpTarget {
	if p == nil {
		return nil
	}
	obj := &cloudscheduler.JobHttpTarget{
		Uri:        dcl.StringOrNil(p.GetUri()),
		HttpMethod: ProtoToCloudschedulerJobHttpTargetHttpMethodEnum(p.GetHttpMethod()),
		Body:       dcl.StringOrNil(p.GetBody()),
		OAuthToken: ProtoToCloudschedulerJobHttpTargetOAuthToken(p.GetOauthToken()),
		OidcToken:  ProtoToCloudschedulerJobHttpTargetOidcToken(p.GetOidcToken()),
	}
	return obj
}
// ProtoToJobHttpTargetOAuthToken converts a JobHttpTargetOAuthToken object from its proto representation.
// Returns nil for a nil input.
func ProtoToCloudschedulerJobHttpTargetOAuthToken(p *cloudschedulerpb.CloudschedulerJobHttpTargetOAuthToken) *cloudscheduler.JobHttpTargetOAuthToken {
	if p == nil {
		return nil
	}
	obj := &cloudscheduler.JobHttpTargetOAuthToken{
		ServiceAccountEmail: dcl.StringOrNil(p.GetServiceAccountEmail()),
		Scope:               dcl.StringOrNil(p.GetScope()),
	}
	return obj
}
// ProtoToJobHttpTargetOidcToken converts a JobHttpTargetOidcToken object from its proto representation.
// Returns nil for a nil input.
func ProtoToCloudschedulerJobHttpTargetOidcToken(p *cloudschedulerpb.CloudschedulerJobHttpTargetOidcToken) *cloudscheduler.JobHttpTargetOidcToken {
	if p == nil {
		return nil
	}
	obj := &cloudscheduler.JobHttpTargetOidcToken{
		ServiceAccountEmail: dcl.StringOrNil(p.GetServiceAccountEmail()),
		Audience:            dcl.StringOrNil(p.GetAudience()),
	}
	return obj
}
// ProtoToJobStatus converts a JobStatus object from its proto representation.
func ProtoToCloudschedulerJobStatus(p *cloudschedulerpb.CloudschedulerJobStatus) *cloudscheduler.JobStatus {
if p == nil {
return nil
}
obj := &cloudscheduler.JobStatus{
Code: dcl.Int64OrNil(p.GetCode()),
Message: dcl.StringOrNil(p.GetMessage()),
}
for _, r := range p.GetDetails() {
obj.Details = append(obj.Details, *ProtoToCloudschedulerJobStatusDetails(r))
}
return obj
}
// ProtoToJobStatusDetails converts a JobStatusDetails object from its proto representation.
func ProtoToCloudschedulerJobStatusDetails(p *cloudschedulerpb.CloudschedulerJobStatusDetails) *cloudscheduler.JobStatusDetails {
if p == nil {
return nil
}
obj := &cloudscheduler.JobStatusDetails{
TypeUrl: dcl.StringOrNil(p.GetTypeUrl()),
Value: dcl.StringOrNil(p.GetValue()),
}
return obj
}
// ProtoToJobRetryConfig converts a JobRetryConfig object from its proto representation.
func ProtoToCloudschedulerJobRetryConfig(p *cloudschedulerpb.CloudschedulerJobRetryConfig) *cloudscheduler.JobRetryConfig {
if p == nil {
return nil
}
obj := &cloudscheduler.JobRetryConfig{
RetryCount: dcl.Int64OrNil(p.GetRetryCount()),
MaxRetryDuration: dcl.StringOrNil(p.GetMaxRetryDuration()),
MinBackoffDuration: dcl.StringOrNil(p.GetMinBackoffDuration()),
MaxBackoffDuration: dcl.StringOrNil(p.GetMaxBackoffDuration()),
MaxDoublings: dcl.Int64OrNil(p.GetMaxDoublings()),
}
return obj
}
// ProtoToJob converts a Job resource from its proto representation.
func ProtoToJob(p *cloudschedulerpb.CloudschedulerJob) *cloudscheduler.Job {
obj := &cloudscheduler.Job{
Name: dcl.StringOrNil(p.GetName()),
Description: dcl.StringOrNil(p.GetDescription()),
PubsubTarget: ProtoToCloudschedulerJobPubsubTarget(p.GetPubsubTarget()),
AppEngineHttpTarget: ProtoToCloudschedulerJobAppEngineHttpTarget(p.GetAppEngineHttpTarget()),
HttpTarget: ProtoToCloudschedulerJobHttpTarget(p.GetHttpTarget()),
Schedule: dcl.StringOrNil(p.GetSchedule()),
TimeZone: dcl.StringOrNil(p.GetTimeZone()),
UserUpdateTime: dcl.StringOrNil(p.GetUserUpdateTime()),
State: ProtoToCloudschedulerJobStateEnum(p.GetState()),
Status: ProtoToCloudschedulerJobStatus(p.GetStatus()),
ScheduleTime: dcl.StringOrNil(p.GetScheduleTime()),
LastAttemptTime: dcl.StringOrNil(p.GetLastAttemptTime()),
RetryConfig: ProtoToCloudschedulerJobRetryConfig(p.GetRetryConfig()),
AttemptDeadline: dcl.StringOrNil(p.GetAttemptDeadline()),
Project: dcl.StringOrNil(p.GetProject()),
Location: dcl.StringOrNil(p.GetLocation()),
}
return obj
}
// The enum converters below map a domain enum pointer to its proto enum
// value by name lookup ("<EnumTypeName>" + string value). A nil pointer or an
// unknown name falls back to the zero (unspecified) proto enum value.

// JobAppEngineHttpTargetHttpMethodEnumToProto converts a JobAppEngineHttpTargetHttpMethodEnum enum to its proto representation.
func CloudschedulerJobAppEngineHttpTargetHttpMethodEnumToProto(e *cloudscheduler.JobAppEngineHttpTargetHttpMethodEnum) cloudschedulerpb.CloudschedulerJobAppEngineHttpTargetHttpMethodEnum {
    if e == nil {
        return cloudschedulerpb.CloudschedulerJobAppEngineHttpTargetHttpMethodEnum(0)
    }
    if v, ok := cloudschedulerpb.CloudschedulerJobAppEngineHttpTargetHttpMethodEnum_value["JobAppEngineHttpTargetHttpMethodEnum"+string(*e)]; ok {
        return cloudschedulerpb.CloudschedulerJobAppEngineHttpTargetHttpMethodEnum(v)
    }
    return cloudschedulerpb.CloudschedulerJobAppEngineHttpTargetHttpMethodEnum(0)
}

// JobHttpTargetHttpMethodEnumToProto converts a JobHttpTargetHttpMethodEnum enum to its proto representation.
func CloudschedulerJobHttpTargetHttpMethodEnumToProto(e *cloudscheduler.JobHttpTargetHttpMethodEnum) cloudschedulerpb.CloudschedulerJobHttpTargetHttpMethodEnum {
    if e == nil {
        return cloudschedulerpb.CloudschedulerJobHttpTargetHttpMethodEnum(0)
    }
    if v, ok := cloudschedulerpb.CloudschedulerJobHttpTargetHttpMethodEnum_value["JobHttpTargetHttpMethodEnum"+string(*e)]; ok {
        return cloudschedulerpb.CloudschedulerJobHttpTargetHttpMethodEnum(v)
    }
    return cloudschedulerpb.CloudschedulerJobHttpTargetHttpMethodEnum(0)
}

// JobStateEnumToProto converts a JobStateEnum enum to its proto representation.
func CloudschedulerJobStateEnumToProto(e *cloudscheduler.JobStateEnum) cloudschedulerpb.CloudschedulerJobStateEnum {
    if e == nil {
        return cloudschedulerpb.CloudschedulerJobStateEnum(0)
    }
    if v, ok := cloudschedulerpb.CloudschedulerJobStateEnum_value["JobStateEnum"+string(*e)]; ok {
        return cloudschedulerpb.CloudschedulerJobStateEnum(v)
    }
    return cloudschedulerpb.CloudschedulerJobStateEnum(0)
}
// The *ToProto converters below mirror the ProtoTo* converters above: a nil
// domain object maps to a nil proto, scalar pointers are flattened with
// dcl.ValueOrEmpty*, and map/list fields are rebuilt element by element.

// JobPubsubTargetToProto converts a JobPubsubTarget object to its proto representation.
func CloudschedulerJobPubsubTargetToProto(o *cloudscheduler.JobPubsubTarget) *cloudschedulerpb.CloudschedulerJobPubsubTarget {
    if o == nil {
        return nil
    }
    p := &cloudschedulerpb.CloudschedulerJobPubsubTarget{}
    p.SetTopicName(dcl.ValueOrEmptyString(o.TopicName))
    p.SetData(dcl.ValueOrEmptyString(o.Data))
    mAttributes := make(map[string]string, len(o.Attributes))
    for k, r := range o.Attributes {
        mAttributes[k] = r
    }
    p.SetAttributes(mAttributes)
    return p
}

// JobAppEngineHttpTargetToProto converts a JobAppEngineHttpTarget object to its proto representation.
func CloudschedulerJobAppEngineHttpTargetToProto(o *cloudscheduler.JobAppEngineHttpTarget) *cloudschedulerpb.CloudschedulerJobAppEngineHttpTarget {
    if o == nil {
        return nil
    }
    p := &cloudschedulerpb.CloudschedulerJobAppEngineHttpTarget{}
    p.SetHttpMethod(CloudschedulerJobAppEngineHttpTargetHttpMethodEnumToProto(o.HttpMethod))
    p.SetAppEngineRouting(CloudschedulerJobAppEngineHttpTargetAppEngineRoutingToProto(o.AppEngineRouting))
    p.SetRelativeUri(dcl.ValueOrEmptyString(o.RelativeUri))
    p.SetBody(dcl.ValueOrEmptyString(o.Body))
    mHeaders := make(map[string]string, len(o.Headers))
    for k, r := range o.Headers {
        mHeaders[k] = r
    }
    p.SetHeaders(mHeaders)
    return p
}

// JobAppEngineHttpTargetAppEngineRoutingToProto converts a JobAppEngineHttpTargetAppEngineRouting object to its proto representation.
func CloudschedulerJobAppEngineHttpTargetAppEngineRoutingToProto(o *cloudscheduler.JobAppEngineHttpTargetAppEngineRouting) *cloudschedulerpb.CloudschedulerJobAppEngineHttpTargetAppEngineRouting {
    if o == nil {
        return nil
    }
    p := &cloudschedulerpb.CloudschedulerJobAppEngineHttpTargetAppEngineRouting{}
    p.SetService(dcl.ValueOrEmptyString(o.Service))
    p.SetVersion(dcl.ValueOrEmptyString(o.Version))
    p.SetInstance(dcl.ValueOrEmptyString(o.Instance))
    p.SetHost(dcl.ValueOrEmptyString(o.Host))
    return p
}

// JobHttpTargetToProto converts a JobHttpTarget object to its proto representation.
func CloudschedulerJobHttpTargetToProto(o *cloudscheduler.JobHttpTarget) *cloudschedulerpb.CloudschedulerJobHttpTarget {
    if o == nil {
        return nil
    }
    p := &cloudschedulerpb.CloudschedulerJobHttpTarget{}
    p.SetUri(dcl.ValueOrEmptyString(o.Uri))
    p.SetHttpMethod(CloudschedulerJobHttpTargetHttpMethodEnumToProto(o.HttpMethod))
    p.SetBody(dcl.ValueOrEmptyString(o.Body))
    p.SetOauthToken(CloudschedulerJobHttpTargetOAuthTokenToProto(o.OAuthToken))
    p.SetOidcToken(CloudschedulerJobHttpTargetOidcTokenToProto(o.OidcToken))
    mHeaders := make(map[string]string, len(o.Headers))
    for k, r := range o.Headers {
        mHeaders[k] = r
    }
    p.SetHeaders(mHeaders)
    return p
}

// JobHttpTargetOAuthTokenToProto converts a JobHttpTargetOAuthToken object to its proto representation.
func CloudschedulerJobHttpTargetOAuthTokenToProto(o *cloudscheduler.JobHttpTargetOAuthToken) *cloudschedulerpb.CloudschedulerJobHttpTargetOAuthToken {
    if o == nil {
        return nil
    }
    p := &cloudschedulerpb.CloudschedulerJobHttpTargetOAuthToken{}
    p.SetServiceAccountEmail(dcl.ValueOrEmptyString(o.ServiceAccountEmail))
    p.SetScope(dcl.ValueOrEmptyString(o.Scope))
    return p
}

// JobHttpTargetOidcTokenToProto converts a JobHttpTargetOidcToken object to its proto representation.
func CloudschedulerJobHttpTargetOidcTokenToProto(o *cloudscheduler.JobHttpTargetOidcToken) *cloudschedulerpb.CloudschedulerJobHttpTargetOidcToken {
    if o == nil {
        return nil
    }
    p := &cloudschedulerpb.CloudschedulerJobHttpTargetOidcToken{}
    p.SetServiceAccountEmail(dcl.ValueOrEmptyString(o.ServiceAccountEmail))
    p.SetAudience(dcl.ValueOrEmptyString(o.Audience))
    return p
}

// JobStatusToProto converts a JobStatus object to its proto representation.
func CloudschedulerJobStatusToProto(o *cloudscheduler.JobStatus) *cloudschedulerpb.CloudschedulerJobStatus {
    if o == nil {
        return nil
    }
    p := &cloudschedulerpb.CloudschedulerJobStatus{}
    p.SetCode(dcl.ValueOrEmptyInt64(o.Code))
    p.SetMessage(dcl.ValueOrEmptyString(o.Message))
    sDetails := make([]*cloudschedulerpb.CloudschedulerJobStatusDetails, len(o.Details))
    for i, r := range o.Details {
        // Passing &r is safe here: the callee only reads the fields and does
        // not retain the pointer past the call.
        sDetails[i] = CloudschedulerJobStatusDetailsToProto(&r)
    }
    p.SetDetails(sDetails)
    return p
}

// JobStatusDetailsToProto converts a JobStatusDetails object to its proto representation.
func CloudschedulerJobStatusDetailsToProto(o *cloudscheduler.JobStatusDetails) *cloudschedulerpb.CloudschedulerJobStatusDetails {
    if o == nil {
        return nil
    }
    p := &cloudschedulerpb.CloudschedulerJobStatusDetails{}
    p.SetTypeUrl(dcl.ValueOrEmptyString(o.TypeUrl))
    p.SetValue(dcl.ValueOrEmptyString(o.Value))
    return p
}

// JobRetryConfigToProto converts a JobRetryConfig object to its proto representation.
func CloudschedulerJobRetryConfigToProto(o *cloudscheduler.JobRetryConfig) *cloudschedulerpb.CloudschedulerJobRetryConfig {
    if o == nil {
        return nil
    }
    p := &cloudschedulerpb.CloudschedulerJobRetryConfig{}
    p.SetRetryCount(dcl.ValueOrEmptyInt64(o.RetryCount))
    p.SetMaxRetryDuration(dcl.ValueOrEmptyString(o.MaxRetryDuration))
    p.SetMinBackoffDuration(dcl.ValueOrEmptyString(o.MinBackoffDuration))
    p.SetMaxBackoffDuration(dcl.ValueOrEmptyString(o.MaxBackoffDuration))
    p.SetMaxDoublings(dcl.ValueOrEmptyInt64(o.MaxDoublings))
    return p
}

// JobToProto converts a Job resource to its proto representation.
func JobToProto(resource *cloudscheduler.Job) *cloudschedulerpb.CloudschedulerJob {
    p := &cloudschedulerpb.CloudschedulerJob{}
    p.SetName(dcl.ValueOrEmptyString(resource.Name))
    p.SetDescription(dcl.ValueOrEmptyString(resource.Description))
    p.SetPubsubTarget(CloudschedulerJobPubsubTargetToProto(resource.PubsubTarget))
    p.SetAppEngineHttpTarget(CloudschedulerJobAppEngineHttpTargetToProto(resource.AppEngineHttpTarget))
    p.SetHttpTarget(CloudschedulerJobHttpTargetToProto(resource.HttpTarget))
    p.SetSchedule(dcl.ValueOrEmptyString(resource.Schedule))
    p.SetTimeZone(dcl.ValueOrEmptyString(resource.TimeZone))
    p.SetUserUpdateTime(dcl.ValueOrEmptyString(resource.UserUpdateTime))
    p.SetState(CloudschedulerJobStateEnumToProto(resource.State))
    p.SetStatus(CloudschedulerJobStatusToProto(resource.Status))
    p.SetScheduleTime(dcl.ValueOrEmptyString(resource.ScheduleTime))
    p.SetLastAttemptTime(dcl.ValueOrEmptyString(resource.LastAttemptTime))
    p.SetRetryConfig(CloudschedulerJobRetryConfigToProto(resource.RetryConfig))
    p.SetAttemptDeadline(dcl.ValueOrEmptyString(resource.AttemptDeadline))
    p.SetProject(dcl.ValueOrEmptyString(resource.Project))
    p.SetLocation(dcl.ValueOrEmptyString(resource.Location))
    return p
}
// applyJob handles the gRPC request by passing it to the underlying Job Apply() method.
func (s *JobServer) applyJob(ctx context.Context, c *cloudscheduler.Client, request *cloudschedulerpb.ApplyCloudschedulerJobRequest) (*cloudschedulerpb.CloudschedulerJob, error) {
    p := ProtoToJob(request.GetResource())
    res, err := c.ApplyJob(ctx, p)
    if err != nil {
        return nil, err
    }
    r := JobToProto(res)
    return r, nil
}

// ApplyCloudschedulerJob handles the gRPC request by passing it to the underlying Job Apply() method.
// A fresh client is built per request from the service-account file named in
// the request.
func (s *JobServer) ApplyCloudschedulerJob(ctx context.Context, request *cloudschedulerpb.ApplyCloudschedulerJobRequest) (*cloudschedulerpb.CloudschedulerJob, error) {
    cl, err := createConfigJob(ctx, request.GetServiceAccountFile())
    if err != nil {
        return nil, err
    }
    return s.applyJob(ctx, cl, request)
}

// DeleteCloudschedulerJob handles the gRPC request by passing it to the underlying Job Delete() method.
func (s *JobServer) DeleteCloudschedulerJob(ctx context.Context, request *cloudschedulerpb.DeleteCloudschedulerJobRequest) (*emptypb.Empty, error) {
    cl, err := createConfigJob(ctx, request.GetServiceAccountFile())
    if err != nil {
        return nil, err
    }
    return &emptypb.Empty{}, cl.DeleteJob(ctx, ProtoToJob(request.GetResource()))
}

// ListCloudschedulerJob handles the gRPC request by passing it to the underlying JobList() method.
func (s *JobServer) ListCloudschedulerJob(ctx context.Context, request *cloudschedulerpb.ListCloudschedulerJobRequest) (*cloudschedulerpb.ListCloudschedulerJobResponse, error) {
    cl, err := createConfigJob(ctx, request.GetServiceAccountFile())
    if err != nil {
        return nil, err
    }
    resources, err := cl.ListJob(ctx, request.GetProject(), request.GetLocation())
    if err != nil {
        return nil, err
    }
    var protos []*cloudschedulerpb.CloudschedulerJob
    for _, r := range resources.Items {
        rp := JobToProto(r)
        protos = append(protos, rp)
    }
    p := &cloudschedulerpb.ListCloudschedulerJobResponse{}
    p.SetItems(protos)
    return p, nil
}
// createConfigJob builds a cloudscheduler client authenticated via the given
// service-account credentials file. The context parameter keeps the signature
// consistent with the handlers above; dcl.NewConfig does not take one.
// Renamed the parameter from service_account_file: Go uses MixedCaps, never
// underscores (parameter names are not part of the call interface).
func createConfigJob(ctx context.Context, serviceAccountFile string) (*cloudscheduler.Client, error) {
    conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
    return cloudscheduler.NewClient(conf), nil
}
|
package dao
import (
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
"ssq-spider/configure"
)
// MysqlDB bundles the configured MySQL DSN with a lazily-opened gorm
// connection that is cached across calls to NewMysqlDBClient.
type MysqlDB struct {
    dbUrl    string   // DSN taken from configure.GlobalConfig.Mysql.Url
    dbClient *gorm.DB // cached connection; nil until the first successful open
}

// mysqlDB is the package-level singleton holding the cached connection.
var mysqlDB MysqlDB

// init seeds the singleton from global configuration; the connection itself
// is opened lazily by NewMysqlDBClient.
func init() {
    mysqlDB.dbUrl = configure.GlobalConfig.Mysql.Url
    mysqlDB.dbClient = nil
}
// NewMysqlDBClient returns a live gorm connection, reusing the cached one
// when it still answers a ping and reconnecting otherwise.
//
// Fixes: the function declared an error return but panicked on connection
// failure; it now returns the error so callers can handle it. It also reads
// the DSN from the cached mysqlDB.dbUrl (set in init from the same config
// value) instead of reaching back into the global config.
//
// NOTE(review): not goroutine-safe — concurrent callers race on
// mysqlDB.dbClient; confirm this is only called from a single goroutine.
func NewMysqlDBClient() (*gorm.DB, error) {
    if mysqlDB.dbClient != nil {
        if err := mysqlDB.dbClient.DB().Ping(); err == nil {
            return mysqlDB.dbClient, nil
        }
        // Stale connection: close it (best effort) before reconnecting.
        _ = mysqlDB.dbClient.Close()
    }
    db, err := gorm.Open("mysql", mysqlDB.dbUrl)
    if err != nil {
        return nil, err
    }
    mysqlDB.dbClient = db
    return mysqlDB.dbClient, nil
}
|
package api
import "time"
// ReportID identifies a stored Report row.
type ReportID int64

// ReportStorage persists reports and retrieves the report for a website.
type ReportStorage interface {
    Save(*Report) error
    ByWebsite(WebsiteID) (*Report, error)
}

// Report captures the outcome and timings of a single website check.
// Fields tagged db:"-" (Matches, WebsiteURL) are populated in code and not
// persisted. The *In fields hold duration strings for each check phase.
type Report struct {
    ID              ReportID   `db:"id"`
    UserID          UserID     `db:"user_id"`
    WebsiteID       WebsiteID  `db:"website_id"`
    Matches         []*Match   `db:"-"`
    WebsiteURL      string     `db:"-"`
    LoadedIn        string     `db:"loaded_in"`
    StartedIn       string     `db:"started_in"`
    ResourceCheckIn string     `db:"resource_check_in"`
    HTMLCheckIn     string     `db:"html_check_in"`
    TotalIn         string     `db:"total_in"`
    CreatedAt       *time.Time `db:"created_at"`
}
|
package crs
import (
"runtime"
"sync"
)
// New builds an LFU cache that holds at most size entries; a size of zero
// means the cache is unbounded.
func New(size uint) *LFU {
    cache := LFU{limit: size}
    cache.ClearSoft()
    return &cache
}
// LFU is a least-frequently-used cache keyed by Snowflake. Entries live in
// the items slice; table maps an ID to its slice index (-1 marks a deleted
// entry), and nilTable lists free slice indices available for reuse.
type LFU struct {
    sync.RWMutex
    items    []LFUItem
    table    map[Snowflake]int // ID -> index into items, or -1 if deleted
    nilTable []int             // free indices in items, used as a stack
    limit    uint              // 0 == unlimited
    size     uint              // number of live entries
    misses   uint64            // opposite of cache hits
    hits     uint64
}
// Size reports the number of entries currently stored in the cache.
func (list *LFU) Size() uint {
    return list.size
}
// Cap reports the configured capacity limit; 0 means unlimited.
func (list *LFU) Cap() uint {
    return list.limit
}
// ClearSoft empties the cache, replacing the item storage and lookup tables
// without forcing a garbage-collection pass (see ClearHard for that).
//
// Fix: size is now reset to zero. Previously the old entry count survived a
// clear, so the next Set on a full-then-cleared cache still saw
// size >= limit and ran removeLFU against the freshly emptied items slice.
func (list *LFU) ClearSoft() {
    for i := range list.items {
        // Drop value references so the GC can reclaim them even if the old
        // backing array is still referenced elsewhere.
        list.items[i].Val = nil
    }
    list.items = make([]LFUItem, list.limit)
    list.size = 0
    list.ClearTables()
}
// ClearHard empties the cache like ClearSoft and then forces a garbage
// collection so the released values are reclaimed immediately.
func (list *LFU) ClearHard() {
    list.ClearSoft()
    runtime.GC()
}
// ClearTableNils compacts the lookup table by rebuilding it without the
// entries tombstoned as -1 by deleteUnsafe.
//
// Fix: the first pass counted the surviving entries into `size` but the
// count was never used (the map was sized by list.limit instead). The count
// is now used as the replacement map's capacity, which was clearly the
// original intent of computing it.
func (list *LFU) ClearTableNils() {
    size := 0
    for key := range list.table {
        if list.table[key] != -1 {
            size++
        }
    }
    newTable := make(map[Snowflake]int, size)
    for key, index := range list.table {
        if index != -1 {
            newTable[key] = index
        }
    }
    list.table = newTable
}
// ClearTables resets the ID lookup table and refills the free-slot stack so
// every index in items is available again.
func (list *LFU) ClearTables() {
    list.table = make(map[Snowflake]int)
    list.nilTable = make([]int, list.limit)
    for i := range list.nilTable {
        list.nilTable[i] = i
    }
}
// Set inserts newItem under id, or overwrites the stored value in place when
// the id already exists. When the cache is full, the least-frequently-used
// entry is evicted first. (The original comment claimed a boolean return;
// the method returns nothing.)
//
// NOTE(review): no locking here despite the embedded RWMutex — presumably
// callers are expected to hold the lock; confirm before concurrent use.
func (list *LFU) Set(id Snowflake, newItem *LFUItem) {
    newItem.ID = id
    // Existing live entry (-1 marks a tombstone): update the value in place.
    if key, exists := list.table[id]; exists && key != -1 {
        list.items[key].Val = newItem.Val
        return
    }
    if list.limit > 0 && list.size >= list.limit {
        // if limit is reached, replace the content of the least recently counter (lru)
        list.removeLFU()
    }
    // Reuse a freed slot when available; otherwise grow the items slice.
    var key int
    if len(list.nilTable) > 0 {
        key = list.nilTable[len(list.nilTable)-1]
        list.nilTable = list.nilTable[:len(list.nilTable)-1]
        list.items[key] = *newItem
    } else {
        key = len(list.items)
        list.items = append(list.items, *newItem)
    }
    list.table[id] = key
    list.size++
}
// removeLFU evicts the entry with the lowest access counter. Linear scan over
// the whole items slice; assumes items is non-empty (Set only calls this when
// size >= limit > 0).
//
// NOTE(review): the scan does not skip slots already deleted via
// deleteUnsafe (their counters remain in items), so a tombstoned slot can be
// selected and deleted again — confirm whether that double-decrements size.
func (list *LFU) removeLFU() {
    lfuKey := 0
    lfu := list.items[lfuKey]
    var i int
    for i = range list.items {
        if list.items[i].counter < lfu.counter {
            // TODO: create a link to lowest counter for later?
            lfu = list.items[i]
            lfuKey = i
        }
    }
    list.deleteUnsafe(lfuKey, lfu.ID)
}
// RefreshAfterDiscordUpdate bumps the item's usage counter so an update from
// Discord counts as an access for LFU eviction purposes.
func (list *LFU) RefreshAfterDiscordUpdate(item *LFUItem) {
    item.increment()
}
// Get looks up id, bumping the item's usage counter and the hit/miss
// statistics. The returned pointer aliases the cache's internal storage.
//
// NOTE(review): like Set, this mutates state (counter, hits, misses) without
// taking the embedded RWMutex — confirm callers hold the lock.
func (list *LFU) Get(id Snowflake) (ret *LFUItem, exists bool) {
    var key int
    if key, exists = list.table[id]; exists && key != -1 {
        ret = &list.items[key]
        ret.increment()
        list.hits++
    } else {
        exists = false // if key == -1, exists might still be true
        list.misses++
    }
    return
}
// deleteUnsafe removes the entry at slice index key with lookup id: the table
// entry becomes a -1 tombstone and the slot index is pushed onto the free
// stack for reuse. "Unsafe" = no existence check and no locking; callers
// must have verified the entry is live.
func (list *LFU) deleteUnsafe(key int, id Snowflake) {
    list.table[id] = -1
    list.items[key].Val = nil // prepare for GC
    list.nilTable = append(list.nilTable, key)
    list.size--
}
// Delete removes the entry stored under id; unknown or already-deleted IDs
// are ignored.
func (list *LFU) Delete(id Snowflake) {
    key, exists := list.table[id]
    if !exists || key == -1 {
        return
    }
    list.deleteUnsafe(key, id)
}
// CreateCacheableItem wraps arbitrary content in an LFUItem suitable for Set.
func (list *LFU) CreateCacheableItem(content interface{}) *LFUItem {
    return newLFUItem(content)
}
// Efficiency returns the cache hit rate in [0, 1]: hits / (hits + misses).
// With no hits recorded it returns 0 (this also sidesteps 0/0 when both
// counters are zero).
func (list *LFU) Efficiency() float64 {
    if list.hits == 0 {
        return 0.0
    }
    total := list.hits + list.misses
    return float64(list.hits) / float64(total)
}
|
package main
import (
"bytes"
"encoding/binary"
"errors"
"log"
"os"
"os/signal"
"syscall"
"github.com/cilium/ebpf/link"
"github.com/cilium/ebpf/ringbuf"
"github.com/cilium/ebpf/rlimit"
"golang.org/x/sys/unix"
)
// $BPF_CLANG and $BPF_CFLAGS are set by the Makefile.
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc $BPF_CLANG -cflags $BPF_CFLAGS -type event bpf ringbuffer.c -- -I../headers
// main attaches a kprobe to the execve syscall entry and streams the events
// the eBPF program publishes through a ring buffer, logging each traced
// pid/comm until the process receives SIGINT/SIGTERM.
func main() {
    // Name of the kernel function to trace.
    // NOTE(review): on newer kernels the exported symbol may be
    // __x64_sys_execve rather than sys_execve — confirm for the target kernel.
    fn := "sys_execve"
    // Subscribe to signals for terminating the program.
    stopper := make(chan os.Signal, 1)
    signal.Notify(stopper, os.Interrupt, syscall.SIGTERM)
    // Allow the current process to lock memory for eBPF resources.
    if err := rlimit.RemoveMemlock(); err != nil {
        log.Fatal(err)
    }
    // Load pre-compiled programs and maps into the kernel.
    objs := bpfObjects{}
    if err := loadBpfObjects(&objs, nil); err != nil {
        log.Fatalf("loading objects: %v", err)
    }
    defer objs.Close()
    // Open a Kprobe at the entry point of the kernel function and attach the
    // pre-compiled program. Each time the kernel function enters, the program
    // will emit an event containing pid and command of the execved task.
    kp, err := link.Kprobe(fn, objs.KprobeExecve, nil)
    if err != nil {
        log.Fatalf("opening kprobe: %s", err)
    }
    defer kp.Close()
    // Open a ringbuf reader from userspace RINGBUF map described in the
    // eBPF C program.
    rd, err := ringbuf.NewReader(objs.Events)
    if err != nil {
        log.Fatalf("opening ringbuf reader: %s", err)
    }
    defer rd.Close()
    // Close the reader when the process receives a signal, which will exit
    // the read loop (rd.Read below returns ringbuf.ErrClosed).
    go func() {
        <-stopper
        if err := rd.Close(); err != nil {
            log.Fatalf("closing ringbuf reader: %s", err)
        }
    }()
    log.Println("Waiting for events..")
    // bpfEvent is generated by bpf2go.
    var event bpfEvent
    for {
        record, err := rd.Read()
        if err != nil {
            if errors.Is(err, ringbuf.ErrClosed) {
                log.Println("Received signal, exiting..")
                return
            }
            // Transient read failure: log and keep consuming.
            log.Printf("reading from reader: %s", err)
            continue
        }
        // Parse the ringbuf event entry into a bpfEvent structure.
        if err := binary.Read(bytes.NewBuffer(record.RawSample), binary.LittleEndian, &event); err != nil {
            log.Printf("parsing ringbuf event: %s", err)
            continue
        }
        log.Printf("pid: %d\tcomm: %s\n", event.Pid, unix.ByteSliceToString(event.Comm[:]))
    }
}
|
// +build !race
package consulutil
import (
"bytes"
"reflect"
"sync"
"testing"
"time"
. "github.com/anthonybishopric/gotcha"
"github.com/hashicorp/consul/api"
)
// PairRecord is a record of a single update to the Consul KV store
type PairRecord struct {
    // Change is one of "create", "update", "delete", or "close".
    Change string
    // Key and Value are the k/v details of the notification.
    Key   string
    Value string
}

// is reports whether the record has the given change type and key.
func (r PairRecord) is(change, key string) bool {
    return r.Change == change && r.Key == key
}

// IsCreate reports whether r records the creation of key.
func (r PairRecord) IsCreate(key string) bool {
    return r.is("create", key)
}

// IsUpdate reports whether r records pair's key being set to pair's value.
func (r PairRecord) IsUpdate(pair *api.KVPair) bool {
    return r.is("update", pair.Key) && r.Value == string(pair.Value)
}

// IsDelete reports whether r records the deletion of key.
func (r PairRecord) IsDelete(key string) bool {
    return r.is("delete", key)
}

// IsClose reports whether r records the close notification for key.
func (r PairRecord) IsClose(key string) bool {
    return r.is("close", key)
}
// PairRecords is an ordered list of recorded KV notifications.
type PairRecords []PairRecord

// Filter returns a slice of records for the given key
func (rs PairRecords) Filter(key string) PairRecords {
    // Non-nil even when empty, matching the original make-based behavior.
    matched := make(PairRecords, 0, len(rs))
    for _, rec := range rs {
        if rec.Key != key {
            continue
        }
        matched = append(matched, rec)
    }
    return matched
}
// PairRecorder can subscribe to a watch stream and record all notifications it receives.
// Records is guarded by Mutex; Cond signals waiters whenever a record is appended.
type PairRecorder struct {
    T      *testing.T
    Mutex  sync.Mutex
    Cond   *sync.Cond
    Records []PairRecord
}

// NewRecorder returns a recorder ready to collect notifications for t.
func NewRecorder(t *testing.T) *PairRecorder {
    rec := &PairRecorder{
        T:       t,
        Records: []PairRecord{},
    }
    rec.Cond = sync.NewCond(&rec.Mutex)
    return rec
}
// WaitFor blocks until at least length records have been appended, then
// returns a snapshot of the record list.
//
// Fix: the previous `p.Records[:]` is a no-op slice expression that aliased
// the shared backing array rather than copying it; return a genuine copy so
// callers can inspect the snapshot without sharing storage with later
// appends.
func (p *PairRecorder) WaitFor(length int) PairRecords {
    p.Mutex.Lock()
    defer p.Mutex.Unlock()
    for len(p.Records) < length {
        p.Cond.Wait()
    }
    return append(PairRecords(nil), p.Records...)
}
// RecordList returns a snapshot of all records received so far.
//
// Fix: `p.Records[:]` did not copy — it aliased the shared backing array.
// Return a real copy taken under the mutex.
func (p *PairRecorder) RecordList() PairRecords {
    p.Mutex.Lock()
    defer p.Mutex.Unlock()
    return append(PairRecords(nil), p.Records...)
}
// Append records one notification and wakes any goroutines blocked in
// WaitFor.
func (p *PairRecorder) Append(change, key string, value []byte) {
    rec := PairRecord{Change: change, Key: key, Value: string(value)}
    p.Mutex.Lock()
    defer p.Mutex.Unlock()
    p.Records = append(p.Records, rec)
    p.Cond.Broadcast()
}
// Handler is a NewKeyHandler that will process new keys and arrange for their values to
// be recorded. It records "create" immediately, then spawns a goroutine that
// records "update" for each non-nil pair and "delete" for each nil pair sent
// on the returned channel, and finally "close" once the channel is closed.
// The goroutine's lifetime is bounded by the channel: closing it ends the loop.
func (p *PairRecorder) Handler(key string) chan<- *api.KVPair {
    p.T.Logf("%s create", key)
    p.Append("create", key, nil)
    updates := make(chan *api.KVPair)
    go func() {
        for pair := range updates {
            if pair != nil {
                p.T.Logf("%s = %s", key, string(pair.Value))
                p.Append("update", key, pair.Value)
            } else {
                // A nil pair is the watcher's deletion signal.
                p.T.Logf("%s delete", key)
                p.Append("delete", key, nil)
            }
        }
        p.T.Logf("%s done", key)
        p.Append("close", key, nil)
    }()
    return updates
}
// kvToMap flattens a list of KV pairs into a key -> string-value map.
func kvToMap(pairs api.KVPairs) map[string]string {
    out := make(map[string]string, len(pairs))
    for _, kv := range pairs {
        out[kv.Key] = string(kv.Value)
    }
    return out
}
// kvMatch reports whether m contains pair's key with exactly pair's value.
func kvMatch(m map[string]string, pair *api.KVPair) bool {
    if val, ok := m[pair.Key]; ok {
        return val == string(pair.Value)
    }
    return false
}
// kvEqual reports whether two KV pairs have the same key and value; two nil
// pairs are equal, and nil never equals non-nil.
//
// Fix: use bytes.Equal instead of bytes.Compare(...) == 0 — equality is what
// is being asked, and Equal states that directly (staticcheck S1004).
func kvEqual(a, b *api.KVPair) bool {
    if a == nil || b == nil {
        return a == b
    }
    return a.Key == b.Key && bytes.Equal(a.Value, b.Value)
}
// testLogger returns a send-only channel whose errors are logged to t by a
// background goroutine; close the channel to stop the goroutine.
func testLogger(t *testing.T) chan<- error {
    errs := make(chan error)
    go func() {
        for e := range errs {
            t.Log(e)
        }
    }()
    return errs
}
// TestWatchPrefix verifies some basic operations of the WatchPrefix() function. It should
// find existing data, send new updates when the data changes, and ignore changes outside
// its prefix.
func TestWatchPrefix(t *testing.T) {
    t.Parallel()
    f := NewFixture(t)
    defer f.Stop()
    done := make(chan struct{})
    // done is nilled out once closed below so this deferred cleanup never
    // closes the channel twice (a double close would panic).
    defer func() {
        if done != nil {
            close(done)
        }
    }()
    pairsChan := make(chan api.KVPairs)
    kv1a := &api.KVPair{Key: "prefix/hello", Value: []byte("world")}
    kv1b := &api.KVPair{Key: "prefix/hello", Value: []byte("computer")}
    kv2a := &api.KVPair{Key: "prefix/test", Value: []byte("foo")}
    kv3a := &api.KVPair{Key: "something", Value: []byte("different")}
    // Process existing data
    f.Client.KV().Put(kv1a, nil)
    go WatchPrefix("prefix/", f.Client.KV(), pairsChan, done, testLogger(t), 0, 0)
    pairs := kvToMap(<-pairsChan)
    if !kvMatch(pairs, kv1a) {
        t.Error("existing data not recognized")
    }
    // Get updates when the data changes (create, modify, delete)
    f.Client.KV().Put(kv1b, nil)
    pairs = kvToMap(<-pairsChan)
    if !kvMatch(pairs, kv1b) {
        t.Error("value not updated")
    }
    f.Client.KV().Put(kv2a, nil)
    pairs = kvToMap(<-pairsChan)
    if !kvMatch(pairs, kv2a) {
        t.Error("did not find new value")
    }
    if !kvMatch(pairs, kv1b) {
        t.Error("old value disappeared")
    }
    f.Client.KV().Delete(kv1a.Key, nil)
    pairs = kvToMap(<-pairsChan)
    if _, ok := pairs[kv1a.Key]; ok {
        t.Error("did not register deletion")
    }
    // The watcher should ignore kv3a, which is outside its prefix
    f.Client.KV().Put(kv3a, nil)
    f.Client.KV().Delete(kv2a.Key, nil)
    pairs = kvToMap(<-pairsChan)
    if _, ok := pairs[kv3a.Key]; ok {
        t.Error("found a key with the wrong prefix")
    }
    // Shut the watcher down and drain any final notifications, checking that
    // none of them leak the out-of-prefix key.
    close(done)
    done = nil
    for p := range pairsChan {
        pairs = kvToMap(p)
        if _, ok := pairs[kv3a.Key]; ok {
            t.Error("found a key with the wrong prefix")
        }
    }
}
// TestWatchKeys verifies some basic operations of the WatchKeys() function. It
// should find existing keys, send new updates when the data changes, and
// ignore changes outside its prefix.
func TestWatchKeys(t *testing.T) {
    t.Parallel()
    f := NewFixture(t)
    defer f.Stop()
    done := make(chan struct{})
    // done is nilled out once closed below to avoid a double close.
    defer func() {
        if done != nil {
            close(done)
        }
    }()
    kv1a := &api.KVPair{Key: "prefix/hello", Value: []byte("these")}
    kv1b := &api.KVPair{Key: "prefix/hello", Value: []byte("do")}
    kv2a := &api.KVPair{Key: "prefix/test", Value: []byte("not")}
    kv3a := &api.KVPair{Key: "something", Value: []byte("matter")}
    // Process existing data
    f.Client.KV().Put(kv1a, nil)
    keysChan := WatchKeys("prefix/", f.Client.KV(), done, 0)
    keys := <-keysChan
    expected := []string{kv1a.Key}
    if !reflect.DeepEqual(keys.Keys, expected) {
        t.Errorf("existing data not recognized, wanted %s but got %s", expected, keys.Keys)
    }
    // Get updates when the data changes (create, modify, delete).
    // A value-only change must keep the key list identical.
    f.Client.KV().Put(kv1b, nil)
    keys = <-keysChan
    expected = []string{kv1b.Key}
    if !reflect.DeepEqual(keys.Keys, expected) {
        t.Errorf("keys changed inappropriately: wanted %s got %s", expected, keys.Keys)
    }
    f.Client.KV().Put(kv2a, nil)
    keys = <-keysChan
    expected = []string{kv1a.Key, kv2a.Key}
    if !reflect.DeepEqual(keys.Keys, expected) {
        t.Errorf("did not find new key: wanted %s got %s", expected, keys.Keys)
    }
    f.Client.KV().Delete(kv1a.Key, nil)
    keys = <-keysChan
    expected = []string{kv2a.Key}
    if !reflect.DeepEqual(keys.Keys, expected) {
        t.Errorf("did not notice key deletion: wanted %s got %s", expected, keys.Keys)
    }
    // The watcher should ignore kv3a, which is outside its prefix
    f.Client.KV().Put(kv3a, nil)
    f.Client.KV().Delete(kv2a.Key, nil)
    keys = <-keysChan
    if len(keys.Keys) != 0 {
        t.Errorf("watch did not ignore keys outside of prefix: got %s but should have been 0 keys", keys.Keys)
    }
    // Shut down and verify the channel drains without further notifications.
    close(done)
    done = nil
    for range keysChan {
        t.Error("found a key after quitting")
    }
}
// TestWatchSingle verifies WatchSingle(): it reports the existing value,
// follows modify/delete/recreate of the watched key, and ignores other keys
// (including ones nested under the watched key's name).
func TestWatchSingle(t *testing.T) {
    t.Parallel()
    f := NewFixture(t)
    defer f.Stop()
    done := make(chan struct{})
    // done is nilled out once closed below to avoid a double close.
    defer func() {
        if done != nil {
            close(done)
        }
    }()
    kvpChan := make(chan *api.KVPair)
    kv1a := &api.KVPair{Key: "hello", Value: []byte("world")}
    kv1b := &api.KVPair{Key: "hello", Value: []byte("computer")}
    kv2a := &api.KVPair{Key: "hello/goodbye", Value: []byte("foo")}
    // Process existing data
    f.Client.KV().Put(kv1a, nil)
    go WatchSingle("hello", f.Client.KV(), kvpChan, done, testLogger(t))
    if !kvEqual(kv1a, <-kvpChan) {
        t.Error("existing data not recognized")
    }
    // Get updates when the data changes (modify, delete, create)
    f.Client.KV().Put(kv1b, nil)
    if !kvEqual(kv1b, <-kvpChan) {
        t.Error("value not updated")
    }
    // Deletion is delivered as a nil pair.
    f.Client.KV().Delete("hello", nil)
    if !kvEqual(nil, <-kvpChan) {
        t.Error("value not deleted")
    }
    f.Client.KV().Put(kv1a, nil)
    if !kvEqual(kv1a, <-kvpChan) {
        t.Error("value not recreated")
    }
    // Ignore other keys
    f.Client.KV().Put(kv2a, nil)
    select {
    case <-kvpChan:
        t.Error("found a key that was not being watched")
    default:
    }
    close(done)
    done = nil
    for range kvpChan {
        t.Error("found a key that was never modified")
    }
}
// TestWatchNewKeysSimple is a simple test for WatchNewKeys() basic functionality. Create
// a key, change it, then delete it.
func TestWatchNewKeysSimple(t *testing.T) {
    t.Parallel()
    pairsInput := make(chan api.KVPairs)
    defer close(pairsInput)
    recorder := NewRecorder(t)
    go WatchNewKeys(pairsInput, recorder.Handler, nil)
    // Prime the watcher with an empty snapshot before any keys exist.
    pairsInput <- api.KVPairs{}
    key := "hello"
    kv1 := &api.KVPair{Key: key, Value: []byte("world"), CreateIndex: 1, ModifyIndex: 1}
    kv2 := &api.KVPair{Key: key, Value: []byte("computer"), CreateIndex: 1, ModifyIndex: 2}
    // Create a key
    pairsInput <- api.KVPairs{kv1} // put hello
    rs := recorder.WaitFor(2)
    if !rs[0].IsCreate(key) || !rs[1].IsUpdate(kv1) {
        t.Error("unexpected record sequence")
    }
    // Change the key
    pairsInput <- api.KVPairs{kv2} // put hello
    rs = recorder.WaitFor(3)
    if !rs[2].IsUpdate(kv2) {
        t.Error("unexpected record sequence")
    }
    // Delete the key: the handler should see a delete and then a close.
    pairsInput <- api.KVPairs{} // delete hello
    rs = recorder.WaitFor(5)
    if !rs[3].IsDelete(key) || !rs[4].IsClose(key) {
        t.Error("unexpected record sequence")
    }
    t.Log("full record sequence", recorder.RecordList())
}
// TestWatchNewKeysIgnore verifies that the watcher can handle keys that are ignored.
// The handler returns a nil channel, signaling the watcher to drop updates for
// that key; only brand-new keys should trigger handler calls.
func TestWatchNewKeysIgnore(t *testing.T) {
    t.Parallel()
    var newKeyCounter int
    pairsInput := make(chan api.KVPairs)
    done := make(chan struct{})
    defer close(done)
    wg := sync.WaitGroup{}
    wg.Add(1)
    go func() {
        WatchNewKeys(
            pairsInput,
            func(key string) chan<- *api.KVPair {
                t.Log("new key:", key)
                newKeyCounter++
                // nil channel => ignore this key's future updates.
                return nil
            },
            done,
        )
        wg.Done()
    }()
    pairsInput <- api.KVPairs{}
    kv1a := &api.KVPair{Key: "foo", Value: []byte("A"), CreateIndex: 1, ModifyIndex: 1}
    kv1b := &api.KVPair{Key: "foo", Value: []byte("B"), CreateIndex: 1, ModifyIndex: 2}
    kv2a := &api.KVPair{Key: "bar", Value: []byte("A"), CreateIndex: 3, ModifyIndex: 3}
    // Perform a batch of writes
    pairsInput <- api.KVPairs{kv1a} // New key
    pairsInput <- api.KVPairs{kv1b} // Update should have no effect
    pairsInput <- api.KVPairs{kv1b, kv2a} // Another new key
    close(pairsInput)
    // Wait for updates to be noticed
    wg.Wait()
    if newKeyCounter != 2 {
        t.Errorf("writes had 2 new keys, found %d", newKeyCounter)
    }
}
// TestWatchNewKeysMulti writes to two different keys and verifies that their update
// notifications are independent.
func TestWatchNewKeysMulti(t *testing.T) {
    t.Parallel()
    pairsInput := make(chan api.KVPairs)
    defer close(pairsInput)
    recorder := NewRecorder(t)
    go WatchNewKeys(pairsInput, recorder.Handler, nil)
    // Prime the watcher with an empty snapshot.
    pairsInput <- api.KVPairs{}
    key1 := "foo"
    key2 := "bar"
    kv1a := &api.KVPair{Key: key1, Value: []byte("1A"), CreateIndex: 1, ModifyIndex: 1}
    kv1b := &api.KVPair{Key: key1, Value: []byte("1B"), CreateIndex: 1, ModifyIndex: 2}
    kv1c := &api.KVPair{Key: key1, Value: []byte("1C"), CreateIndex: 1, ModifyIndex: 3}
    kv2a := &api.KVPair{Key: key2, Value: []byte("2A"), CreateIndex: 4, ModifyIndex: 4}
    kv2b := &api.KVPair{Key: key2, Value: []byte("2B"), CreateIndex: 4, ModifyIndex: 5}
    kv2c := &api.KVPair{Key: key2, Value: []byte("2C"), CreateIndex: 4, ModifyIndex: 6}
    pairsInput <- api.KVPairs{kv1a} // put foo=1A
    pairsInput <- api.KVPairs{kv1b} // put foo=1B
    pairsInput <- api.KVPairs{kv1b, kv2a} // put bar=2A
    // Per-key sequences are checked independently via Filter.
    rs := recorder.WaitFor(5)
    rs1 := rs.Filter(key1)
    rs2 := rs.Filter(key2)
    t.Log("rs1", rs1)
    t.Log("rs2", rs2)
    if !(len(rs1) == 3 && rs1[0].IsCreate(key1) && rs1[1].IsUpdate(kv1a) && rs1[2].IsUpdate(kv1b)) ||
        !(len(rs2) == 2 && rs2[0].IsCreate(key2) && rs2[1].IsUpdate(kv2a)) {
        t.Error("unexpected record sequence")
    }
    pairsInput <- api.KVPairs{kv1c, kv2a} // put foo=1C
    pairsInput <- api.KVPairs{kv1c, kv2b} // put bar=2B
    pairsInput <- api.KVPairs{kv2b} // delete foo
    pairsInput <- api.KVPairs{kv2c} // put bar=2C
    rs = recorder.WaitFor(10)
    rs1 = rs.Filter(key1)
    rs2 = rs.Filter(key2)
    t.Log("rs1", rs1)
    t.Log("rs2", rs2)
    if !(len(rs1) == 6 && rs1[3].IsUpdate(kv1c) && rs1[4].IsDelete(key1) && rs1[5].IsClose(key1)) ||
        !(len(rs2) == 4 && rs2[2].IsUpdate(kv2b) && rs2[3].IsUpdate(kv2c)) {
        t.Error("unexpected record sequence")
    }
    t.Log("full record sequence", recorder.RecordList())
}
// TestWatchNewKeysExit verifies that the watcher is capable of exiting early and that it
// will notify its subscribers.
func TestWatchNewKeysExit(t *testing.T) {
    t.Parallel()
    pairsInput := make(chan api.KVPairs)
    recorder := NewRecorder(t)
    done := make(chan struct{})
    wg := sync.WaitGroup{}
    wg.Add(1)
    go func() {
        WatchNewKeys(pairsInput, recorder.Handler, done)
        wg.Done()
    }()
    pairsInput <- api.KVPairs{}
    key1 := "foo"
    kv1a := &api.KVPair{Key: key1, Value: []byte("1A"), CreateIndex: 1, ModifyIndex: 1}
    pairsInput <- api.KVPairs{kv1a}
    rs := recorder.WaitFor(2)
    if !rs[0].IsCreate(key1) || !rs[1].IsUpdate(kv1a) {
        t.Errorf("error creating key")
    }
    // Ask the watcher to exit, eventually
    close(done)
    // Because the watcher is asynchronous, it might need to consume more input before
    // exiting. In practice, the "done" signal should also stop the producer.
    // The feeder goroutine below keeps offering unchanged input until this
    // test function signals it to stop via the exiting channel.
    exiting := make(chan struct{})
    defer close(exiting)
    go func() {
        for {
            select {
            case pairsInput <- api.KVPairs{kv1a}: // no change
            case <-exiting:
                return
            }
        }
    }()
    wg.Wait()
    rs = recorder.WaitFor(3)
    if !rs[2].IsClose(key1) {
        t.Errorf("subscriber did not receive close notification")
    }
}
// TestWatchNewKeysExistingData verifies that the watcher will find existing keys (i.e.,
// when its first input is nonempty) and report them as new data.
func TestWatchNewKeysExistingData(t *testing.T) {
	t.Parallel()
	input := make(chan api.KVPairs)
	rec := NewRecorder(t)
	go WatchNewKeys(input, rec.Handler, nil)
	// The very first snapshot already contains a key.
	existing := &api.KVPair{Key: "test", Value: []byte("1A"), CreateIndex: 1, ModifyIndex: 1}
	input <- api.KVPairs{existing}
	// The pre-existing key must be reported as a create followed by an update.
	records := rec.WaitFor(2)
	if !records[0].IsCreate("test") || !records[1].IsUpdate(existing) {
		t.Error("error picking up existing data")
	}
}
// TestWatchDiff exercises WatchDiff's create/update/delete classification
// against a live consul fixture and checks that keys outside the watched
// prefix are ignored.
func TestWatchDiff(t *testing.T) {
	t.Parallel()
	f := NewFixture(t)
	defer f.Stop()
	done := make(chan struct{})
	// done is closed explicitly at the end of the test; the deferred
	// function only closes it if the test bails out early.
	defer func() {
		if done != nil {
			close(done)
		}
	}()
	// kv1* and kv2* live under the watched prefix; kv3a does not.
	kv1a := &api.KVPair{Key: "prefix/hello", Value: []byte("world")}
	kv1b := &api.KVPair{Key: "prefix/hello", Value: []byte("computer")}
	kv2a := &api.KVPair{Key: "prefix/test", Value: []byte("foo")}
	kv2b := &api.KVPair{Key: "prefix/test", Value: []byte("bar")}
	kv3a := &api.KVPair{Key: "something", Value: []byte("different")}
	// Process existing data
	var changes *WatchedChanges
	if _, err := f.Client.KV().Put(kv1a, nil); err != nil {
		t.Error("Unexpected error during put operation")
	}
	if _, err := f.Client.KV().Put(kv2a, nil); err != nil {
		t.Error("Unexpected error during put operation")
	}
	watchedCh, errCh := WatchDiff("prefix/", f.Client.KV(), done)
	// Drain the error channel so the watcher never blocks on it.
	go func() {
		for err := range errCh {
			t.Log(err)
		}
	}()
	select {
	case changes = <-watchedCh:
	case <-time.After(2 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	// Data present before the watch started is reported as "Created".
	pairs := kvToMap(changes.Created)
	if !kvMatch(pairs, kv1a) {
		t.Error("did not find new value")
	}
	if !kvMatch(pairs, kv2a) {
		t.Error("did not find new value")
	}
	Assert(t).AreEqual(len(changes.Created), 2, "Unexpected number of creates watched")
	Assert(t).AreEqual(len(changes.Updated), 0, "Unexpected number of updates watched")
	Assert(t).AreEqual(len(changes.Deleted), 0, "Unexpected number of deletes watched")
	// Get an updates when the data changes (create, modify, delete)
	if _, err := f.Client.KV().Put(kv1b, nil); err != nil {
		t.Error("Unexpected error during put operation")
	}
	select {
	case changes = <-watchedCh:
	case <-time.After(2 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	pairs = kvToMap(changes.Updated)
	if !kvMatch(pairs, kv1b) {
		t.Error("value not updated")
	}
	Assert(t).AreEqual(len(changes.Created), 0, "Unexpected number of creates watched")
	Assert(t).AreEqual(len(changes.Updated), 1, "Unexpected number of updates watched")
	Assert(t).AreEqual(len(changes.Deleted), 0, "Unexpected number of deletes watched")
	if _, err := f.Client.KV().Delete(kv1a.Key, nil); err != nil {
		t.Error("Unexpected error during delete operation")
	}
	select {
	case changes = <-watchedCh:
	case <-time.After(2 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	pairs = kvToMap(changes.Deleted)
	if _, ok := pairs[kv1a.Key]; !ok {
		t.Error("did not register deletion")
	}
	Assert(t).AreEqual(len(changes.Created), 0, "Unexpected number of creates watched")
	Assert(t).AreEqual(len(changes.Updated), 0, "Unexpected number of updates watched")
	Assert(t).AreEqual(len(changes.Deleted), 1, "Unexpected number of deletes watched")
	// Make sure the watcher can output both a created and updated kvPair
	if _, err := f.Client.KV().Put(kv1a, nil); err != nil {
		t.Error("Unexpected error during put operation")
	}
	if _, err := f.Client.KV().Put(kv2b, nil); err != nil {
		t.Error("Unexpected error during put operation")
	}
	select {
	case changes = <-watchedCh:
	case <-time.After(2 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	pairs = kvToMap(changes.Created)
	if !kvMatch(pairs, kv1a) {
		t.Error("did not find new value")
	}
	pairs = kvToMap(changes.Updated)
	if !kvMatch(pairs, kv2b) {
		t.Error("value not updated")
	}
	Assert(t).AreEqual(len(changes.Created), 1, "Unexpected number of creates watched")
	Assert(t).AreEqual(len(changes.Updated), 1, "Unexpected number of updates watched")
	Assert(t).AreEqual(len(changes.Deleted), 0, "Unexpected number of deletes watched")
	// The watcher should ignore kv3a, which is outside its prefix
	if _, err := f.Client.KV().Put(kv3a, nil); err != nil {
		t.Error("Unexpected error during put operation")
	}
	if _, err := f.Client.KV().Delete(kv2a.Key, nil); err != nil {
		t.Error("Unexpected error during delete operation")
	}
	select {
	case changes = <-watchedCh:
	case <-time.After(2 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	pairs = kvToMap(changes.Created)
	if _, ok := pairs[kv3a.Key]; ok {
		t.Error("found a key with the wrong prefix")
	}
	Assert(t).AreEqual(len(changes.Created), 0, "Unexpected number of creates watched")
	Assert(t).AreEqual(len(changes.Updated), 0, "Unexpected number of updates watched")
	Assert(t).AreEqual(len(changes.Deleted), 1, "Unexpected number of deletes watched")
	// Normal shutdown; nil out done so the deferred cleanup does not
	// close it a second time.
	close(done)
	done = nil
}
|
package servers
// StartMaintenanceModeReq is the request payload for putting the given
// servers into maintenance mode.
type StartMaintenanceModeReq struct {
	// ServerIds lists the identifiers of the servers to act on.
	ServerIds []string
}
|
/*
Copyright 2018 Intel Corporation.
SPDX-License-Identifier: Apache-2.0
*/
package oimcsidriver
import (
"context"
"fmt"
"github.com/kubernetes-csi/csi-test/pkg/sanity"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/intel/oim/pkg/log"
"github.com/intel/oim/pkg/log/testlog"
"github.com/intel/oim/pkg/oim-common"
"github.com/intel/oim/pkg/oim-controller"
"github.com/intel/oim/pkg/oim-registry"
"github.com/intel/oim/pkg/spec/oim/v0"
"github.com/intel/oim/test/pkg/spdk"
. "github.com/onsi/ginkgo"
)
// SudoMount provides wrappers around several commands used by the k8s
// mount utility code. It then runs those commands under sudo. This
// allows building and running tests as normal users.
type SudoMount struct {
	tmpDir     string // directory holding the generated wrapper scripts
	searchPath string // original PATH value, restored by Close
}
// SetupSudoMount creates shell wrappers for the mount-related commands in
// a fresh temp directory and prepends that directory to PATH, so that the
// wrapped commands are picked up first and run via sudo when the caller is
// not root. The caller must invoke Close on the returned SudoMount to undo
// the PATH change and remove the temp directory.
func SetupSudoMount(t *testing.T) SudoMount {
	tmpDir, err := ioutil.TempDir("", "sanity-node")
	require.NoError(t, err)
	s := SudoMount{
		tmpDir:     tmpDir,
		searchPath: os.Getenv("PATH"),
	}
	for _, cmd := range []string{"mount", "umount", "blkid", "fsck", "mkfs.ext2", "mkfs.ext3", "mkfs.ext4"} {
		wrapper := filepath.Join(s.tmpDir, cmd)
		// The wrapper restores the original PATH (so the real command is
		// found) and only uses sudo when not already running as root.
		content := fmt.Sprintf(`#!/bin/sh
PATH=%q
if [ $(id -u) != 0 ]; then
exec sudo %s "$@"
else
exec %s "$@"
fi
`, s.searchPath, cmd, cmd)
		err := ioutil.WriteFile(wrapper, []byte(content), 0777)
		require.NoError(t, err)
	}
	os.Setenv("PATH", tmpDir+":"+s.searchPath)
	return s
}
// Close removes the wrapper script directory and restores the original PATH.
func (s SudoMount) Close() {
	os.RemoveAll(s.tmpDir)
	os.Setenv("PATH", s.searchPath)
}
// Runs tests in local SPDK mode.
//
// The corresponding test for non-local mode is in
// test/e2e/storage/oim-csi.go.
func TestSPDK(t *testing.T) {
	// The sanity suite uses Ginkgo, so log via that.
	log.SetOutput(GinkgoWriter)
	ctx := context.Background()
	defer spdk.Finalize()
	if err := spdk.Init(); err != nil {
		require.NoError(t, err)
	}
	// Skip when the environment provides no SPDK vhost target.
	if spdk.SPDK == nil {
		t.Skip("No VHost.")
	}
	// Serve the CSI driver on a Unix domain socket in a temp directory.
	tmp, err := ioutil.TempDir("", "oim-driver")
	require.NoError(t, err)
	defer os.RemoveAll(tmp)
	endpoint := "unix://" + tmp + "/oim-driver.sock"
	driver, err := New(WithCSIEndpoint(endpoint), WithVHostEndpoint(spdk.SPDKPath))
	require.NoError(t, err)
	s, err := driver.Start(ctx)
	require.NoError(t, err)
	defer s.ForceStop(ctx)
	// Route mount-related commands through sudo wrappers for the duration
	// of the suite.
	sudo := SetupSudoMount(t)
	defer sudo.Close()
	// Now call the test suite.
	config := sanity.Config{
		TargetPath:     tmp + "/target-path",
		StagingPath:    tmp + "/staging-path",
		Address:        endpoint,
		TestVolumeSize: 1 * 1024 * 1024,
	}
	sanity.Test(t, &config)
}
// MockController implements oim.Controller. It records incoming requests
// so tests can inspect what the driver asked for.
type MockController struct {
	MapVolumes   []oim.MapVolumeRequest   // recorded MapVolume requests
	UnmapVolumes []oim.UnmapVolumeRequest // intended to collect UnmapVolume requests
}
// MapVolume records the incoming request and returns a fixed fake reply
// pointing at PCI bus 8, device 7.
func (m *MockController) MapVolume(ctx context.Context, in *oim.MapVolumeRequest) (*oim.MapVolumeReply, error) {
	m.MapVolumes = append(m.MapVolumes, *in)
	reply := &oim.MapVolumeReply{
		PciAddress: &oim.PCIAddress{Bus: 8, Device: 7},
		ScsiDisk:   &oim.SCSIDisk{},
	}
	return reply, nil
}
// UnmapVolume records the incoming request — mirroring MapVolume — so that
// tests can inspect which volumes were unmapped via m.UnmapVolumes.
// Previously the request was silently dropped even though the UnmapVolumes
// field exists for exactly this purpose.
func (m *MockController) UnmapVolume(ctx context.Context, in *oim.UnmapVolumeRequest) (*oim.UnmapVolumeReply, error) {
	m.UnmapVolumes = append(m.UnmapVolumes, *in)
	return &oim.UnmapVolumeReply{}, nil
}
// ProvisionMallocBDev is a no-op stub that only satisfies the interface.
func (m *MockController) ProvisionMallocBDev(ctx context.Context, in *oim.ProvisionMallocBDevRequest) (*oim.ProvisionMallocBDevReply, error) {
	return &oim.ProvisionMallocBDevReply{}, nil
}
// CheckMallocBDev is a no-op stub that only satisfies the interface.
func (m *MockController) CheckMallocBDev(ctx context.Context, in *oim.CheckMallocBDevRequest) (*oim.CheckMallocBDevReply, error) {
	return &oim.CheckMallocBDevReply{}, nil
}
// Runs tests with OIM registry and a mock controller.
// This can only be used to test the communication paths, but not
// the actual operation.
func TestMockOIM(t *testing.T) {
	defer testlog.SetGlobal(t)()
	ctx := context.Background()
	adminCtx := oimregistry.RegistryClientContext(ctx, "user.admin")
	tmp, err := ioutil.TempDir("", "oim-driver")
	require.NoError(t, err)
	defer os.RemoveAll(tmp)

	// Start the OIM registry with TLS credentials from the test CA.
	controllerID := "host-0"
	registryAddress := "unix://" + tmp + "/oim-registry.sock"
	tlsConfig, err := oimcommon.LoadTLSConfig(os.ExpandEnv("${TEST_WORK}/ca/ca.crt"), os.ExpandEnv("${TEST_WORK}/ca/component.registry.key"), "")
	require.NoError(t, err)
	registry, err := oimregistry.New(oimregistry.TLS(tlsConfig))
	require.NoError(t, err)
	registryServer, service := registry.Server(registryAddress)
	err = registryServer.Start(ctx, service)
	require.NoError(t, err)
	defer registryServer.ForceStop(ctx)

	// Start the mock controller and publish its address in the registry.
	// A stale, duplicated require.NoError(t, err) that checked an error
	// from an earlier step was removed here.
	controllerAddress := "unix://" + tmp + "/oim-controller.sock"
	controller := &MockController{}
	controllerCreds, err := oimcommon.LoadTLS(os.ExpandEnv("${TEST_WORK}/ca/ca.crt"),
		os.ExpandEnv("${TEST_WORK}/ca/controller."+controllerID),
		"component.registry")
	require.NoError(t, err)
	controllerServer, controllerService := oimcontroller.Server(controllerAddress, controller, controllerCreds)
	err = controllerServer.Start(ctx, controllerService)
	require.NoError(t, err)
	defer controllerServer.ForceStop(ctx)
	_, err = registry.SetValue(adminCtx, &oim.SetValueRequest{
		Value: &oim.Value{
			Path:  controllerID + "/" + oimcommon.RegistryAddress,
			Value: controllerAddress,
		},
	})
	require.NoError(t, err)

	// Start the CSI driver under test, pointed at the registry.
	endpoint := "unix://" + tmp + "/oim-driver.sock"
	driver, err := New(WithCSIEndpoint(endpoint),
		WithOIMRegistryAddress(registryAddress),
		WithRegistryCreds(os.ExpandEnv("${TEST_WORK}/ca/ca.crt"), os.ExpandEnv("${TEST_WORK}/ca/host."+controllerID)),
		WithOIMControllerID(controllerID),
	)
	require.NoError(t, err)
	s, err := driver.Start(ctx)
	require.NoError(t, err)
	defer s.ForceStop(ctx)

	// CSI does not use transport security for its Unix domain socket.
	opts := oimcommon.ChooseDialOpts(endpoint, grpc.WithBlock(), grpc.WithInsecure())
	conn, err := grpc.Dial(endpoint, opts...)
	require.NoError(t, err)
	// Fix: the connection was previously leaked; close it on test exit.
	defer conn.Close()
	csiClient := csi.NewNodeClient(conn)

	// This will start waiting for a device that can never appear,
	// so we force it to time out.
	volumeID := "my-test-volume"
	deadline, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	_, err = csiClient.NodeStageVolume(deadline,
		&csi.NodeStageVolumeRequest{
			VolumeId:          volumeID,
			StagingTargetPath: tmp + "/stagingtarget",
			VolumeCapability:  &csi.VolumeCapability{},
		})
	if assert.Error(t, err) {
		// Both gRPC and waitForDevice will abort when the deadline is reached.
		// Where the expiration is detected first is random, so the exact error
		// message can vary.
		//
		// What we can test reliably is that we get a DeadlineExceeded gRPC code.
		assert.Equal(t, status.Convert(err).Code(), codes.DeadlineExceeded, fmt.Sprintf("expected DeadlineExceeded, got: %s", err))
	}
}
|
package MySQL
import "time"
// Admin is the gorm model for an administrator account stored in the
// "demo" table.
type Admin struct {
	Id         int        `gorm:"column:id; primary_key ; AUTO_INCREMENT" json:"id"`
	Name       string     `gorm:"column:name" json:"name"`
	Email      string     `gorm:"column:email" json:"email"`
	Password   string     `gorm:"column:password" json:"password"`
	Created_at *time.Time `gorm:"column:created_at" json:"created_at"`
	Updated_at *time.Time `gorm:"column:updated_at" json:"updated_at"`
}

// TableName tells gorm which database table backs this model.
// The receiver was renamed from "self" to "a": Go convention is a short
// receiver name, never "this" or "self".
func (a Admin) TableName() string {
	return "demo"
}
|
// Copyright 2014, Truveris Inc. All Rights Reserved.
// Use of this source code is governed by the ISC license in the LICENSE file.
package main
import (
	"encoding/json"
	"errors"
	"fmt"
	"os"

	"github.com/jessevdk/go-flags"
)
// Cmd holds the command line flags for the daemon.
type Cmd struct {
	// ConfigFile is the path of the JSON configuration file.
	ConfigFile string `short:"c" description:"Configuration file" default:"/etc/sayd.conf"`
}
// Cfg is the JSON configuration decoded from the config file.
type Cfg struct {
	// If defined, start a web server to list the aliases (e.g. :8989)
	HTTPServerAddress string
}
// Global configuration and command-line state, populated by
// ParseConfigFile and ParseCommandLine respectively.
var (
	cfg = Cfg{}
	cmd = Cmd{}
)
// ParseConfigFile reads the JSON configuration file referenced by
// cmd.ConfigFile and populates the global cfg struct. It returns an error
// if the file cannot be opened or decoded, or if the mandatory
// HTTPServerAddress setting is missing.
func ParseConfigFile() error {
	file, err := os.Open(cmd.ConfigFile)
	if err != nil {
		return err
	}
	// Fix: the file handle was previously leaked.
	defer file.Close()
	decoder := json.NewDecoder(file)
	err = decoder.Decode(&cfg)
	if err != nil {
		return err
	}
	if cfg.HTTPServerAddress == "" {
		return errors.New("'HTTPServerAddress' is not defined")
	}
	return nil
}
// ParseCommandLine parses the command line arguments and populates the
// global cmd struct. On error it prints a diagnostic plus usage to stderr
// and terminates the process with exit status 1.
func ParseCommandLine() {
	flagParser := flags.NewParser(&cmd, flags.PassDoubleDash)
	_, err := flagParser.Parse()
	if err != nil {
		// The builtin println is implementation-defined and not guaranteed
		// to stay in the language; write to stderr explicitly, matching
		// WriteHelp below.
		fmt.Fprintln(os.Stderr, "command line error: "+err.Error())
		flagParser.WriteHelp(os.Stderr)
		os.Exit(1)
	}
}
|
package main
// Node holds, for one board cell, the best score reachable from that cell
// to the start and the number of distinct paths achieving that score.
type Node struct {
	maxScore int
	ways     int
}

// pathsWithMaxScore solves the "paths with maximum score" problem: starting
// from the bottom-right 'S' cell and moving up, left, or up-left toward the
// top-left 'E' cell, it returns the maximum sum of digits collected along
// the way and the number of maximum-score paths, modulo 1e9+7. Cells marked
// 'X' are obstacles. If no path exists, both results are 0.
func pathsWithMaxScore(board []string) []int {
	// Defensive guard for a degenerate board; the original would panic here.
	if len(board) == 0 || len(board[0]) == 0 {
		return []int{0, 0}
	}
	m, n := len(board), len(board[0])
	const mod = 1000000007
	// dp[i][j] is the best score/way-count from cell (i,j) back to 'S'.
	// One extra zero row and column act as a border so the transitions
	// below never index out of range. Allocating dynamically removes the
	// previous hard-coded 105x105 array size limit.
	dp := make([][]Node, m+1)
	for i := range dp {
		dp[i] = make([]Node, n+1)
	}
	dp[m-1][n-1] = Node{0, 1}
	for i := m - 1; i >= 0; i-- {
		for j := n - 1; j >= 0; j-- {
			ch := board[i][j]
			if ch == 'X' {
				dp[i][j] = Node{0, 0}
				continue
			}
			candidates := []Node{dp[i+1][j], dp[i][j+1], dp[i+1][j+1]}
			// Find the highest score among the three predecessor cells.
			best := 0
			for _, c := range candidates {
				if c.maxScore > best {
					best = c.maxScore
				}
			}
			// Count how many paths achieve that highest score.
			for _, c := range candidates {
				if c.maxScore == best {
					dp[i][j].ways = (dp[i][j].ways + c.ways) % mod
				}
			}
			// Only digit cells add to the score; 'E' and 'S' contribute 0.
			if ch != 'E' && ch != 'S' {
				dp[i][j].maxScore = best + int(ch-'0')
			} else {
				dp[i][j].maxScore = best
			}
		}
	}
	// Per the problem statement, report a score of 0 when no path exists.
	if dp[0][0].ways == 0 {
		dp[0][0].maxScore = 0
	}
	return []int{dp[0][0].maxScore, dp[0][0].ways}
}
// max returns the larger of its two integer arguments.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
package singleton
import (
"github.com/stretchr/testify/assert"
"sync"
"testing"
)
// TestGetInstance races several GetInstance calls many times and checks
// that every caller always observes the exact same singleton instance.
func TestGetInstance(t *testing.T) {
	// Repeat to give any initialization race a chance to manifest.
	for i := 0; i < 1000; i++ {
		repository1, repository2, repository3, repository4 := create()
		assert.Same(t, repository1, repository2)
		assert.Same(t, repository1, repository3)
		assert.Same(t, repository1, repository4)
	}
}
// create fetches the singleton from two concurrent goroutines and twice
// from the calling goroutine, returning all four results so the caller can
// verify they are identical. The WaitGroup guarantees both goroutines have
// finished (and their named results are assigned) before returning.
func create() (repository1, repository2, repository3, repository4 ParamRepository) {
	var wait sync.WaitGroup
	wait.Add(2)
	go func() {
		repository1 = GetInstance()
		wait.Done()
	}()
	go func() {
		repository2 = GetInstance()
		wait.Done()
	}()
	repository3 = GetInstance()
	repository4 = GetInstance()
	wait.Wait()
	return
}
// +build acceptance compute flavors
package v2
import (
"testing"
"github.com/gophercloud/gophercloud/acceptance/clients"
"github.com/gophercloud/gophercloud/openstack/compute/v2/flavors"
)
// TestFlavorsList retrieves every available compute flavor (paging through
// all results) and prints each one.
func TestFlavorsList(t *testing.T) {
	client, err := clients.NewComputeV2Client()
	if err != nil {
		t.Fatalf("Unable to create a compute client: %v", err)
	}
	allPages, err := flavors.ListDetail(client, nil).AllPages()
	if err != nil {
		t.Fatalf("Unable to retrieve flavors: %v", err)
	}
	allFlavors, err := flavors.ExtractFlavors(allPages)
	if err != nil {
		t.Fatalf("Unable to extract flavor results: %v", err)
	}
	for _, flavor := range allFlavors {
		PrintFlavor(t, &flavor)
	}
}
// TestFlavorsGet retrieves a single flavor — chosen through the acceptance
// test environment variables — by ID and prints it.
func TestFlavorsGet(t *testing.T) {
	client, err := clients.NewComputeV2Client()
	if err != nil {
		t.Fatalf("Unable to create a compute client: %v", err)
	}
	// gofmt fix: the original read "err :=clients." with a missing space.
	choices, err := clients.AcceptanceTestChoicesFromEnv()
	if err != nil {
		t.Fatal(err)
	}
	flavor, err := flavors.Get(client, choices.FlavorID).Extract()
	if err != nil {
		t.Fatalf("Unable to get flavor information: %v", err)
	}
	PrintFlavor(t, flavor)
}
|
package handler
import (
"context"
"fmt"
"path/filepath"
corepb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/core/v1"
sempb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/sem/v1"
smspb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/sms/v1"
subscriptionpb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/subscription/v1"
jinmuidpb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/user/v1"
generalpb "github.com/jinmukeji/proto/v3/gen/micro/idl/ptypes/v2"
"github.com/micro/go-micro/v2/client"
)
// Fully-qualified go-micro service names of the RPC services that this
// test suite talks to.
const (
	rpcSmsServiceName       = "com.himalife.srv.svc-sms-gw"
	rpcSemServiceName       = "com.himalife.srv.svc-sem-gw"
	rpcServiceName          = "com.himalife.srv.svc-jinmuid"
	rpcBizServiceName       = "com.himalife.srv.svc-biz-core"
	subscriptionServiceName = "com.himalife.srv.svc-subscription"
)
// newJinmuIDServiceForTest initializes a JinmuIDService for testing: it
// loads DB credentials and the encryption key from the local env file in
// testdata and wires up clients for all dependent RPC services.
// It panics if the database cannot be initialized.
func newJinmuIDServiceForTest() *JinmuIDService {
	envFilepath := filepath.Join("testdata", "local.svc-jinmuid.env")
	db, err := newTestingDbClientFromEnvFile(envFilepath)
	if err != nil {
		panic(fmt.Sprintln("failed to init db:", err))
	}
	encryptKey := newTestingEncryptKeyFromEnvFile(envFilepath)
	smsSvc := smspb.NewSmsAPIService(rpcSmsServiceName, client.DefaultClient)
	semSvc := sempb.NewSemAPIService(rpcSemServiceName, client.DefaultClient)
	rpcUserManagerSvc := jinmuidpb.NewUserManagerAPIService(rpcServiceName, client.DefaultClient)
	bizSvc := corepb.NewXimaAPIService(rpcBizServiceName, client.DefaultClient)
	subscriptionSvc := subscriptionpb.NewSubscriptionManagerAPIService(subscriptionServiceName, client.DefaultClient)
	return NewJinmuIDService(db, smsSvc, semSvc, rpcUserManagerSvc, bizSvc, subscriptionSvc, encryptKey)
}
// getSignUpSerialNumber sends a sign-up SMS notification for the account's
// phone and returns the serial number of that notification.
func getSignUpSerialNumber(jinmuIDService *JinmuIDService, account Account) string {
	req := &jinmuidpb.SmsNotificationRequest{
		Phone:      account.phone,
		Action:     jinmuidpb.TemplateAction_TEMPLATE_ACTION_SIGN_UP,
		Language:   generalpb.Language_LANGUAGE_ENGLISH,
		NationCode: account.nationCode,
	}
	resp := new(jinmuidpb.SmsNotificationResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.SmsNotification(context.Background(), req, resp)
	return resp.SerialNumber
}
// getSignInSerialNumber sends a sign-in SMS notification for the account's
// phone and returns the serial number of that notification.
func getSignInSerialNumber(jinmuIDService *JinmuIDService, account Account) string {
	req := &jinmuidpb.SmsNotificationRequest{
		Phone:      account.phone,
		Action:     jinmuidpb.TemplateAction_TEMPLATE_ACTION_SIGN_IN,
		Language:   generalpb.Language_LANGUAGE_SIMPLIFIED_CHINESE,
		NationCode: account.nationCode,
	}
	resp := new(jinmuidpb.SmsNotificationResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.SmsNotification(context.Background(), req, resp)
	return resp.SerialNumber
}
// getSmsVerificationCode fetches the most recent SMS verification code
// sent to the account's phone number.
func getSmsVerificationCode(jinmuIDService *JinmuIDService, account Account) string {
	req := &jinmuidpb.GetLatestVerificationCodesRequest{
		SendTo: []*jinmuidpb.SingleGetLatestVerificationCode{
			{
				SendVia:    jinmuidpb.SendVia_SEND_VIA_PHONE_SEND_VIA,
				Phone:      account.phone,
				NationCode: account.nationCode,
			},
		},
	}
	resp := new(jinmuidpb.GetLatestVerificationCodesResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.GetLatestVerificationCodes(context.Background(), req, resp)
	return resp.LatestVerificationCodes[0].VerificationCode
}
// getEmailVerificationCode fetches the most recent email verification code
// sent to the account's email address.
func getEmailVerificationCode(jinmuIDService *JinmuIDService, account Account) string {
	req := &jinmuidpb.GetLatestVerificationCodesRequest{
		SendTo: []*jinmuidpb.SingleGetLatestVerificationCode{
			{
				SendVia: jinmuidpb.SendVia_SEND_VIA_USERNAME_SEND_VIA,
				Email:   account.email,
			},
		},
	}
	resp := new(jinmuidpb.GetLatestVerificationCodesResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.GetLatestVerificationCodes(context.Background(), req, resp)
	return resp.LatestVerificationCodes[0].VerificationCode
}
// getNewEmailVerificationCode fetches the most recent verification code
// sent to the account's new (replacement) email address.
func getNewEmailVerificationCode(jinmuIDService *JinmuIDService, account Account) string {
	req := &jinmuidpb.GetLatestVerificationCodesRequest{
		SendTo: []*jinmuidpb.SingleGetLatestVerificationCode{
			{
				SendVia: jinmuidpb.SendVia_SEND_VIA_USERNAME_SEND_VIA,
				Email:   account.emailNew,
			},
		},
	}
	resp := new(jinmuidpb.GetLatestVerificationCodesResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.GetLatestVerificationCodes(context.Background(), req, resp)
	return resp.LatestVerificationCodes[0].VerificationCode
}
// getSetEmailSerialNumber signs in via phone/password, requests a
// "set secure email" notification, and returns its serial number.
func getSetEmailSerialNumber(jinmuIDService *JinmuIDService, account Account) string {
	ctx, userID, _ := mockSigninByPhonePassword(context.Background(), jinmuIDService, account.phone, account.phonePassword, account.seed, account.nationCode)
	req := &jinmuidpb.LoggedInEmailNotificationRequest{
		Email:             account.email,
		Action:            jinmuidpb.LoggedInSemTemplateAction_LOGGED_IN_SEM_TEMPLATE_ACTION_SET_SECURE_EMAIL,
		Language:          generalpb.Language_LANGUAGE_SIMPLIFIED_CHINESE,
		UserId:            userID,
		SendToNewIfModify: false,
	}
	resp := new(jinmuidpb.LoggedInEmailNotificationResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.LoggedInEmailNotification(ctx, req, resp)
	return resp.SerialNumber
}
// getUnSetEmailSerialNumber signs in via phone/password, requests an
// "unset secure email" notification, and returns its serial number.
func getUnSetEmailSerialNumber(jinmuIDService *JinmuIDService, account Account) string {
	ctx, userID, _ := mockSigninByPhonePassword(context.Background(), jinmuIDService, account.phone, account.phonePassword, account.seed, account.nationCode)
	req := &jinmuidpb.LoggedInEmailNotificationRequest{
		Email:             account.email,
		Action:            jinmuidpb.LoggedInSemTemplateAction_LOGGED_IN_SEM_TEMPLATE_ACTION_UNSET_SECURE_EMAIL,
		Language:          generalpb.Language_LANGUAGE_SIMPLIFIED_CHINESE,
		UserId:            userID,
		SendToNewIfModify: false,
	}
	resp := new(jinmuidpb.LoggedInEmailNotificationResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.LoggedInEmailNotification(ctx, req, resp)
	return resp.SerialNumber
}
// getModifyEmailSerialNumber signs in via phone/password, requests a
// "modify secure email" notification to the current email, and returns
// its serial number.
func getModifyEmailSerialNumber(jinmuIDService *JinmuIDService, account Account) string {
	ctx, userID, _ := mockSigninByPhonePassword(context.Background(), jinmuIDService, account.phone, account.phonePassword, account.seed, account.nationCode)
	req := &jinmuidpb.LoggedInEmailNotificationRequest{
		Email:             account.email,
		Action:            jinmuidpb.LoggedInSemTemplateAction_LOGGED_IN_SEM_TEMPLATE_ACTION_MODIFY_SECURE_EMAIL,
		Language:          generalpb.Language_LANGUAGE_SIMPLIFIED_CHINESE,
		UserId:            userID,
		SendToNewIfModify: false,
	}
	resp := new(jinmuidpb.LoggedInEmailNotificationResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.LoggedInEmailNotification(ctx, req, resp)
	return resp.SerialNumber
}
// getModifyEmailSerialNumberNewEmail signs in via phone/password, requests
// a "modify secure email" notification addressed to the NEW email, and
// returns its serial number.
func getModifyEmailSerialNumberNewEmail(jinmuIDService *JinmuIDService, account Account) string {
	ctx, userID, _ := mockSigninByPhonePassword(context.Background(), jinmuIDService, account.phone, account.phonePassword, account.seed, account.nationCode)
	req := &jinmuidpb.LoggedInEmailNotificationRequest{
		Email:             account.emailNew,
		Action:            jinmuidpb.LoggedInSemTemplateAction_LOGGED_IN_SEM_TEMPLATE_ACTION_MODIFY_SECURE_EMAIL,
		Language:          generalpb.Language_LANGUAGE_SIMPLIFIED_CHINESE,
		UserId:            userID,
		SendToNewIfModify: true,
	}
	resp := new(jinmuidpb.LoggedInEmailNotificationResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.LoggedInEmailNotification(ctx, req, resp)
	return resp.SerialNumber
}
// getFindUserNameSerialNumber requests a "find username" notification email
// (no login required) and returns its serial number.
func getFindUserNameSerialNumber(jinmuIDService *JinmuIDService, account Account) string {
	req := &jinmuidpb.NotLoggedInEmailNotificationRequest{
		Email:    account.email,
		Action:   jinmuidpb.NotLoggedInSemTemplateAction_NOT_LOGGED_IN_SEM_TEMPLATE_ACTION_FIND_USERNAME,
		Language: generalpb.Language_LANGUAGE_SIMPLIFIED_CHINESE,
	}
	resp := new(jinmuidpb.NotLoggedInEmailNotificationResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.NotLoggedInEmailNotification(context.Background(), req, resp)
	return resp.SerialNumber
}
// getVerificationNumber requests a "modify secure email" notification,
// reads the latest email verification code, validates it, and returns the
// resulting verification number.
func getVerificationNumber(jinmuIDService *JinmuIDService, account Account) string {
	// Order matters: the notification must be sent before the code is read.
	serialNumber := getModifyEmailSerialNumber(jinmuIDService, account)
	mvc := getEmailVerificationCode(jinmuIDService, account)
	req := &jinmuidpb.ValidateEmailVerificationCodeRequest{
		VerificationCode: mvc,
		SerialNumber:     serialNumber,
		Email:            account.email,
		VerificationType: account.verificationType,
	}
	resp := new(jinmuidpb.ValidateEmailVerificationCodeResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.ValidateEmailVerificationCode(context.Background(), req, resp)
	return resp.VerificationNumber
}
// getSignUpVerificationNumber validates the latest sign-up SMS code for
// the account's phone and returns the resulting verification number.
func getSignUpVerificationNumber(jinmuIDService *JinmuIDService, account Account) string {
	mvc := getSmsVerificationCode(jinmuIDService, account)
	serialNumber := getSignUpSerialNumber(jinmuIDService, account)
	req := &jinmuidpb.ValidatePhoneVerificationCodeRequest{
		Phone:        account.phone,
		Mvc:          mvc,
		SerialNumber: serialNumber,
		NationCode:   account.nationCode,
	}
	resp := new(jinmuidpb.ValidatePhoneVerificationCodeResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.ValidatePhoneVerificationCode(context.Background(), req, resp)
	return resp.VerificationNumber
}
// getSignUpVerificationNumberHK validates the latest sign-up SMS code for
// the account's Hong Kong phone number and returns the verification number.
func getSignUpVerificationNumberHK(jinmuIDService *JinmuIDService, account Account) string {
	mvc := getSmsVerificationCodeHK(jinmuIDService, account)
	serialNumber := getSignUpSerialNumberHK(jinmuIDService, account)
	req := &jinmuidpb.ValidatePhoneVerificationCodeRequest{
		Phone:        account.phoneHK,
		Mvc:          mvc,
		SerialNumber: serialNumber,
		NationCode:   account.nationCodeHK,
	}
	resp := new(jinmuidpb.ValidatePhoneVerificationCodeResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.ValidatePhoneVerificationCode(context.Background(), req, resp)
	return resp.VerificationNumber
}
// getSmsVerificationCodeHK fetches the most recent SMS verification code
// sent to the account's Hong Kong phone number.
func getSmsVerificationCodeHK(jinmuIDService *JinmuIDService, account Account) string {
	req := &jinmuidpb.GetLatestVerificationCodesRequest{
		SendTo: []*jinmuidpb.SingleGetLatestVerificationCode{
			{
				SendVia:    jinmuidpb.SendVia_SEND_VIA_PHONE_SEND_VIA,
				Phone:      account.phoneHK,
				NationCode: account.nationCodeHK,
			},
		},
	}
	resp := new(jinmuidpb.GetLatestVerificationCodesResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.GetLatestVerificationCodes(context.Background(), req, resp)
	return resp.LatestVerificationCodes[0].VerificationCode
}
// getSignUpSerialNumberHK sends a sign-up SMS notification to the
// account's Hong Kong phone number and returns its serial number.
func getSignUpSerialNumberHK(jinmuIDService *JinmuIDService, account Account) string {
	req := &jinmuidpb.SmsNotificationRequest{
		Phone:      account.phoneHK,
		Action:     jinmuidpb.TemplateAction_TEMPLATE_ACTION_SIGN_UP,
		Language:   generalpb.Language_LANGUAGE_ENGLISH,
		NationCode: account.nationCodeHK,
	}
	resp := new(jinmuidpb.SmsNotificationResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.SmsNotification(context.Background(), req, resp)
	return resp.SerialNumber
}
// getSignUpVerificationNumberUS validates the latest sign-up SMS code for
// the account's US phone number and returns the verification number.
func getSignUpVerificationNumberUS(jinmuIDService *JinmuIDService, account Account) string {
	mvc := getSmsVerificationCodeUS(jinmuIDService, account)
	serialNumber := getSignUpSerialNumberUS(jinmuIDService, account)
	req := &jinmuidpb.ValidatePhoneVerificationCodeRequest{
		Phone:        account.phoneUS,
		Mvc:          mvc,
		SerialNumber: serialNumber,
		NationCode:   account.nationCodeUSA,
	}
	resp := new(jinmuidpb.ValidatePhoneVerificationCodeResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.ValidatePhoneVerificationCode(context.Background(), req, resp)
	return resp.VerificationNumber
}
// getSmsVerificationCodeUS fetches the most recent SMS verification code
// sent to the account's US phone number.
func getSmsVerificationCodeUS(jinmuIDService *JinmuIDService, account Account) string {
	req := &jinmuidpb.GetLatestVerificationCodesRequest{
		SendTo: []*jinmuidpb.SingleGetLatestVerificationCode{
			{
				SendVia:    jinmuidpb.SendVia_SEND_VIA_PHONE_SEND_VIA,
				Phone:      account.phoneUS,
				NationCode: account.nationCodeUSA,
			},
		},
	}
	resp := new(jinmuidpb.GetLatestVerificationCodesResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.GetLatestVerificationCodes(context.Background(), req, resp)
	return resp.LatestVerificationCodes[0].VerificationCode
}
// getSignUpSerialNumberUS sends a sign-up SMS notification to the
// account's US phone number and returns its serial number.
func getSignUpSerialNumberUS(jinmuIDService *JinmuIDService, account Account) string {
	req := &jinmuidpb.SmsNotificationRequest{
		Phone:      account.phoneUS,
		Action:     jinmuidpb.TemplateAction_TEMPLATE_ACTION_SIGN_UP,
		Language:   generalpb.Language_LANGUAGE_ENGLISH,
		NationCode: account.nationCodeUSA,
	}
	resp := new(jinmuidpb.SmsNotificationResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.SmsNotification(context.Background(), req, resp)
	return resp.SerialNumber
}
// getSignUpVerificationNumberTW validates the latest sign-up SMS code for
// the account's Taiwan phone number and returns the verification number.
func getSignUpVerificationNumberTW(jinmuIDService *JinmuIDService, account Account) string {
	mvc := getSmsVerificationCodeTW(jinmuIDService, account)
	serialNumber := getSignUpSerialNumberTW(jinmuIDService, account)
	req := &jinmuidpb.ValidatePhoneVerificationCodeRequest{
		Phone:        account.phoneTW,
		Mvc:          mvc,
		SerialNumber: serialNumber,
		NationCode:   account.nationCodeTW,
	}
	resp := new(jinmuidpb.ValidatePhoneVerificationCodeResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.ValidatePhoneVerificationCode(context.Background(), req, resp)
	return resp.VerificationNumber
}
// getSmsVerificationCodeTW fetches the most recent SMS verification code
// sent to the account's Taiwan phone number.
func getSmsVerificationCodeTW(jinmuIDService *JinmuIDService, account Account) string {
	req := &jinmuidpb.GetLatestVerificationCodesRequest{
		SendTo: []*jinmuidpb.SingleGetLatestVerificationCode{
			{
				SendVia:    jinmuidpb.SendVia_SEND_VIA_PHONE_SEND_VIA,
				Phone:      account.phoneTW,
				NationCode: account.nationCodeTW,
			},
		},
	}
	resp := new(jinmuidpb.GetLatestVerificationCodesResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.GetLatestVerificationCodes(context.Background(), req, resp)
	return resp.LatestVerificationCodes[0].VerificationCode
}
// getSignUpSerialNumberTW sends a sign-up SMS notification to the
// account's Taiwan phone number and returns its serial number.
func getSignUpSerialNumberTW(jinmuIDService *JinmuIDService, account Account) string {
	req := &jinmuidpb.SmsNotificationRequest{
		Phone:      account.phoneTW,
		Action:     jinmuidpb.TemplateAction_TEMPLATE_ACTION_SIGN_UP,
		Language:   generalpb.Language_LANGUAGE_ENGLISH,
		NationCode: account.nationCodeTW,
	}
	resp := new(jinmuidpb.SmsNotificationResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.SmsNotification(context.Background(), req, resp)
	return resp.SerialNumber
}
// getSignUpVerificationNumberMacao validates the latest sign-up SMS code
// for the account's Macao phone number and returns the verification number.
func getSignUpVerificationNumberMacao(jinmuIDService *JinmuIDService, account Account) string {
	mvc := getSmsVerificationCodeMacao(jinmuIDService, account)
	serialNumber := getSignUpSerialNumberMacao(jinmuIDService, account)
	req := &jinmuidpb.ValidatePhoneVerificationCodeRequest{
		Phone:        account.phoneMacao,
		Mvc:          mvc,
		SerialNumber: serialNumber,
		NationCode:   account.nationCodeMacao,
	}
	resp := new(jinmuidpb.ValidatePhoneVerificationCodeResponse)
	// Best-effort test fixture; the error is deliberately ignored.
	_ = jinmuIDService.ValidatePhoneVerificationCode(context.Background(), req, resp)
	return resp.VerificationNumber
}
// 获取短信验证码-澳门手机号码
func getSmsVerificationCodeMacao(jinmuIDService *JinmuIDService, account Account) string {
ctx := context.Background()
respGetLatestVerificationCodes := new(jinmuidpb.GetLatestVerificationCodesResponse)
reqGetLatestVerificationCodes := new(jinmuidpb.GetLatestVerificationCodesRequest)
reqGetLatestVerificationCodes.SendTo = []*jinmuidpb.SingleGetLatestVerificationCode{
&jinmuidpb.SingleGetLatestVerificationCode{
SendVia: jinmuidpb.SendVia_SEND_VIA_PHONE_SEND_VIA,
Phone: account.phoneMacao,
NationCode: account.nationCodeMacao,
},
}
_ = jinmuIDService.GetLatestVerificationCodes(ctx, reqGetLatestVerificationCodes, respGetLatestVerificationCodes)
mvc := respGetLatestVerificationCodes.LatestVerificationCodes[0].VerificationCode
return mvc
}
// 手机验证短信注册验证码是否正确-澳门手机号码
func getSignUpSerialNumberMacao(jinmuIDService *JinmuIDService, account Account) string {
ctx := context.Background()
resp := new(jinmuidpb.SmsNotificationResponse)
req := new(jinmuidpb.SmsNotificationRequest)
req.Phone = account.phoneMacao
req.Action = jinmuidpb.TemplateAction_TEMPLATE_ACTION_SIGN_UP
req.Language = generalpb.Language_LANGUAGE_ENGLISH
req.NationCode = account.nationCodeMacao
_ = jinmuIDService.SmsNotification(ctx, req, resp)
return resp.SerialNumber
}
// getSignUpVerificationNumberCanada runs the full sign-up SMS flow for the
// account's Canadian phone number and returns the verification number.
// Canada shares country calling code +1 with the USA, hence nationCodeUSA.
func getSignUpVerificationNumberCanada(jinmuIDService *JinmuIDService, account Account) string {
	ctx := context.Background()
	req := &jinmuidpb.ValidatePhoneVerificationCodeRequest{
		Phone:        account.phoneCanada,
		Mvc:          getSmsVerificationCodeCanada(jinmuIDService, account),
		SerialNumber: getSignUpSerialNumberCanada(jinmuIDService, account),
		NationCode:   account.nationCodeUSA,
	}
	resp := &jinmuidpb.ValidatePhoneVerificationCodeResponse{}
	// Error deliberately ignored: test helper; callers inspect the response.
	_ = jinmuIDService.ValidatePhoneVerificationCode(ctx, req, resp)
	return resp.VerificationNumber
}

// getSmsVerificationCodeCanada returns the most recent SMS verification code
// recorded for the account's Canadian phone number (+1 nation code).
func getSmsVerificationCodeCanada(jinmuIDService *JinmuIDService, account Account) string {
	ctx := context.Background()
	req := &jinmuidpb.GetLatestVerificationCodesRequest{
		SendTo: []*jinmuidpb.SingleGetLatestVerificationCode{
			{
				SendVia:    jinmuidpb.SendVia_SEND_VIA_PHONE_SEND_VIA,
				Phone:      account.phoneCanada,
				NationCode: account.nationCodeUSA,
			},
		},
	}
	resp := &jinmuidpb.GetLatestVerificationCodesResponse{}
	_ = jinmuIDService.GetLatestVerificationCodes(ctx, req, resp)
	// Exactly one destination was requested, so take the first entry.
	return resp.LatestVerificationCodes[0].VerificationCode
}

// getSignUpSerialNumberCanada triggers a sign-up SMS notification (English
// template) for the Canadian number and returns the assigned serial number.
func getSignUpSerialNumberCanada(jinmuIDService *JinmuIDService, account Account) string {
	ctx := context.Background()
	req := &jinmuidpb.SmsNotificationRequest{
		Phone:      account.phoneCanada,
		Action:     jinmuidpb.TemplateAction_TEMPLATE_ACTION_SIGN_UP,
		Language:   generalpb.Language_LANGUAGE_ENGLISH,
		NationCode: account.nationCodeUSA,
	}
	resp := &jinmuidpb.SmsNotificationResponse{}
	_ = jinmuIDService.SmsNotification(ctx, req, resp)
	return resp.SerialNumber
}
// getSignUpVerificationNumberUK runs the full sign-up SMS flow for the
// account's UK phone number and returns the resulting verification number.
func getSignUpVerificationNumberUK(jinmuIDService *JinmuIDService, account Account) string {
	ctx := context.Background()
	req := &jinmuidpb.ValidatePhoneVerificationCodeRequest{
		Phone:        account.phoneUK,
		Mvc:          getSmsVerificationCodeUK(jinmuIDService, account),
		SerialNumber: getSignUpSerialNumberUK(jinmuIDService, account),
		NationCode:   account.nationCodeUK,
	}
	resp := &jinmuidpb.ValidatePhoneVerificationCodeResponse{}
	// Error deliberately ignored: test helper; callers inspect the response.
	_ = jinmuIDService.ValidatePhoneVerificationCode(ctx, req, resp)
	return resp.VerificationNumber
}

// getSmsVerificationCodeUK returns the most recent SMS verification code
// recorded for the account's UK phone number.
func getSmsVerificationCodeUK(jinmuIDService *JinmuIDService, account Account) string {
	ctx := context.Background()
	req := &jinmuidpb.GetLatestVerificationCodesRequest{
		SendTo: []*jinmuidpb.SingleGetLatestVerificationCode{
			{
				SendVia:    jinmuidpb.SendVia_SEND_VIA_PHONE_SEND_VIA,
				Phone:      account.phoneUK,
				NationCode: account.nationCodeUK,
			},
		},
	}
	resp := &jinmuidpb.GetLatestVerificationCodesResponse{}
	_ = jinmuIDService.GetLatestVerificationCodes(ctx, req, resp)
	// Exactly one destination was requested, so take the first entry.
	return resp.LatestVerificationCodes[0].VerificationCode
}

// getSignUpSerialNumberUK triggers a sign-up SMS notification (English
// template) for the UK number and returns the assigned serial number.
func getSignUpSerialNumberUK(jinmuIDService *JinmuIDService, account Account) string {
	ctx := context.Background()
	req := &jinmuidpb.SmsNotificationRequest{
		Phone:      account.phoneUK,
		Action:     jinmuidpb.TemplateAction_TEMPLATE_ACTION_SIGN_UP,
		Language:   generalpb.Language_LANGUAGE_ENGLISH,
		NationCode: account.nationCodeUK,
	}
	resp := &jinmuidpb.SmsNotificationResponse{}
	_ = jinmuIDService.SmsNotification(ctx, req, resp)
	return resp.SerialNumber
}
// getSignUpVerificationNumberJP runs the full sign-up SMS flow for the
// account's Japanese phone number and returns the verification number.
func getSignUpVerificationNumberJP(jinmuIDService *JinmuIDService, account Account) string {
	ctx := context.Background()
	req := &jinmuidpb.ValidatePhoneVerificationCodeRequest{
		Phone:        account.phoneJP,
		Mvc:          getSmsVerificationCodeJP(jinmuIDService, account),
		SerialNumber: getSignUpSerialNumberJP(jinmuIDService, account),
		NationCode:   account.nationCodeJP,
	}
	resp := &jinmuidpb.ValidatePhoneVerificationCodeResponse{}
	// Error deliberately ignored: test helper; callers inspect the response.
	_ = jinmuIDService.ValidatePhoneVerificationCode(ctx, req, resp)
	return resp.VerificationNumber
}

// getSmsVerificationCodeJP returns the most recent SMS verification code
// recorded for the account's Japanese phone number.
func getSmsVerificationCodeJP(jinmuIDService *JinmuIDService, account Account) string {
	ctx := context.Background()
	req := &jinmuidpb.GetLatestVerificationCodesRequest{
		SendTo: []*jinmuidpb.SingleGetLatestVerificationCode{
			{
				SendVia:    jinmuidpb.SendVia_SEND_VIA_PHONE_SEND_VIA,
				Phone:      account.phoneJP,
				NationCode: account.nationCodeJP,
			},
		},
	}
	resp := &jinmuidpb.GetLatestVerificationCodesResponse{}
	_ = jinmuIDService.GetLatestVerificationCodes(ctx, req, resp)
	// Exactly one destination was requested, so take the first entry.
	return resp.LatestVerificationCodes[0].VerificationCode
}

// getSignUpSerialNumberJP triggers a sign-up SMS notification (English
// template) for the Japanese number and returns the assigned serial number.
func getSignUpSerialNumberJP(jinmuIDService *JinmuIDService, account Account) string {
	ctx := context.Background()
	req := &jinmuidpb.SmsNotificationRequest{
		Phone:      account.phoneJP,
		Action:     jinmuidpb.TemplateAction_TEMPLATE_ACTION_SIGN_UP,
		Language:   generalpb.Language_LANGUAGE_ENGLISH,
		NationCode: account.nationCodeJP,
	}
	resp := &jinmuidpb.SmsNotificationResponse{}
	_ = jinmuIDService.SmsNotification(ctx, req, resp)
	return resp.SerialNumber
}
|
package montecarlo
// ActionBuilder is a type that can build sets of possible actions from a
// given default state.
type ActionBuilder interface {
	BuildActions(defaultState State) ActionSet
}

// MasterBuilder builds all actions, based on a list of other ActionBuilders.
type MasterBuilder struct {
	// SubBuilders are consulted in order; see BuildActions for merge semantics.
	SubBuilders []ActionBuilder
}
// BuildActions collects the actions produced by every sub-builder into a
// single ActionSet. Builders are consulted in slice order, so later
// builders overwrite earlier entries that share a key.
func (mb MasterBuilder) BuildActions(defaultState State) ActionSet {
	combined := make(ActionSet)
	for _, sub := range mb.SubBuilders {
		combined.merge(sub.BuildActions(defaultState))
	}
	return combined
}
// merge copies every entry of others into as, overwriting any existing
// entry that shares a key with the incoming set.
func (as ActionSet) merge(others ActionSet) {
	for key, action := range others {
		as[key] = action
	}
}
|
package test
import (
"errors"
"github.com/mkj-gram/go_email_service/internal/emailprovider"
"github.com/mkj-gram/go_email_service/internal/emailsender"
"github.com/stretchr/testify/assert"
"testing"
)
// TestProvider is a configurable stub provider: Send delegates to the
// injected send func, letting each test script the behavior.
type TestProvider struct {
	send func(m emailprovider.Email) error
}

// Send invokes the injected stub function.
func (t TestProvider) Send(m emailprovider.Email) error {
	return t.send(m)
}

// SuccessProvider is a stub provider whose Send always succeeds.
type SuccessProvider struct{}

// Send always reports success.
func (s SuccessProvider) Send(m emailprovider.Email) error {
	return nil
}

// FailProvider is a stub provider whose Send always fails.
type FailProvider struct{}

// Send always returns an error, simulating an unavailable provider.
func (f FailProvider) Send(m emailprovider.Email) error {
	return errors.New("Some error here")
}
// testProviderGenerator builds a TestProvider whose Send bumps *counter on
// every invocation and always returns err (nil for a succeeding provider).
func testProviderGenerator(counter *int, err error) TestProvider {
	send := func(emailprovider.Email) error {
		*counter++
		return err
	}
	return TestProvider{send: send}
}
// makeSimpleEmail builds a minimal valid email fixture. Construction
// errors are ignored because the inputs are known-good constants.
func makeSimpleEmail() emailprovider.Email {
	sender, _ := emailprovider.MakeEmailAddress("Morten", "morten@example.com")
	recipient, _ := emailprovider.MakeEmailAddress("Morten", "morten@example.com")
	subject, _ := emailprovider.MakeSubject("this is a subject")
	return emailprovider.Email{
		From:    sender,
		To:      []emailprovider.EmailAddress{recipient},
		Subject: subject,
		Body:    "this is a body",
	}
}
// TestSendChecksForProviders verifies Send fails (rather than panics)
// when the sender has no providers configured.
func TestSendChecksForProviders(t *testing.T) {
	sender := emailsender.RoundRobinSender{
		Providers: []emailprovider.Provider{},
	}
	err := sender.Send(emailprovider.Email{})
	assert.NotNil(t, err)
}

// TestWillCallFirstProvider verifies the first provider is invoked exactly
// once for a single successful send.
func TestWillCallFirstProvider(t *testing.T) {
	called := 0
	sender := emailsender.RoundRobinSender{
		Providers: []emailprovider.Provider{
			testProviderGenerator(&called, nil),
		},
	}
	err := sender.Send(makeSimpleEmail())
	assert.Equal(t, 1, called)
	assert.Nil(t, err)
}

// TestWillNotCallAfterSuccessProvider verifies that once a provider
// succeeds, later providers are not tried.
func TestWillNotCallAfterSuccessProvider(t *testing.T) {
	called := 0
	sender := emailsender.RoundRobinSender{
		Providers: []emailprovider.Provider{
			SuccessProvider{},
			testProviderGenerator(&called, nil),
		},
	}
	sender.Send(makeSimpleEmail())
	assert.Equal(t, 0, called, "Called second provider after success")
}

// TestWillCallAfterFailProvider verifies fail-over: a failing provider
// causes the next one to be tried.
func TestWillCallAfterFailProvider(t *testing.T) {
	called := 0
	providers := []emailprovider.Provider{
		FailProvider{},
		testProviderGenerator(&called, nil),
	}
	sender := emailsender.RoundRobinSender{Providers: providers}
	sender.Send(makeSimpleEmail())
	assert.Equal(t, 1, called, "Not called second provider after fail")
}

// TestWillNotLoopOnFailingProviders verifies termination when every
// provider fails; the implicit assertion is simply that Send returns.
func TestWillNotLoopOnFailingProviders(t *testing.T) {
	providers := []emailprovider.Provider{
		FailProvider{},
		FailProvider{},
		FailProvider{},
	}
	sender := emailsender.RoundRobinSender{Providers: providers}
	// This will keep looping forever, if not implemented correctly
	sender.Send(makeSimpleEmail())
}

// TestWillContinueWithLastSuccess verifies the round-robin property: the
// next Send starts from the provider that last succeeded, so the
// previously failing first provider is not retried.
func TestWillContinueWithLastSuccess(t *testing.T) {
	counters := []int{0, 0, 0}
	providers := []emailprovider.Provider{
		testProviderGenerator(&counters[0], errors.New("This will fail")),
		testProviderGenerator(&counters[1], nil),
		testProviderGenerator(&counters[2], nil),
	}
	sender := emailsender.RoundRobinSender{Providers: providers}
	sender.Send(makeSimpleEmail())
	sender.Send(makeSimpleEmail())
	assert.Equal(t, 1, counters[0], "Calling first provider again")
	assert.Equal(t, 2, counters[1], "Not starting with last successful provider")
	assert.Equal(t, 0, counters[2], "Not starting with last successful provider")
}
|
package main
import (
"fmt"
)
// iTable abstracts anything that can report a name.
type iTable interface {
	GetName() string
}

// table is a trivial iTable implementation used for demonstration.
type table struct{}

// GetName returns a fixed name.
func (t table) GetName() string {
	return "11"
}
// t1 prints the table's name to stdout.
// Fix: fmt.Print instead of fmt.Printf — the original passed a
// non-constant value as the format string (flagged by go vet), so a name
// containing '%' would have been misinterpreted as format verbs.
func t1(it iTable) {
	fmt.Print(it.GetName())
}
// Vertex is a simple 2-D point with integer coordinates.
type Vertex struct {
	X int
	Y int
}
// main1 demonstrates struct pointers: mutating through p changes v itself.
func main1() {
	/**names := []string{"1", "2", "45"}
	//a := "n"
	// := &a
	p := &names
	for i, name := range names {
		fmt.Printf("%v %v %s\n", &p, &names[i], name)
	}
	t1(table{})*/
	v := Vertex{1, 2}
	p := &v
	// Go auto-dereferences: p.X is shorthand for (*p).X.
	// The constant 1e9 fits in int on all supported platforms.
	p.X = 1e9
	fmt.Println(v) // prints {1000000000 2}
}
|
package main
// InputData is the request payload: a list of raw text strings to process.
type InputData struct {
	Data []string `json:"data"`
}

// OutputData is the response payload wrapping the extracted entities.
// NOTE(review): "YashOju" looks like a project-specific envelope key —
// confirm consumers actually expect this name before changing it.
type OutputData struct {
	Data []Output `json:"YashOju"`
}

// Output describes a single extraction result for one text fragment.
type Output struct {
	Text   string `json:"text"`
	Entity string `json:"entity"`
	Types  string `json:"types"`
}
|
package http
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/tiagorlampert/CHAOS/internal/environment"
"github.com/tiagorlampert/CHAOS/internal/utils/template"
)
// NewRouter builds the gin engine: default middleware plus panic
// recovery, static assets served under /static, and HTML templates
// loaded from the web directory.
func NewRouter() *gin.Engine {
	router := gin.Default()
	router.Use(gin.Recovery())
	router.Static("/static", "web/static")
	router.HTMLRender = template.LoadTemplates("web")
	return router
}

// NewServer starts the HTTP server on the configured port. It blocks
// until the server stops and returns the terminating error.
func NewServer(router *gin.Engine, configuration *environment.Configuration) error {
	return router.Run(fmt.Sprintf(":%s", configuration.Server.Port))
}
|
package main
import "fmt"
// addP increments the integer pointed to by c in place and returns the
// new value.
func addP(c *int) int {
	*c++
	return *c
}
// main demonstrates pass-by-pointer: addP mutates x in place.
func main() {
	x := 10
	fmt.Println(x)        // 10
	fmt.Println(addP(&x)) // 11
	fmt.Println(x)        // 11 — mutated through the pointer
}
|
// Example of internal unit test
// all public & private variables / types / functions etc... visable to test logic
package pool
import (
"fmt"
"strings"
"sync"
"testing"
)
// TestWorker exercises the Worker end to end: start it, feed one item of
// work, close the input channel to trigger shutdown, and verify the
// transformed result comes back on the results channel.
func TestWorker(t *testing.T) {
	wg := &sync.WaitGroup{}
	todo := make(chan Work)
	result := make(chan Work)
	stop := make(chan struct{})
	w := &Worker{
		waiter:  wg,
		todo:    todo,
		results: result,
		stop:    stop,
	}
	// start the worker
	go w.Start()
	// create some work; the expected output is the upper-cased input
	expect := "HELLO"
	x := Work{Before: strings.ToLower(expect)}
	// place it on the channel (blocks until the worker receives it)
	todo <- x
	// close the input channel
	// the worker is coded to shut down when the input channel closes
	close(todo)
	// read back the result
	r := <-result
	msg := fmt.Sprintf("Expected: %s, Got: %s", expect, r.After)
	if r.After != expect {
		t.Error(msg)
	}
	t.Log(msg)
	// wait for the worker goroutine to finish shutting down
	wg.Wait()
}
|
package example
import "fmt"
// Numbered test constants.
const (
	T0 int = iota // 0
	T1            // 1
	T2            // 2
)

// Something is a test string used in various scenarios.
const Something = "word"

// DefaultName is a mutable variable used to store a string used in various default scenarios when an override is
// not provided.
var DefaultName = "godork"

// Some other variables.
var (
	Hi = "Hello"
	// NOTE(review): Byte holds a farewell string despite its numeric-sounding
	// name; it is exported, so renaming would break external callers.
	Byte = "Good bye"
)

// SomeType is a struct holding the state of the example struct type.
// A bunch of content here
// indent
// stuff.
type SomeType struct {
	// External is the verbleburble
	External int
	// internal is unexported state, reachable only within this package.
	internal int
}
// ValueReceiver is an example of a method with a value rcvr; it operates
// on a copy of s, so it cannot mutate the caller's SomeType.
func (s SomeType) ValueReceiver(x int) {
	fmt.Println(x)
}

// PointerReceiver is an example of a method with a pointer rcvr; mutations
// through s would be visible to the caller.
func (s *SomeType) PointerReceiver(y int) {
	fmt.Println(y)
}
// TopLevelFunk formats derp and the variadic names as "derp, [name1 name2 ...]".
func TopLevelFunk(derp int, names ...string) string {
	formatted := fmt.Sprintf("%d, %v", derp, names)
	return formatted
}
|
package main
import (
"encoding/json"
"errors"
"net/http"
)
// EstimatedFees groups per-purpose fee-rate estimates. Every field is a
// pointer so an absent estimate decodes/encodes as JSON null rather than 0.
// NOTE(review): units are presumably sat/vB or perkw depending on the
// consumer — confirm against the caller that serializes this.
type EstimatedFees struct {
	Opening         *int `json:"opening"`
	MutualClose     *int `json:"mutual_close"`
	UnilateralClose *int `json:"unilateral_close"`
	DelayedToUs     *int `json:"delayed_to_us"`
	HTLCResolution  *int `json:"htlc_resolution"`
	Penalty         *int `json:"penalty"`
	MinAcceptable   *int `json:"min_acceptable"`
	MaxAcceptable   *int `json:"max_acceptable"`
}
// getFeeRatesFromEsplora queries each configured esplora endpoint for
// /fee-estimates and returns the first usable response, decoded into a
// map of confirmation target -> fee rate.
//
// Fixes over the original:
//   - `defer w.Body.Close()` ran *before* the error check, so a failed
//     request (nil *http.Response) dereferenced nil in the deferred call;
//   - defer inside the loop postponed every Close to function exit,
//     holding connections open across iterations.
func getFeeRatesFromEsplora() (feerates map[string]float64, err error) {
	for _, endpoint := range esploras() {
		w, errW := http.Get(endpoint + "/fee-estimates")
		if errW != nil {
			// remember the failure and try the next endpoint
			err = errW
			continue
		}
		if w.StatusCode >= 300 {
			w.Body.Close()
			err = errors.New(endpoint + " error: " + w.Status)
			return
		}
		err = json.NewDecoder(w.Body).Decode(&feerates)
		w.Body.Close()
		return
	}
	err = errors.New("none of the esploras returned usable responses")
	return
}
|
package models
import (
"errors"
"mall/utils"
"strconv"
"github.com/astaxie/beego/orm"
)
// PmsProductCategory models one row of the pms_product_category table —
// a node in the product category tree (Chinese descriptions in the tags
// are runtime metadata and kept verbatim).
type PmsProductCategory struct {
	Id           int    `json:"id"`
	ParentId     int    `description:"上级分类的编号:0表示一级分类" json:"parent_id"`
	Name         string `description:"分类名称" orm:"size(64)" json:"name"`
	Level        int    `description:"分类级别:0->1级;1->2级" json:"level"`
	ProductCount int    `description:"商品数量" json:"product_count"`
	ProductUnit  string `description:"商品单位" orm:"size(64)" json:"product_unit"`
	NavStatus    int    `description:"是否显示在导航栏:0->不显示;1->显示" json:"nav_status"`
	ShowStatus   int    `description:"显示状态:0->不显示;1->显示" json:"show_status"`
	Sort         int    `description:"排序" json:"sort"`
	Icon         string `description:"图标" json:"icon"`
	Keywords     string `description:"关键字" json:"keywords"`
	Description  string `description:"描述" orm:"type(text)" json:"description"`
	// PmsProduct []*PmsProduct `orm:"reverse(many)"`
}
// TableName reports the database table that backs PmsProductCategory,
// overriding beego ORM's default name derivation.
func (u *PmsProductCategory) TableName() string {
	const name = "pms_product_category"
	return name
}
// AddPmsProductCategory inserts a new product category inside a
// transaction and returns the generated primary key.
//
// Fixes over the original: Commit was executed even after a Rollback,
// and the Rollback's return value overwrote the insert error, hiding
// the real failure from callers.
func AddPmsProductCategory(u PmsProductCategory) (id int64, err error) {
	o := orm.NewOrm()
	if err = o.Begin(); err != nil {
		return 0, err
	}
	// Copy only the writable fields; Id is left for the DB to assign.
	productCategory := PmsProductCategory{
		ParentId:     u.ParentId,
		Name:         u.Name,
		Level:        u.Level,
		ProductCount: u.ProductCount,
		ProductUnit:  u.ProductUnit,
		NavStatus:    u.NavStatus,
		ShowStatus:   u.ShowStatus,
		Sort:         u.Sort,
		Icon:         u.Icon,
		Keywords:     u.Keywords,
		Description:  u.Description,
	}
	id, err = o.Insert(&productCategory)
	if err != nil {
		_ = o.Rollback() // best effort; the insert error is what matters
		return 0, err
	}
	err = o.Commit()
	return id, err
}
// GetPmsProductCategory loads a single product category by its primary key.
// The returned error is non-nil when no row with that id exists.
func GetPmsProductCategory(uid int) (u *PmsProductCategory, err error) {
	o := orm.NewOrm()
	category := PmsProductCategory{Id: uid}
	err = o.Read(&category)
	return &category, err
}
// GetAllPmsProductCategory returns page p (size entries per page) of
// product categories wrapped in the project's Page structure.
func GetAllPmsProductCategory(p int, size int) (u utils.Page, err error) {
	o := orm.NewOrm()
	// user := new(User)
	var productCategories []PmsProductCategory
	qs := o.QueryTable("pms_product_category")
	// Limit(-1) removes the limit so Count sees the whole table.
	count, _ := qs.Limit(-1).Count()
	_, err = qs.RelatedSel().Limit(size).Offset((p - 1) * size).All(&productCategories)
	// NOTE(review): int64 -> string -> int is a roundabout int(count);
	// kept as-is because strconv is imported only for this line.
	c, _ := strconv.Atoi(strconv.FormatInt(count, 10))
	return utils.Pagination(c, p, size, productCategories), err
}
// UpdatePmsProductCategory applies the non-empty / non-negative fields of
// uu to the category identified by uid and returns the updated row.
//
// Fixes over the original:
//   - the identical UPDATE was issued twice;
//   - a Read failure returned (nil, nil), so callers dereferenced nil;
//   - the commit/rollback decision keyed off a stale outer err while the
//     update errors were shadowed inside the if-statements.
func UpdatePmsProductCategory(uid int, uu *PmsProductCategory) (a *PmsProductCategory, err error) {
	o := orm.NewOrm()
	productCategory := PmsProductCategory{Id: uid}
	// Surface the lookup error instead of silently returning (nil, nil).
	if err = o.Read(&productCategory); err != nil {
		return nil, err
	}
	if uu.Name != "" {
		productCategory.Name = uu.Name
	}
	if uu.Level >= 0 {
		productCategory.Level = uu.Level
	}
	if uu.ProductCount >= 0 {
		productCategory.ProductCount = uu.ProductCount
	}
	if uu.ProductUnit != "" {
		productCategory.ProductUnit = uu.ProductUnit
	}
	if uu.Icon != "" {
		productCategory.Icon = uu.Icon
	}
	if err = o.Begin(); err != nil {
		return nil, err
	}
	if _, err = o.Update(&productCategory); err != nil {
		_ = o.Rollback() // best effort; report the update failure
		return nil, errors.New("修改失败")
	}
	if err = o.Commit(); err != nil {
		return nil, err
	}
	return &productCategory, nil
}
// DeletePmsProductCategory deletes the category with the given id inside
// a transaction. It returns true only when the delete was committed.
//
// Fixes over the original: Commit ran even after Rollback, the rollback
// error overwrote the delete error, and b was never set (always false).
func DeletePmsProductCategory(uid int) (b bool, err error) {
	o := orm.NewOrm()
	if err = o.Begin(); err != nil {
		return false, err
	}
	productCategory := PmsProductCategory{Id: uid}
	if _, err = o.Delete(&productCategory); err != nil {
		_ = o.Rollback() // best effort; report the delete failure
		return false, err
	}
	if err = o.Commit(); err != nil {
		return false, err
	}
	return true, nil
}
// init registers the model with beego's ORM at package load time so it
// can participate in queries and migrations.
func init() {
	// register the model with the ORM registry
	orm.RegisterModel(new(PmsProductCategory))
}
|
package schema
import (
"fmt"
"reflect"
"strings"
"testing"
"time"
"github.com/EverythingMe/bson/bson"
"golang.org/x/text/language"
)
var mockSchema = `
# Mock Schema
schema: mock
tables:
users:
engines:
- redis
columns:
name:
comment: "The name of this user"
type: Text
options:
not_null: true
num:
type: Int
bum:
type: Int
indexes:
- type: simple
columns: [bum]
losers:
engines:
- redis
columns:
name:
comment: "The name of this user"
type: Text
`
var mockSchema2 = `
schema: mock
tables:
users:
engines:
- redis
columns:
name:
comment: "The name of this user"
type: Text
options:
not_null: true
num:
type: Text
sum:
type: Text
indexes:
- type: simple
columns: [name]
bars:
engines:
- redis
columns:
name:
comment: "The name of this user"
type: Text
options:
not_null: true
num:
type: Int
`
// TestLoad verifies that mockSchema parses and that the users table has
// the expected columns and column types.
//
// Fixes: t.Error was called with printf-style arguments (go vet flags
// this; the %s was printed literally), and the column-count message said
// "Expected 2" while the condition checks for 3.
func TestLoad(t *testing.T) {
	//t.SkipNow()
	r := strings.NewReader(mockSchema)
	sc, e := Load(r)
	if e != nil {
		t.Fatal(e)
	}
	if sc.Name != "mock" {
		t.Errorf("Wrong name: expected mock, got %s", sc.Name)
	}
	//b, _ := json.MarshalIndent(sc, "-", " ")
	if tbl, found := sc.Tables["users"]; !found {
		t.Fatal("Table users not found")
	} else {
		if len(tbl.Columns) != 3 {
			t.Fatal("Expected 3 columns for users")
		}
		if c, found := tbl.Columns["name"]; !found {
			t.Fatal("no column name")
		} else {
			if c.Type != TextType {
				t.Fatal("name is not text but", c.Type)
			}
		}
		if c, found := tbl.Columns["num"]; !found {
			t.Fatal("no column num")
		} else {
			if c.Type != IntType {
				t.Fatal("num is not int but", c.Type)
			}
		}
	}
}
// TestDiff loads both mock schemas and verifies every change reported by
// Diff matches the known delta between them.
//
// Fix: the two index cases passed printf-style arguments to t.Fatal
// (go vet flags this); they now use t.Fatalf.
func TestDiff(t *testing.T) {
	//t.SkipNow()
	r := strings.NewReader(mockSchema)
	sc, e := Load(r)
	if e != nil {
		t.Fatal(e)
	}
	r = strings.NewReader(mockSchema2)
	sc2, e := Load(r)
	if e != nil {
		t.Fatal(e)
	}
	diff, err := sc.Diff(sc2)
	if err != nil {
		t.Fatal(err)
	}
	if len(diff) == 0 {
		t.Fatal("No diff detected")
	}
	for _, change := range diff {
		//fmt.Println(reflect.TypeOf(change))
		//b, _ := json.MarshalIndent(change, "-", " ")
		//fmt.Println(string(b))
		switch ch := change.(type) {
		case TableAddedChange:
			if ch.Table.Name != "mock.bars" {
				t.Fatal("Wrong table added")
			}
		case TableDeletedChange:
			if ch.Table.Name != "mock.losers" {
				t.Fatal("Wrong table deleted")
			}
		case ColumnAlterChange:
			if ch.Column.Name != "num" || ch.Column.Type != TextType {
				t.Fatal("Wrong column change", ch.Column)
			}
		case ColumnAddedChange:
			if ch.Column.Name != "sum" {
				t.Fatal("Wrong column added:", ch.Column.Name)
			}
		case ColumnDeletedChange:
			if ch.Column.Name != "bum" {
				t.Fatal("Wrong column deleted: ", ch.Column.Name)
			}
		case IndexAddedChange:
			if ch.Index.Name != "mock.users__name_simple" {
				t.Fatalf("Wrong index added: %s", ch.Index.Name)
			}
		case IndexRemovedChange:
			if ch.Index.Name != "mock.users__bum_simple" {
				t.Fatalf("Wrong index deleted: %s", ch.Index.Name)
			}
		default:
			t.Error("Undetected change: ", reflect.TypeOf(ch))
		}
	}
}
// TestNormalization verifies the normalizer lowercases, strips
// punctuation/diacritics (Café -> cafe) and leaves non-Latin text intact.
func TestNormalization(t *testing.T) {
	//t.SkipNow()
	normalizer := NewNormalizer(language.Und, true, true)
	input := "Hello, Café - WORLD!... אבוללה"
	out, err := normalizer.Normalize([]byte(input))
	if err != nil {
		t.Fatal(err)
	}
	if string(out) != "hello cafe world אבוללה" {
		t.Fatal("Wrong normalization: ", string(out))
	}
}
// TestSet verifies a Set survives a bson round trip unchanged.
//
// Fixes: the missing-value case used t.Error and then fell through to an
// unchecked type assertion, so a failure panicked instead of reporting;
// the assertion itself is now checked.
func TestSet(t *testing.T) {
	s := NewSet("foo", "bar", "baz")
	ent := NewEntity("foo").Set("bar", s)
	b, err := bson.Marshal(ent)
	if err != nil {
		t.Fatal(err)
	}
	e2 := Entity{}
	err = bson.Unmarshal(b, &e2)
	if err != nil {
		t.Fatal(err)
	}
	v, found := e2.Get("bar")
	if !found {
		t.Fatal("encoded set not found in decoded entity")
	}
	s2, ok := v.(Set)
	if !ok {
		t.Fatalf("decoded value has type %T, want Set", v)
	}
	if len(s) != len(s2) {
		t.Errorf("Incompatible list sizes: %d/%d", len(s2), len(s))
	}
	for k := range s2 {
		if _, found := s[k]; !found {
			t.Error(k, "not in ", s)
		}
	}
}
// TestTTL verifies that an entity's TTL is set by Expire and survives a
// bson round trip.
//
// Fix: t.Fatal was called with printf-style arguments (go vet flags
// this); it now uses t.Fatalf so %v is actually formatted.
func TestTTL(t *testing.T) {
	ttl := 150 * time.Millisecond
	ent := NewEntity("foo").Set("bar", "baz").Expire(ttl)
	if ent.TTL != ttl {
		t.Fatalf("TTL not set correctly, got %v", ent.TTL)
	}
	b, err := bson.Marshal(ent)
	if err != nil {
		t.Fatal(err)
	}
	e2 := Entity{}
	if err = bson.Unmarshal(b, &e2); err != nil {
		t.Fatal(err)
	}
	if e2.TTL != ent.TTL {
		t.Errorf("Unmatching ttls. Want %v, got %v", ent.TTL, e2.TTL)
	}
}
// TestList verifies a List survives a bson round trip with order and
// elements preserved.
//
// Fixes: the missing-value case used t.Error and then fell through to an
// unchecked type assertion, so a failure panicked instead of reporting.
func TestList(t *testing.T) {
	s := NewList("foo", "bar", "baz")
	ent := NewEntity("foo").Set("bar", s)
	b, err := bson.Marshal(ent)
	if err != nil {
		t.Fatal(err)
	}
	e2 := Entity{}
	err = bson.Unmarshal(b, &e2)
	if err != nil {
		t.Fatal(err)
	}
	s2, found := e2.Get("bar")
	if !found {
		t.Fatal("encoded set not found in decoded entity")
	}
	l2, ok := s2.(List)
	if !ok {
		t.Fatalf("decoded value has type %T, want List", s2)
	}
	if len(l2) != len(s) {
		t.Errorf("Incompatible list sizes: %d/%d", len(l2), len(s))
	}
	for i := range l2 {
		if s[i] != l2[i] {
			t.Errorf("Incompatible list elements %v /%v", s[i], l2[i])
		}
	}
}
// TestMap verifies a Map survives a bson round trip with keys, values and
// value types preserved.
//
// Fixes: the missing-value case used t.Error and then fell through to an
// unchecked type assertion, so a failure panicked instead of reporting.
// The fmt.Printf debug line is kept (it is this file's only use of fmt).
func TestMap(t *testing.T) {
	m := NewMap().
		Set("foo", "Bar").
		Set("bar", 123)
	ent := NewEntity("foo").Set("map", m)
	b, err := bson.Marshal(ent)
	if err != nil {
		t.Fatal(err)
	}
	e2 := Entity{}
	err = bson.Unmarshal(b, &e2)
	if err != nil {
		t.Fatal(err)
	}
	p2, found := e2.Get("map")
	fmt.Printf("%#v\n", p2)
	if !found {
		t.Fatal("encoded set not found in decoded entity")
	}
	m2, ok := p2.(Map)
	if !ok {
		t.Fatalf("decoded value has type %T, want Map", p2)
	}
	if len(m2) != len(m) {
		t.Errorf("Incompatible list sizes: %d/%d", len(m2), len(m))
	}
	for k, v := range m2 {
		if m[k] != v {
			t.Errorf("Incompatible map elements %v(%s)/%v(%s)", m[k], reflect.TypeOf(m[k]), v, reflect.TypeOf(v))
		}
	}
}
|
package model
import (
"fmt"
)
type (
	// User is a row of the users table joined with its profile.
	User struct {
		// User Unique ID. Generated by snowflake.
		UserID uint64 `json:"user_id,string" db:"user_id"`
		// Mail Address used to log in to the service.
		Email string `json:"email" db:"email"`
		// password stored with bcrypt salt hash.
		Password string `json:"password" db:"password"`
		// 2FA type set by User.
		// 0 : Password authentication only.
		// 1 : TOTP Authentication.
		TwoFAType TwoFAType `json:"two_fa_type" db:"two_fa_type"`
		// The secret key is saved if 2FA for TOTP Authentication is enabled.
		TotpSecret string `json:"totp_secret,omitempty" db:"totp_secret"`
		// User location settings.
		Locale string `json:"locale" db:"locale"`
		// User role settings.
		// Exists for booking and is not currently in use.
		Role int `json:"role" db:"role"`
		// Date when the user was created.
		// Tag fixed: the original had `json:",omitempty", db:"…"` — the
		// stray comma makes the whole tag unparseable to reflect.StructTag
		// (go vet structtag), silently dropping the db column mapping.
		CreatedAt string `json:",omitempty" db:"created_at"`
		// Date the user last logged in.
		LastLogin string `json:",omitempty" db:"last_login"`
		// Date when the user was deleted.
		DeletedAt string `json:",omitempty" db:"deleted_at"`
		// Detailed profile of the user (same comma fix applied).
		Profile Profile `json:"user_profiles" db:"user_profiles"`
	}
	// Profile holds the user_profiles columns for a user.
	Profile struct {
		// User's displayed name
		Name string `json:"name" db:"name"`
		// User profile image
		Avatar string `json:"avatar" db:"avatar"`
	}
)

// TwoFAType enumerates the supported second-factor mechanisms.
type TwoFAType int

const (
	// TwoFATypeNotUsed means password-only authentication.
	TwoFATypeNotUsed TwoFAType = iota
	// TwoFATypeTOTP means time-based one-time passwords are required.
	TwoFATypeTOTP
)
// SignupUser inserts the user row and its profile row in one transaction.
// The deferred closure rolls back when any step failed and commits
// otherwise (its Commit error is propagated through the named return).
func (m *model) SignupUser(user *User) (err error) {
	tx, err := m.db.Begin()
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			// rollback error intentionally ignored; err keeps the root cause
			tx.Rollback()
			return
		}
		err = tx.Commit()
	}()
	_, err = tx.Exec(`INSERT INTO users
	(user_id, email, password, two_fa_type, locale, role, created_at)
	VALUES (?, ?, ?, ?, ?, ?, NOW())`,
		user.UserID,
		user.Email,
		user.Password,
		user.TwoFAType,
		user.Locale,
		user.Role)
	if err != nil {
		return
	}
	_, err = tx.Exec(`INSERT INTO user_profiles
	(user_id, name, avatar, created_at)
	VALUES (?, ?, ?, NOW())`,
		user.UserID,
		user.Profile.Name,
		user.Profile.Avatar)
	if err != nil {
		return
	}
	return
}
// LoginUser stamps last_login = NOW() for the user, inside a transaction.
// NOTE(review): fmt.Println(err) both logs and returns the same error
// (handle-at-one-layer violation); kept because it is this file's only
// use of the fmt import.
func (m *model) LoginUser(user *User) (err error) {
	tx, err := m.db.Begin()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer func() {
		if err != nil {
			tx.Rollback()
			return
		}
		err = tx.Commit()
	}()
	_, err = tx.Exec(`UPDATE users SET last_login = NOW() WHERE user_id = ?`, user.UserID)
	if err != nil {
		return
	}
	return
}
// UpdateUserProfile updates the name and avatar columns of the user's
// profile row inside a transaction.
//
// Fix: the SET list was missing the comma between the two assignments,
// a SQL syntax error on every execution.
// NOTE(review): named binds (:name) passed to tx.Exec with a struct
// argument look like they require sqlx's NamedExec — confirm against
// the concrete type of m.db.
func (m *model) UpdateUserProfile(user *User) (err error) {
	tx, err := m.db.Begin()
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			tx.Rollback()
			return
		}
		err = tx.Commit()
	}()
	_, err = tx.Exec(`UPDATE user_profiles SET
	name = :name,
	avatar = :avatar
	WHERE user_id = :user_id`,
		&user)
	if err != nil {
		return
	}
	return
}
// UpdateUserPassword replaces the stored (pre-hashed) password for the
// user, inside a transaction.
// NOTE(review): named binds (:password) with tx.Exec and a struct
// argument look like they require sqlx's NamedExec — confirm.
func (m *model) UpdateUserPassword(user *User) (err error) {
	tx, err := m.db.Begin()
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			tx.Rollback()
			return
		}
		err = tx.Commit()
	}()
	_, err = tx.Exec(`UPDATE users SET
	password = :password
	WHERE user_id = :user_id`,
		&user)
	if err != nil {
		return
	}
	return
}
// UpdateUserLocale stores the user's locale preference, inside a
// transaction.
// NOTE(review): named binds (:locale) with tx.Exec and a struct argument
// look like they require sqlx's NamedExec — confirm.
func (m *model) UpdateUserLocale(user *User) (err error) {
	tx, err := m.db.Begin()
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			tx.Rollback()
			return
		}
		err = tx.Commit()
	}()
	_, err = tx.Exec(`UPDATE users SET
	locale = :locale
	WHERE user_id = :user_id`,
		&user)
	if err != nil {
		return
	}
	return
}
// GetUser returns the user with the given id joined with its profile.
//
// Fix: the original indexed user[0] unconditionally; Select reports no
// error for an empty result, so a missing user panicked with an
// index-out-of-range instead of returning an error.
func (m *model) GetUser(id uint64) (*User, error) {
	user := []*User{}
	err := m.db.Select(&user, `SELECT users.user_id,
	users.email,
	users.locale,
	users.role,
	users.created_at,
	users.last_login,
	user_profiles.name,
	user_profiles.avatar
	FROM users
	INNER JOIN user_profiles ON users.user_id = user_profiles.user_id
	WHERE users.user_id = ?`, id)
	if err != nil {
		return nil, err
	}
	if len(user) == 0 {
		return nil, fmt.Errorf("user %d not found", id)
	}
	return user[0], nil
}
func (m *model) GetDetailForUserLogin(email string) (*User, error) {
user := []*User{}
err := m.db.Select(&user,`SELECT users.user_id,
users.email,
users.password,
users.two_fa_type,
users.totp_secret,
users.locale,
user_profiles.name,
user_profiles.avatar
FROM users
INNER JOIN user_profiles ON users.user_id = user_profiles.user_id
AND users.email = ?`, email)
if err != nil {
return nil, err
}
return user[0], nil
} |
package wordcount
import (
"regexp"
"strings"
)
// Frequency maps each word to the number of times it occurs.
type Frequency map[string]int

// WordCount tallies how often each word appears in s. Matching is
// case-insensitive, commas act as separators, and all characters other
// than letters, digits, apostrophes and spaces are dropped. A word fully
// wrapped in apostrophes is counted without them; words containing two or
// more interior apostrophes are skipped, and a single-word input is
// counted verbatim (original behavior preserved).
func WordCount(s string) Frequency {
	nonWord := regexp.MustCompile("[^a-zA-Z0-9' ]+")
	cleaned := nonWord.ReplaceAllString(strings.Replace(strings.ToLower(s), ",", " ", -1), "")
	freq := make(Frequency)
	fields := strings.Fields(cleaned)
	if len(fields) == 1 {
		freq[fields[0]] = 1
		return freq
	}
	for _, w := range fields {
		switch strings.Count(w, "'") {
		case 2:
			// quoted word: count it without the surrounding apostrophes
			freq[strings.Replace(w, "'", "", -1)]++
		case 0, 1:
			freq[w]++
		}
	}
	return freq
}
|
package validate
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"net"
"os"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strings"
"unicode"
"unicode/utf8"
"github.com/blang/semver/v4"
"github.com/hashicorp/go-multierror"
rspec "github.com/opencontainers/runtime-spec/specs-go"
osFilepath "github.com/opencontainers/runtime-tools/filepath"
capsCheck "github.com/opencontainers/runtime-tools/validate/capabilities"
"github.com/sirupsen/logrus"
"github.com/opencontainers/runtime-tools/specerror"
"github.com/xeipuuv/gojsonschema"
)
// specConfig is the well-known configuration file name inside a bundle.
const specConfig = "config.json"

var (
	// http://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html
	posixRlimits = []string{
		"RLIMIT_AS",
		"RLIMIT_CORE",
		"RLIMIT_CPU",
		"RLIMIT_DATA",
		"RLIMIT_FSIZE",
		"RLIMIT_NOFILE",
		"RLIMIT_STACK",
	}

	// https://git.kernel.org/pub/scm/docs/man-pages/man-pages.git/tree/man2/getrlimit.2?h=man-pages-4.13
	linuxRlimits = append(posixRlimits, []string{
		"RLIMIT_MEMLOCK",
		"RLIMIT_MSGQUEUE",
		"RLIMIT_NICE",
		"RLIMIT_NPROC",
		"RLIMIT_RSS",
		"RLIMIT_RTPRIO",
		"RLIMIT_RTTIME",
		"RLIMIT_SIGPENDING",
	}...)

	// configSchemaTemplate formats the config-schema URL for a spec version.
	configSchemaTemplate = "https://raw.githubusercontent.com/opencontainers/runtime-spec/v%s/schema/config-schema.json"
)
// Validator represents a validator for runtime bundle
type Validator struct {
	// spec is the parsed runtime-spec configuration under validation.
	spec *rspec.Spec
	// bundlePath is the on-disk root of the bundle being validated.
	bundlePath string
	// HostSpecific enables checks that only hold on the current host.
	HostSpecific bool
	// platform is the target OS ("linux", "windows", "solaris", ...).
	platform string
}
// NewValidator creates a Validator for the given spec and bundle path.
// When hostSpecific is true, platform must match the running OS, since
// host-specific checks are only meaningful on the host itself.
func NewValidator(spec *rspec.Spec, bundlePath string, hostSpecific bool, platform string) (Validator, error) {
	if hostSpecific && platform != runtime.GOOS {
		return Validator{}, fmt.Errorf("When hostSpecific is set, platform must be same as the host platform")
	}
	v := Validator{
		spec:         spec,
		bundlePath:   bundlePath,
		HostSpecific: hostSpecific,
		platform:     platform,
	}
	return v, nil
}
// NewValidatorFromPath creates a Validator by reading and parsing the
// config.json found inside the given bundle directory.
func NewValidatorFromPath(bundlePath string, hostSpecific bool, platform string) (Validator, error) {
	if bundlePath == "" {
		return Validator{}, fmt.Errorf("bundle path shouldn't be empty")
	}
	if _, err := os.Stat(bundlePath); err != nil {
		return Validator{}, err
	}
	configPath := filepath.Join(bundlePath, specConfig)
	content, err := os.ReadFile(configPath)
	if err != nil {
		// a missing/unreadable config violates the spec's bundle layout rule
		return Validator{}, specerror.NewError(specerror.ConfigInRootBundleDir, err, rspec.Version)
	}
	// the spec requires the configuration to be UTF-8 encoded JSON
	if !utf8.Valid(content) {
		return Validator{}, fmt.Errorf("%q is not encoded in UTF-8", configPath)
	}
	var spec rspec.Spec
	if err = json.Unmarshal(content, &spec); err != nil {
		return Validator{}, err
	}
	return NewValidator(&spec, bundlePath, hostSpecific, platform)
}
// CheckAll runs every validation pass over the runtime bundle and
// aggregates all failures into a single multierror result.
func (v *Validator) CheckAll() error {
	var errs *multierror.Error
	for _, check := range []func() error{
		v.CheckJSONSchema,
		v.CheckPlatform,
		v.CheckRoot,
		v.CheckMandatoryFields,
		v.CheckSemVer,
		v.CheckMounts,
		v.CheckProcess,
		v.CheckLinux,
		v.CheckAnnotations,
	} {
		errs = multierror.Append(errs, check())
	}
	// Hooks are only defined for the linux and solaris platforms.
	if v.platform == "linux" || v.platform == "solaris" {
		errs = multierror.Append(errs, v.CheckHooks())
	}
	return errs.ErrorOrNil()
}
// JSONSchemaURL returns the URL for the JSON Schema specifying the
// configuration format. It consumes configSchemaTemplate, but we
// provide it as a function to isolate consumers from inconsistent
// naming as runtime-spec evolves.
func JSONSchemaURL(version string) (url string, err error) {
	parsed, parseErr := semver.Parse(version)
	if parseErr != nil {
		return "", specerror.NewError(specerror.SpecVersionInSemVer, parseErr, rspec.Version)
	}
	// schemas are only published for spec versions 1.0.2 and newer
	oldest := semver.Version{Major: 1, Minor: 0, Patch: 2}
	if parsed.LT(oldest) {
		return "", errors.New("unsupported configuration version (older than 1.0.2)")
	}
	return fmt.Sprintf(configSchemaTemplate, version), nil
}
// CheckJSONSchema validates the configuration against the
// runtime-spec JSON Schema, using the version of the schema that
// matches the configuration's declared version.
func (v *Validator) CheckJSONSchema() (errs error) {
	logrus.Debugf("check JSON schema")
	// A trailing "-dev" marks an unreleased spec; validate against the
	// release it is based on.
	url, err := JSONSchemaURL(strings.TrimSuffix(v.spec.Version, "-dev"))
	if err != nil {
		errs = multierror.Append(errs, err)
		return errs
	}
	// NOTE(review): the schema is fetched over the network here — this
	// check fails offline; confirm that is acceptable for callers.
	schemaLoader := gojsonschema.NewReferenceLoader(url)
	documentLoader := gojsonschema.NewGoLoader(v.spec)
	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
	if err != nil {
		errs = multierror.Append(errs, err)
		return errs
	}
	// Collect every individual schema violation, not just the first.
	if !result.Valid() {
		for _, resultError := range result.Errors() {
			errs = multierror.Append(errs, errors.New(resultError.String()))
		}
	}
	return errs
}
// CheckRoot checks status of v.spec.Root
func (v *Validator) CheckRoot() (errs error) {
	logrus.Debugf("check root")
	// Presence rules: Hyper-V containers must NOT set Root; Windows
	// Server Containers and all other platforms MUST set it.
	if v.platform == "windows" {
		if v.spec.Windows != nil && v.spec.Windows.HyperV != nil {
			if v.spec.Root != nil {
				errs = multierror.Append(errs,
					specerror.NewError(specerror.RootOnHyperVNotSet, fmt.Errorf("for Hyper-V containers, Root must not be set"), rspec.Version))
			}
			return
		} else if v.spec.Root == nil {
			errs = multierror.Append(errs,
				specerror.NewError(specerror.RootOnWindowsRequired, fmt.Errorf("on Windows, for Windows Server Containers, Root is REQUIRED"), rspec.Version))
			return
		}
	} else if v.spec.Root == nil {
		errs = multierror.Append(errs,
			specerror.NewError(specerror.RootOnNonWindowsRequired, fmt.Errorf("on all other platforms, Root is REQUIRED"), rspec.Version))
		return
	}
	// Windows-specific path rules: the path must be a volume GUID path
	// and readonly must be false/omitted. No further checks apply.
	if v.platform == "windows" {
		matched, err := regexp.MatchString(`\\\\[?]\\Volume[{][a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}[}]\\`, v.spec.Root.Path)
		if err != nil {
			errs = multierror.Append(errs, err)
		} else if !matched {
			errs = multierror.Append(errs,
				specerror.NewError(specerror.RootPathOnWindowsGUID, fmt.Errorf("root.path is %q, but it MUST be a volume GUID path when target platform is windows", v.spec.Root.Path), rspec.Version))
		}
		if v.spec.Root.Readonly {
			errs = multierror.Append(errs,
				specerror.NewError(specerror.RootReadonlyOnWindowsFalse, fmt.Errorf("root.readonly field MUST be omitted or false when target platform is windows"), rspec.Version))
		}
		return
	}
	// POSIX path rules follow: conventional name, existence, and the
	// requirement that the rootfs is a direct child of the bundle.
	absBundlePath, err := filepath.Abs(v.bundlePath)
	if err != nil {
		errs = multierror.Append(errs, fmt.Errorf("unable to convert %q to an absolute path", v.bundlePath))
		return
	}
	if filepath.Base(v.spec.Root.Path) != "rootfs" {
		errs = multierror.Append(errs,
			specerror.NewError(specerror.RootPathOnPosixConvention, fmt.Errorf("path name should be the conventional 'rootfs'"), rspec.Version))
	}
	// Resolve both a usable on-disk path and a cleaned absolute path;
	// relative root paths are interpreted relative to the bundle.
	var rootfsPath string
	var absRootPath string
	if filepath.IsAbs(v.spec.Root.Path) {
		rootfsPath = v.spec.Root.Path
		absRootPath = filepath.Clean(rootfsPath)
	} else {
		var err error
		rootfsPath = filepath.Join(v.bundlePath, v.spec.Root.Path)
		absRootPath, err = filepath.Abs(rootfsPath)
		if err != nil {
			errs = multierror.Append(errs, fmt.Errorf("unable to convert %q to an absolute path", rootfsPath))
			return
		}
	}
	// The rootfs must exist and be a directory.
	if fi, err := os.Stat(rootfsPath); err != nil {
		errs = multierror.Append(errs,
			specerror.NewError(specerror.RootPathExist, fmt.Errorf("cannot find the root path %q", rootfsPath), rspec.Version))
	} else if !fi.IsDir() {
		errs = multierror.Append(errs,
			specerror.NewError(specerror.RootPathExist, fmt.Errorf("root.path %q is not a directory", rootfsPath), rspec.Version))
	}
	// All bundle artifacts must live in a single directory, so the
	// rootfs parent must be the bundle directory itself.
	rootParent := filepath.Dir(absRootPath)
	if absRootPath == string(filepath.Separator) || rootParent != absBundlePath {
		errs = multierror.Append(errs,
			specerror.NewError(specerror.ArtifactsInSingleDir, fmt.Errorf("root.path is %q, but it MUST be a child of %q", v.spec.Root.Path, absBundlePath), rspec.Version))
	}
	return
}
// CheckSemVer checks v.spec.Version
func (v *Validator) CheckSemVer() (errs error) {
	logrus.Debugf("check semver")
	version := v.spec.Version
	// The declared version must parse as SemVer at all...
	if _, err := semver.Parse(version); err != nil {
		errs = multierror.Append(errs,
			specerror.NewError(specerror.SpecVersionInSemVer, fmt.Errorf("%q is not valid SemVer: %s", version, err.Error()), rspec.Version))
	}
	// ...and must exactly match the spec version this tool was built for.
	if version != rspec.Version {
		errs = multierror.Append(errs, fmt.Errorf("validate currently only handles version %s, but the supplied configuration targets %s", rspec.Version, version))
	}
	return
}
// CheckHooks check v.spec.Hooks
func (v *Validator) CheckHooks() (errs error) {
	logrus.Debugf("check hooks")
	// Hooks are only part of the configuration on POSIX-ish platforms.
	switch v.platform {
	case "linux", "solaris":
		// supported; fall through to the per-event checks
	default:
		errs = multierror.Append(errs, fmt.Errorf("For %q platform, the configuration structure does not support hooks", v.platform))
		return
	}
	hooks := v.spec.Hooks
	if hooks == nil {
		return
	}
	errs = multierror.Append(errs, v.checkEventHooks("prestart", hooks.Prestart, v.HostSpecific))
	errs = multierror.Append(errs, v.checkEventHooks("poststart", hooks.Poststart, v.HostSpecific))
	errs = multierror.Append(errs, v.checkEventHooks("poststop", hooks.Poststop, v.HostSpecific))
	return
}
// checkEventHooks validates one hook list (prestart/poststart/poststop):
// each hook path must be absolute for the target platform and, when
// hostSpecific is set, must exist and be executable on this host.
// Hook environment entries must be well-formed "key=value" strings.
func (v *Validator) checkEventHooks(hookType string, hooks []rspec.Hook, hostSpecific bool) (errs error) {
	for i, hook := range hooks {
		if !osFilepath.IsAbs(v.platform, hook.Path) {
			errs = multierror.Append(errs,
				specerror.NewError(
					specerror.PosixHooksPathAbs,
					fmt.Errorf("hooks.%s[%d].path %v: is not absolute path",
						hookType, i, hook.Path),
					rspec.Version))
		}
		if hostSpecific {
			fi, err := os.Stat(hook.Path)
			if err != nil {
				// BUG FIX: the original fell through after a failed Stat and
				// dereferenced the nil FileInfo, panicking on a missing hook.
				errs = multierror.Append(errs, fmt.Errorf("cannot find %s hook: %v", hookType, hook.Path))
			} else if fi.Mode()&0111 == 0 {
				errs = multierror.Append(errs, fmt.Errorf("the %s hook %v: is not executable", hookType, hook.Path))
			}
		}
		for _, env := range hook.Env {
			if !envValid(env) {
				errs = multierror.Append(errs, fmt.Errorf("env %q for hook %v is in the invalid form", env, hook.Path))
			}
		}
	}
	return
}
// CheckProcess checks v.spec.Process
func (v *Validator) CheckProcess() (errs error) {
	logrus.Debugf("check process")
	if v.spec.Process == nil {
		return
	}
	process := v.spec.Process
	// cwd must be absolute for the target platform.
	if !osFilepath.IsAbs(v.platform, process.Cwd) {
		errs = multierror.Append(errs,
			specerror.NewError(
				specerror.ProcCwdAbs,
				fmt.Errorf("cwd %q is not an absolute path", process.Cwd),
				rspec.Version))
	}
	for _, env := range process.Env {
		if !envValid(env) {
			errs = multierror.Append(errs, fmt.Errorf("env %q should be in the form of 'key=value'. The left hand side must consist solely of letters, digits, and underscores '_'", env))
		}
	}
	if len(process.Args) == 0 {
		errs = multierror.Append(errs,
			specerror.NewError(
				specerror.ProcArgsOneEntryRequired,
				fmt.Errorf("args must not be empty"),
				rspec.Version))
	} else {
		// Best-effort check that an absolute args[0] exists inside the
		// rootfs and is executable; a missing file is only a warning
		// because the rootfs may be populated later.
		if filepath.IsAbs(process.Args[0]) && v.spec.Root != nil {
			var rootfsPath string
			if filepath.IsAbs(v.spec.Root.Path) {
				rootfsPath = v.spec.Root.Path
			} else {
				rootfsPath = filepath.Join(v.bundlePath, v.spec.Root.Path)
			}
			absPath := filepath.Join(rootfsPath, process.Args[0])
			fileinfo, err := os.Stat(absPath)
			if os.IsNotExist(err) {
				logrus.Warnf("executable %q is not available in rootfs currently", process.Args[0])
			} else if err != nil {
				errs = multierror.Append(errs, err)
			} else {
				m := fileinfo.Mode()
				if m.IsDir() || m&0111 == 0 {
					errs = multierror.Append(errs, fmt.Errorf("arg %q is not executable", process.Args[0]))
				}
			}
		}
	}
	if v.platform == "linux" || v.platform == "solaris" {
		errs = multierror.Append(errs, v.CheckRlimits())
	}
	if v.platform == "linux" {
		if v.spec.Process.Capabilities != nil {
			errs = multierror.Append(errs, v.CheckCapabilities())
		}
		if len(process.ApparmorProfile) > 0 {
			// BUG FIX: guard against a nil Root — the original dereferenced
			// v.spec.Root.Path unconditionally here and panicked when Root
			// was absent (earlier checks only report that, they don't prevent it).
			if v.spec.Root != nil {
				profilePath := filepath.Join(v.bundlePath, v.spec.Root.Path, "/etc/apparmor.d", process.ApparmorProfile)
				if _, err := os.Stat(profilePath); err != nil {
					errs = multierror.Append(errs, err)
				}
			}
		}
	}
	return
}
// CheckCapabilities checks v.spec.Process.Capabilities
func (v *Validator) CheckCapabilities() (errs error) {
	if v.platform != "linux" {
		errs = multierror.Append(errs, fmt.Errorf("For %q platform, the configuration structure does not support process.capabilities", v.platform))
		return
	}
	process := v.spec.Process
	var effective, permitted, inheritable, ambient bool
	// Build capability -> list of sets it appears in, so each capability
	// is validated once regardless of how many sets mention it.
	caps := make(map[string][]string)
	for _, cap := range process.Capabilities.Bounding {
		caps[cap] = append(caps[cap], "bounding")
	}
	for _, cap := range process.Capabilities.Effective {
		caps[cap] = append(caps[cap], "effective")
	}
	for _, cap := range process.Capabilities.Inheritable {
		caps[cap] = append(caps[cap], "inheritable")
	}
	for _, cap := range process.Capabilities.Permitted {
		caps[cap] = append(caps[cap], "permitted")
	}
	for _, cap := range process.Capabilities.Ambient {
		caps[cap] = append(caps[cap], "ambient")
	}
	for capability, owns := range caps {
		// Name must be a known capability (optionally host-checked).
		if err := CapValid(capability, v.HostSpecific); err != nil {
			errs = multierror.Append(errs, fmt.Errorf("capability %q is not valid, man capabilities(7)", capability))
		}
		// Recompute set membership flags for this capability.
		effective, permitted, ambient, inheritable = false, false, false, false
		for _, set := range owns {
			if set == "effective" {
				effective = true
				continue
			}
			if set == "inheritable" {
				inheritable = true
				continue
			}
			if set == "permitted" {
				permitted = true
				continue
			}
			if set == "ambient" {
				ambient = true
				continue
			}
		}
		// Kernel rules: effective requires permitted; ambient requires
		// both permitted and inheritable.
		if effective && !permitted {
			errs = multierror.Append(errs, fmt.Errorf("effective capability %q is not allowed, as it's not permitted", capability))
		}
		if ambient && !(permitted && inheritable) {
			errs = multierror.Append(errs, fmt.Errorf("ambient capability %q is not allowed, as it's not permitted and inheribate", capability))
		}
	}
	return
}
// CheckRlimits checks v.spec.Process.Rlimits
func (v *Validator) CheckRlimits() (errs error) {
	if v.platform != "linux" && v.platform != "solaris" {
		errs = multierror.Append(errs, fmt.Errorf("For %q platform, the configuration structure does not support process.rlimits", v.platform))
		return
	}
	process := v.spec.Process
	for index, rlimit := range process.Rlimits {
		// Compare each rlimit only against later entries so every
		// duplicate pair is reported exactly once.
		for i := index + 1; i < len(process.Rlimits); i++ {
			if process.Rlimits[index].Type == process.Rlimits[i].Type {
				errs = multierror.Append(errs,
					specerror.NewError(
						specerror.PosixProcRlimitsErrorOnDup,
						fmt.Errorf("rlimit can not contain the same type %q",
							process.Rlimits[index].Type),
						rspec.Version))
			}
		}
		// Per-entry validation (hard >= soft, known type name).
		errs = multierror.Append(errs, v.rlimitValid(rlimit))
	}
	return
}
// supportedMountTypes returns the set of mount filesystem types that can
// be validated for the given OS. A nil map with a nil error means
// "unknown — skip the mount type check".
func supportedMountTypes(OS string, hostSpecific bool) (map[string]bool, error) {
	supportedTypes := make(map[string]bool)
	if OS != "linux" && OS != "windows" {
		logrus.Warnf("%v is not supported to check mount type", OS)
		return nil, nil
	} else if OS == "windows" {
		supportedTypes["ntfs"] = true
		return supportedTypes, nil
	}
	if hostSpecific {
		// On Linux the authoritative list of mountable filesystems is
		// /proc/filesystems ("nodev\tname" or just "name" per line).
		f, err := os.Open("/proc/filesystems")
		if err != nil {
			return nil, err
		}
		defer f.Close()
		s := bufio.NewScanner(f)
		for s.Scan() {
			text := s.Text()
			parts := strings.Split(text, "\t")
			if len(parts) > 1 {
				supportedTypes[parts[1]] = true
			} else {
				supportedTypes[parts[0]] = true
			}
		}
		// BUG FIX: Scanner.Err must be checked after the scan loop; Scan
		// returns false on error, so the original in-loop check never fired.
		if err := s.Err(); err != nil {
			return supportedTypes, err
		}
		// bind mounts are always available but never listed in /proc.
		supportedTypes["bind"] = true
		return supportedTypes, nil
	}
	logrus.Warn("Checking linux mount types without --host-specific is not supported yet")
	return nil, nil
}
// CheckMounts checks v.spec.Mounts
func (v *Validator) CheckMounts() (errs error) {
	logrus.Debugf("check mounts")
	supportedTypes, err := supportedMountTypes(v.platform, v.HostSpecific)
	if err != nil {
		errs = multierror.Append(errs, err)
		return
	}
	for i, mountA := range v.spec.Mounts {
		// A nil map means "type support unknown"; skip the type check.
		if supportedTypes != nil && !supportedTypes[mountA.Type] {
			errs = multierror.Append(errs, fmt.Errorf("unsupported mount type %q", mountA.Type))
		}
		if !osFilepath.IsAbs(v.platform, mountA.Destination) {
			errs = multierror.Append(errs,
				specerror.NewError(
					specerror.MountsDestAbs,
					fmt.Errorf("mounts[%d].destination %q is not absolute",
						i,
						mountA.Destination),
					rspec.Version))
		}
		// Pairwise nesting check against every other mount.
		for j, mountB := range v.spec.Mounts {
			if i == j {
				continue
			}
			// whether B.Desination is nested within A.Destination
			nested, err := osFilepath.IsAncestor(v.platform, mountA.Destination, mountB.Destination, ".")
			if err != nil {
				errs = multierror.Append(errs, err)
				continue
			}
			if nested {
				// Windows forbids nested mounts outright; i < j keeps the
				// pair from being reported twice.
				if v.platform == "windows" && i < j {
					errs = multierror.Append(errs,
						specerror.NewError(
							specerror.MountsDestOnWindowsNotNested,
							fmt.Errorf("on Windows, %v nested within %v is forbidden",
								mountB.Destination, mountA.Destination),
							rspec.Version))
				}
				// Elsewhere, a later parent mount shadowing an earlier
				// child is legal but probably unintended — warn only.
				if i > j {
					logrus.Warnf("%v will be covered by %v", mountB.Destination, mountA.Destination)
				}
			}
		}
	}
	return
}
// CheckPlatform checks v.platform
func (v *Validator) CheckPlatform() (errs error) {
	logrus.Debugf("check platform")
	switch v.platform {
	case "linux", "solaris", "windows":
		// recognized platforms; continue
	default:
		errs = multierror.Append(errs, fmt.Errorf("platform %q is not supported", v.platform))
		return
	}
	// Host-specific checks are meaningless against a foreign platform;
	// report once and disable them for the rest of the run.
	if v.HostSpecific && v.platform != runtime.GOOS {
		errs = multierror.Append(errs, fmt.Errorf("platform %q differs from the host %q, skipping host-specific checks", v.platform, runtime.GOOS))
		v.HostSpecific = false
	}
	if v.platform == "windows" && v.spec.Windows == nil {
		errs = multierror.Append(errs,
			specerror.NewError(
				specerror.PlatformSpecConfOnWindowsSet,
				fmt.Errorf("'windows' MUST be set when platform is `windows`"),
				rspec.Version))
	}
	return
}
// CheckLinuxResources checks v.spec.Linux.Resources
func (v *Validator) CheckLinuxResources() (errs error) {
	logrus.Debugf("check linux resources")
	r := v.spec.Linux.Resources
	if r.Memory != nil {
		// swap is the memory+swap total, so it must be >= the memory limit.
		if r.Memory.Limit != nil && r.Memory.Swap != nil && uint64(*r.Memory.Limit) > uint64(*r.Memory.Swap) {
			errs = multierror.Append(errs, fmt.Errorf("minimum memoryswap should be larger than memory limit"))
		}
		// the soft reservation must not exceed the hard limit.
		if r.Memory.Limit != nil && r.Memory.Reservation != nil && uint64(*r.Memory.Reservation) > uint64(*r.Memory.Limit) {
			errs = multierror.Append(errs, fmt.Errorf("minimum memory limit should be larger than memory reservation"))
		}
	}
	// Network priorities can only be verified against real interfaces,
	// hence the HostSpecific gate.
	if r.Network != nil && v.HostSpecific {
		var exist bool
		interfaces, err := net.Interfaces()
		if err != nil {
			errs = multierror.Append(errs, err)
			return
		}
		for _, prio := range r.Network.Priorities {
			exist = false
			for _, ni := range interfaces {
				if prio.Name == ni.Name {
					exist = true
					break
				}
			}
			if !exist {
				errs = multierror.Append(errs, fmt.Errorf("interface %s does not exist currently", prio.Name))
			}
		}
	}
	// Device cgroup entries: type is all/block/char/unspecified; access
	// is any combination of r, w, m.
	for index := 0; index < len(r.Devices); index++ {
		switch r.Devices[index].Type {
		case "a", "b", "c", "":
		default:
			errs = multierror.Append(errs, fmt.Errorf("type of devices %s is invalid", r.Devices[index].Type))
		}
		access := []byte(r.Devices[index].Access)
		for i := 0; i < len(access); i++ {
			switch access[i] {
			case 'r', 'w', 'm':
			default:
				errs = multierror.Append(errs, fmt.Errorf("access %s is invalid", r.Devices[index].Access))
				// NOTE(review): this return aborts all remaining device and
				// blockIO checks on the first bad access string — confirm
				// that early exit is intended.
				return
			}
		}
	}
	if r.BlockIO != nil && r.BlockIO.WeightDevice != nil {
		for i, weightDevice := range r.BlockIO.WeightDevice {
			// Each weightDevice entry must carry at least one weight field.
			if weightDevice.Weight == nil && weightDevice.LeafWeight == nil {
				errs = multierror.Append(errs,
					specerror.NewError(
						specerror.BlkIOWeightOrLeafWeightExist,
						fmt.Errorf("linux.resources.blockIO.weightDevice[%d] specifies neither weight nor leafWeight", i),
						rspec.Version))
			}
		}
	}
	return
}
// CheckAnnotations checks v.spec.Annotations
func (v *Validator) CheckAnnotations() (errs error) {
	logrus.Debugf("check annotations")
	// Keys SHOULD look like a reverse domain name, e.g. "com.example.key".
	// NOTE(review): compiled on every call; could be hoisted to a package var.
	reversedDomain := regexp.MustCompile(`^[A-Za-z]{2,6}(\.[A-Za-z0-9-]{1,63})+$`)
	for key := range v.spec.Annotations {
		// The org.opencontainers namespace is reserved for the spec itself.
		if strings.HasPrefix(key, "org.opencontainers") {
			errs = multierror.Append(errs,
				specerror.NewError(
					specerror.AnnotationsKeyReservedNS,
					fmt.Errorf("key %q is reserved", key),
					rspec.Version))
		}
		if !reversedDomain.MatchString(key) {
			errs = multierror.Append(errs,
				specerror.NewError(
					specerror.AnnotationsKeyReversedDomain,
					fmt.Errorf("key %q SHOULD be named using a reverse domain notation", key),
					rspec.Version))
		}
	}
	return
}
// CapValid checks whether a capability is valid
//
// Deprecated: use github.com/opencontainers/runtime-tools/validate/capabilities.CapValid directly.
func CapValid(c string, hostSpecific bool) error {
	// Thin forwarding wrapper kept for backward compatibility.
	return capsCheck.CapValid(c, hostSpecific)
}
// envValid reports whether env is a well-formed "key=value" entry whose
// key consists only of letters, digits, and underscores. A key starting
// with a digit is accepted but triggers a warning.
func envValid(env string) bool {
	parts := strings.SplitN(env, "=", 2)
	if len(parts) < 2 {
		return false
	}
	name := strings.TrimSpace(parts[0])
	for i, ch := range name {
		if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' {
			return false
		}
		if i == 0 && unicode.IsDigit(ch) {
			logrus.Warnf("Env %v: variable name beginning with digit is not recommended.", env)
		}
	}
	return true
}
// rlimitValid validates a single POSIX rlimit entry: the hard limit may
// not be below the soft limit, and the type must be one the platform
// recognizes (only checked for linux and solaris).
func (v *Validator) rlimitValid(rlimit rspec.POSIXRlimit) (errs error) {
	if rlimit.Hard < rlimit.Soft {
		errs = multierror.Append(errs, fmt.Errorf("hard limit of rlimit %s should not be less than soft limit", rlimit.Type))
	}
	var known []string
	switch v.platform {
	case "linux":
		known = linuxRlimits
	case "solaris":
		known = posixRlimits
	default:
		logrus.Warnf("process.rlimits validation not yet implemented for platform %q", v.platform)
		return
	}
	for _, val := range known {
		if val == rlimit.Type {
			return
		}
	}
	errs = multierror.Append(errs, specerror.NewError(specerror.PosixProcRlimitsTypeValueError, fmt.Errorf("rlimit type %q may not be valid", rlimit.Type), v.spec.Version))
	return
}
func isStruct(t reflect.Type) bool {
return t.Kind() == reflect.Struct
}
func isStructPtr(t reflect.Type) bool {
return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
}
// checkMandatoryUnit validates a single struct field: a field whose json
// tag lacks "omitempty" is treated as mandatory and must be non-empty.
// Slice and map elements are recursively checked via checkMandatory.
// parent is the enclosing struct's type name, used in error messages.
func checkMandatoryUnit(field reflect.Value, tagField reflect.StructField, parent string) (errs error) {
	// "omitempty" in the json tag marks the field as optional.
	mandatory := !strings.Contains(tagField.Tag.Get("json"), "omitempty")
	switch field.Kind() {
	case reflect.Ptr:
		if mandatory && field.IsNil() {
			errs = multierror.Append(errs, fmt.Errorf("'%s.%s' should not be empty", parent, tagField.Name))
		}
	case reflect.String:
		if mandatory && (field.Len() == 0) {
			errs = multierror.Append(errs, fmt.Errorf("'%s.%s' should not be empty", parent, tagField.Name))
		}
	case reflect.Slice:
		if mandatory && (field.IsNil() || field.Len() == 0) {
			errs = multierror.Append(errs, fmt.Errorf("'%s.%s' should not be empty", parent, tagField.Name))
			return
		}
		// Recurse into each element (only exported/interfaceable values).
		for index := 0; index < field.Len(); index++ {
			mValue := field.Index(index)
			if mValue.CanInterface() {
				errs = multierror.Append(errs, checkMandatory(mValue.Interface()))
			}
		}
	case reflect.Map:
		if mandatory && (field.IsNil() || field.Len() == 0) {
			errs = multierror.Append(errs, fmt.Errorf("'%s.%s' should not be empty", parent, tagField.Name))
			return
		}
		// Recurse into each map value.
		keys := field.MapKeys()
		for index := 0; index < len(keys); index++ {
			mValue := field.MapIndex(keys[index])
			if mValue.CanInterface() {
				errs = multierror.Append(errs, checkMandatory(mValue.Interface()))
			}
		}
	default:
		// Other kinds (numbers, bools, ...) have no useful emptiness notion.
	}
	return
}
// checkMandatory walks a struct (or pointer to struct) and reports every
// mandatory field (json tag without "omitempty") that is empty,
// recursing into nested structs, slices, and maps.
func checkMandatory(obj interface{}) (errs error) {
	objT := reflect.TypeOf(obj)
	objV := reflect.ValueOf(obj)
	// Unwrap a struct pointer; anything that is not a struct is ignored.
	if isStructPtr(objT) {
		objT = objT.Elem()
		objV = objV.Elem()
	} else if !isStruct(objT) {
		return
	}
	for i := 0; i < objT.NumField(); i++ {
		t := objT.Field(i).Type
		if isStructPtr(t) && objV.Field(i).IsNil() {
			// A nil struct pointer is only an error if the field is mandatory.
			if !strings.Contains(objT.Field(i).Tag.Get("json"), "omitempty") {
				errs = multierror.Append(errs, fmt.Errorf("'%s.%s' should not be empty", objT.Name(), objT.Field(i).Name))
			}
		} else if (isStruct(t) || isStructPtr(t)) && objV.Field(i).CanInterface() {
			// Non-nil nested struct: recurse.
			errs = multierror.Append(errs, checkMandatory(objV.Field(i).Interface()))
		} else {
			// Leaf field: apply the per-field emptiness rules.
			errs = multierror.Append(errs, checkMandatoryUnit(objV.Field(i), objT.Field(i), objT.Name()))
		}
	}
	return
}
// CheckMandatoryFields checks mandatory field of container's config file
func (v *Validator) CheckMandatoryFields() error {
	logrus.Debugf("check mandatory fields")
	if v.spec == nil {
		return fmt.Errorf("Spec can't be nil")
	}
	// Delegates to the reflection-based walker over the whole spec.
	return checkMandatory(v.spec)
}
|
package main
import (
"fmt"
"strings"
)
// main0101 demonstrates strings.Contains: substring lookup,
// commonly used for fuzzy matching.
func main0101() {
	str1 := "hello world"
	str2 := "g"
	// Contains(haystack, needle) reports whether needle occurs in haystack.
	found := strings.Contains(str1, str2)
	if found {
		fmt.Println("找到了")
	} else {
		fmt.Println("没有找到")
	}
}
// main0102 demonstrates strings.Join: concatenating a string slice
// with a separator.
func main0102() {
	parts := []string{"123", "456", "789"}
	joined := strings.Join(parts, "")
	fmt.Println(joined)
}
// main0103 demonstrates strings.Index: the byte offset of the first
// occurrence of a substring, or -1 when absent.
func main0103() {
	haystack := "hello world"
	needle := "e"
	pos := strings.Index(haystack, needle)
	fmt.Println(pos)
}
// main0104 demonstrates strings.Repeat: repeating a string n times.
func main0104() {
	str := "性感网友,在线取名。"
	repeated := strings.Repeat(str, 100)
	fmt.Println(repeated)
}
// main0105 demonstrates strings.Replace, e.g. for masking banned words.
func main0105() {
	str := "性感网友在线取名性感性感性感性感性感"
	// A replacement count < 0 means "replace every occurrence".
	masked := strings.Replace(str, "性感", "**", -1)
	fmt.Println(masked)
}
// main0106 demonstrates strings.Split: cutting a string on a separator
// into a slice of pieces.
func main0106() {
	addr := "123456789@qq.com"
	pieces := strings.Split(addr, "@")
	fmt.Println(pieces[0])
}
// main0107 demonstrates strings.Trim: stripping a set of leading and
// trailing runes.
func main0107() {
	str := "====a===u=ok===="
	trimmed := strings.Trim(str, "=")
	fmt.Println(trimmed)
}
// main demonstrates strings.Fields: splitting on runs of whitespace,
// often used to count words.
func main() {
	str := " are you ok "
	words := strings.Fields(str)
	fmt.Println(words)
}
//总结
//查找
//1.bool类型 := strings.Contains(被查找字符串,查找字符串)
//2.int类型 := strings.Index(被查找字符串,查找字符串)
//分割
//[]string类型 := strings.Split(切割字符串,标志)
//组合
//string类型 := strings.Join(字符串切片,标志)
//重复
//string类型 := strings.Repeat(字符串,次数)
//替换
//string类型 := strings.Replace(字符串,被替换字符串,替换字符串,次数)
//去掉内容
//string类型 := strings.Trim(字符串,去掉字符串)
//[]string类型 := strings.Fields(字符串) |
package database
import (
"context"
"fmt"
"os"
"time"
"github.com/joho/godotenv"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
)
// GetDatabase connects to the MongoDB cluster configured via the
// MONGO_USER / MONGO_PASSWORD environment variables (optionally loaded
// from a .env file) and returns the "teste" database plus the client.
// It panics if connecting or pinging fails.
func GetDatabase() (*mongo.Database, *mongo.Client) {
	// Best-effort: a missing .env file is fine when the variables are
	// already exported in the environment.
	_ = godotenv.Load()
	uri := fmt.Sprintf("mongodb+srv://%s:%s@teste.sy7ap.mongodb.net/Teste?retryWrites=true&w=majority",
		os.Getenv("MONGO_USER"), os.Getenv("MONGO_PASSWORD"))
	// SECURITY FIX: the original printed the full URI, leaking the
	// database password to stdout/logs.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	client, err := mongo.Connect(ctx, options.Client().ApplyURI(uri))
	if err != nil {
		panic(err)
	}
	if err := client.Ping(ctx, readpref.Primary()); err != nil {
		panic(err)
	}
	return client.Database("teste"), client
}
|
package service
import (
"mobingi/ocean/pkg/tools/machine"
)
// NewRunControlPlaneJobs builds the jobs that start a full control plane:
// one API-server job per IP, with the controller-manager and scheduler
// jobs attached to each so they run alongside it. Returns the API-server
// jobs, or the first construction error encountered.
func NewRunControlPlaneJobs(ips []string, etcdServers, advertiseAddress string) ([]*machine.Job, error) {
	apiserverJobs, err := NewRunAPIServerJobs(ips, etcdServers, advertiseAddress)
	if err != nil {
		return nil, err
	}
	controllerManagerJob, err := NewRunControllerManagerJob()
	if err != nil {
		return nil, err
	}
	schedulerJob, err := NewRunSchedulerJob()
	if err != nil {
		return nil, err
	}
	// Attach the shared jobs to every API-server job.
	// NOTE(review): the same job instances are shared across all entries —
	// confirm AddAnother tolerates that.
	for _, v := range apiserverJobs {
		v.AddAnother(controllerManagerJob)
		v.AddAnother(schedulerJob)
	}
	return apiserverJobs, nil
}
|
package main
import (
"testing"
"exer10"
)
// Benchmarks for the plain Fibonacci implementation at increasing input
// sizes; run with `go test -bench .` and compare ns/op growth.
func BenchmarkFibonacci1(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fibonacci(1)
	}
}
func BenchmarkFibonacci5(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fibonacci(5)
	}
}
func BenchmarkFibonacci10(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fibonacci(10)
	}
}
func BenchmarkFibonacci20(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fibonacci(20)
	}
}
func BenchmarkFibonacci25(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fibonacci(25)
	}
}
func BenchmarkFibonacci30(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fibonacci(30)
	}
}
// Benchmarks for Fib(30, cutoff) across a sweep of cutoff values, used
// to find the best cutoff, plus the memoized variant as a baseline.
func BenchmarkFib30cutoff01(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fib(30,1)
	}
}
func BenchmarkFib30cutoff05(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fib(30,5)
	}
}
func BenchmarkFib30cutoff10(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fib(30,10)
	}
}
func BenchmarkFib30cutoff12(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fib(30,12)
	}
}
func BenchmarkFib30cutoff14(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fib(30,14)
	}
}
func BenchmarkFib30cutoff15(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fib(30,15)
	}
}
func BenchmarkFib30cutoff16(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fib(30,16)
	}
}
func BenchmarkFib30cutoff17(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fib(30,17)
	}
}
func BenchmarkFib30cutoff19(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fib(30,19)
	}
}
func BenchmarkFib30cutoff20(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fib(30,20)
	}
}
func BenchmarkFib30cutoff21(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fib(30,21)
	}
}
func BenchmarkFib30cutoff23(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fib(30,23)
	}
}
func BenchmarkFib30cutoff25(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fib(30,25)
	}
}
func BenchmarkFib30cutoff30(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.Fib(30,30)
	}
}
// Memoized variant for comparison against the cutoff-based versions.
func BenchmarkFibMemoized(b *testing.B){
	for n := 0; n < b.N; n++ {
		exer10.FibMemoized(30)
	}
}
|
package main
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/socketplane/libovsdb"
"flag"
"github.com/Sirupsen/logrus"
"net/http"
"github.com/joatmon08/ovs_exporter/openvswitch"
)
const (
	namespace = "openvswitch" // For Prometheus metrics.
)

// Metric descriptors and vectors exported by this collector. The plain
// Desc values are emitted as const metrics on each scrape; the vectors
// are Reset and refilled per scrape.
var (
	// up is 1 when the last OVSDB query succeeded, 0 otherwise.
	up = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "up"),
		"Was the last query of Open vSwitch successful.",
		nil, nil,
	)
	dbs = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "dbs_total"),
		"How many Open vSwitch dbs on this node.",
		nil, nil,
	)
	bridges = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "bridges_total"),
		"How many Open vSwitch bridges on this node.",
		nil, nil,
	)
	interfaces = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "interfaces_total"),
		"How many Open vSwitch interfaces on this node.",
		nil, nil,
	)
	ports = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "ports_total"),
		nil, nil,
	)
	// Per-bridge port count, labeled by bridge name.
	bridges_num_ports = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: namespace,
		Name:      "bridges_ports",
		Help:      "Number of ports attached to bridges",
	},
		[]string{"name"},
	)
	// Per-interface statistics, labeled by interface name and stat name.
	interfaces_stats = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "interfaces_statistics",
		Help:      "Open vSwitch interface statistics",
	},
		[]string{"name", "stat"},
	)
)
// Exporter collects Open vSwitch metrics over OVSDB and implements
// prometheus.Collector. The metric fields mirror the package-level
// descriptors; client is (re)connected on every Collect.
type Exporter struct {
	URI               string
	client            *libovsdb.OvsdbClient
	up                *prometheus.Desc
	dbs               *prometheus.Desc
	bridges           *prometheus.Desc
	ports             *prometheus.Desc
	interfaces        *prometheus.Desc
	bridges_num_ports *prometheus.GaugeVec
	interfaces_stats  *prometheus.CounterVec
}
// NewExporter returns an Exporter for the OVSDB endpoint at uri
// (a unix socket path or TCP address). The connection is established
// lazily in Collect, not here; the error return is always nil today
// but kept for interface stability.
func NewExporter(uri string) (*Exporter, error) {
	return &Exporter{
		URI:               uri,
		up:                up,
		dbs:               dbs,
		client:            &libovsdb.OvsdbClient{},
		bridges:           bridges,
		ports:             ports,
		interfaces:        interfaces,
		bridges_num_ports: bridges_num_ports,
		interfaces_stats:  interfaces_stats,
	}, nil
}
// Describe implements prometheus.Collector by emitting every metric
// descriptor this exporter can produce.
func (e *Exporter) Describe(ch chan <- *prometheus.Desc) {
	ch <- up
	ch <- dbs
	ch <- bridges
	ch <- ports
	ch <- interfaces
	e.bridges_num_ports.Describe(ch)
	e.interfaces_stats.Describe(ch)
}
// collectPortsForBridges refreshes the per-bridge port-count gauge from
// raw Bridge table rows. On a parse error the gauge is simply left
// reset. NOTE(review): the error is silently dropped — consider logging.
func (e *Exporter) collectPortsForBridges(rows []map[string]interface{}) {
	e.bridges_num_ports.Reset()
	bridges, err := openvswitch.ParsePortsFromBridges(rows)
	if err != nil {
		return
	}
	for _, bridge := range bridges {
		e.bridges_num_ports.WithLabelValues(bridge.Name).Set(float64(len(bridge.Ports)))
	}
}
// collectInterfacesStats refreshes the per-interface statistics counters
// from raw Interface table rows. On a parse error the counters are
// simply left reset.
func (e *Exporter) collectInterfacesStats(rows []map[string]interface{}) {
	e.interfaces_stats.Reset()
	interfaces, err := openvswitch.ParseStatisticsFromInterfaces(rows)
	if err != nil {
		return
	}
	for _, iface := range interfaces {
		for statName, num := range iface.Statistics {
			// BUG FIX: the original called Add twice for every statistic,
			// doubling every exported counter value.
			e.interfaces_stats.WithLabelValues(iface.Name, statName).Add(num)
		}
	}
}
// connect (re)establishes the OVSDB connection for e.URI, first running
// the network/health check to pick the transport. The error is logged
// here and also returned so Collect can report the node as down.
func (e *Exporter) connect() error {
	network, err := openvswitch.GenerateNetworkAndHealthCheck(e.URI)
	if err != nil {
		return err
	}
	e.client, err = libovsdb.ConnectUsingProtocol(network, e.URI)
	if err != nil {
		logrus.WithFields(logrus.Fields{
			"event": "cannot connect to ovsdb",
		}).Error(err)
	}
	return err
}
// Collect implements prometheus.Collector: it dials OVSDB, reports
// up=0 and bails if that fails, otherwise emits up=1 plus database,
// bridge, port, and interface counts and the per-bridge/per-interface
// vectors, disconnecting when done.
func (e *Exporter) Collect(ch chan <- prometheus.Metric) {
	if err := e.connect(); err != nil {
		ch <- prometheus.MustNewConstMetric(
			up, prometheus.GaugeValue, 0,
		)
		return
	}
	defer e.client.Disconnect()
	ch <- prometheus.MustNewConstMetric(
		up, prometheus.GaugeValue, 1,
	)
	// A failed database listing is logged but still exported (as 0).
	databases, err := openvswitch.GetDatabases(e.client)
	if err != nil {
		logrus.Error(err)
	}
	ch <- prometheus.MustNewConstMetric(
		dbs, prometheus.GaugeValue, float64(len(databases)),
	)
	total_bridges := openvswitch.GetRowsFromTable(e.client, "Bridge")
	ch <- prometheus.MustNewConstMetric(
		bridges, prometheus.GaugeValue, float64(len(total_bridges)),
	)
	total_ports := openvswitch.GetRowsFromTable(e.client, "Port")
	ch <- prometheus.MustNewConstMetric(
		ports, prometheus.GaugeValue, float64(len(total_ports)),
	)
	total_interfaces := openvswitch.GetRowsFromTable(e.client, "Interface")
	ch <- prometheus.MustNewConstMetric(
		interfaces, prometheus.GaugeValue, float64(len(total_interfaces)),
	)
	// Derived per-bridge and per-interface metrics from the raw rows.
	e.collectPortsForBridges(total_bridges)
	e.bridges_num_ports.Collect(ch)
	e.collectInterfacesStats(total_interfaces)
	e.interfaces_stats.Collect(ch)
}
// init configures global logging: full timestamps at Info level.
func init() {
	formatter := &logrus.TextFormatter{
		FullTimestamp: true,
	}
	logrus.SetFormatter(formatter)
	logrus.SetLevel(logrus.InfoLevel)
}
// main parses flags, registers the exporter, and serves the metrics
// endpoint plus a small landing page until the server exits.
func main() {
	var (
		uri           = flag.String("uri", "/var/run/openvswitch/db.sock", "URI to connect to Open vSwitch")
		listenAddress = flag.String("listen-port", ":9177", "Address to listen on for web interface and telemetry.")
		metricsPath   = flag.String("metrics-path", "/metrics", "Path under which to expose metrics.")
	)
	flag.Parse()
	exporter, err := NewExporter(*uri)
	if err != nil {
		logrus.WithFields(logrus.Fields{
			"uri":   *uri,
			"event": "starting exporter",
		}).Fatal(err)
	}
	prometheus.MustRegister(exporter)
	// NOTE(review): prometheus.Handler() is deprecated in newer
	// client_golang releases in favor of promhttp.Handler().
	http.Handle(*metricsPath, prometheus.Handler())
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
             <head><title>Open vSwitch Exporter</title></head>
             <body>
             <h1>Open vSwitch Exporter</h1>
             <p><a href='` + *metricsPath + `'>Metrics</a></p>
             </body>
             </html>`))
	})
	logrus.WithFields(logrus.Fields{
		"port":  *listenAddress,
		"path":  *metricsPath,
		"event": "listening",
	}).Info("prometheus started")
	// ListenAndServe blocks; any return is fatal and logged.
	logrus.WithFields(logrus.Fields{
		"port":  *listenAddress,
		"path":  *metricsPath,
		"event": "web server error",
	}).Fatal(http.ListenAndServe(*listenAddress, nil))
}
// Copyright 2021 BoCloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"flag"
"os"
"github.com/fsnotify/fsnotify"
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
"github.com/fabedge/fabedge/pkg/common/about"
)
// Execute runs the agent: it initializes logging, validates flags,
// prepares the CNI config directory, starts the manager, and then
// blocks watching the tunnels/services config files, notifying the
// manager on any change.
func Execute() error {
	klog.InitFlags(nil)
	// init klog level
	_ = flag.Set("v", "3")
	flag.Parse()
	// -version short-circuits: print build info and exit successfully.
	if version {
		about.DisplayVersion()
		return nil
	}
	var log = klogr.New().WithName("agent")
	defer klog.Flush()
	if err := validateFlags(); err != nil {
		log.Error(err, "invalid arguments")
		return err
	}
	// NOTE(review): 0777 is world-writable — confirm the CNI conf dir
	// really needs these permissions.
	if err := os.MkdirAll(cniConfDir, 0777); err != nil {
		log.Error(err, "failed to create cni conf dir")
		return err
	}
	manager, err := newManager()
	if err != nil {
		log.Error(err, "failed to create manager")
		return err
	}
	// The manager runs in the background; this goroutine keeps watching.
	go manager.start()
	err = watchFile(tunnelsConfPath, servicesConfPath, func(event fsnotify.Event) {
		log.V(5).Info("tunnels or services config may change", "file", event.Name, "event", event.Op.String())
		manager.notify()
	})
	if err != nil {
		log.Error(err, "failed to watch tunnelsconf", "file", tunnelsConfPath)
	}
	return err
}
|
package commands
import (
"flag"
"fmt"
"net/http"
"strconv"
"time"
"github.com/opentable/sous/core"
"github.com/opentable/sous/tools/cli"
"github.com/opentable/sous/tools/docker"
"github.com/opentable/sous/tools/ports"
)
// contractsFlags is the flag set for the `sous contracts` subcommand.
var contractsFlags = flag.NewFlagSet("contracts", flag.ExitOnError)
// timeoutFlag bounds how long each single contract is polled before it fails.
var timeoutFlag = contractsFlags.Duration("timeout", 10*time.Second, "per-contract timeout")
// dockerImage optionally names a pre-built image to test (not yet implemented).
var dockerImage = contractsFlags.String("image", "", "run contracts against a pre-built Docker image")
// Contract describes one platform requirement checked against a running
// container: a name, a human-readable description, optional remediation
// tips, and a Premise predicate that must eventually hold.
type Contract struct {
	Name string
	Desc func(*docker.Run) string
	Tips func(*docker.Run) []string
	Premise func(*docker.Run) bool
}
// theContracts lists the platform contracts checked in order; each
// entry's Premise is polled against the running container until it
// passes or the per-contract timeout elapses.
var theContracts = []Contract{
	{
		Name: "Listening on http://TASK_HOST:PORT0",
		Desc: func(run *docker.Run) string {
			return "Your app should respond to GET http://TASK_HOST:PORT0/ with any HTTP response code"
		},
		Tips: func(run *docker.Run) []string {
			host, port0 := run.Env["TASK_HOST"], run.Env["PORT0"]
			return []string{
				"TASK_HOST and PORT0 are environment variables set by the docker run command.",
				fmt.Sprintf("For this particular run they are set as: TASK_HOST=%s and PORT0=%s", host, port0),
				fmt.Sprintf("So your app should be listening on http://%s:%s/", host, port0),
			}
		},
		Premise: func(run *docker.Run) bool {
			taskHost := run.Env["TASK_HOST"]
			port0 := run.Env["PORT0"]
			// Fix: port0 comes from the env map as a string; the original
			// used the %d verb, producing "http://host:%!d(string=…)/" and
			// making this check fail unconditionally.
			result, err := http.Get(fmt.Sprintf("http://%s:%s/", taskHost, port0))
			return err == nil && result.StatusCode > 0
		},
	},
	{
		Name: "Health Endpoint",
		Desc: func(run *docker.Run) string {
			return "responds to GET /health with HTTP Status Code 200"
		},
		Premise: func(run *docker.Run) bool {
			taskHost := run.Env["TASK_HOST"]
			port0 := run.Env["PORT0"]
			// Fix: %s (not %d) — port0 is a string, see above.
			result, err := http.Get(fmt.Sprintf("http://%s:%s/health", taskHost, port0))
			return err == nil && result.StatusCode == 200
		},
	},
}
// ContractsHelp returns the help text shown for the `sous contracts`
// subcommand.
func ContractsHelp() string {
	const help = `sous contracts tests your project conforms to necessary contracts to run successfully on the OpenTable Mesos platform.`
	return help
}
// Contracts builds and runs the named target (default "app") in a
// container with TASK_HOST/PORT0 injected, then polls each registered
// contract premise until it passes or times out. Exits the process
// non-zero via cli.Fatalf if any contract fails.
func Contracts(sous *core.Sous, args []string) {
	contractsFlags.Parse(args)
	args = contractsFlags.Args()
	timeout := *timeoutFlag
	// Default to the "app" target unless one is named on the command line.
	targetName := "app"
	if len(args) != 0 {
		targetName = args[0]
	}
	core.RequireGit()
	core.RequireDocker()
	if *dockerImage != "" {
		cli.Fatalf("-image flag not yet implemented")
	}
	target, context := sous.AssembleTargetContext(targetName)
	sous.RunTarget(target, context)
	cli.Logf("=> Running Contracts")
	cli.Logf(`=> **TIP:** Open another terminal in this directory and type **sous logs -f**`)
	// Mirror the Mesos runtime environment for the container under test.
	taskHost := core.DivineTaskHost()
	port0, err := ports.GetFreePort()
	if err != nil {
		cli.Fatalf("Unable to get free port: %s", err)
	}
	dr := docker.NewRun(context.DockerTag())
	dr.AddEnv("PORT0", strconv.Itoa(port0))
	dr.AddEnv("TASK_HOST", taskHost)
	dr.StdoutFile = context.FilePath("stdout")
	dr.StderrFile = context.FilePath("stderr")
	container, err := dr.Background().Start()
	if err != nil {
		cli.Fatalf("Unable to start container: %s", err)
	}
	// Ensure the container is torn down however the command exits.
	cli.AddCleanupTask(func() error {
		return container.KillIfRunning()
	})
	failed := 0
	for _, c := range theContracts {
		cli.Logf(`===> CHECKING CONTRACT: "%s"`, c.Name)
		cli.Logf(`===> Description: %s`, c.Desc(dr))
		if c.Tips != nil {
			cli.Logf("===> **TIPS for this contract:**")
			cli.LogBulletList("   -", c.Tips(dr))
		}
		// within returns 0 on success, 1 on timeout.
		failed += within(timeout, func() bool {
			return c.Premise(dr)
		})
	}
	if failed != 0 {
		cli.Fatalf("%d contracts failed.", failed)
	}
	cli.Success()
}
// within polls f once per second until it reports true or the duration
// d has elapsed, showing CLI progress meanwhile. It returns 0 on
// success and 1 on timeout.
func within(d time.Duration, f func() bool) int {
	deadline := time.Now().Add(d)
	progress := cli.BeginProgress("Polling")
	for {
		if f() {
			progress.Done("Success!")
			return 0
		}
		if time.Now().After(deadline) {
			progress.Done("Timeout")
			return 1
		}
		progress.Increment()
		time.Sleep(time.Second)
	}
}
|
package criteria
import (
"github.com/open-policy-agent/opa/ast"
"github.com/pomerium/pomerium/pkg/policy/parser"
)
// httpPathCriterion implements the "http_path" policy criterion. It
// keeps a reference to the Generator used to build its Rego rule.
type httpPathCriterion struct {
	g *Generator
}
// DataType reports that this criterion's input data is a string matcher.
func (httpPathCriterion) DataType() CriterionDataType {
	return CriterionDataTypeStringMatcher
}
// Name returns the identifier used for this criterion in policies.
func (httpPathCriterion) Name() string {
	return "http_path"
}
// GenerateRule builds a Rego rule body matching input.http.path against
// the supplied matcher value, then wraps it in a criterion rule with the
// HTTP-path OK/unauthorized reasons. No supporting rules are produced.
func (c httpPathCriterion) GenerateRule(_ string, data parser.Value) (*ast.Rule, []*ast.Rule, error) {
	var body ast.Body
	// The incoming request path lives at input.http.path.
	ref := ast.RefTerm(ast.VarTerm("input"), ast.VarTerm("http"), ast.VarTerm("path"))
	err := matchString(&body, ref, data)
	if err != nil {
		return nil, nil, err
	}
	rule := NewCriterionRule(c.g, c.Name(),
		ReasonHTTPPathOK, ReasonHTTPPathUnauthorized,
		body)
	return rule, nil, nil
}
// HTTPPath returns a Criterion which matches an HTTP request path
// against a string matcher.
func HTTPPath(generator *Generator) Criterion {
	return httpPathCriterion{g: generator}
}
// init registers the http_path criterion with the global registry.
func init() {
	Register(HTTPPath)
}
|
/**
This exercise will reinforce our understanding of method sets:
create a type person struct
- attach a method speak to type person using a pointer receiver
*person
create a type human interface
- to implicitly implement the interface, a human must have the speak method
create func “saySomething”
- have it take in a human as a parameter
- have it call the speak method
show the following in your code
- you CAN pass a value of type *person into saySomething
- you CANNOT pass a value of type person into saySomething
*/
package main
import (
"fmt"
)
// person is the concrete type used to demonstrate method sets.
type person struct {
	name string
	age int
}
// speak is declared on the POINTER receiver *person, so only *person
// (not the value type person) carries it in its method set.
func (p *person) speak() {
	fmt.Println("Person SPEAK()")
}
// human is implicitly implemented by any type whose method set
// includes speak().
type human interface {
	speak()
}
// saySomething accepts any human and invokes its speak method.
func saySomething(h human) {
	h.speak()
}
// main demonstrates that only *person — not person — satisfies the
// human interface, because speak is declared on a pointer receiver.
func main() {
	amit := person{name: "Amit Cool", age: 25}
	// A *person is accepted: its method set includes speak().
	saySomething(&amit)
	// saySomething(amit) would not compile: the value type person does
	// not have the pointer-receiver speak in its method set.
}
|
package di
import (
"github.com/golobby/container"
"github.com/profiralex/go-bootstrap-redis/pkg/config"
)
// RegisterDependencies wires all application dependencies into the
// global container. Currently only the configuration singleton.
func RegisterDependencies() {
	//Config
	container.Singleton(func() config.Config {
		return config.GetConfig()
	})
}
// UnregisterDependencies removes every binding from the global container.
func UnregisterDependencies() {
	container.Reset()
}
// Make resolves a single dependency into receiver (a pointer).
func Make(receiver interface{}) {
	container.Make(receiver)
}
// MakeAll resolves each given receiver from the container in turn.
func MakeAll(receivers ...interface{}) {
	for i := range receivers {
		Make(receivers[i])
	}
}
|
package eventdata
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"os"
"strconv"
"time"
"github.com/elastic/beats/libbeat/common"
"github.com/elastic/beats/libbeat/common/cfgwarn"
"github.com/elastic/beats/metricbeat/mb"
)
// init registers the MetricSet with the central registry as soon as the program
// starts. The New function will be called later to instantiate an instance of
// the MetricSet for each host defined in the module's configuration. After the
// MetricSet has been created then Fetch will begin to be called periodically.
func init() {
	// Panics at startup if registration fails (e.g. duplicate name).
	mb.Registry.MustAddMetricSet("eventdata", "eventdata", New)
}
// MetricSet holds any configuration or state information. It must implement
// the mb.MetricSet interface. And this is best achieved by embedding
// mb.BaseMetricSet because it implements all of the required mb.MetricSet
// interface methods except for Fetch.
type MetricSet struct {
	mb.BaseMetricSet
	// config holds the credentials, proxy and severity filter used by
	// Fetch; Period doubles as the event-age cutoff when reporting.
	config struct {
		// EventsUrl string `config:"events url"`
		Period time.Duration `config:"period"`
		Username string `config:"username"`
		Password string `config:"password"`
		ProxyUrl string `config:"proxy url"`
		MessageSeverities []int `config:"message severities"`
	}
}
// TraverseData mirrors the JSON response of the Traverse events API: a
// status envelope (success flag, error info, timestamps) plus a result
// carrying the event messages, ack/paging data and per-severity counts.
type TraverseData struct {
	Success bool `json:"success"`
	ErrorCode interface{} `json:"errorCode"`
	ErrorMessage interface{} `json:"errorMessage"`
	Timestamp int64 `json:"timestamp"`
	TimestampStr string `json:"timestampStr"`
	Result struct {
		Messages []struct {
			AccountName string `json:"accountName"`
			AccountSerialNumber int `json:"accountSerialNumber"`
			CanDelete bool `json:"canDelete"`
			Cleared bool `json:"cleared"`
			DgeName string `json:"dgeName"`
			DgeSerialNumber int `json:"dgeSerialNumber"`
			EventCount int `json:"eventCount"`
			EventID int `json:"eventId"`
			EventSource string `json:"eventSource"`
			ExportedDevice bool `json:"exportedDevice"`
			LatestEventTime int64 `json:"latestEventTime"`
			LatestEventTimeStr string `json:"latestEventTimeStr"`
			LocationName string `json:"locationName"`
			OldestEventTime int64 `json:"oldestEventTime"`
			OldestEventTimeStr string `json:"oldestEventTimeStr"`
			Severity int `json:"severity"`
			SeverityDesc string `json:"severityDesc"`
			TestSerialNumber int `json:"testSerialNumber"`
			TransformedMessage string `json:"transformedMessage"`
			LastTestResult bool `json:"lastTestResult"`
			DeviceSerialNumber int `json:"deviceSerialNumber"`
			DeviceName string `json:"deviceName"`
			DeviceAddress string `json:"deviceAddress"`
		} `json:"messages"`
		AckCount int `json:"ackCount"`
		Paging struct {
			Total int `json:"total"`
		} `json:"paging"`
		SeverityCounts []struct {
			Severity int `json:"severity"`
			Count int `json:"count"`
		} `json:"severityCounts"`
		TotalMessageCount int `json:"totalMessageCount"`
	} `json:"result"`
}
// New creates a new instance of the MetricSet. New is responsible for unpacking
// any MetricSet specific configuration options if there are any.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
	cfgwarn.Experimental("The eventdata eventdata metricset is experimental.")
	// Anonymous struct declared field-for-field identical to the
	// MetricSet.config field so the unpacked value can be assigned
	// directly below.
	config := struct {
		// EventsUrl string `config:"events url"`
		Period time.Duration `config:"period"`
		Username string `config:"username"`
		Password string `config:"password"`
		ProxyUrl string `config:"proxy url"`
		MessageSeverities []int `config:"message severities"`
	}{}
	if err := base.Module().UnpackConfig(&config); err != nil {
		return nil, err
	}
	return &MetricSet{
		BaseMetricSet: base,
		config: config,
	}, nil
}
// Fetch methods implements the data gathering and data conversion to the right
// format. It publishes the event which is then forwarded to the output. In case
// of an error set the Error field of mb.Event or simply call report.Error().
// Fetch implements the data gathering and conversion: it POSTs a query
// for recent event messages (filtered by the configured severities) to
// the module host, decodes the response and publishes one event per
// message that is newer than one reporting period.
func (m *MetricSet) Fetch(report mb.ReporterV2) {
	username := m.config.Username
	password := m.config.Password

	// Render the configured severities as a comma-separated list ("1,2,3").
	// (The original aliased the config slice and "copied" it onto itself,
	// which was a no-op; the slice is used directly here.)
	severities := m.config.MessageSeverities
	severity := make([]byte, 0, len(severities)*4)
	for _, n := range severities {
		severity = strconv.AppendInt(severity, int64(n), 10)
		severity = append(severity, ',')
	}
	// Fix: guard against an empty severity list — the unconditional
	// severity[:len(severity)-1] panicked when no severities were set.
	if len(severity) > 0 {
		severity = severity[:len(severity)-1] // drop trailing comma
	}

	client := &http.Client{
		Timeout: 120 * time.Second,
	}

	// NOTE(review): username/password are spliced into the JSON without
	// escaping — confirm they can never contain quotes/backslashes, or
	// build this payload with json.Marshal.
	payload := []byte(`{"username":"` + username + `","password":"` + password + `","startTimeExp":"5-minutes-ago","endTimeExp":"now", "messageSeverities":[` + string(severity) + `]}`)

	os.Setenv("HTTP_PROXY", m.config.ProxyUrl)

	req, err := http.NewRequest("POST", m.Host(), bytes.NewBuffer(payload))
	if err != nil {
		// Fix: the original ignored this error and dereferenced req.
		report.Error(err)
		return
	}
	req.Header.Add("Content-Type", "application/json")

	resp, err := client.Do(req)
	if err != nil {
		// Fix: the original fell through after report.Error and then
		// dereferenced a nil resp in the deferred Body.Close.
		report.Error(err)
		return
	}
	defer resp.Body.Close()

	var data TraverseData
	if resp.StatusCode == http.StatusOK {
		bodyBytes, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			report.Error(err)
			return
		}
		// Fix: the unmarshal error was silently discarded.
		if err := json.Unmarshal(bodyBytes, &data); err != nil {
			report.Error(err)
			return
		}
	}

	// Only publish messages newer than one reporting period
	// (LatestEventTime is in milliseconds since the epoch).
	cutoff := time.Now().Add(-m.config.Period).Unix() * 1000
	for _, s := range data.Result.Messages {
		if s.LatestEventTime < cutoff {
			continue
		}
		event := mb.Event{
			Timestamp: time.Unix(s.LatestEventTime/1000, 0),
			RootFields: common.MapStr{
				"AccountName": s.AccountName,
				"AccountSerialNumber": s.AccountSerialNumber,
				"CanDelete": s.CanDelete,
				"Cleared": s.Cleared,
				"DgeName": s.DgeName,
				"DgeSerialNumber": s.DgeSerialNumber,
				"EventCount": s.EventCount,
				"EventID": s.EventID,
				"EventSource": s.EventSource,
				"ExportedDevice": s.ExportedDevice,
				"LatestEventTimeStr": s.LatestEventTimeStr,
				"LocationName": s.LocationName,
				"OldestEventTime": s.OldestEventTime,
				"OldestEventTimeStr": s.OldestEventTimeStr,
				"Severity": s.Severity,
				"SeverityDesc": s.SeverityDesc,
				"TestSerialNumber": s.TestSerialNumber,
				"LastTestResult": s.LastTestResult,
				"DeviceSerialNumber": s.DeviceSerialNumber,
				"DeviceName": s.DeviceName,
				"DeviceAddress": s.DeviceAddress,
				"type": "traverseeventsbeat",
				"TransformedMessage": s.TransformedMessage,
			},
		}
		report.Event(event)
	}
}
|
package main
import (
"github.com/beego/beego/v2/client/orm/migration"
)
// DO NOT MODIFY
// User_20210713_191340 is an auto-generated beego migration for the
// user table, identified by its creation timestamp.
type User_20210713_191340 struct {
	migration.Migration
}
// DO NOT MODIFY
func init() {
	// Register this migration under its timestamped name so the beego
	// migration runner can discover it and order it chronologically.
	m := &User_20210713_191340{}
	m.Created = "20210713_191340"
	migration.Register("User_20210713_191340", m)
}
// Run the migrations
func (m *User_20210713_191340) Up() {
	// Intentionally empty: no forward schema change defined yet.
	// use m.SQL("CREATE TABLE ...") to make schema update
}
// Reverse the migrations
func (m *User_20210713_191340) Down() {
	// Intentionally empty: no rollback defined yet.
	// use m.SQL("DROP TABLE ...") to reverse schema update
}
|
package handlers
import (
"encoding/xml"
"log"
"net/http"
"github.com/matscus/Hamster/Mock/rkk_tomsk/structs"
)
// CreditClaimAcceptHandler serves a canned XML mock response for a
// credit-claim accept request; on encoding failure it emits a JSON
// error body instead.
func CreditClaimAcceptHandler(w http.ResponseWriter, r *http.Request) {
	var res structs.CreditClaimAcceptResponse
	res.ReturnCode = 0
	res.InstitutionId = 500058
	res.ContractReqId = 441014
	res.CardReqId = 1091122
	res.CardReqStatus = "На ожидании выдачи"
	res.ContractId = 1058716
	res.ContractStatus = "Введен"
	res.ResourceId = 4086337
	res.AccNumber = "40817810810002702546"
	res.WorkDay = "1900-01-01T00:00:00"
	// Fix: the original key was "content-type " (trailing space), so
	// clients saw a bogus header instead of the real Content-Type.
	w.Header().Set("Content-Type", "text/xml")
	w.WriteHeader(http.StatusOK)
	err := xml.NewEncoder(w).Encode(res)
	if err != nil {
		// The 200 status line is already on the wire, so this second
		// WriteHeader cannot change it; the JSON error body is still
		// emitted for diagnostics.
		w.WriteHeader(http.StatusInternalServerError)
		_, errWrite := w.Write([]byte("{\"Message\":\"" + err.Error() + "\"}"))
		if errWrite != nil {
			log.Printf("[ERROR] Not Writing to ResponseWriter error %s due: %s", err.Error(), errWrite.Error())
		}
	}
}
|
package task
import (
"encoding/json"
"tcc_transaction/constant"
"tcc_transaction/global/various"
"tcc_transaction/log"
"tcc_transaction/model"
"tcc_transaction/store/data"
"tcc_transaction/util"
"time"
)
// taskToRetry re-drives unfinished distributed transactions: for each
// recorded request that has at least one completed step and retries
// left, it re-resolves the API by URL and asynchronously confirms
// (status 2) or cancels (status 4) the transaction.
func taskToRetry(needRollbackData []*data.RequestInfo) {
	log.Infof("start to retry, data is : %+v", len(needRollbackData))
	for _, v := range needRollbackData {
		// Nothing succeeded, so there is nothing to confirm or roll back.
		if len(v.SuccessSteps) == 0 {
			continue
		}
		// Retry budget exhausted — leave the record for manual handling.
		if v.Times >= constant.RetryTimes {
			continue
		}
		runtimeAPI, err := various.GetApiWithURL(v.Url)
		if err != nil {
			log.Errorf("get api by url of [request_info] failed, please check it. error information is: %s", err)
			continue
		}
		runtimeAPI.RequestInfo = v
		if v.Status == constant.RequestInfoStatus2 {
			go confirm(runtimeAPI)
		} else if v.Status == constant.RequestInfoStatus4 {
			go cancel(runtimeAPI)
		}
	}
}
// confirm replays the confirm endpoint of every successful step. When
// all confirms succeed the whole request is marked confirmed; otherwise
// only its retry counter is bumped so taskToRetry tries again later.
func confirm(api *model.RuntimeApi) {
	var isErr bool
	ri := api.RequestInfo
	for _, v := range ri.SuccessSteps {
		// confirm
		cURL := util.URLRewrite(api.UrlPattern, ri.Url, api.Nodes[v.Index].Confirm.Url)
		_, err := util.HttpForward(cURL, api.Nodes[v.Index].Confirm.Method, []byte(v.Param), nil, time.Duration(api.Nodes[v.Index].Confirm.Timeout))
		if err != nil {
			isErr = true
			log.Errorf("asynchronous to confirm failed, please check it. error information is: %s", err)
			continue
		}
		// Mark this step confirmed so a later retry skips it.
		various.C.UpdateSuccessStepStatus(api.RequestInfo.Id, v.Id, constant.RequestTypeConfirm)
	}
	if !isErr {
		various.C.Confirm(ri.Id)
	} else {
		various.C.UpdateRequestInfoTimes(ri.Id)
	}
}
// cancel replays the cancel endpoint of every successful step and
// validates each response body. When all cancels succeed the request is
// marked rolled back (status 3); otherwise its retry counter is bumped
// so taskToRetry tries again later.
func cancel(api *model.RuntimeApi) {
	var isErr bool
	ri := api.RequestInfo
	for _, v := range ri.SuccessSteps {
		// cancel
		cURL := util.URLRewrite(api.UrlPattern, ri.Url, api.Nodes[v.Index].Cancel.Url)
		dt, err := util.HttpForward(cURL, api.Nodes[v.Index].Cancel.Method, []byte(v.Param), nil, time.Duration(api.Nodes[v.Index].Cancel.Timeout))
		if err != nil {
			isErr = true
			log.Errorf("asynchronous to cancel failed, please check it. error information is: %s", err)
			continue
		}
		var rst *util.Response
		err = json.Unmarshal(dt, &rst)
		if err != nil {
			isErr = true
			log.Errorf("asynchronous to cancel, the content format of response back is wrong, please check it. error information is: %s", err)
			continue
		}
		if rst.Code != constant.Success {
			isErr = true
			// Fix: err is always nil on this path (Unmarshal succeeded),
			// so the original logged a useless nil; log the offending
			// response itself instead.
			log.Errorf("asynchronous to cancel, response back content is wrong, please check it. response is: %+v", rst)
			continue
		}
		// Mark this step cancelled so a later retry skips it.
		various.C.UpdateSuccessStepStatus(api.RequestInfo.Id, v.Id, constant.RequestTypeCancel)
	}
	if !isErr {
		various.C.UpdateRequestInfoStatus(constant.RequestInfoStatus3, ri.Id)
	} else {
		various.C.UpdateRequestInfoTimes(ri.Id)
	}
}
|
package pipeline
import (
"context"
"encoding/base64"
"fmt"
)
// Encode takes plain text as int
// and returns "string => <base64 string encoding>
// as out
// Encode consumes plain-text values from w.in and emits
// "<plain> => <base64>" strings on w.out until ctx is cancelled.
func (w *Worker) Encode(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case val := <-w.in:
			encoded := base64.StdEncoding.EncodeToString([]byte(val))
			w.out <- fmt.Sprintf("%s => %s", val, encoded)
		}
	}
}
|
package bylog
import (
"github.com/hashicorp/go-syslog"
)
// BySysLogger adapts a go-syslog Syslogger to the ByLogger interface.
type BySysLogger struct {
	logger gsyslog.Syslogger
}
// NewSysLogger returns a ByLogger backed by the local syslog daemon,
// opened at DEBUG priority with the given facility and tag name.
func NewSysLogger(fac, name string) (ByLogger, error) {
	sl, err := gsyslog.NewLogger(gsyslog.LOG_DEBUG, fac, name)
	if err != nil {
		return nil, err
	}
	return &BySysLogger{logger: sl}, nil
}
// WriteLevel writes data to syslog at the given priority level p.
// (Fix: gofmt — removed the stray space in the parameter list.)
func (l *BySysLogger) WriteLevel(p int, data []byte) error {
	return l.logger.WriteLevel(gsyslog.Priority(p), data)
}
// Write writes data to syslog at the logger's default priority.
func (l *BySysLogger) Write(data []byte) (int, error) {
	return l.logger.Write(data)
}
// Close releases the underlying syslog connection.
func (l *BySysLogger) Close() error {
	return l.logger.Close()
}
|
package containers
import (
"github.com/exproletariy/pip-services3-containers-examples/app-aws-lambda-example-go/build"
cproc "github.com/pip-services3-go/pip-services3-aws-go/container"
)
// AppExampleLambdaFunction is an AWS Lambda container exposing the
// app-example commandable service.
type AppExampleLambdaFunction struct {
	cproc.CommandableLambdaFunction
}
// NewAppExampleLambdaFunction builds the lambda container and registers
// the factory that supplies its service components.
func NewAppExampleLambdaFunction() *AppExampleLambdaFunction {
	c := AppExampleLambdaFunction{}
	c.CommandableLambdaFunction = *cproc.NewCommandableLambdaFunction("app-example", "Example of aws lambda container")
	c.AddFactory(build.NewAppExampleServiceFactory())
	return &c
}
|
package tracks
// Track is the JSON representation of a stored track.
// NOTE(review): Id/User_id/Url break Go initialism conventions
// (ID/UserID/URL), but renaming the exported fields would break
// callers; the JSON wire format is fixed by the tags regardless.
type Track struct {
	Id uint `json:"id"`
	User_id uint `json:"user_id"`
	Name string `json:"name"`
	Url string `json:"url"`
}
|
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
package summary
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
// TestSumDurationInt checks that LogCollector sums repeated duration
// and integer observations per key and emits them once via Summary.
func TestSumDurationInt(t *testing.T) {
	fields := []zap.Field{}
	// Capture everything the collector logs instead of writing it out.
	logger := func(msg string, fs ...zap.Field) {
		fields = append(fields, fs...)
	}
	col := NewLogCollector(logger)
	col.CollectDuration("a", time.Second)
	col.CollectDuration("b", time.Second)
	col.CollectDuration("b", time.Second)
	col.CollectInt("c", 2)
	col.CollectInt("c", 2)
	col.SetSuccessStatus(true)
	col.Summary("foo")
	// Three summed keys plus whatever Summary itself emits — 7 fields here.
	require.Equal(t, 7, len(fields))
	assertContains := func(field zap.Field) {
		for _, f := range fields {
			if f.Key == field.Key {
				require.Equal(t, field, f)
				return
			}
		}
		t.Error(field, "is not in", fields)
	}
	assertContains(zap.Duration("a", time.Second))
	assertContains(zap.Duration("b", 2*time.Second))
	assertContains(zap.Int("c", 4))
}
|
package web_controller
import (
"2021/yunsongcailu/yunsong_server/common"
"2021/yunsongcailu/yunsong_server/param/web_param"
"2021/yunsongcailu/yunsong_server/web/web_model"
"fmt"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"math/rand"
"os"
"path/filepath"
"strconv"
"time"
)
// AuthPostPersonalUploadAvatar handles an avatar upload: it saves the
// uploaded file under public/webUpload/consumer/avatar/, removes the
// previous avatar file if any, and stores the new relative path on the
// authenticated consumer record.
func AuthPostPersonalUploadAvatar(ctx *gin.Context) {
	//_,avatarFileHeader,err := ctx.Request.FormFile("file")
	avatarParamData,_ := ctx.Get("avatarParam")
	if avatarParamData == nil {
		common.Failed(ctx,"获取上传文件失败")
		return
	}
	avatarParam := avatarParamData.(web_param.PersonalAvatarParams)
	avatarFileHeader := avatarParam.File
	// Random 6-digit suffix to avoid filename collisions within a second.
	code := fmt.Sprintf("%06v",rand.New(rand.NewSource(time.Now().UnixNano())).Int31n(1000000))
	extString := filepath.Ext(avatarFileHeader.Filename)
	fileName := "webUpload/consumer/avatar/avatar" + strconv.FormatInt(time.Now().Unix(),10) + code + extString
	filePath := "public/" + fileName
	err := ctx.SaveUploadedFile(avatarFileHeader,filePath)
	if err != nil {
		common.Failed(ctx,"保存文件失败")
		return
	}
	// Best-effort removal of the previous avatar file, if one exists.
	oldAvatar := avatarParam.OldAvatar
	if oldAvatar != ""{
		oldFilePath := "public/" + oldAvatar
		_ = os.Remove(oldFilePath)
	}
	// Persist the new avatar path for the authenticated consumer.
	webUser,isSet := ctx.Get("webUser")
	var userInfo web_model.Consumers
	if isSet {
		userInfo = webUser.(web_model.Consumers)
	}
	err = cs.EditConsumerIconById(userInfo.Id,fileName)
	if err != nil {
		common.Failed(ctx,"更新头像入库失败")
		return
	}
	common.Success(ctx,fileName)
}
// 修改个人信息
func AuthPostPersonalEdit(ctx *gin.Context) {
var consumerEditParam web_param.PersonalEditParam
err := ctx.ShouldBindBodyWith(&consumerEditParam,binding.JSON)
if err != nil {
common.Failed(ctx,"获取用户数据失败")
return
}
consumerInfo := consumerEditParam.Consumer
var consumer web_model.Consumers
consumer.Id = consumerInfo.Id
if consumerEditParam.IsEmail == "false" {
consumer.Email = consumerInfo.Email
}
if consumerEditParam.IsPhone == "false" {
consumer.Phone = consumerInfo.Phone
}
consumer.Gender = consumerInfo.Gender
consumer.NickName = consumerInfo.Nickname
consumer.Desc = consumerInfo.Desc
err = cs.EditConsumerInfoById(consumer)
if err != nil {
common.Failed(ctx,"更新用户信息失败")
return
}
common.Success(ctx,"OK")
return
} |
package misc_test
import (
"testing"
parser "github.com/romshark/llparser"
"github.com/romshark/llparser/misc"
"github.com/stretchr/testify/require"
)
// TestLexerRead verifies that Read tokenizes "abc\r\n\t defg,!" into a
// word, a whitespace run, a word and two sign tokens — with correct
// index/line/column positions — and finally yields nil at EOF.
func TestLexerRead(t *testing.T) {
	lex := misc.NewLexer(&parser.SourceFile{
		Name: "test.txt",
		Src: []rune("abc\r\n\t defg,!"),
	})
	// Token 1: word "abc" on line 1.
	tk1, err := lex.Read()
	require.NoError(t, err)
	require.NotNil(t, tk1)
	require.Equal(t, misc.FrWord, tk1.Kind())
	require.Equal(t, "abc", string(tk1.Src()))
	require.Equal(t, uint(0), tk1.Begin().Index)
	require.Equal(t, uint(1), tk1.Begin().Line)
	require.Equal(t, uint(1), tk1.Begin().Column)
	require.Equal(t, uint(3), tk1.End().Index)
	require.Equal(t, uint(1), tk1.End().Line)
	require.Equal(t, uint(4), tk1.End().Column)
	// Token 2: whitespace "\r\n\t " spanning the line break onto line 2.
	tk2, err := lex.Read()
	require.NoError(t, err)
	require.NotNil(t, tk2)
	require.Equal(t, misc.FrSpace, tk2.Kind())
	require.Equal(t, "\r\n\t ", string(tk2.Src()))
	require.Equal(t, uint(3), tk2.Begin().Index)
	require.Equal(t, uint(1), tk2.Begin().Line)
	require.Equal(t, uint(4), tk2.Begin().Column)
	require.Equal(t, uint(7), tk2.End().Index)
	require.Equal(t, uint(2), tk2.End().Line)
	require.Equal(t, uint(3), tk2.End().Column)
	// Token 3: word "defg" on line 2.
	tk3, err := lex.Read()
	require.NoError(t, err)
	require.NotNil(t, tk3)
	require.Equal(t, misc.FrWord, tk3.Kind())
	require.Equal(t, "defg", string(tk3.Src()))
	require.Equal(t, uint(7), tk3.Begin().Index)
	require.Equal(t, uint(2), tk3.Begin().Line)
	require.Equal(t, uint(3), tk3.Begin().Column)
	require.Equal(t, uint(11), tk3.End().Index)
	require.Equal(t, uint(2), tk3.End().Line)
	require.Equal(t, uint(7), tk3.End().Column)
	// Token 4: sign ",".
	tk4, err := lex.Read()
	require.NoError(t, err)
	require.NotNil(t, tk4)
	require.Equal(t, misc.FrSign, tk4.Kind())
	require.Equal(t, ",", string(tk4.Src()))
	require.Equal(t, uint(11), tk4.Begin().Index)
	require.Equal(t, uint(2), tk4.Begin().Line)
	require.Equal(t, uint(7), tk4.Begin().Column)
	require.Equal(t, uint(12), tk4.End().Index)
	require.Equal(t, uint(2), tk4.End().Line)
	require.Equal(t, uint(8), tk4.End().Column)
	// Token 5: sign "!".
	tk5, err := lex.Read()
	require.NoError(t, err)
	require.NotNil(t, tk5)
	require.Equal(t, misc.FrSign, tk5.Kind())
	require.Equal(t, "!", string(tk5.Src()))
	require.Equal(t, uint(12), tk5.Begin().Index)
	require.Equal(t, uint(2), tk5.Begin().Line)
	require.Equal(t, uint(8), tk5.Begin().Column)
	require.Equal(t, uint(13), tk5.End().Index)
	require.Equal(t, uint(2), tk5.End().Line)
	require.Equal(t, uint(9), tk5.End().Column)
	// EOF: Read returns a nil token without error.
	tk6, err := lex.Read()
	require.NoError(t, err)
	require.Nil(t, tk6)
}
// TestLexerReadExact verifies that ReadExact consumes the entire
// matching input as a single fragment of the requested kind and leaves
// the lexer at EOF.
func TestLexerReadExact(t *testing.T) {
	lex := misc.NewLexer(&parser.SourceFile{
		Name: "test.txt",
		Src: []rune("abc\r\n\t defg,!"),
	})
	tk1, match, err := lex.ReadExact([]rune("abc\r\n\t defg,!"), 1002)
	require.NoError(t, err)
	require.NotNil(t, tk1)
	require.True(t, match)
	require.Equal(t, parser.FragmentKind(1002), tk1.Kind())
	require.Equal(t, "abc\r\n\t defg,!", string(tk1.Src()))
	require.Equal(t, uint(0), tk1.Begin().Index)
	require.Equal(t, uint(1), tk1.Begin().Line)
	require.Equal(t, uint(1), tk1.Begin().Column)
	require.Equal(t, uint(13), tk1.End().Index)
	require.Equal(t, uint(2), tk1.End().Line)
	require.Equal(t, uint(9), tk1.End().Column)
	// Everything was consumed, so the next Read yields EOF (nil token).
	tk6, err := lex.Read()
	require.NoError(t, err)
	require.Nil(t, tk6)
}
// TestLexerReadExactNoMatch verifies that a failed ReadExact still
// returns the partially-consumed fragment (match == false) and that the
// lexer can continue reading right after the consumed prefix.
func TestLexerReadExactNoMatch(t *testing.T) {
	lex := misc.NewLexer(&parser.SourceFile{
		Name: "test.txt",
		Src: []rune("abc\r\n\t defg,!"),
	})
	// "ac" diverges at the second rune: the lexer reports no match but
	// returns the consumed "ab" fragment.
	tk1, match1, err1 := lex.ReadExact([]rune("ac"), 1002)
	require.NoError(t, err1)
	require.NotNil(t, tk1)
	require.False(t, match1)
	require.Equal(t, parser.FragmentKind(1002), tk1.Kind())
	require.Equal(t, "ab", string(tk1.Src()))
	require.Equal(t, uint(0), tk1.Begin().Index)
	require.Equal(t, uint(1), tk1.Begin().Line)
	require.Equal(t, uint(1), tk1.Begin().Column)
	require.Equal(t, uint(2), tk1.End().Index)
	require.Equal(t, uint(1), tk1.End().Line)
	require.Equal(t, uint(3), tk1.End().Column)
	// The lexer resumes at "c", which matches exactly.
	tk2, match2, err2 := lex.ReadExact([]rune("c"), 1003)
	require.NoError(t, err2)
	require.NotNil(t, tk2)
	require.True(t, match2)
	require.Equal(t, parser.FragmentKind(1003), tk2.Kind())
	require.Equal(t, "c", string(tk2.Src()))
	require.Equal(t, uint(2), tk2.Begin().Index)
	require.Equal(t, uint(1), tk2.Begin().Line)
	require.Equal(t, uint(3), tk2.Begin().Column)
	require.Equal(t, uint(3), tk2.End().Index)
	require.Equal(t, uint(1), tk2.End().Line)
	require.Equal(t, uint(4), tk2.End().Column)
}
// TestLexerReadUntil tests all ReadUntil cases
func TestLexerReadUntil(t *testing.T) {
	// MatchAll tests matching any input character
	t.Run("MatchAll", func(t *testing.T) {
		lex := misc.NewLexer(&parser.SourceFile{
			Name: "test.txt",
			Src: []rune("abc\r\n\t defg,!"),
		})
		// A predicate that always returns 1 consumes the whole input.
		tk1, err := lex.ReadUntil(func(parser.Cursor) uint { return 1 }, 1002)
		require.NoError(t, err)
		require.NotNil(t, tk1)
		require.Equal(t, parser.FragmentKind(1002), tk1.Kind())
		require.Equal(t, "abc\r\n\t defg,!", string(tk1.Src()))
		require.Equal(t, uint(0), tk1.Begin().Index)
		require.Equal(t, uint(1), tk1.Begin().Line)
		require.Equal(t, uint(1), tk1.Begin().Column)
		require.Equal(t, uint(13), tk1.End().Index)
		require.Equal(t, uint(2), tk1.End().Line)
		require.Equal(t, uint(9), tk1.End().Column)
		tk6, err := lex.Read()
		require.NoError(t, err)
		require.Nil(t, tk6)
	})
	// SplitCRLF tests whether CRLF sequences are split. The lexer is
	// expected to skip CRLF sequences as a whole
	t.Run("SplitCRLF", func(t *testing.T) {
		lex := misc.NewLexer(&parser.SourceFile{
			Name: "test.txt",
			Src: []rune("a\r\nbc"),
		})
		until := func(crs parser.Cursor) uint {
			if crs.File.Src[crs.Index] == '\n' {
				// This should only be matched in the second case
				// where there's no carriage-return character in front
				// of the line-feed
				return 0
			}
			return 1
		}
		tk1, err := lex.ReadUntil(until, 1002)
		// Read head
		require.NoError(t, err)
		require.NotNil(t, tk1)
		require.Equal(t, parser.FragmentKind(1002), tk1.Kind())
		require.Equal(t, "a\r\nbc", string(tk1.Src()))
		require.Equal(t, uint(0), tk1.Begin().Index)
		require.Equal(t, uint(1), tk1.Begin().Line)
		require.Equal(t, uint(1), tk1.Begin().Column)
		require.Equal(t, uint(5), tk1.End().Index)
		require.Equal(t, uint(2), tk1.End().Line)
		require.Equal(t, uint(3), tk1.End().Column)
		// Read EOF
		tk2, err := lex.Read()
		require.NoError(t, err)
		require.Nil(t, tk2)
	})
	// SkipMultiple tests returning >1 offset returns
	t.Run("SkipMultiple", func(t *testing.T) {
		lex := misc.NewLexer(&parser.SourceFile{
			Name: "test.txt",
			Src: []rune("abc\ndef"),
		})
		tk1, err := lex.ReadUntil(
			func(crs parser.Cursor) uint {
				if crs.File.Src[crs.Index] == 'c' {
					// This condition should never be met because the second
					// will be matched first which will cause the lexer
					// to skip 'c'
					return 0
				}
				if crs.File.Src[crs.Index] == 'b' {
					return 2
				}
				return 1
			},
			1002,
		)
		require.NoError(t, err)
		require.NotNil(t, tk1)
		require.Equal(t, parser.FragmentKind(1002), tk1.Kind())
		require.Equal(t, "abc\ndef", string(tk1.Src()))
		require.Equal(t, uint(0), tk1.Begin().Index)
		require.Equal(t, uint(1), tk1.Begin().Line)
		require.Equal(t, uint(1), tk1.Begin().Column)
		require.Equal(t, uint(7), tk1.End().Index)
		require.Equal(t, uint(2), tk1.End().Line)
		require.Equal(t, uint(4), tk1.End().Column)
		tk6, err := lex.Read()
		require.NoError(t, err)
		require.Nil(t, tk6)
	})
	// SkipExceed tests returning >1 offsets exceeding the source file size.
	// The lexer is expected not to crash, it should just read until EOF
	t.Run("SkipExceed", func(t *testing.T) {
		lex := misc.NewLexer(&parser.SourceFile{
			Name: "test.txt",
			Src: []rune("abc"),
		})
		tk1, err := lex.ReadUntil(
			func(crs parser.Cursor) uint {
				if crs.File.Src[crs.Index] == 'c' {
					return 2
				}
				return 1
			},
			1002,
		)
		require.NoError(t, err)
		require.NotNil(t, tk1)
		require.Equal(t, parser.FragmentKind(1002), tk1.Kind())
		require.Equal(t, "abc", string(tk1.Src()))
		require.Equal(t, uint(0), tk1.Begin().Index)
		require.Equal(t, uint(1), tk1.Begin().Line)
		require.Equal(t, uint(1), tk1.Begin().Column)
		require.Equal(t, uint(3), tk1.End().Index)
		require.Equal(t, uint(1), tk1.End().Line)
		require.Equal(t, uint(4), tk1.End().Column)
		tk6, err := lex.Read()
		require.NoError(t, err)
		require.Nil(t, tk6)
	})
	// Nil returning 0 immediately for any cursor
	t.Run("Nil", func(t *testing.T) {
		lex := misc.NewLexer(&parser.SourceFile{
			Name: "test.txt",
			Src: []rune("abc"),
		})
		// With zero characters consumed, ReadUntil yields no fragment.
		tk1, err := lex.ReadUntil(
			func(crs parser.Cursor) uint { return 0 },
			1002,
		)
		require.NoError(t, err)
		require.Nil(t, tk1)
	})
}
|
package main
import (
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
"graphql-golang/common"
"graphql-golang/handler"
)
// main wires the echo HTTP server: public /hello and /login endpoints
// plus a JWT-protected /query group, listening on :5000.
func main() {
	e := echo.New()
	e.Use(middleware.CORS())
	e.Use(middleware.Logger())
	e.Use(middleware.Recover())
	e.GET("/hello", handler.Hello())
	e.POST("/login", handler.Login())
	// /query requires a valid JWT signed with the shared secret.
	r := e.Group("/query")
	r.Use(middleware.JWT([]byte(common.SECRET_KEY)))
	r.POST("", handler.Query())
	// Fix: Start's error (port in use, permission denied, ...) was
	// silently discarded; log it fatally so startup failures surface.
	e.Logger.Fatal(e.Start(":5000"))
}
|
package main
import (
"crypto/tls"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"path"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/distro"
"github.com/osbuild/osbuild-composer/internal/upload/koji"
"github.com/osbuild/osbuild-composer/internal/worker"
)
// OSBuildKojiJobImpl runs osbuild jobs whose artifacts are uploaded to
// a Koji instance. Store and Output are local directories; KojiServers
// maps a server hostname to its GSSAPI credentials.
type OSBuildKojiJobImpl struct {
	Store string
	Output string
	KojiServers map[string]koji.GSSAPICredentials
}
// kojiUpload uploads file to the given Koji server under
// directory/filename, authenticating via GSSAPI with the credentials
// configured for the server's hostname. It returns the hash and size
// reported by Koji for the uploaded image.
func (impl *OSBuildKojiJobImpl) kojiUpload(file *os.File, server, directory, filename string) (string, uint64, error) {
	// Koji for some reason needs TLS renegotiation enabled.
	// Clone the default http transport and enable renegotiation.
	transport := http.DefaultTransport.(*http.Transport).Clone()
	transport.TLSClientConfig = &tls.Config{
		Renegotiation: tls.RenegotiateOnceAsClient,
	}
	serverURL, err := url.Parse(server)
	if err != nil {
		return "", 0, err
	}
	creds, exists := impl.KojiServers[serverURL.Hostname()]
	if !exists {
		return "", 0, fmt.Errorf("Koji server has not been configured: %s", serverURL.Hostname())
	}
	k, err := koji.NewFromGSSAPI(server, &creds, transport)
	if err != nil {
		return "", 0, err
	}
	// Always log out; a failed logout does not invalidate a completed
	// upload, so it is only logged.
	defer func() {
		err := k.Logout()
		if err != nil {
			log.Printf("koji logout failed: %v", err)
		}
	}()
	return k.Upload(file, directory, filename)
}
// Run executes an OSBuild Koji job: it builds the manifest into a
// temporary output directory, uploads the resulting image to Koji
// (provided the preceding Koji-init job succeeded), and reports the
// job result back to the worker server.
func (impl *OSBuildKojiJobImpl) Run(job worker.Job) error {
	outputDirectory, err := ioutil.TempDir(impl.Output, job.Id().String()+"-*")
	if err != nil {
		return fmt.Errorf("error creating temporary output directory: %v", err)
	}
	defer func() {
		err := os.RemoveAll(outputDirectory)
		if err != nil {
			log.Printf("Error removing temporary output directory (%s): %v", outputDirectory, err)
		}
	}()
	var args worker.OSBuildKojiJob
	err = job.Args(&args)
	if err != nil {
		return err
	}
	var initArgs worker.KojiInitJobResult
	err = job.DynamicArgs(0, &initArgs)
	if err != nil {
		return err
	}
	var result worker.OSBuildKojiJobResult
	result.Arch = common.CurrentArch()
	result.HostOS, err = distro.GetRedHatRelease()
	if err != nil {
		return err
	}
	// Only build and upload when the init job succeeded.
	if initArgs.KojiError == "" {
		exports := args.Exports
		if len(exports) == 0 {
			// job did not define exports, likely coming from an older version of composer
			// fall back to default "assembler"
			exports = []string{"assembler"}
		} else if len(exports) > 1 {
			// this worker only supports returning one (1) export
			return fmt.Errorf("at most one build artifact can be exported")
		}
		result.OSBuildOutput, err = RunOSBuild(args.Manifest, impl.Store, outputDirectory, exports, os.Stderr)
		if err != nil {
			return err
		}
		// NOTE: Currently OSBuild supports multiple exports, but this isn't used
		// by any of the image types and it can't be specified during the request.
		// Use the first (and presumably only) export for the imagePath.
		exportPath := exports[0]
		if result.OSBuildOutput.Success {
			f, err := os.Open(path.Join(outputDirectory, exportPath, args.ImageName))
			if err != nil {
				return err
			}
			// Fix: the opened image file was never closed, leaking a
			// file descriptor on every successful build.
			defer f.Close()
			result.ImageHash, result.ImageSize, err = impl.kojiUpload(f, args.KojiServer, args.KojiDirectory, args.KojiFilename)
			if err != nil {
				// An upload failure is reported in the result rather
				// than failing the whole job.
				result.KojiError = err.Error()
			}
		}
	}
	err = job.Update(&result)
	if err != nil {
		return fmt.Errorf("Error reporting job result: %v", err)
	}
	return nil
}
|
package main
import (
"git.apache.org/thrift.git/lib/go/thrift"
"github.com/lnhote/hello-thrift/gen-go/bill"
"context"
"log"
)
func main() {
sock, err := thrift.NewTSocket("localhost:9090")
if err != nil {
panic(err)
}
defer sock.Close()
transportFactory := thrift.NewTFramedTransportFactory(thrift.NewTTransportFactory())
transport, err := transportFactory.GetTransport(sock)
if err != nil {
panic(err)
}
protocolFactory := thrift.NewTBinaryProtocolFactoryDefault()
err = transport.Open()
if err != nil {
panic(err)
}
defer transport.Close()
protocol := protocolFactory.GetProtocol(transport)
client := thrift.NewTStandardClient(protocol, protocol)
billClient := bill.NewBillServiceClient(client)
ctx := context.Background()
billInfoList, err := billClient.GetBillList(ctx, "123")
if err != nil {
log.Print(err.Error())
}
log.Printf("GetBillList = %+v", billInfoList)
billInfoList, err = billClient.GetBillList(ctx, "124")
if err != nil {
log.Print(err.Error())
}
log.Printf("GetBillList = %+v", billInfoList)
} |
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"fmt"
"math"
"strconv"
"testing"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/parser/charset"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/stretchr/testify/require"
)
// invalidMockType is a placeholder type with no conversion support; tests
// pass it to Convert/ToString to trigger the "unsupported type" error path.
type invalidMockType struct {
}
// Convert converts the val with type tp. It builds a Datum from val and
// converts it to the target field type under a UTC statement context,
// returning the converted value together with any conversion error (wrapped
// with a stack trace).
func Convert(val interface{}, target *FieldType) (v interface{}, err error) {
	stmtCtx := new(stmtctx.StatementContext)
	stmtCtx.TimeZone = time.UTC
	converted, err := NewDatum(val).ConvertTo(stmtCtx, target)
	if err != nil {
		return converted.GetValue(), errors.Trace(err)
	}
	return converted.GetValue(), nil
}
// TestConvertType exercises Datum.ConvertTo (via the Convert helper) across
// many target field types: blob/string truncation, float/double rounding and
// overflow, duration and time fractional-second handling, longlong, bit,
// decimal, year (including JSON inputs), enum and set conversions.
func TestConvertType(t *testing.T) {
	ft := NewFieldType(mysql.TypeBlob)
	ft.SetFlen(4)
	ft.SetCharset("utf8")
	v, err := Convert("123456", ft)
	require.True(t, ErrDataTooLong.Equal(err))
	require.Equal(t, "1234", v)
	ft = NewFieldType(mysql.TypeString)
	ft.SetFlen(4)
	ft.SetCharset(charset.CharsetBin)
	v, err = Convert("12345", ft)
	require.True(t, ErrDataTooLong.Equal(err))
	require.Equal(t, []byte("1234"), v)
	ft = NewFieldType(mysql.TypeFloat)
	ft.SetFlen(5)
	ft.SetDecimal(2)
	v, err = Convert(111.114, ft)
	require.NoError(t, err)
	require.Equal(t, float32(111.11), v)
	ft = NewFieldType(mysql.TypeFloat)
	ft.SetFlen(5)
	ft.SetDecimal(2)
	v, err = Convert(999.999, ft)
	require.Error(t, err)
	require.Equal(t, float32(999.99), v)
	ft = NewFieldType(mysql.TypeFloat)
	ft.SetFlen(5)
	ft.SetDecimal(2)
	v, err = Convert(-999.999, ft)
	require.Error(t, err)
	require.Equal(t, float32(-999.99), v)
	ft = NewFieldType(mysql.TypeFloat)
	ft.SetFlen(5)
	ft.SetDecimal(2)
	v, err = Convert(1111.11, ft)
	require.Error(t, err)
	require.Equal(t, float32(999.99), v)
	ft = NewFieldType(mysql.TypeFloat)
	ft.SetFlen(5)
	ft.SetDecimal(2)
	v, err = Convert(999.916, ft)
	require.NoError(t, err)
	require.Equal(t, float32(999.92), v)
	ft = NewFieldType(mysql.TypeFloat)
	ft.SetFlen(5)
	ft.SetDecimal(2)
	v, err = Convert(999.914, ft)
	require.NoError(t, err)
	require.Equal(t, float32(999.91), v)
	ft = NewFieldType(mysql.TypeFloat)
	ft.SetFlen(5)
	ft.SetDecimal(2)
	v, err = Convert(999.9155, ft)
	require.NoError(t, err)
	require.Equal(t, float32(999.92), v)
	// For TypeBlob
	ft = NewFieldType(mysql.TypeBlob)
	_, err = Convert(&invalidMockType{}, ft)
	require.Error(t, err)
	// Nil
	ft = NewFieldType(mysql.TypeBlob)
	v, err = Convert(nil, ft)
	require.NoError(t, err)
	require.Nil(t, v)
	// TypeDouble
	ft = NewFieldType(mysql.TypeDouble)
	ft.SetFlen(5)
	ft.SetDecimal(2)
	v, err = Convert(999.9155, ft)
	require.NoError(t, err)
	require.Equal(t, float64(999.92), v)
	// For TypeString
	ft = NewFieldType(mysql.TypeString)
	ft.SetFlen(3)
	v, err = Convert("12345", ft)
	require.True(t, ErrDataTooLong.Equal(err))
	require.Equal(t, "123", v)
	ft = NewFieldType(mysql.TypeString)
	ft.SetFlen(3)
	ft.SetCharset(charset.CharsetBin)
	v, err = Convert("12345", ft)
	require.True(t, ErrDataTooLong.Equal(err))
	require.Equal(t, []byte("123"), v)
	// For TypeDuration
	ft = NewFieldType(mysql.TypeDuration)
	ft.SetDecimal(3)
	v, err = Convert("10:11:12.123456", ft)
	require.NoError(t, err)
	require.Equal(t, "10:11:12.123", v.(Duration).String())
	ft.SetDecimal(1)
	vv, err := Convert(v, ft)
	require.NoError(t, err)
	require.Equal(t, "10:11:12.1", vv.(Duration).String())
	sc := &stmtctx.StatementContext{TimeZone: time.UTC}
	vd, err := ParseTime(sc, "2010-10-10 10:11:11.12345", mysql.TypeDatetime, 2, nil)
	require.Equal(t, "2010-10-10 10:11:11.12", vd.String())
	require.NoError(t, err)
	v, err = Convert(vd, ft)
	require.NoError(t, err)
	require.Equal(t, "10:11:11.1", v.(Duration).String())
	vt, err := ParseTime(sc, "2010-10-10 10:11:11.12345", mysql.TypeTimestamp, 2, nil)
	require.Equal(t, "2010-10-10 10:11:11.12", vt.String())
	require.NoError(t, err)
	v, err = Convert(vt, ft)
	require.NoError(t, err)
	require.Equal(t, "10:11:11.1", v.(Duration).String())
	// For mysql.TypeTimestamp, mysql.TypeDatetime, mysql.TypeDate
	ft = NewFieldType(mysql.TypeTimestamp)
	ft.SetDecimal(3)
	v, err = Convert("2010-10-10 10:11:11.12345", ft)
	require.NoError(t, err)
	require.Equal(t, "2010-10-10 10:11:11.123", v.(Time).String())
	ft.SetDecimal(1)
	vv, err = Convert(v, ft)
	require.NoError(t, err)
	require.Equal(t, "2010-10-10 10:11:11.1", vv.(Time).String())
	// For TypeLonglong
	ft = NewFieldType(mysql.TypeLonglong)
	v, err = Convert("100", ft)
	require.NoError(t, err)
	require.Equal(t, int64(100), v)
	// issue 4287.
	v, err = Convert(math.Pow(2, 63)-1, ft)
	require.NoError(t, err)
	require.Equal(t, int64(math.MaxInt64), v)
	ft = NewFieldType(mysql.TypeLonglong)
	ft.AddFlag(mysql.UnsignedFlag)
	v, err = Convert("100", ft)
	require.NoError(t, err)
	require.Equal(t, uint64(100), v)
	// issue 3470
	ft = NewFieldType(mysql.TypeLonglong)
	v, err = Convert(Duration{Duration: 12*time.Hour + 59*time.Minute + 59*time.Second + 555*time.Millisecond, Fsp: 3}, ft)
	require.NoError(t, err)
	require.Equal(t, int64(130000), v)
	v, err = Convert(NewTime(FromDate(2017, 1, 1, 12, 59, 59, 555000), mysql.TypeDatetime, MaxFsp), ft)
	require.NoError(t, err)
	require.Equal(t, int64(20170101130000), v)
	// For TypeBit
	ft = NewFieldType(mysql.TypeBit)
	ft.SetFlen(24) // 3 bytes.
	v, err = Convert("100", ft)
	require.NoError(t, err)
	require.Equal(t, NewBinaryLiteralFromUint(3223600, 3), v)
	v, err = Convert(NewBinaryLiteralFromUint(100, -1), ft)
	require.NoError(t, err)
	require.Equal(t, NewBinaryLiteralFromUint(100, 3), v)
	ft.SetFlen(1)
	v, err = Convert(1, ft)
	require.NoError(t, err)
	require.Equal(t, NewBinaryLiteralFromUint(1, 1), v)
	_, err = Convert(2, ft)
	require.Error(t, err)
	ft.SetFlen(0)
	_, err = Convert(2, ft)
	require.Error(t, err)
	// For TypeNewDecimal
	ft = NewFieldType(mysql.TypeNewDecimal)
	ft.SetFlen(8)
	ft.SetDecimal(4)
	v, err = Convert(3.1416, ft)
	require.NoErrorf(t, err, errors.ErrorStack(err))
	require.Equal(t, "3.1416", v.(*MyDecimal).String())
	v, err = Convert("3.1415926", ft)
	require.NoError(t, err)
	require.Equal(t, "3.1416", v.(*MyDecimal).String())
	v, err = Convert("99999", ft)
	require.Truef(t, terror.ErrorEqual(err, ErrOverflow), "err %v", err)
	require.Equal(t, "9999.9999", v.(*MyDecimal).String())
	v, err = Convert("-10000", ft)
	require.Truef(t, terror.ErrorEqual(err, ErrOverflow), "err %v", err)
	require.Equal(t, "-9999.9999", v.(*MyDecimal).String())
	v, err = Convert("1,999.00", ft)
	require.Truef(t, terror.ErrorEqual(err, ErrTruncated), "err %v", err)
	require.Equal(t, "1.0000", v.(*MyDecimal).String())
	v, err = Convert("1,999,999.00", ft)
	require.Truef(t, terror.ErrorEqual(err, ErrTruncated), "err %v", err)
	require.Equal(t, "1.0000", v.(*MyDecimal).String())
	v, err = Convert("199.00 ", ft)
	require.NoError(t, err)
	require.Equal(t, "199.0000", v.(*MyDecimal).String())
	// Test Datum.ToDecimal with bad number.
	d := NewDatum("hello")
	_, err = d.ToDecimal(sc)
	require.Truef(t, terror.ErrorEqual(err, ErrTruncatedWrongVal), "err %v", err)
	sc.IgnoreTruncate.Store(true)
	v, err = d.ToDecimal(sc)
	require.NoError(t, err)
	require.Equal(t, "0", v.(*MyDecimal).String())
	// For TypeYear
	ft = NewFieldType(mysql.TypeYear)
	v, err = Convert("2015", ft)
	require.NoError(t, err)
	require.Equal(t, int64(2015), v)
	v, err = Convert(2015, ft)
	require.NoError(t, err)
	require.Equal(t, int64(2015), v)
	_, err = Convert(1800, ft)
	require.Error(t, err)
	dt, err := ParseDate(nil, "2015-11-11")
	require.NoError(t, err)
	v, err = Convert(dt, ft)
	require.NoError(t, err)
	require.Equal(t, int64(2015), v)
	v, err = Convert(ZeroDuration, ft)
	require.NoError(t, err)
	require.Equal(t, int64(0), v)
	bj1, err := ParseBinaryJSONFromString("99")
	require.NoError(t, err)
	v, err = Convert(bj1, ft)
	require.NoError(t, err)
	require.Equal(t, int64(1999), v)
	bj2, err := ParseBinaryJSONFromString("-1")
	require.NoError(t, err)
	_, err = Convert(bj2, ft)
	require.Error(t, err)
	bj3, err := ParseBinaryJSONFromString("{\"key\": 99}")
	require.NoError(t, err)
	_, err = Convert(bj3, ft)
	require.Error(t, err)
	bj4, err := ParseBinaryJSONFromString("[99, 0, 1]")
	require.NoError(t, err)
	_, err = Convert(bj4, ft)
	require.Error(t, err)
	// For enum
	ft = NewFieldType(mysql.TypeEnum)
	ft.SetElems([]string{"a", "b", "c"})
	v, err = Convert("a", ft)
	require.NoError(t, err)
	require.Equal(t, Enum{Name: "a", Value: 1}, v)
	v, err = Convert(2, ft)
	require.NoError(t, err)
	require.Equal(t, Enum{Name: "b", Value: 2}, v)
	_, err = Convert("d", ft)
	require.Error(t, err)
	v, err = Convert(4, ft)
	require.Truef(t, terror.ErrorEqual(err, ErrTruncated), "err %v", err)
	require.Equal(t, Enum{}, v)
	ft = NewFieldType(mysql.TypeSet)
	ft.SetElems([]string{"a", "b", "c"})
	v, err = Convert("a", ft)
	require.NoError(t, err)
	require.Equal(t, Set{Name: "a", Value: 1}, v)
	v, err = Convert(2, ft)
	require.NoError(t, err)
	require.Equal(t, Set{Name: "b", Value: 2}, v)
	v, err = Convert(3, ft)
	require.NoError(t, err)
	require.Equal(t, Set{Name: "a,b", Value: 3}, v)
	_, err = Convert("d", ft)
	require.Error(t, err)
	_, err = Convert(9, ft)
	require.Error(t, err)
}
// testToString asserts that ToString(val) succeeds and produces expect.
func testToString(t *testing.T, val interface{}, expect string) {
	s, err := ToString(val)
	require.NoError(t, err)
	require.Equal(t, expect, s)
}
// TestConvertToString checks ToString for booleans, numerics, byte slices,
// binary literals, enum/set values, times, durations and decimals, then
// verifies charset-aware length truncation when converting strings to
// varchar (flen counts characters for utf8/utf8mb4 and bytes for binary).
func TestConvertToString(t *testing.T) {
	testToString(t, "0", "0")
	testToString(t, true, "1")
	testToString(t, "false", "false")
	testToString(t, 0, "0")
	testToString(t, int64(0), "0")
	testToString(t, uint64(0), "0")
	testToString(t, float32(1.6), "1.6")
	testToString(t, float64(-0.6), "-0.6")
	testToString(t, []byte{1}, "\x01")
	testToString(t, NewBinaryLiteralFromUint(0x4D7953514C, -1), "MySQL")
	testToString(t, NewBinaryLiteralFromUint(0x41, -1), "A")
	testToString(t, Enum{Name: "a", Value: 1}, "a")
	testToString(t, Set{Name: "a", Value: 1}, "a")
	t1, err := ParseTime(&stmtctx.StatementContext{TimeZone: time.UTC}, "2011-11-10 11:11:11.999999", mysql.TypeTimestamp, 6, nil)
	require.NoError(t, err)
	testToString(t, t1, "2011-11-10 11:11:11.999999")
	td, _, err := ParseDuration(nil, "11:11:11.999999", 6)
	require.NoError(t, err)
	testToString(t, td, "11:11:11.999999")
	ft := NewFieldType(mysql.TypeNewDecimal)
	ft.SetFlen(10)
	ft.SetDecimal(5)
	v, err := Convert(3.1415926, ft)
	require.NoError(t, err)
	testToString(t, v, "3.14159")
	_, err = ToString(&invalidMockType{})
	require.Error(t, err)
	// test truncate
	tests := []struct {
		flen    int
		charset string
		input   string
		output  string
	}{
		{5, "utf8", "你好,世界", "你好,世界"},
		{5, "utf8mb4", "你好,世界", "你好,世界"},
		{4, "utf8", "你好,世界", "你好,世"},
		{4, "utf8mb4", "你好,世界", "你好,世"},
		{15, "binary", "你好,世界", "你好,世界"},
		{12, "binary", "你好,世界", "你好,世"},
		{0, "binary", "你好,世界", ""},
	}
	for _, tt := range tests {
		ft = NewFieldType(mysql.TypeVarchar)
		ft.SetFlen(tt.flen)
		ft.SetCharset(tt.charset)
		inputDatum := NewStringDatum(tt.input)
		sc := new(stmtctx.StatementContext)
		outputDatum, err := inputDatum.ConvertTo(sc, ft)
		if tt.input != tt.output {
			require.True(t, ErrDataTooLong.Equal(err), "flen: %d, charset: %s, input: %s, output: %s", tt.flen, tt.charset, tt.input, tt.output)
		} else {
			require.NoError(t, err)
		}
		require.Equal(t, tt.output, outputDatum.GetString())
	}
}
// TestConvertToStringWithCheck verifies that string-to-varchar conversion
// respects the statement-context validity checks (SkipUTF8Check,
// SkipASCIICheck, SkipUTF8MB4Check): an empty expected output means the
// conversion must fail with ErrInvalidCharacterString.
func TestConvertToStringWithCheck(t *testing.T) {
	nhUTF8 := "你好"
	nhUTF8MB4 := "你好👋"
	nhUTF8Invalid := "你好" + string([]byte{0x81})
	type SC = *stmtctx.StatementContext
	tests := []struct {
		input      string
		outputChs  string
		setStmtCtx func(ctx *stmtctx.StatementContext)
		output     string
	}{
		{nhUTF8, "utf8mb4", func(s SC) { s.SkipUTF8Check = false }, nhUTF8},
		{nhUTF8MB4, "utf8mb4", func(s SC) { s.SkipUTF8Check = false }, nhUTF8MB4},
		{nhUTF8, "utf8mb4", func(s SC) { s.SkipUTF8Check = true }, nhUTF8},
		{nhUTF8MB4, "utf8mb4", func(s SC) { s.SkipUTF8Check = true }, nhUTF8MB4},
		{nhUTF8Invalid, "utf8mb4", func(s SC) { s.SkipUTF8Check = true }, nhUTF8Invalid},
		{nhUTF8Invalid, "utf8mb4", func(s SC) { s.SkipUTF8Check = false }, ""},
		{nhUTF8Invalid, "ascii", func(s SC) { s.SkipASCIICheck = false }, ""},
		{nhUTF8Invalid, "ascii", func(s SC) { s.SkipASCIICheck = true }, nhUTF8Invalid},
		{nhUTF8MB4, "utf8", func(s SC) { s.SkipUTF8MB4Check = false }, ""},
		{nhUTF8MB4, "utf8", func(s SC) { s.SkipUTF8MB4Check = true }, nhUTF8MB4},
	}
	for _, tt := range tests {
		ft := NewFieldType(mysql.TypeVarchar)
		ft.SetFlen(255)
		ft.SetCharset(tt.outputChs)
		inputDatum := NewStringDatum(tt.input)
		sc := new(stmtctx.StatementContext)
		tt.setStmtCtx(sc)
		outputDatum, err := inputDatum.ConvertTo(sc, ft)
		if len(tt.output) == 0 {
			require.True(t, charset.ErrInvalidCharacterString.Equal(err), tt)
		} else {
			require.NoError(t, err, tt)
			require.Equal(t, tt.output, outputDatum.GetString(), tt)
		}
	}
}
// TestConvertToBinaryString verifies charset transcoding when converting a
// string datum (with a source collation) to a varchar with a target charset,
// including GBK round-trips and rejection of byte sequences invalid in the
// source charset (empty expected output means an error is required).
func TestConvertToBinaryString(t *testing.T) {
	nhUTF8 := "你好"
	nhGBK := string([]byte{0xC4, 0xE3, 0xBA, 0xC3}) // "你好" in GBK
	nhUTF8Invalid := "你好" + string([]byte{0x81})
	nhGBKInvalid := nhGBK + string([]byte{0x81})
	tests := []struct {
		input         string
		inputCollate  string
		outputCharset string
		output        string
	}{
		{nhUTF8, "utf8_bin", "utf8", nhUTF8},
		{nhUTF8, "utf8mb4_bin", "utf8mb4", nhUTF8},
		{nhUTF8, "gbk_bin", "utf8", nhUTF8},
		{nhUTF8, "gbk_bin", "gbk", nhUTF8},
		{nhUTF8, "binary", "utf8mb4", nhUTF8},
		{nhGBK, "binary", "gbk", nhUTF8},
		{nhUTF8, "utf8_bin", "binary", nhUTF8},
		{nhUTF8, "gbk_bin", "binary", nhGBK},
		{nhUTF8Invalid, "utf8_bin", "utf8", ""},
		{nhGBKInvalid, "gbk_bin", "gbk", ""},
	}
	for _, tt := range tests {
		ft := NewFieldType(mysql.TypeVarchar)
		ft.SetFlen(255)
		ft.SetCharset(tt.outputCharset)
		inputDatum := NewCollationStringDatum(tt.input, tt.inputCollate)
		sc := new(stmtctx.StatementContext)
		outputDatum, err := inputDatum.ConvertTo(sc, ft)
		if len(tt.output) == 0 {
			require.True(t, charset.ErrInvalidCharacterString.Equal(err), tt)
		} else {
			require.NoError(t, err, tt)
			require.Equal(t, tt.output, outputDatum.GetString(), tt)
		}
	}
}
// testStrToInt runs StrToInt on str with truncation treated as an error iff
// truncateAsErr is set. When expectErr is non-nil the returned error must
// match it; otherwise the call must succeed and yield expect.
func testStrToInt(t *testing.T, str string, expect int64, truncateAsErr bool, expectErr error) {
	sc := new(stmtctx.StatementContext)
	sc.IgnoreTruncate.Store(!truncateAsErr)
	got, err := StrToInt(sc, str, false)
	if expectErr == nil {
		require.NoError(t, err)
		require.Equal(t, expect, got)
		return
	}
	require.Truef(t, terror.ErrorEqual(err, expectErr), "err %v", err)
}
// testStrToUint runs StrToUint on str with truncation treated as an error
// iff truncateAsErr is set. When expectErr is non-nil the returned error
// must match it; otherwise the call must succeed and yield expect.
func testStrToUint(t *testing.T, str string, expect uint64, truncateAsErr bool, expectErr error) {
	sc := new(stmtctx.StatementContext)
	sc.IgnoreTruncate.Store(!truncateAsErr)
	got, err := StrToUint(sc, str, false)
	if expectErr == nil {
		require.NoError(t, err)
		require.Equal(t, expect, got)
		return
	}
	require.Truef(t, terror.ErrorEqual(err, expectErr), "err %v", err)
}
// testStrToFloat runs StrToFloat on str with truncation treated as an error
// iff truncateAsErr is set. When expectErr is non-nil the returned error
// must match it; otherwise the call must succeed and yield expect.
func testStrToFloat(t *testing.T, str string, expect float64, truncateAsErr bool, expectErr error) {
	sc := new(stmtctx.StatementContext)
	sc.IgnoreTruncate.Store(!truncateAsErr)
	got, err := StrToFloat(sc, str, false)
	if expectErr == nil {
		require.NoError(t, err)
		require.Equal(t, expect, got)
		return
	}
	require.Truef(t, terror.ErrorEqual(err, expectErr), "err %v", err)
}
// TestStrToNum exercises StrToInt, StrToUint and StrToFloat across valid
// numbers, empty strings, trailing/leading garbage, signs, decimals and
// out-of-range exponents, under both strict and ignore-truncate modes.
func TestStrToNum(t *testing.T) {
	testStrToInt(t, "0", 0, true, nil)
	testStrToInt(t, "-1", -1, true, nil)
	testStrToInt(t, "100", 100, true, nil)
	testStrToInt(t, "65.0", 65, false, nil)
	testStrToInt(t, "65.0", 65, true, nil)
	testStrToInt(t, "", 0, false, nil)
	testStrToInt(t, "", 0, true, ErrTruncatedWrongVal)
	testStrToInt(t, "xx", 0, true, ErrTruncatedWrongVal)
	testStrToInt(t, "xx", 0, false, nil)
	testStrToInt(t, "11xx", 11, true, ErrTruncatedWrongVal)
	testStrToInt(t, "11xx", 11, false, nil)
	testStrToInt(t, "xx11", 0, false, nil)
	testStrToUint(t, "0", 0, true, nil)
	testStrToUint(t, "", 0, false, nil)
	testStrToUint(t, "", 0, false, nil)
	testStrToUint(t, "-1", 0xffffffffffffffff, false, ErrOverflow)
	testStrToUint(t, "100", 100, true, nil)
	testStrToUint(t, "+100", 100, true, nil)
	testStrToUint(t, "65.0", 65, true, nil)
	testStrToUint(t, "xx", 0, true, ErrTruncatedWrongVal)
	testStrToUint(t, "11xx", 11, true, ErrTruncatedWrongVal)
	testStrToUint(t, "xx11", 0, true, ErrTruncatedWrongVal)
	// TODO: makes StrToFloat return truncated value instead of zero to make it pass.
	testStrToFloat(t, "", 0, true, ErrTruncatedWrongVal)
	testStrToFloat(t, "-1", -1.0, true, nil)
	testStrToFloat(t, "1.11", 1.11, true, nil)
	testStrToFloat(t, "1.11.00", 1.11, false, nil)
	testStrToFloat(t, "1.11.00", 1.11, true, ErrTruncatedWrongVal)
	testStrToFloat(t, "xx", 0.0, false, nil)
	testStrToFloat(t, "0x00", 0.0, false, nil)
	testStrToFloat(t, "11.xx", 11.0, false, nil)
	testStrToFloat(t, "11.xx", 11.0, true, ErrTruncatedWrongVal)
	testStrToFloat(t, "xx.11", 0.0, false, nil)
	// for issue #5111
	testStrToFloat(t, "1e649", math.MaxFloat64, true, ErrTruncatedWrongVal)
	testStrToFloat(t, "1e649", math.MaxFloat64, false, nil)
	testStrToFloat(t, "-1e649", -math.MaxFloat64, true, ErrTruncatedWrongVal)
	testStrToFloat(t, "-1e649", -math.MaxFloat64, false, nil)
	// for issue #10806, #11179
	testSelectUpdateDeleteEmptyStringError(t)
}
// testSelectUpdateDeleteEmptyStringError checks that converting an empty
// string to int/uint/float yields zero without error when truncation is only
// a warning, in both SELECT and DELETE statement contexts (issues #10806,
// #11179).
func testSelectUpdateDeleteEmptyStringError(t *testing.T) {
	sc := new(stmtctx.StatementContext)
	sc.TruncateAsWarning = true
	for _, stmtFlags := range []struct {
		inSelect bool
		inDelete bool
	}{
		{true, false},
		{false, true},
	} {
		sc.InSelectStmt = stmtFlags.inSelect
		sc.InDeleteStmt = stmtFlags.inDelete

		iv, err := StrToInt(sc, "", false)
		require.NoError(t, err)
		require.Equal(t, int64(0), iv)

		uv, err := StrToUint(sc, "", false)
		require.NoError(t, err)
		require.Equal(t, uint64(0), uv)

		fv, err := StrToFloat(sc, "", false)
		require.NoError(t, err)
		require.Equal(t, float64(0), fv)
	}
}
func TestFieldTypeToStr(t *testing.T) {
v := TypeToStr(mysql.TypeUnspecified, "not binary")
require.Equal(t, TypeStr(mysql.TypeUnspecified), v)
v = TypeToStr(mysql.TypeBlob, charset.CharsetBin)
require.Equal(t, "blob", v)
v = TypeToStr(mysql.TypeString, charset.CharsetBin)
require.Equal(t, "binary", v)
}
// accept converts value to field type tp (optionally unsigned) under a UTC
// context with truncation ignored, requires the conversion to succeed, and
// compares the string form of the result against expected ("<nil>" means
// the result must be NULL).
func accept(t *testing.T, tp byte, value interface{}, unsigned bool, expected string) {
	ft := NewFieldType(tp)
	if unsigned {
		ft.AddFlag(mysql.UnsignedFlag)
	}
	sc := new(stmtctx.StatementContext)
	sc.TimeZone = time.UTC
	sc.IgnoreTruncate.Store(true)
	casted, err := NewDatum(value).ConvertTo(sc, ft)
	require.NoErrorf(t, err, "%v", ft)
	if casted.IsNull() {
		require.Equal(t, "<nil>", expected)
		return
	}
	s, err := casted.ToString()
	require.NoError(t, err)
	require.Equal(t, expected, s)
}
// unsignedAccept asserts that value converts successfully to the unsigned
// variant of type tp, with string form expected.
func unsignedAccept(t *testing.T, tp byte, value interface{}, expected string) {
	accept(t, tp, value, true, expected)
}
// signedAccept asserts that value converts successfully to the signed
// variant of type tp, with string form expected.
func signedAccept(t *testing.T, tp byte, value interface{}, expected string) {
	accept(t, tp, value, false, expected)
}
// deny converts value to field type tp (optionally unsigned) under a strict
// context and requires the conversion to FAIL; the (clamped) result's string
// form must still equal expected ("<nil>" means the result must be NULL).
func deny(t *testing.T, tp byte, value interface{}, unsigned bool, expected string) {
	ft := NewFieldType(tp)
	if unsigned {
		ft.AddFlag(mysql.UnsignedFlag)
	}
	sc := new(stmtctx.StatementContext)
	casted, err := NewDatum(value).ConvertTo(sc, ft)
	require.Error(t, err)
	if casted.IsNull() {
		require.Equal(t, "<nil>", expected)
		return
	}
	s, err := casted.ToString()
	require.NoError(t, err)
	require.Equal(t, expected, s)
}
// unsignedDeny asserts that converting value to the unsigned variant of type
// tp fails, with the clamped result's string form equal to expected.
func unsignedDeny(t *testing.T, tp byte, value interface{}, expected string) {
	deny(t, tp, value, true, expected)
}
// signedDeny asserts that converting value to the signed variant of type tp
// fails, with the clamped result's string form equal to expected.
func signedDeny(t *testing.T, tp byte, value interface{}, expected string) {
	deny(t, tp, value, false, expected)
}
// strvalue renders v using fmt's default (%v) formatting.
func strvalue(v interface{}) string {
	return fmt.Sprint(v)
}
// TestConvert is a table of accept/deny checks for Datum.ConvertTo covering
// integer range clamping (tiny through longlong, signed and unsigned, from
// ints, floats, strings and binary literals), float/double parsing, year
// semantics, date/time/duration parsing, string targets, and decimals.
func TestConvert(t *testing.T) {
	// integer ranges
	signedDeny(t, mysql.TypeTiny, -129, "-128")
	signedAccept(t, mysql.TypeTiny, -128, "-128")
	signedAccept(t, mysql.TypeTiny, 127, "127")
	signedDeny(t, mysql.TypeTiny, 128, "127")
	signedAccept(t, mysql.TypeTiny, NewBinaryLiteralFromUint(127, -1), "127")
	signedDeny(t, mysql.TypeTiny, NewBinaryLiteralFromUint(128, -1), "127")
	unsignedDeny(t, mysql.TypeTiny, -1, "255")
	unsignedAccept(t, mysql.TypeTiny, 0, "0")
	unsignedAccept(t, mysql.TypeTiny, 255, "255")
	unsignedDeny(t, mysql.TypeTiny, 256, "255")
	unsignedAccept(t, mysql.TypeTiny, NewBinaryLiteralFromUint(0, -1), "0")
	unsignedAccept(t, mysql.TypeTiny, NewBinaryLiteralFromUint(255, -1), "255")
	unsignedDeny(t, mysql.TypeTiny, NewBinaryLiteralFromUint(256, -1), "255")
	signedDeny(t, mysql.TypeShort, int64(math.MinInt16)-1, strvalue(int64(math.MinInt16)))
	signedAccept(t, mysql.TypeShort, int64(math.MinInt16), strvalue(int64(math.MinInt16)))
	signedAccept(t, mysql.TypeShort, int64(math.MaxInt16), strvalue(int64(math.MaxInt16)))
	signedDeny(t, mysql.TypeShort, int64(math.MaxInt16)+1, strvalue(int64(math.MaxInt16)))
	signedAccept(t, mysql.TypeShort, NewBinaryLiteralFromUint(math.MaxInt16, -1), strvalue(int64(math.MaxInt16)))
	signedDeny(t, mysql.TypeShort, NewBinaryLiteralFromUint(math.MaxInt16+1, -1), strvalue(int64(math.MaxInt16)))
	unsignedDeny(t, mysql.TypeShort, -1, "65535")
	unsignedAccept(t, mysql.TypeShort, 0, "0")
	unsignedAccept(t, mysql.TypeShort, uint64(math.MaxUint16), strvalue(uint64(math.MaxUint16)))
	unsignedDeny(t, mysql.TypeShort, uint64(math.MaxUint16)+1, strvalue(uint64(math.MaxUint16)))
	unsignedAccept(t, mysql.TypeShort, NewBinaryLiteralFromUint(0, -1), "0")
	unsignedAccept(t, mysql.TypeShort, NewBinaryLiteralFromUint(math.MaxUint16, -1), strvalue(uint64(math.MaxUint16)))
	unsignedDeny(t, mysql.TypeShort, NewBinaryLiteralFromUint(math.MaxUint16+1, -1), strvalue(uint64(math.MaxUint16)))
	signedDeny(t, mysql.TypeInt24, -1<<23-1, strvalue(-1<<23))
	signedAccept(t, mysql.TypeInt24, -1<<23, strvalue(-1<<23))
	signedAccept(t, mysql.TypeInt24, 1<<23-1, strvalue(1<<23-1))
	signedDeny(t, mysql.TypeInt24, 1<<23, strvalue(1<<23-1))
	signedAccept(t, mysql.TypeInt24, NewBinaryLiteralFromUint(1<<23-1, -1), strvalue(1<<23-1))
	signedDeny(t, mysql.TypeInt24, NewBinaryLiteralFromUint(1<<23, -1), strvalue(1<<23-1))
	unsignedDeny(t, mysql.TypeInt24, -1, "16777215")
	unsignedAccept(t, mysql.TypeInt24, 0, "0")
	unsignedAccept(t, mysql.TypeInt24, 1<<24-1, strvalue(1<<24-1))
	unsignedDeny(t, mysql.TypeInt24, 1<<24, strvalue(1<<24-1))
	unsignedAccept(t, mysql.TypeInt24, NewBinaryLiteralFromUint(0, -1), "0")
	unsignedAccept(t, mysql.TypeInt24, NewBinaryLiteralFromUint(1<<24-1, -1), strvalue(1<<24-1))
	unsignedDeny(t, mysql.TypeInt24, NewBinaryLiteralFromUint(1<<24, -1), strvalue(1<<24-1))
	signedDeny(t, mysql.TypeLong, int64(math.MinInt32)-1, strvalue(int64(math.MinInt32)))
	signedAccept(t, mysql.TypeLong, int64(math.MinInt32), strvalue(int64(math.MinInt32)))
	signedAccept(t, mysql.TypeLong, int64(math.MaxInt32), strvalue(int64(math.MaxInt32)))
	signedDeny(t, mysql.TypeLong, uint64(math.MaxUint64), strvalue(uint64(math.MaxInt32)))
	signedDeny(t, mysql.TypeLong, int64(math.MaxInt32)+1, strvalue(int64(math.MaxInt32)))
	signedDeny(t, mysql.TypeLong, "1343545435346432587475", strvalue(int64(math.MaxInt32)))
	signedAccept(t, mysql.TypeLong, NewBinaryLiteralFromUint(math.MaxInt32, -1), strvalue(int64(math.MaxInt32)))
	signedDeny(t, mysql.TypeLong, NewBinaryLiteralFromUint(math.MaxUint64, -1), strvalue(int64(math.MaxInt32)))
	signedDeny(t, mysql.TypeLong, NewBinaryLiteralFromUint(math.MaxInt32+1, -1), strvalue(int64(math.MaxInt32)))
	unsignedDeny(t, mysql.TypeLong, -1, "4294967295")
	unsignedAccept(t, mysql.TypeLong, 0, "0")
	unsignedAccept(t, mysql.TypeLong, uint64(math.MaxUint32), strvalue(uint64(math.MaxUint32)))
	unsignedDeny(t, mysql.TypeLong, uint64(math.MaxUint32)+1, strvalue(uint64(math.MaxUint32)))
	unsignedAccept(t, mysql.TypeLong, NewBinaryLiteralFromUint(0, -1), "0")
	unsignedAccept(t, mysql.TypeLong, NewBinaryLiteralFromUint(math.MaxUint32, -1), strvalue(uint64(math.MaxUint32)))
	unsignedDeny(t, mysql.TypeLong, NewBinaryLiteralFromUint(math.MaxUint32+1, -1), strvalue(uint64(math.MaxUint32)))
	signedDeny(t, mysql.TypeLonglong, math.MinInt64*1.1, strvalue(int64(math.MinInt64)))
	signedAccept(t, mysql.TypeLonglong, int64(math.MinInt64), strvalue(int64(math.MinInt64)))
	signedAccept(t, mysql.TypeLonglong, int64(math.MaxInt64), strvalue(int64(math.MaxInt64)))
	signedDeny(t, mysql.TypeLonglong, math.MaxInt64*1.1, strvalue(int64(math.MaxInt64)))
	signedAccept(t, mysql.TypeLonglong, NewBinaryLiteralFromUint(math.MaxInt64, -1), strvalue(int64(math.MaxInt64)))
	signedDeny(t, mysql.TypeLonglong, NewBinaryLiteralFromUint(math.MaxInt64+1, -1), strvalue(int64(math.MaxInt64)))
	unsignedAccept(t, mysql.TypeLonglong, -1, "18446744073709551615")
	unsignedAccept(t, mysql.TypeLonglong, 0, "0")
	unsignedAccept(t, mysql.TypeLonglong, uint64(math.MaxUint64), strvalue(uint64(math.MaxUint64)))
	unsignedDeny(t, mysql.TypeLonglong, math.MaxUint64*1.1, strvalue(uint64(math.MaxUint64)))
	unsignedAccept(t, mysql.TypeLonglong, NewBinaryLiteralFromUint(0, -1), "0")
	unsignedAccept(t, mysql.TypeLonglong, NewBinaryLiteralFromUint(math.MaxUint64, -1), strvalue(uint64(math.MaxUint64)))
	// integer from string
	signedAccept(t, mysql.TypeLong, " 234 ", "234")
	signedAccept(t, mysql.TypeLong, " 2.35e3 ", "2350")
	signedAccept(t, mysql.TypeLong, " 2.e3 ", "2000")
	signedAccept(t, mysql.TypeLong, " -2.e3 ", "-2000")
	signedAccept(t, mysql.TypeLong, " 2e2 ", "200")
	signedAccept(t, mysql.TypeLong, " 0.002e3 ", "2")
	signedAccept(t, mysql.TypeLong, " .002e3 ", "2")
	signedAccept(t, mysql.TypeLong, " 20e-2 ", "0")
	signedAccept(t, mysql.TypeLong, " -20e-2 ", "0")
	signedAccept(t, mysql.TypeLong, " +2.51 ", "3")
	signedAccept(t, mysql.TypeLong, " -9999.5 ", "-10000")
	signedAccept(t, mysql.TypeLong, " 999.4", "999")
	signedAccept(t, mysql.TypeLong, " -3.58", "-4")
	signedDeny(t, mysql.TypeLong, " 1a ", "1")
	signedDeny(t, mysql.TypeLong, " +1+ ", "1")
	// integer from float
	signedAccept(t, mysql.TypeLong, 234.5456, "235")
	signedAccept(t, mysql.TypeLong, -23.45, "-23")
	unsignedAccept(t, mysql.TypeLonglong, 234.5456, "235")
	unsignedDeny(t, mysql.TypeLonglong, -23.45, "18446744073709551593")
	// float from string
	signedAccept(t, mysql.TypeFloat, "23.523", "23.523")
	signedAccept(t, mysql.TypeFloat, int64(123), "123")
	signedAccept(t, mysql.TypeFloat, uint64(123), "123")
	signedAccept(t, mysql.TypeFloat, 123, "123")
	signedAccept(t, mysql.TypeFloat, float32(123), "123")
	signedAccept(t, mysql.TypeFloat, float64(123), "123")
	signedAccept(t, mysql.TypeDouble, " -23.54", "-23.54")
	signedDeny(t, mysql.TypeDouble, "-23.54a", "-23.54")
	signedDeny(t, mysql.TypeDouble, "-23.54e2e", "-2354")
	signedDeny(t, mysql.TypeDouble, "+.e", "0")
	signedAccept(t, mysql.TypeDouble, "1e+1", "10")
	// year
	signedDeny(t, mysql.TypeYear, 123, "1901")
	signedDeny(t, mysql.TypeYear, 3000, "2155")
	signedAccept(t, mysql.TypeYear, "2000", "2000")
	signedAccept(t, mysql.TypeYear, "abc", "0")
	signedAccept(t, mysql.TypeYear, "00abc", "2000")
	signedAccept(t, mysql.TypeYear, "0019", "2019")
	signedAccept(t, mysql.TypeYear, 2155, "2155")
	signedAccept(t, mysql.TypeYear, 2155.123, "2155")
	signedDeny(t, mysql.TypeYear, 2156, "2155")
	signedDeny(t, mysql.TypeYear, 123.123, "1901")
	signedDeny(t, mysql.TypeYear, 1900, "1901")
	signedAccept(t, mysql.TypeYear, 1901, "1901")
	signedAccept(t, mysql.TypeYear, 1900.567, "1901")
	signedDeny(t, mysql.TypeYear, 1900.456, "1901")
	signedAccept(t, mysql.TypeYear, 0, "0")
	signedAccept(t, mysql.TypeYear, "0", "2000")
	signedAccept(t, mysql.TypeYear, "00", "2000")
	signedAccept(t, mysql.TypeYear, " 0", "2000")
	signedAccept(t, mysql.TypeYear, " 00", "2000")
	signedAccept(t, mysql.TypeYear, " 000", "0")
	signedAccept(t, mysql.TypeYear, " 0000 ", "2000")
	signedAccept(t, mysql.TypeYear, " 0ab", "0")
	signedAccept(t, mysql.TypeYear, "00bc", "0")
	signedAccept(t, mysql.TypeYear, "000a", "0")
	signedAccept(t, mysql.TypeYear, " 000a ", "2000")
	signedAccept(t, mysql.TypeYear, 1, "2001")
	signedAccept(t, mysql.TypeYear, "1", "2001")
	signedAccept(t, mysql.TypeYear, "01", "2001")
	signedAccept(t, mysql.TypeYear, 69, "2069")
	signedAccept(t, mysql.TypeYear, "69", "2069")
	signedAccept(t, mysql.TypeYear, 70, "1970")
	signedAccept(t, mysql.TypeYear, "70", "1970")
	signedAccept(t, mysql.TypeYear, 99, "1999")
	signedAccept(t, mysql.TypeYear, "99", "1999")
	signedDeny(t, mysql.TypeYear, 100, "1901")
	signedDeny(t, mysql.TypeYear, "99999999999999999999999999999999999", "0")
	// time from string
	signedAccept(t, mysql.TypeDate, "2012-08-23", "2012-08-23")
	signedAccept(t, mysql.TypeDatetime, "2012-08-23 12:34:03.123456", "2012-08-23 12:34:03")
	signedAccept(t, mysql.TypeDatetime, ZeroDatetime, "0000-00-00 00:00:00")
	signedAccept(t, mysql.TypeDatetime, int64(0), "0000-00-00 00:00:00")
	signedAccept(t, mysql.TypeDatetime, NewDecFromFloatForTest(20010101100000.123456), "2001-01-01 10:00:00")
	signedAccept(t, mysql.TypeTimestamp, "2012-08-23 12:34:03.123456", "2012-08-23 12:34:03")
	signedAccept(t, mysql.TypeTimestamp, NewDecFromFloatForTest(20010101100000.123456), "2001-01-01 10:00:00")
	signedAccept(t, mysql.TypeDuration, "10:11:12", "10:11:12")
	signedAccept(t, mysql.TypeDuration, ZeroDatetime, "00:00:00")
	signedAccept(t, mysql.TypeDuration, ZeroDuration, "00:00:00")
	signedAccept(t, mysql.TypeDuration, 0, "00:00:00")
	signedDeny(t, mysql.TypeDate, "2012-08-x", "0000-00-00")
	signedDeny(t, mysql.TypeDatetime, "2012-08-x", "0000-00-00 00:00:00")
	signedDeny(t, mysql.TypeTimestamp, "2012-08-x", "0000-00-00 00:00:00")
	signedDeny(t, mysql.TypeDuration, "2012-08-x", "00:20:12")
	signedDeny(t, mysql.TypeDuration, "0000-00-00", "00:00:00")
	signedDeny(t, mysql.TypeDuration, "1234abc", "00:12:34")
	// string from string
	signedAccept(t, mysql.TypeString, "abc", "abc")
	// string from integer
	signedAccept(t, mysql.TypeString, 5678, "5678")
	signedAccept(t, mysql.TypeString, ZeroDuration, "00:00:00")
	signedAccept(t, mysql.TypeString, ZeroDatetime, "0000-00-00 00:00:00")
	signedAccept(t, mysql.TypeString, []byte("123"), "123")
	// TODO add more tests
	signedAccept(t, mysql.TypeNewDecimal, 123, "123")
	signedAccept(t, mysql.TypeNewDecimal, int64(123), "123")
	signedAccept(t, mysql.TypeNewDecimal, uint64(123), "123")
	signedAccept(t, mysql.TypeNewDecimal, float32(123), "123")
	signedAccept(t, mysql.TypeNewDecimal, 123.456, "123.456")
	signedAccept(t, mysql.TypeNewDecimal, "-123.456", "-123.456")
	signedAccept(t, mysql.TypeNewDecimal, NewDecFromInt(12300000), "12300000")
	dec := NewDecFromInt(-123)
	err := dec.Shift(-5)
	require.NoError(t, err)
	err = dec.Round(dec, 5, ModeHalfUp)
	require.NoError(t, err)
	signedAccept(t, mysql.TypeNewDecimal, dec, "-0.00123")
}
// TestRoundIntStr checks that roundIntStr rounds the magnitude of a signed
// decimal string up when the digit after it is '5', preserving the sign.
func TestRoundIntStr(t *testing.T) {
	for _, tc := range []struct {
		numStr    string
		nextDigit byte
		want      string
	}{
		{"+999", '5', "+1000"},
		{"999", '5', "1000"},
		{"-999", '5', "-1000"},
	} {
		require.Equal(t, tc.want, roundIntStr(tc.nextDigit, tc.numStr))
	}
}
// TestGetValidInt exercises getValidIntPrefix: extracting the longest prefix
// of a string that can be interpreted as an integer. The first table runs
// with TruncateAsWarning on (truncation reported via warnings); the second
// with it off (truncation returned as an error).
func TestGetValidInt(t *testing.T) {
	tests := []struct {
		origin  string
		valid   string
		signed  bool
		warning bool
	}{
		{"100", "100", true, false},
		{"-100", "-100", true, false},
		{"9223372036854775808", "9223372036854775808", false, false},
		{"1abc", "1", true, true},
		{"-1-1", "-1", true, true},
		{"+1+1", "+1", true, true},
		{"123..34", "123", true, true},
		{"123.23E-10", "0", true, false},
		{"1.1e1.3", "11", true, true},
		{"11e1.3", "110", true, true},
		{"1.", "1", true, false},
		{".1", "0", true, false},
		{"", "0", true, true},
		{"123e+", "123", true, true},
		{"123de", "123", true, true},
	}
	sc := new(stmtctx.StatementContext)
	sc.TruncateAsWarning = true
	sc.InSelectStmt = true
	// Warnings accumulate on the shared sc across iterations, so the test
	// tracks the expected running total instead of resetting per case.
	warningCount := 0
	for i, tt := range tests {
		prefix, err := getValidIntPrefix(sc, tt.origin, false)
		require.NoError(t, err)
		require.Equal(t, tt.valid, prefix)
		// The returned prefix must itself parse as an integer of the
		// expected signedness.
		if tt.signed {
			_, err = strconv.ParseInt(prefix, 10, 64)
		} else {
			_, err = strconv.ParseUint(prefix, 10, 64)
		}
		require.NoError(t, err)
		warnings := sc.GetWarnings()
		if tt.warning {
			require.Lenf(t, warnings, warningCount+1, "%d", i)
			require.True(t, terror.ErrorEqual(warnings[len(warnings)-1].Err, ErrTruncatedWrongVal))
			warningCount++
		} else {
			require.Len(t, warnings, warningCount)
		}
	}
	// Second pass: truncation surfaces as ErrTruncatedWrongVal directly.
	// Note the expected prefixes differ (e.g. "123." keeps the dot here).
	tests2 := []struct {
		origin  string
		valid   string
		warning bool
	}{
		{"100", "100", false},
		{"-100", "-100", false},
		{"1abc", "1", true},
		{"-1-1", "-1", true},
		{"+1+1", "+1", true},
		{"123..34", "123.", true},
		{"123.23E-10", "0", false},
		{"1.1e1.3", "1.1e1", true},
		{"11e1.3", "11e1", true},
		{"1.", "1", false},
		{".1", "0", false},
		{"", "0", true},
		{"123e+", "123", true},
		{"123de", "123", true},
	}
	sc.TruncateAsWarning = false
	sc.InSelectStmt = false
	for _, tt := range tests2 {
		prefix, err := getValidIntPrefix(sc, tt.origin, false)
		if tt.warning {
			require.True(t, terror.ErrorEqual(err, ErrTruncatedWrongVal))
		} else {
			require.NoError(t, err)
		}
		require.Equal(t, tt.valid, prefix)
	}
}
// TestGetValidFloat checks getValidFloatPrefix (longest parseable float
// prefix of a string) and floatStrToIntStr (rounding a float string into an
// integer string, with overflow/huge-exponent handling).
func TestGetValidFloat(t *testing.T) {
	sc := new(stmtctx.StatementContext)
	prefixCases := []struct {
		origin string
		valid  string
	}{
		{"-100", "-100"},
		{"1abc", "1"},
		{"-1-1", "-1"},
		{"+1+1", "+1"},
		{"123..34", "123."},
		{"123.23E-10", "123.23E-10"},
		{"1.1e1.3", "1.1e1"},
		{"11e1.3", "11e1"},
		{"1.1e-13a", "1.1e-13"},
		{"1.", "1."},
		{".1", ".1"},
		{"", "0"},
		{"123e+", "123"},
		{"123.e", "123."},
		{"0-123", "0"},
		{"9-3", "9"},
		{"1001001\\u0000\\u0000\\u0000", "1001001"},
	}
	for _, c := range prefixCases {
		got, _ := getValidFloatPrefix(sc, c.origin, false)
		require.Equal(t, c.valid, got)
		// Whatever prefix comes back must be a parseable float.
		_, err := strconv.ParseFloat(got, 64)
		require.NoError(t, err)
	}
	intCases := []struct {
		origin   string
		expected string
	}{
		{"1e9223372036854775807", "1"},
		{"125e342", "125"},
		{"1e21", "1"},
		{"1e5", "100000"},
		{"-123.45678e5", "-12345678"},
		{"+0.5", "1"},
		{"-0.5", "-1"},
		{".5e0", "1"},
		{"+.5e0", "+1"},
		{"-.5e0", "-1"},
		{".5", "1"},
		{"123.456789e5", "12345679"},
		{"123.456784e5", "12345678"},
		{"+999.9999e2", "+100000"},
	}
	for _, c := range intCases {
		got, err := floatStrToIntStr(sc, c.origin, c.origin)
		require.NoError(t, err)
		require.Equalf(t, c.expected, got, "%v, %v", c.origin, c.expected)
	}
}
// TestConvertTime tests time related conversion.
// Time conversion covers Date/Datetime/Time/Timestamp etc.; Timestamp may
// involve a time zone, so the same scenario is run under several zones.
func TestConvertTime(t *testing.T) {
	zones := []*time.Location{
		time.UTC,
		time.FixedZone("", 3*3600),
		time.Local,
	}
	for _, tz := range zones {
		testConvertTimeTimeZone(t, &stmtctx.StatementContext{TimeZone: tz})
	}
}
// testConvertTimeTimeZone checks that converting a Datum holding a Time
// between datetime and timestamp field types switches the type code while
// preserving the core time value, under the time zone carried by sc.
//
// The original table listed the datetime->timestamp case three times with
// byte-identical entries; the duplicates added no coverage and are removed.
func testConvertTimeTimeZone(t *testing.T, sc *stmtctx.StatementContext) {
	raw := FromDate(2002, 3, 4, 4, 6, 7, 8)
	tests := []struct {
		input  Time
		target *FieldType
		expect Time
	}{
		// datetime -> timestamp
		{
			input:  NewTime(raw, mysql.TypeDatetime, DefaultFsp),
			target: NewFieldType(mysql.TypeTimestamp),
			expect: NewTime(raw, mysql.TypeTimestamp, DefaultFsp),
		},
		// timestamp -> datetime (round trip direction)
		{
			input:  NewTime(raw, mysql.TypeTimestamp, DefaultFsp),
			target: NewFieldType(mysql.TypeDatetime),
			expect: NewTime(raw, mysql.TypeDatetime, DefaultFsp),
		},
	}
	for _, test := range tests {
		var d Datum
		d.SetMysqlTime(test.input)
		nd, err := d.ConvertTo(sc, test.target)
		require.NoError(t, err)
		v := nd.GetMysqlTime()
		require.Equal(t, test.expect.Type(), v.Type())
		require.Equal(t, test.expect.CoreTime(), v.CoreTime())
	}
}
// TestConvertJSONToInt checks ConvertJSONToInt64 across JSON scalar and
// container inputs: containers, null and non-numeric strings fail; numbers,
// booleans and numeric strings convert (floats truncate toward zero).
func TestConvertJSONToInt(t *testing.T) {
	for _, tc := range []struct {
		in  string
		out int64
		err bool
	}{
		{in: `{}`, err: true},
		{in: `[]`, err: true},
		{in: `3`, out: 3},
		{in: `-3`, out: -3},
		{in: `4.5`, out: 4},
		{in: `true`, out: 1},
		{in: `false`, out: 0},
		{in: `null`, err: true},
		{in: `"hello"`, err: true},
		{in: `"123hello"`, out: 123, err: true},
		{in: `"1234"`, out: 1234},
	} {
		j, err := ParseBinaryJSONFromString(tc.in)
		require.NoError(t, err)
		got, err := ConvertJSONToInt64(new(stmtctx.StatementContext), j, false)
		if tc.err {
			require.Error(t, err, tc)
		} else {
			require.NoError(t, err, tc)
		}
		require.Equal(t, tc.out, got)
	}
}
// TestConvertJSONToFloat checks ConvertJSONToFloat, and as a side effect that
// CreateBinaryJSON assigns the expected type code to each Go input value.
func TestConvertJSONToFloat(t *testing.T) {
	for _, tc := range []struct {
		in  interface{}
		out float64
		ty  JSONTypeCode
		err bool
	}{
		{in: make(map[string]interface{}), ty: JSONTypeCodeObject, err: true},
		{in: make([]interface{}, 0), ty: JSONTypeCodeArray, err: true},
		{in: int64(3), out: 3, ty: JSONTypeCodeInt64},
		{in: int64(-3), out: -3, ty: JSONTypeCodeInt64},
		{in: uint64(1 << 63), out: 1 << 63, ty: JSONTypeCodeUint64},
		{in: float64(4.5), out: 4.5, ty: JSONTypeCodeFloat64},
		{in: true, out: 1, ty: JSONTypeCodeLiteral},
		{in: false, out: 0, ty: JSONTypeCodeLiteral},
		{in: nil, ty: JSONTypeCodeLiteral, err: true},
		{in: "hello", ty: JSONTypeCodeString, err: true},
		{in: "123.456hello", out: 123.456, ty: JSONTypeCodeString, err: true},
		{in: "1234", out: 1234, ty: JSONTypeCodeString},
	} {
		j := CreateBinaryJSON(tc.in)
		require.Equal(t, tc.ty, j.TypeCode)
		got, err := ConvertJSONToFloat(new(stmtctx.StatementContext), j)
		if tc.err {
			require.Error(t, err, tc)
		} else {
			require.NoError(t, err, tc)
		}
		require.Equal(t, tc.out, got)
	}
}
// TestConvertJSONToDecimal checks ConvertJSONToDecimal for numbers, numeric
// strings (including one longer than any native integer), booleans and null.
// Decimal equality is asserted via Compare because MyDecimal values with
// different internal representations can still be numerically equal.
func TestConvertJSONToDecimal(t *testing.T) {
	var tests = []struct {
		in  string
		out *MyDecimal
		err bool
	}{
		{in: `3`, out: NewDecFromStringForTest("3")},
		{in: `-3`, out: NewDecFromStringForTest("-3")},
		{in: `4.5`, out: NewDecFromStringForTest("4.5")},
		{in: `"1234"`, out: NewDecFromStringForTest("1234")},
		{in: `"1234567890123456789012345678901234567890123456789012345"`, out: NewDecFromStringForTest("1234567890123456789012345678901234567890123456789012345")},
		{in: `true`, out: NewDecFromStringForTest("1")},
		{in: `false`, out: NewDecFromStringForTest("0")},
		{in: `null`, out: NewDecFromStringForTest("0"), err: true},
	}
	for _, tt := range tests {
		j, err := ParseBinaryJSONFromString(tt.in)
		require.NoError(t, err)
		casted, err := ConvertJSONToDecimal(new(stmtctx.StatementContext), j)
		errMsg := fmt.Sprintf("input: %v, casted: %v, out: %v, json: %#v", tt.in, casted, tt.out, j)
		if tt.err {
			require.Error(t, err, errMsg)
		} else {
			require.NoError(t, err, errMsg)
		}
		// Reuse errMsg instead of rebuilding the identical format/args
		// inline, which the original duplicated.
		require.Equal(t, 0, casted.Compare(tt.out), errMsg)
	}
}
// TestNumberToDuration checks NumberToDuration: a numeric HHMMSS-style value
// is converted to a Duration, rejecting out-of-range components and values
// with too many digits. Only hour/minute/second are asserted.
func TestNumberToDuration(t *testing.T) {
	for _, c := range []struct {
		number int64
		fsp    int
		hasErr bool
		hour   int
		minute int
		second int
	}{
		{20171222, 0, true, 0, 0, 0},
		{171222, 0, false, 17, 12, 22},
		{20171222020005, 0, false, 2, 0, 5},
		{10000000000, 0, true, 0, 0, 0},
		{171222, 1, false, 17, 12, 22},
		{176022, 1, true, 0, 0, 0},
		{8391222, 1, true, 0, 0, 0},
		{8381222, 0, false, 838, 12, 22},
		{1001222, 0, false, 100, 12, 22},
		{171260, 1, true, 0, 0, 0},
	} {
		dur, err := NumberToDuration(c.number, c.fsp)
		if c.hasErr {
			require.Error(t, err)
			continue
		}
		require.NoError(t, err)
		require.Equal(t, c.hour, dur.Hour())
		require.Equal(t, c.minute, dur.Minute())
		require.Equal(t, c.second, dur.Second())
	}
	// A negative input must produce the exactly negated duration.
	for _, c := range []struct {
		number int64
		dur    time.Duration
	}{
		{171222, 17*time.Hour + 12*time.Minute + 22*time.Second},
		{-171222, -(17*time.Hour + 12*time.Minute + 22*time.Second)},
	} {
		dur, err := NumberToDuration(c.number, 0)
		require.NoError(t, err)
		require.Equal(t, c.dur, dur.Duration)
	}
}
// TestStrToDuration verifies StrToDuration's classification of inputs:
// 14-digit datetime-like strings are not durations, while HH:MM:SS forms are.
func TestStrToDuration(t *testing.T) {
	sc := new(stmtctx.StatementContext)
	for _, c := range []struct {
		str        string
		fsp        int
		isDuration bool
	}{
		{"20190412120000", 4, false},
		{"20190101180000", 6, false},
		{"20190101180000", 1, false},
		{"20190101181234", 3, false},
		{"00:00:00.000000", 6, true},
		{"00:00:00", 0, true},
	} {
		_, _, isDuration, err := StrToDuration(sc, c.str, c.fsp)
		require.NoError(t, err)
		require.Equal(t, c.isDuration, isDuration)
	}
}
// TestConvertScientificNotation checks expansion of scientific notation into
// plain decimal strings, including negative exponents and malformed inputs.
func TestConvertScientificNotation(t *testing.T) {
	for _, c := range []struct {
		input  string
		output string
		succ   bool
	}{
		{"123.456e0", "123.456", true},
		{"123.456e1", "1234.56", true},
		{"123.456e3", "123456", true},
		{"123.456e4", "1234560", true},
		{"123.456e5", "12345600", true},
		{"123.456e6", "123456000", true},
		{"123.456e7", "1234560000", true},
		{"123.456e-1", "12.3456", true},
		{"123.456e-2", "1.23456", true},
		{"123.456e-3", "0.123456", true},
		{"123.456e-4", "0.0123456", true},
		{"123.456e-5", "0.00123456", true},
		{"123.456e-6", "0.000123456", true},
		{"123.456e-7", "0.0000123456", true},
		{"123.456e-", "", false},
		{"123.456e-7.5", "", false},
		{"123.456e", "", false},
	} {
		got, err := convertScientificNotation(c.input)
		if c.succ {
			require.NoError(t, err)
			require.Equal(t, c.output, got)
		} else {
			require.Error(t, err)
		}
	}
}
// TestConvertDecimalStrToUint checks decimal-string-to-uint conversion with
// round-half-up behavior, uint64 saturation at the upper bound, and rejection
// of negative inputs (which yield zero plus an error).
func TestConvertDecimalStrToUint(t *testing.T) {
	for _, c := range []struct {
		input  string
		result uint64
		succ   bool
	}{
		{"0.", 0, true},
		{"72.40", 72, true},
		{"072.40", 72, true},
		{"123.456e2", 12346, true},
		{"123.456e-2", 1, true},
		{"072.50000000001", 73, true},
		{".5757", 1, true},
		{".12345E+4", 1235, true},
		{"9223372036854775807.5", 9223372036854775808, true},
		{"9223372036854775807.4999", 9223372036854775807, true},
		{"18446744073709551614.55", 18446744073709551615, true},
		{"18446744073709551615.344", 18446744073709551615, true},
		{"18446744073709551615.544", 18446744073709551615, false},
		{"-111.111", 0, false},
		{"-10000000000000000000.0", 0, false},
	} {
		got, err := convertDecimalStrToUint(&stmtctx.StatementContext{}, c.input, math.MaxUint64, 0)
		if c.succ {
			require.NoError(t, err)
		} else {
			require.Error(t, err)
		}
		require.Equal(t, c.result, got, "input=%v", c.input)
	}
	// Negative values against a uint8 upper bound fail and yield zero.
	for _, in := range []string{"-99.0", "-100.0"} {
		got, err := convertDecimalStrToUint(&stmtctx.StatementContext{}, in, math.MaxUint8, 0)
		require.Error(t, err)
		require.Equal(t, uint64(0), got)
	}
}
|
package main
import (
_ "github.com/lingdor/glog2midlog"
"github.com/lingdor/midlog-examples/library1"
)
// init is intentionally empty; the blank import of glog2midlog above
// presumably performs the logging wiring as an import side effect —
// TODO(review): confirm, and drop this no-op init if nothing relies on it.
func init() {
}
// main emits a single demo log line through library1.
// NOTE(review): "pring" looks like a typo for "print" — confirm intent
// before changing the emitted string.
func main() {
	library1.DumpLog("rootlog pring")
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/8/23 9:47 下午
# @File : lt_24_删除有序数组的重复项.go
# @Description :
# @Attention :
*/
package offer
// removeDuplicates removes duplicates in place from a sorted slice and
// returns the number of unique elements (LeetCode 26 "Remove Duplicates
// from Sorted Array").
//
// Key insight (translated from the original Chinese comments): the input is
// sorted AND contains duplicates, so two pointers suffice. The slow pointer
// marks the end of the deduplicated prefix; the fast pointer scans ahead.
// Because the array is sorted, the next distinct value is always >= the
// current one, so copying it forward over nums[slow+1] is safe.
//
// Rewritten from `for ; fast < len(nums); {` (non-idiomatic, and fast was
// only advanced on the equal branch) to the standard always-advancing form,
// which performs the identical sequence of writes.
func removeDuplicates(nums []int) int {
	if len(nums) < 2 {
		return len(nums)
	}
	slow := 0
	for fast := 1; fast < len(nums); fast++ {
		if nums[fast] != nums[slow] {
			slow++
			nums[slow] = nums[fast]
		}
	}
	return slow + 1
}
|
package game_map
import (
"fmt"
"github.com/steelx/go-rpg-cgm/combat"
)
// CEFlee is a combat event that attempts to flee the battle for one actor.
// The flee outcome is decided at creation time and a matching storyboard
// (success or failure path) is prepared; Execute later plays it.
type CEFlee struct {
	Scene      *CombatState
	Character  *Character // on-screen character for owner, looked up from the scene
	owner      *combat.Actor
	name       string  // human-readable event name, e.g. "Flee for <actor>"
	countDown  float64 // remaining time before the event fires (event-queue bookkeeping)
	finished   bool    // set by the storyboard end callbacks
	FleeParams CSMoveParams // movement parameters used when running off screen
	CanFlee    bool         // decided in CEFleeCreate (scene override, then Formula.CanFlee)
	Storyboard *Storyboard
}
// CEFleeCreate builds the flee combat event for owner. It faces the
// character right, starts the run animation, decides the flee outcome
// immediately (the scene's CanFlee flag gates the Formula roll), and builds
// the success or failure storyboard accordingly. The storyboard is only
// played later, when Execute pushes it onto the scene's stack.
func CEFleeCreate(scene *CombatState, owner *combat.Actor, fleeParams CSMoveParams) *CEFlee {
	//CSMoveParams{Dir: -1, Distance: 180, Time: 0.6}
	c := &CEFlee{
		Scene:      scene,
		owner:      owner,
		Character:  scene.ActorCharMap[owner],
		FleeParams: fleeParams,
		name:       fmt.Sprintf("Flee for %s", owner.Name),
	}
	c.Character.Facing = CharacterFacingDirection[1] //right
	c.Character.Controller.Change(csRunanim, csProne, false)
	var storyboardEvents []interface{}
	//Scene CanFlee override
	if c.Scene.CanFlee {
		c.CanFlee = Formula.CanFlee(scene, owner)
	} else {
		c.CanFlee = false
	}
	if c.CanFlee {
		// Success path: notice -> run off -> DoFleeSuccess moves the rest
		// of the party off screen and ends the combat via Scene.OnFlee.
		storyboardEvents = []interface{}{
			//stateMachine, stateID, ...animID, additionalParams
			RunFunction(func() {
				c.Scene.ShowNotice("Attempting to Flee...")
			}),
			Wait(1),
			RunFunction(func() {
				c.Scene.ShowNotice("Success")
				c.Character.Controller.Change(csMove, c.FleeParams)
			}),
			Wait(1),
			RunFunction(c.DoFleeSuccess),
			Wait(0.6),
		}
	} else {
		// Failure path: notice -> "Failed !" -> OnFleeFail restores the
		// standby pose and marks the event finished.
		storyboardEvents = []interface{}{
			RunFunction(func() {
				c.Scene.ShowNotice("Attempting to Flee...")
			}),
			Wait(1),
			RunFunction(func() {
				c.Scene.ShowNotice("Failed !")
			}),
			Wait(1),
			RunFunction(c.OnFleeFail),
		}
	}
	c.Storyboard = StoryboardCreate(scene.InternalStack, scene.win, storyboardEvents, false)
	return c
}
// NOTE(review): receivers below mix value and pointer forms. Making them
// uniformly pointer receivers would be more idiomatic, but would change the
// value type's method set — confirm no interface is satisfied by CEFlee
// values before changing.

// Name returns the human-readable event name.
func (c CEFlee) Name() string {
	return c.name
}

// CountDown returns the remaining time before the event fires.
func (c CEFlee) CountDown() float64 {
	return c.countDown
}

// CountDownSet updates the remaining time before the event fires.
func (c *CEFlee) CountDownSet(t float64) {
	c.countDown = t
}

// Owner returns the actor attempting to flee.
func (c CEFlee) Owner() *combat.Actor {
	return c.owner
}

// Update is a no-op; all flee behavior is driven by the storyboard.
func (c *CEFlee) Update() {
}

// IsFinished reports whether the flee sequence has completed.
func (c CEFlee) IsFinished() bool {
	return c.finished
}
// Execute pushes the prepared storyboard onto the combat state's internal
// stack, which plays the flee sequence.
func (c *CEFlee) Execute(queue *EventQueue) {
	c.Scene.InternalStack.Push(c.Storyboard)
}

// TimePoints converts the owner's Speed stat into event-queue time points,
// determining when this event gets its turn.
func (c CEFlee) TimePoints(queue *EventQueue) float64 {
	speed := c.owner.Stats.Get("Speed")
	return queue.SpeedToTimePoints(speed)
}
// OnFleeFail restores the character after a failed flee attempt: face left
// (back toward the fight), return to the standby animation, mark the event
// finished and clear the notice banner.
func (c *CEFlee) OnFleeFail() {
	c.Character.Facing = CharacterFacingDirection[3] //left
	c.Character.Controller.Change(csStandby, csStandby) //animId
	c.finished = true
	c.Scene.HideNotice()
}
// DoFleeSuccess plays the successful-flee ending: every other living party
// member turns right (CharacterFacingDirection[1]) and runs off using the
// same movement parameters; then the scene is told the flee happened and
// the notice banner is hidden.
func (c *CEFlee) DoFleeSuccess() {
	for _, v := range c.Scene.Actors[party] {
		alive := v.Stats.Get("HpNow") > 0
		// Simplified from `var isFleer bool; if v == c.owner { ... }` to a
		// direct boolean assignment — identical behavior.
		isFleer := v == c.owner
		// The fleer itself is already moving (started by the storyboard).
		if alive && !isFleer {
			char := c.Scene.ActorCharMap[v]
			char.Facing = CharacterFacingDirection[1]
			char.Controller.Change(csMove, c.FleeParams)
		}
	}
	c.Scene.OnFlee()
	c.Scene.HideNotice()
}
|
package main
import (
"fmt"
"io/ioutil"
"net/http"
"strings"
)
var port string
var name string
// handler serves the raw contents of index.php verbatim on every request.
// NOTE(review): the file is re-read per request — fine for small files,
// consider caching if traffic grows.
func handler(w http.ResponseWriter, r *http.Request) {
	cont, err := ioutil.ReadFile("index.php")
	if err != nil {
		fmt.Println("Error")
		// Previously the error was only printed and an empty body was
		// silently served; report the failure to the client instead.
		http.Error(w, "index.php not available", http.StatusInternalServerError)
		return
	}
	// string(cont) is equivalent to the old string(cont[:len(cont)]).
	fmt.Fprint(w, string(cont))
}
// readConf loads config.cfg and populates the package-level port and name.
// Lines have the form "key:value"; unrecognized lines are ignored.
// NOTE(review): values are not trimmed, so a CRLF config file leaves "\r"
// in the value — confirm the expected file format before adding trimming.
func readConf() {
	conf, err := ioutil.ReadFile("config.cfg")
	if err != nil {
		fmt.Println("Failed")
		return
	}
	for _, line := range strings.Split(string(conf), "\n") {
		// SplitN(..., 2) keeps values that themselves contain ':' intact;
		// the old plain Split silently truncated them at the next colon.
		if strings.HasPrefix(line, "port:") {
			port = strings.SplitN(line, ":", 2)[1]
		}
		if strings.HasPrefix(line, "name:") {
			name = strings.SplitN(line, ":", 2)[1]
		}
	}
}
// addHandlers registers all HTTP routes on the default ServeMux.
func addHandlers() {
	http.HandleFunc("/", handler)
}
// main loads the configuration, announces itself, registers the handlers
// and serves HTTP on the configured port.
func main() {
	readConf()
	fmt.Println("Server \"" + name + "\" running on port " + port)
	p := ":" + port
	addHandlers()
	// ListenAndServe always returns a non-nil error on exit; the original
	// silently discarded it, hiding failures like an already-bound port.
	if err := http.ListenAndServe(p, nil); err != nil {
		fmt.Println(err)
	}
}
|
package p2p
import (
"math"
"sort"
"sync/atomic"
"time"
"github.com/qlcchain/go-qlc/common"
"github.com/qlcchain/go-qlc/common/types"
"github.com/qlcchain/go-qlc/ledger"
"github.com/qlcchain/go-qlc/log"
"github.com/qlcchain/go-qlc/p2p/protos"
"go.uber.org/zap"
)
// Package-level scratch state shared by the sync round logic (next,
// processFrontiers). NOTE(review): mutable package-level state — safe only
// if a single sync round runs at a time; confirm before adding concurrency.
var (
	headerBlockHash    types.Hash // header of the local frontier currently being compared
	openBlockHash      types.Hash // open block of the local frontier currently being compared
	bulkPush, bulkPull []*protos.Bulk // accumulated push/pull work for the current round
)

// syncTimeout is how long a started sync round suppresses starting another.
const syncTimeout = 10 * time.Second
// ServiceSync manages block synchronization tasks with remote peers: it
// periodically exchanges frontiers and pulls/pushes the missing blocks.
type ServiceSync struct {
	netService      *QlcService
	qlcLedger       *ledger.Ledger
	frontiers       []*types.Frontier // local frontiers snapshot for the current round
	remoteFrontiers []*types.Frontier // frontiers collected from incoming FrontierRsp messages
	quitCh          chan bool
	logger          *zap.SugaredLogger
	lastSyncTime    int64 // unix seconds; accessed atomically (see LastSyncTime)
	syncCount       uint32
}
// NewSyncService returns a ServiceSync bound to the given network service
// and ledger, with a fresh quit channel and a "sync"-scoped logger.
func NewSyncService(netService *QlcService, ledger *ledger.Ledger) *ServiceSync {
	return &ServiceSync{
		netService:   netService,
		qlcLedger:    ledger,
		quitCh:       make(chan bool, 1),
		logger:       log.NewLogger("sync"),
		lastSyncTime: 0,
		syncCount:    0,
	}
}
// Start runs the sync loop: on every SyncInterval tick — unless the last
// sync round is still inside its timeout window — it picks a random peer,
// snapshots the local frontiers, resets the shared per-round state, and
// sends the peer a FrontierReq. The loop exits when quitCh is signalled
// (see Stop).
func (ss *ServiceSync) Start() {
	ss.logger.Info("started sync loop")
	address := types.Address{}
	// Request every frontier; MaxUint32 acts as "no limit" for both fields.
	Req := protos.NewFrontierReq(address, math.MaxUint32, math.MaxUint32)
	ticker := time.NewTicker(time.Duration(ss.netService.node.cfg.P2P.SyncInterval) * time.Second)
	for {
		select {
		case <-ss.quitCh:
			ss.logger.Info("Stopped Sync Loop.")
			return
		case <-ticker.C:
			now := time.Now().Unix()
			v := atomic.LoadInt64(&ss.lastSyncTime)
			// Skip this tick while a previous round is still fresh.
			if v < now {
				peerID, err := ss.netService.node.StreamManager().RandomPeer()
				if err != nil {
					continue
				}
				ss.frontiers, err = getLocalFrontier(ss.qlcLedger)
				if err != nil {
					continue
				}
				ss.logger.Infof("begin sync block from [%s]", peerID)
				// Reset shared round state ([:0:0] drops the backing array
				// so the previous round's data cannot be aliased).
				ss.remoteFrontiers = ss.remoteFrontiers[:0:0]
				ss.next()
				bulkPull = bulkPull[:0:0]
				bulkPush = bulkPush[:0:0]
				err = ss.netService.node.SendMessageToPeer(FrontierRequest, Req, peerID)
				if err != nil {
					ss.logger.Errorf("err [%s] when send FrontierRequest", err)
				}
				ss.syncCount++
			}
		}
	}
}
// LastSyncTime records t+syncTimeout as the moment before which no new sync
// round should start; read atomically by Start and onFrontierReq.
func (ss *ServiceSync) LastSyncTime(t time.Time) {
	atomic.StoreInt64(&ss.lastSyncTime, t.Add(syncTimeout).Unix())
}
// Stop signals the Start loop to exit via quitCh.
// NOTE(review): quitCh is buffered with capacity 1, so one Stop without a
// running loop is absorbed, but a second consecutive Stop would block —
// confirm Stop is only ever called once.
func (ss *ServiceSync) Stop() {
	//ss.logger.Info("Stop Qlc sync...")
	ss.quitCh <- true
}
// onFrontierReq answers a peer's FrontierReq by streaming every local
// frontier back as individual FrontierRsp messages, each tagged with the
// total count so the receiver knows when the set is complete. The reply is
// suppressed while our own sync round is still inside its timeout window.
func (ss *ServiceSync) onFrontierReq(message *Message) error {
	ss.netService.node.logger.Debug("receive FrontierReq")
	now := time.Now().Unix()
	v := atomic.LoadInt64(&ss.lastSyncTime)
	if v < now {
		var fs []*types.Frontier
		fs, err := ss.qlcLedger.GetFrontiers()
		if err != nil {
			return err
		}
		num := len(fs)
		var rsp *protos.FrontierResponse
		for _, f := range fs {
			rsp = protos.NewFrontierRsp(f, uint32(num))
			// Per-frontier send errors are logged but do not abort the
			// stream; the receiver relies on the count to detect gaps.
			err = ss.netService.SendMessageToPeer(FrontierRsp, rsp, message.MessageFrom())
			if err != nil {
				ss.logger.Errorf("send FrontierRsp err [%s]", err)
			}
		}
	}
	//send frontier finished,last frontier is all zero,tell remote peer send finished
	//zeroFrontier := new(types.Frontier)
	//err = ss.netService.SendMessageToPeer(FrontierRsp, zeroFrontier, message.MessageFrom())
	//if err != nil {
	//	ss.logger.Errorf("send FrontierRsp err [%s] for zeroFrontier", err)
	//}
	return nil
}
// checkFrontier accumulates one frontier per incoming FrontierRsp until the
// announced total has arrived, then sorts the full set, appends a
// zero-valued terminator frontier, and hands the batch to processFrontiers
// on a separate goroutine.
func (ss *ServiceSync) checkFrontier(message *Message) {
	rsp, err := protos.FrontierResponseFromProto(message.Data())
	if err != nil {
		ss.logger.Error(err)
		return
	}
	if uint32(len(ss.remoteFrontiers)) < rsp.TotalFrontierNum {
		ss.remoteFrontiers = append(ss.remoteFrontiers, rsp.Frontier)
		if uint32(len(ss.remoteFrontiers)) == rsp.TotalFrontierNum {
			// Copy into a fresh slice before resetting, so the goroutine
			// owns its data and cannot race the next round's appends.
			var remoteFrontiers []*types.Frontier
			remoteFrontiers = append(remoteFrontiers, ss.remoteFrontiers...)
			sort.Sort(types.Frontiers(remoteFrontiers))
			// The zero frontier terminates the list for processFrontiers.
			zeroFrontier := new(types.Frontier)
			remoteFrontiers = append(remoteFrontiers, zeroFrontier)
			ss.remoteFrontiers = ss.remoteFrontiers[:0:0]
			go func() {
				err := ss.processFrontiers(remoteFrontiers, message.MessageFrom())
				if err != nil {
					ss.logger.Errorf("process frontiers error:[%s]", err)
				}
			}()
		}
		return
	}
}
// processFrontiers walks the sorted remote frontier list against the local
// frontier cursor (advanced via ss.next() into the package-level
// openBlockHash/headerBlockHash), accumulating:
//   - bulkPush: account chains/segments we have but the peer lacks;
//   - bulkPull: chains/segments the peer has but we lack.
// The zero-valued sentinel frontier at the end of fsRemotes triggers the
// final phase, which flushes any remaining local-only accounts to bulkPush,
// then sends all BulkPullRequests and pushes all blocks to the peer.
// Comparison relies on both lists being sorted by open-block hash.
func (ss *ServiceSync) processFrontiers(fsRemotes []*types.Frontier, peerID string) error {
	//ss.netService.node.logger.Info("receive FrontierRsp")
	//fsRemote, err := protos.FrontierResponseFromProto(message.Data())
	//if err != nil {
	//	return err
	//}
	//fr := fsRemote.Frontier
	//ss.logger.Info(fr.HeaderBlock, fr.OpenBlock)
	for i := 0; i < len(fsRemotes); i++ {
		if !fsRemotes[i].OpenBlock.IsZero() {
			// Drain local frontiers that sort before the remote one: the
			// peer does not have these accounts at all, so push the whole
			// chain (ZeroHash start = from the open block).
			for {
				if !openBlockHash.IsZero() && (openBlockHash.String() < fsRemotes[i].OpenBlock.String()) {
					// We have an account but remote peer have not.
					push := &protos.Bulk{
						StartHash: types.ZeroHash,
						EndHash:   headerBlockHash,
					}
					bulkPush = append(bulkPush, push)
					ss.next()
				} else {
					break
				}
			}
			if !openBlockHash.IsZero() {
				if fsRemotes[i].OpenBlock == openBlockHash {
					// Same account on both sides: compare the heads.
					if headerBlockHash == fsRemotes[i].HeaderBlock {
						//ss.logger.Infof("this token %s have the same block", openBlockHash)
					} else {
						// If we already confirmed the remote head, the peer
						// is behind: push our segment. Otherwise we are
						// behind: pull from our head to theirs.
						exit, _ := ss.qlcLedger.HasStateBlockConfirmed(fsRemotes[i].HeaderBlock)
						if exit == true {
							push := &protos.Bulk{
								StartHash: fsRemotes[i].HeaderBlock,
								EndHash:   headerBlockHash,
							}
							bulkPush = append(bulkPush, push)
						} else {
							pull := &protos.Bulk{
								StartHash: headerBlockHash,
								EndHash:   fsRemotes[i].HeaderBlock,
							}
							bulkPull = append(bulkPull, pull)
						}
					}
					ss.next()
				} else {
					// NOTE(review): this early return abandons the rest of
					// the remote list when sort order is violated — confirm
					// this is intended rather than a `continue`.
					if fsRemotes[i].OpenBlock.String() > openBlockHash.String() {
						return nil
					}
					// Remote-only account: pull its entire chain.
					pull := &protos.Bulk{
						StartHash: types.ZeroHash,
						EndHash:   fsRemotes[i].HeaderBlock,
					}
					bulkPull = append(bulkPull, pull)
				}
			} else {
				// Local frontiers exhausted: every remaining remote account
				// is pulled in full.
				pull := &protos.Bulk{
					StartHash: types.ZeroHash,
					EndHash:   fsRemotes[i].HeaderBlock,
				}
				bulkPull = append(bulkPull, pull)
			}
		} else {
			// Zero open block = terminator appended by checkFrontier: flush
			// remaining local-only accounts, then send all accumulated work.
			for {
				if !openBlockHash.IsZero() {
					// We have an account but remote peer have not.
					push := &protos.Bulk{
						StartHash: types.ZeroHash,
						EndHash:   headerBlockHash,
					}
					bulkPush = append(bulkPush, push)
					ss.next()
				} else {
					if len(ss.frontiers) == 0 {
						// Refill the local snapshot for the next round.
						var err error
						ss.frontiers, err = getLocalFrontier(ss.qlcLedger)
						if err != nil {
							ss.logger.Error("get local frontier error")
						}
						ss.next()
					}
					// Phase 1: ask the peer for everything we are missing.
					for _, value := range bulkPull {
						blkReq := &protos.BulkPullReqPacket{
							StartHash: value.StartHash,
							EndHash:   value.EndHash,
						}
						err := ss.netService.SendMessageToPeer(BulkPullRequest, blkReq, peerID)
						if err != nil {
							ss.logger.Errorf("err [%s] when send BulkPullRequest", err)
						}
					}
					// Phase 2: push the blocks the peer is missing. Chains
					// are collected newest-first by following Previous
					// links, then sent oldest-first.
					for _, value := range bulkPush {
						startHash := value.StartHash
						endHash := value.EndHash
						var err error
						if startHash.IsZero() {
							//ss.logger.Infof("need to send all the blocks of this account")
							var blk *types.StateBlock
							var bulkBlk []*types.StateBlock
							for {
								blk, err = ss.qlcLedger.GetStateBlock(endHash)
								if err != nil {
									ss.logger.Errorf("err when get StateBlock:[%s]", endHash.String())
									break
								}
								bulkBlk = append(bulkBlk, blk)
								endHash = blk.GetPrevious()
								if endHash.IsZero() == true {
									break
								}
							}
							for i := len(bulkBlk) - 1; i >= 0; i-- {
								// Stop early if the peer disconnected.
								if !ss.netService.Node().streamManager.IsConnectWithPeerId(peerID) {
									break
								}
								err = ss.netService.SendMessageToPeer(BulkPushBlock, bulkBlk[i], peerID)
								if err != nil {
									ss.logger.Errorf("err [%s] when send BulkPushBlock", err)
								}
							}
						} else {
							//ss.logger.Info("need to send some blocks of this account")
							var blk *types.StateBlock
							var bulkBlk []*types.StateBlock
							for {
								blk, err = ss.qlcLedger.GetStateBlock(endHash)
								if err != nil {
									ss.logger.Errorf("err when get StateBlock:[%s]", endHash.String())
									break
								}
								bulkBlk = append(bulkBlk, blk)
								endHash = blk.GetPrevious()
								if endHash == startHash {
									break
								}
							}
							for i := len(bulkBlk) - 1; i >= 0; i-- {
								if !ss.netService.Node().streamManager.IsConnectWithPeerId(peerID) {
									break
								}
								err = ss.netService.SendMessageToPeer(BulkPushBlock, bulkBlk[i], peerID)
								if err != nil {
									ss.logger.Errorf("err [%s] when send BulkPushBlock", err)
								}
							}
						}
					}
					break
				}
			}
		}
	}
	return nil
}
// getLocalFrontier fetches all frontiers from the local ledger and appends a
// zero-valued sentinel frontier that marks the end of the list for the
// frontier-comparison logic.
func getLocalFrontier(ledger *ledger.Ledger) ([]*types.Frontier, error) {
	fs, err := ledger.GetFrontiers()
	if err != nil {
		return nil, err
	}
	return append(fs, new(types.Frontier)), nil
}
// onBulkPullRequest answers a segment-type BulkPullRequest by walking the
// requested chain backwards from EndHash via Previous links (to the open
// block when StartHash is zero, otherwise to StartHash) and replying with
// the blocks oldest-first, one BulkPullRsp per block. Non-segment pull
// types are delegated to onBulkPullRequestExt.
func (ss *ServiceSync) onBulkPullRequest(message *Message) error {
	pullRemote, err := protos.BulkPullReqPacketFromProto(message.Data())
	if err != nil {
		return err
	}
	ss.netService.node.logger.Debugf("receive BulkPullRequest, type %d start %s end %s count %d",
		pullRemote.PullType, pullRemote.StartHash, pullRemote.EndHash, pullRemote.Count)
	startHash := pullRemote.StartHash
	endHash := pullRemote.EndHash
	pullType := pullRemote.PullType
	if pullType != protos.PullTypeSegment {
		return ss.onBulkPullRequestExt(message, pullRemote)
	}
	if startHash.IsZero() {
		var blk *types.StateBlock
		var bulkBlk []*types.StateBlock
		//ss.logger.Info("need to send all the blocks of this account")
		for {
			blk, err = ss.qlcLedger.GetStateBlock(endHash)
			if err != nil {
				ss.logger.Errorf("err when get StateBlock:[%s]", endHash.String())
				break
			}
			bulkBlk = append(bulkBlk, blk)
			// Follow the chain back until the open block (zero previous).
			endHash = blk.GetPrevious()
			if endHash.IsZero() == true {
				break
			}
		}
		// Send oldest-first; stop early if the peer disconnected.
		for i := len(bulkBlk) - 1; i >= 0; i-- {
			if !ss.netService.Node().streamManager.IsConnectWithPeerId(message.MessageFrom()) {
				break
			}
			err = ss.netService.SendMessageToPeer(BulkPullRsp, bulkBlk[i], message.MessageFrom())
			if err != nil {
				ss.logger.Errorf("err [%s] when send BulkPullRsp", err)
			}
		}
	} else {
		var blk *types.StateBlock
		var bulkBlk []*types.StateBlock
		//ss.logger.Info("need to send some blocks of this account")
		for {
			blk, err = ss.qlcLedger.GetStateBlock(endHash)
			if err != nil {
				ss.logger.Errorf("err when get StateBlock:[%s]", endHash.String())
				break
			}
			bulkBlk = append(bulkBlk, blk)
			// Walk back until the requested segment start is reached.
			endHash = blk.GetPrevious()
			if endHash == startHash {
				break
			}
		}
		for i := len(bulkBlk) - 1; i >= 0; i-- {
			if !ss.netService.Node().streamManager.IsConnectWithPeerId(message.MessageFrom()) {
				break
			}
			err = ss.netService.SendMessageToPeer(BulkPullRsp, bulkBlk[i], message.MessageFrom())
			if err != nil {
				ss.logger.Errorf("err [%s] when send BulkPullRsp", err)
			}
		}
	}
	return nil
}
// onBulkPullRequestExt handles the non-segment pull types:
//   - PullTypeBackward: up to Count blocks walking back from EndHash;
//   - PullTypeForward: blocks from the token's current header back to
//     StartHash, truncated to Count (defaulting to 1000 when Count is 0);
//   - PullTypeBatch: exactly the explicitly listed hashes.
// Collected blocks are sent oldest-first as BulkPullRsp messages.
func (ss *ServiceSync) onBulkPullRequestExt(message *Message, pullRemote *protos.BulkPullReqPacket) error {
	var err error
	var blk *types.StateBlock
	var bulkBlk []*types.StateBlock
	pullType := pullRemote.PullType
	blkCnt := pullRemote.Count
	if pullType == protos.PullTypeBackward {
		scanHash := pullRemote.EndHash
		ss.logger.Debugf("need to send %d blocks by backward", blkCnt)
		for {
			blk, err = ss.qlcLedger.GetStateBlock(scanHash)
			if err != nil {
				break
			}
			bulkBlk = append(bulkBlk, blk)
			blkCnt--
			if blkCnt <= 0 {
				break
			}
			scanHash = blk.GetPrevious()
		}
	} else if pullType == protos.PullTypeForward {
		startHash := pullRemote.StartHash
		if blkCnt == 0 {
			blkCnt = 1000
		}
		ss.logger.Debugf("need to send %d blocks by forward", blkCnt)
		blk, err = ss.qlcLedger.GetStateBlock(startHash)
		if err != nil {
			return err
		}
		tm, err := ss.qlcLedger.GetTokenMeta(blk.GetAddress(), blk.GetToken())
		if err != nil {
			return err
		}
		// Walk back from the token's header until startHash is reached.
		scanHash := tm.Header
		for {
			if scanHash.IsZero() {
				break
			}
			blk, err = ss.qlcLedger.GetStateBlock(scanHash)
			if err != nil {
				break
			}
			bulkBlk = append(bulkBlk, blk)
			if startHash == scanHash {
				break
			}
			scanHash = blk.GetPrevious()
		}
		// Cap the reply at blkCnt blocks; the slice was built header-first,
		// so this keeps the newest ones.
		if uint32(len(bulkBlk)) > blkCnt {
			bulkBlk = bulkBlk[:blkCnt]
		}
	} else if pullType == protos.PullTypeBatch {
		ss.logger.Debugf("need to send %d blocks by batch", blkCnt)
		for _, scanHash := range pullRemote.Hashes {
			if scanHash == nil {
				continue
			}
			// Unknown hashes are skipped silently; the peer gets only what
			// we actually have.
			blk, err = ss.qlcLedger.GetStateBlock(*scanHash)
			if err != nil {
				continue
			}
			bulkBlk = append(bulkBlk, blk)
		}
	}
	// Send oldest-first; abort if the peer has gone away.
	for i := len(bulkBlk) - 1; i >= 0; i-- {
		if !ss.netService.Node().streamManager.IsConnectWithPeerId(message.MessageFrom()) {
			break
		}
		err = ss.netService.SendMessageToPeer(BulkPullRsp, bulkBlk[i], message.MessageFrom())
		if err != nil {
			ss.logger.Errorf("err [%s] when send BulkPullRsp", err)
		}
	}
	return nil
}
// onBulkPullRsp handles one block arriving in response to our
// BulkPullRequest: decode it, skip empty packets, optionally record
// performance timing, and publish the block on the internal event bus.
func (ss *ServiceSync) onBulkPullRsp(message *Message) error {
	blkPacket, err := protos.BulkPushBlockFromProto(message.Data())
	if err != nil {
		return err
	}
	block := blkPacket.Blk
	// An empty packet carries nothing to process.
	if block == nil {
		return nil
	}
	ss.netService.node.logger.Debugf("receive BulkPullRsp, hash %s", block.GetHash())
	if ss.netService.node.cfg.PerformanceEnabled {
		hash := block.GetHash()
		ss.netService.msgService.addPerformanceTime(hash)
	}
	ss.netService.msgEvent.Publish(common.EventSyncBlock, block)
	return nil
}
// onBulkPushBlock handles an incoming BulkPushBlock message: decode the
// pushed block, optionally record performance timing, and publish the block
// on the internal event bus for downstream processing.
func (ss *ServiceSync) onBulkPushBlock(message *Message) error {
	ss.netService.node.logger.Debug("receive BulkPushBlock")
	blkPacket, err := protos.BulkPushBlockFromProto(message.Data())
	if err != nil {
		return err
	}
	block := blkPacket.Blk
	// Guard against an empty packet, mirroring onBulkPullRsp; previously a
	// nil block would have been dereferenced in block.GetHash()/Publish.
	if block == nil {
		return nil
	}
	if ss.netService.node.cfg.PerformanceEnabled {
		hash := block.GetHash()
		ss.netService.msgService.addPerformanceTime(hash)
	}
	ss.netService.msgEvent.Publish(common.EventSyncBlock, block)
	return nil
}
// next pops the first local frontier into the package-level comparison
// cursor (openBlockHash/headerBlockHash) used by processFrontiers. Once the
// list is exhausted the cursor keeps its last values; the zero-valued
// sentinel appended by getLocalFrontier is what finally drives both hashes
// to zero.
func (ss *ServiceSync) next() {
	if len(ss.frontiers) > 0 {
		openBlockHash = ss.frontiers[0].OpenBlock
		headerBlockHash = ss.frontiers[0].HeaderBlock
		ss.frontiers = ss.frontiers[1:]
	}
}
|
package main
import (
"bytes"
"embed"
"github.com/Masterminds/sprig"
"go/format"
"text/template"
)
//go:embed *.tpl
var templateFiles embed.FS
var templates *template.Template
// Templates lazily parses all embedded *.tpl files (with sprig's hermetic
// function map) and caches the result in the package-level `templates` var.
// NOTE(review): this lazy init is not goroutine-safe — concurrent first
// calls could parse twice and race on `templates`; use sync.Once if this is
// ever called from multiple goroutines.
func Templates() (*template.Template, error) {
	if templates == nil {
		//sub, err := fs.Sub(templateFiles, "template")
		//if err != nil {
		//	return nil, err
		//}
		tpls, err := template.New("base").Funcs(sprig.HermeticTxtFuncMap()).ParseFS(templateFiles, "*.tpl")
		if err != nil {
			return nil, err
		}
		templates = tpls
	}
	return templates, nil
}
func RenderGoTemplate(name string, data interface{}) ([]byte, error) {
var generated bytes.Buffer
tpl, err := Templates()
if err != nil {
return nil, err
}
err = tpl.ExecuteTemplate(&generated, name, data)
if err != nil {
return nil, err
}
formatted, err := format.Source(generated.Bytes())
if err != nil {
return generated.Bytes(), err
}
return formatted, nil
} |
package users
import (
"io"
"io/ioutil"
"log"
"github.com/google/uuid"
"github.com/pkg/errors"
. "2019_2_IBAT/pkg/pkg/models"
)
// CreateEmployer reads an Employer registration from body, assigns it a
// fresh UUID and the default image, and persists it via the storage layer.
// It returns the new ID, or an error: BadRequestMsg on a read failure,
// InvalidJSONMsg on a decode failure, EmailExistsMsg when storage rejects
// the record.
func (h *UserService) CreateEmployer(body io.ReadCloser) (uuid.UUID, error) {
	bytes, err := ioutil.ReadAll(body)
	defer body.Close()
	if err != nil {
		log.Printf("error while reading body: %s", err)
		return uuid.UUID{}, errors.New(BadRequestMsg)
	}
	var newEmployerReg Employer
	err = newEmployerReg.UnmarshalJSON(bytes)
	if err != nil {
		log.Printf("Error while unmarshaling: %s", err)
		return uuid.UUID{}, errors.New(InvalidJSONMsg)
	}
	id := uuid.New()
	newEmployerReg.ID = id
	newEmployerReg.PathToImg = DefaultImg
	ok := h.Storage.CreateEmployer(newEmployerReg)
	if !ok {
		// BUG FIX: the old message formatted `err`, which is always nil at
		// this point — the storage API reports failure via the bool only.
		log.Printf("Error while creating employer")
		return uuid.UUID{}, errors.New(EmailExistsMsg)
	}
	return id, nil
}
// PutEmployer reads an EmployerReg update from body and applies it to the
// employer identified by id via the storage layer. Errors: BadRequestMsg on
// a read or storage failure, InvalidJSONMsg on a decode failure.
func (h *UserService) PutEmployer(body io.ReadCloser, id uuid.UUID) error {
	bytes, err := ioutil.ReadAll(body)
	// Close the body like CreateEmployer does; it was previously leaked.
	defer body.Close()
	if err != nil {
		log.Printf("error while reading body: %s", err)
		return errors.Wrap(err, BadRequestMsg)
	}
	var newEmployerReg EmployerReg
	err = newEmployerReg.UnmarshalJSON(bytes)
	if err != nil {
		log.Printf("Error while unmarshaling: %s", err)
		return errors.New(InvalidJSONMsg)
	}
	ok := h.Storage.PutEmployer(newEmployerReg, id)
	if !ok {
		log.Printf("Error while creating employer")
		return errors.New(BadRequestMsg)
	}
	return nil
}
// GetEmployer fetches a single employer record by ID from storage.
func (h *UserService) GetEmployer(id uuid.UUID) (Employer, error) {
	log.Println("GetEmployer Service Start")
	return h.Storage.GetEmployer(id)
}
// GetEmployers fetches employer records matching the given filter params;
// filtering semantics are defined by the storage layer.
func (h *UserService) GetEmployers(params map[string]interface{}) ([]Employer, error) {
	return h.Storage.GetEmployers(params)
}
|
package fin_test
import (
"io"
"net/http"
"testing"
"github.com/xsymphony/fin"
)
// TestNewRouter spins up a fin server on :8080 and checks that GET /hello
// returns the literal body "hello".
func TestNewRouter(t *testing.T) {
	r := fin.New()
	r.ANY("/hello", func(ctx *fin.Context) {
		ctx.WriteString("hello")
	})
	// NOTE(review): the server starts asynchronously with no readiness
	// synchronization, so the GET below can race a slow startup, and the
	// Run error is discarded. Port 8080 is also assumed to be free.
	// Consider a dial-retry loop or starting from a pre-bound listener.
	go func() {
		r.Run(":8080")
	}()
	resp, err := http.Get("http://127.0.0.1:8080/hello")
	if err != nil {
		t.Fatalf("fetch fin server fail with %s", err)
	}
	defer resp.Body.Close()
	// Read exactly the 5 expected bytes; io.EOF is acceptable here.
	payload := make([]byte, 5)
	if _, err := resp.Body.Read(payload); err != io.EOF && err != nil {
		t.Fatalf("read body fail with %s", err)
	}
	if string(payload) != "hello" {
		t.Fatalf("server response body is not excepted %s", string(payload))
	}
}
|
package app
import (
"glsamaker/pkg/app/handler/authentication/totp"
"glsamaker/pkg/config"
"glsamaker/pkg/database/connection"
"glsamaker/pkg/logger"
"glsamaker/pkg/models/users"
)
// defaultAdminPermissions returns the full-access permission set granted to
// the bootstrap admin account: every GLSA, CVE-tool and admin capability
// enabled.
func defaultAdminPermissions() users.Permissions {
	return users.Permissions{
		Glsa: users.GlsaPermissions{
			View:           true,
			UpdateBugs:     true,
			Comment:        true,
			Create:         true,
			Edit:           true,
			Approve:        true,
			ApproveOwnGlsa: true,
			Decline:        true,
			Delete:         true,
			Release:        true,
			Confidential:   true,
		},
		CVETool: users.CVEToolPermissions{
			View:        true,
			UpdateCVEs:  true,
			Comment:     true,
			AddCVE:      true,
			AddPackage:  true,
			ChangeState: true,
			AssignBug:   true,
			CreateBug:   true,
		},
		Admin: users.AdminPermissions{
			View:            true,
			CreateTemplates: true,
			ManageUsers:     true,
			GlobalSettings:  true,
		},
	}
}
// CreateDefaultAdmin provisions the initial admin account: a fresh TOTP
// secret and QR code, an "admin" badge, and a password hashed from the
// configured initial password. The insert is a no-op when a user with the
// admin email already exists (ON CONFLICT ... DO NOTHING), so it is safe to
// call on every startup.
func CreateDefaultAdmin() {
	token, qrcode := totp.Generate(config.AdminEmail())
	badge := users.Badge{
		Name:        "admin",
		Description: "Admin Account",
		Color:       "orange",
	}
	// argon2id parameters: 1 pass, 64 MiB memory, 4 threads, 32-byte key.
	passwordParameters := users.Argon2Parameters{
		Type:    "argon2id",
		Time:    1,
		Memory:  64 * 1024,
		Threads: 4,
		KeyLen:  32,
	}
	passwordParameters.GenerateSalt(32)
	passwordParameters.GeneratePassword(config.AdminInitialPassword())
	defaultUser := &users.User{
		Email:                 config.AdminEmail(),
		Password:              passwordParameters,
		Nick:                  "admin",
		Name:                  "Admin Account",
		Role:                  "admin",
		ForcePasswordChange:   false,
		TOTPSecret:            token,
		TOTPQRCode:            qrcode,
		IsUsingTOTP:           false,
		WebauthnCredentials:   nil,
		IsUsingWebAuthn:       false,
		Show2FANotice:         true,
		Badge:                 badge,
		Disabled:              false,
		ForcePasswordRotation: false,
		Force2FA:              false,
		Permissions:           defaultAdminPermissions(),
	}
	_, err := connection.DB.Model(defaultUser).OnConflict("(email) DO Nothing").Insert()
	if err != nil {
		logger.Error.Println("Err during creating default admin user")
		logger.Error.Println(err)
	}
}
|
package main
// minSumSubArray returns the minimum sum over all non-empty contiguous
// subarrays of nums. For an empty (or nil) slice it returns the maximum
// int value, preserving the original implementation's "no subarray"
// sentinel for callers.
//
// The original divide-and-conquer recursion was O(n log n); this is the
// equivalent Kadane minimum-subarray scan: O(n) time, O(1) space, and
// identical results for every input.
func minSumSubArray(nums []int) int {
	const intMax = int(^uint(0) >> 1)
	if len(nums) == 0 {
		// No subarray exists; keep the original sentinel value.
		return intMax
	}
	// cur is the minimum sum of a subarray ending at the current element;
	// best is the minimum over all subarrays seen so far.
	best := nums[0]
	cur := nums[0]
	for _, v := range nums[1:] {
		// Extending a positive-sum prefix can only hurt, so restart at v.
		if cur > 0 {
			cur = v
		} else {
			cur += v
		}
		if cur < best {
			best = cur
		}
	}
	return best
}
|
package eventmanager
import (
"context"
cluster "github.com/bsm/sarama-cluster"
"github.com/lovoo/goka"
"github.com/lovoo/goka/kafka"
"log"
"microservices_template_golang/payment_processing/src/models"
"microservices_template_golang/payment_processing/src/utils"
"os"
"os/signal"
"syscall"
)
// storageTopic is the goka stream that processed payments are emitted to
// (see InitDefaultProcessor's callback).
var storageTopic goka.Stream = "payment-storage"

// EventProcessor wraps a goka.Processor so it can be configured via the
// Init* methods and then driven with graceful shutdown by Run.
type EventProcessor struct {
	processor *goka.Processor
}
// InitSimpleProcessor builds the underlying goka processor for the given
// consumer group: it consumes `topic` (decoded with models.PaymentCodec),
// invokes groupCallback for each message, and uses the supplied sarama
// producer/consumer configs. The processor is stored on e for Run.
// NOTE(review): log.Fatalf aborts the whole process when construction
// fails — confirm that is intended rather than returning an error.
func (e *EventProcessor) InitSimpleProcessor(brokers []string, group goka.Group, groupCallback func(ctx goka.Context, msg interface{}),
	topic goka.Stream, pc, cc *cluster.Config) {
	p, err := goka.NewProcessor(brokers,
		goka.DefineGroup(group,
			goka.Input(topic, new(models.PaymentCodec), groupCallback),
		),
		goka.WithProducerBuilder(kafka.ProducerBuilderWithConfig(pc)), // our config, mostly default
		goka.WithConsumerBuilder(kafka.ConsumerBuilderWithConfig(cc)), // our config, mostly default
	)
	if err != nil {
		log.Fatalf("error creating processor: %v", err)
	}
	e.processor = p
}
// InitDefaultProcessor wires up a processor with default configs whose
// callback re-emits each consumed payment (wrapped as a ProcessedPayment
// with a fresh short UUID) onto the storageTopic stream.
//
// Fix: the original logged a failed type assertion but then fell through
// and dereferenced the nil *models.Payment, panicking on any message the
// codec did not decode to *models.Payment. The callback now skips such
// messages with an early return.
func (e *EventProcessor) InitDefaultProcessor(brokers []string, group goka.Group, topic goka.Stream) {
	pc := NewConfig()
	cc := NewConfig()
	emitter := NewAppEmitter(brokers, storageTopic, new(models.ProcessedPaymentCodec), pc)
	cb := func(ctx goka.Context, msg interface{}) {
		payment, ok := msg.(*models.Payment)
		if !ok {
			// Malformed message: log and skip instead of panicking below.
			log.Println("Error while parsing message to the structure")
			return
		}
		log.Printf("Payment from %v was just processed", payment.Author)
		processedPayment := &models.ProcessedPayment{utils.GenShortUUID(), *payment}
		if err := emitter.EmitSync(processedPayment.Author, processedPayment); err != nil {
			log.Fatalf("error emitting message: %v", err)
		}
	}
	e.InitSimpleProcessor(brokers, group, cb, topic, pc, cc)
}
// Run starts the configured processor in a background goroutine and blocks
// until SIGINT or SIGTERM arrives, then cancels the processor's context and
// waits for it to finish — a graceful shutdown. A processor error aborts
// the process via log.Fatalf.
func (e *EventProcessor) Run() {
	ctx, cancel := context.WithCancel(context.Background())
	finished := make(chan bool)
	go func() {
		// Closing finished signals that the processor has fully stopped.
		defer close(finished)
		if err := e.processor.Run(ctx); err != nil {
			log.Fatalf("error running processor: %v", err)
		}
	}()
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	<-sigs    // block until SIGINT/SIGTERM
	cancel()  // ask the processor to stop
	<-finished // wait for it to drain and exit
}
|
package main
import "fmt"
// main demonstrates printEveryThirdInRange: prints 10, 13, ..., 34.
func main() {
	printEveryThirdInRange(10, 35)
}
// printEveryThirdInRange prints n, n+3, n+6, ... one per line, stopping at
// the largest value n+3k that is <= m. If n > m nothing is printed.
//
// Fix: the original loop header (`i := n ; i <= m ;`) was not gofmt-clean.
func printEveryThirdInRange(n int, m int) {
	for i := n; i <= m; i += 3 {
		fmt.Println(i)
	}
}
package main
// 3. 无重复字符的最长子串
// 来源:力扣(LeetCode)
// 链接:https://leetcode-cn.com/problems/longest-substring-without-repeating-characters
/* 题目描述
给定一个字符串,请你找出其中不含有重复字符的 最长子串 的长度。
示例 1:
输入: "abcabcbb"
输出: 3
解释: 因为无重复字符的最长子串是 "abc",所以其长度为 3。
示例 2:
输入: "bbbbb"
输出: 1
解释: 因为无重复字符的最长子串是 "b",所以其长度为 1。
示例 3:
输入: "pwwkew"
输出: 3
解释: 因为无重复字符的最长子串是 "wke",所以其长度为 3。
请注意,你的答案必须是 子串 的长度,"pwke" 是一个子序列,不是子串。
*/
/* 解题思路1
此题为经典的窗口滑动中的非定长算法。
解题思路为抽象符合条件的字符串在窗口内,依次遍历字符串,当前位置字符串如果包含在窗口中,窗口中的开始位置更新为重复位置的下一位 最后得到最长非重复字符串的值 代码如下:
作者:colas
链接:https://leetcode-cn.com/problems/longest-substring-without-repeating-characters/solution/gosliding-window-algorithm-zhi-fei-ding-chang-by-c/
来源:力扣(LeetCode)
*/
// lengthOfLongestSubstring_1 returns the length of the longest substring of
// s without repeating bytes, using a variable-size sliding window: the
// window s[start:key] is always duplicate-free, and when s[key] repeats a
// byte inside it, start jumps past the earlier occurrence.
//
// Fix: the original called strings.Index, but this file never imports the
// strings package, so it did not compile. The lookup is done with an inline
// byte scan over the window instead (same result; the window is short, so
// this is equivalent work). Comments translated to English.
func lengthOfLongestSubstring_1(s string) int {
	// window: best length seen so far; start: left edge of current window.
	window, start := 0, 0
	for key := 0; key < len(s); key++ {
		// Find s[key] inside the current window (dup stays -1 if absent).
		dup := -1
		for j := start; j < key; j++ {
			if s[j] == s[key] {
				dup = j
				break
			}
		}
		if dup == -1 {
			// No repeat: the window grows; record it if it is the longest.
			if key-start+1 > window {
				window = key - start + 1
			}
		} else {
			// Repeat found: move the left edge just past the earlier copy.
			start = dup + 1
		}
	}
	return window
}
/* 解题思路2
利用location保存字符上次出现的序列号,可以避免了查询工作。location和Two Sum中的m是一样的作用。
利用s[left:i+1]来表示s[:i+1]中的包含s[i]的最长子字符串。 location[s[i]]是字符s[i]在s[:i+1]中倒数第二次出现的序列号。 当left < location[s[i]]的时候,说明字符s[i]出现了两次。需要设置 left = location[s[i]] + 1, 保证字符s[i]只出现一次。
总结
// m 负责保存map[整数]整数的序列号
m := make(map[int]int, len(nums))
*/
// lengthOfLongestSubstring_2 returns the length of the longest substring of
// s without repeating bytes. It tracks, for each of the 256 possible byte
// values, the index of its most recent occurrence, so each character is
// processed in O(1) — no window scan needed. (The 256-entry table assumes
// byte/ASCII input, as in the original.)
func lengthOfLongestSubstring_2(s string) int {
	// lastSeen[c] is the most recent index at which byte c occurred,
	// or -1 if it has not been seen yet.
	var lastSeen [256]int
	for i := range lastSeen {
		lastSeen[i] = -1
	}
	best, left := 0, 0
	for right := 0; right < len(s); right++ {
		c := s[right]
		if lastSeen[c] >= left {
			// c repeats inside the window s[left:right]; slide the left
			// edge just past its previous occurrence.
			left = lastSeen[c] + 1
		} else if n := right + 1 - left; n > best {
			best = n
		}
		lastSeen[c] = right
	}
	return best
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.