text stringlengths 11 4.05M |
|---|
package service
import (
"context"
"github.com/arabian9ts/geekfes/domain/model"
"github.com/arabian9ts/geekfes/domain/repository"
)
// Service aggregates every domain-facing service exposed by this package.
type Service interface {
	DatasetService
	SeriesService
	SeasonService
	EpisodeService
}

// DatasetService provides access to the dataset belonging to a series.
type DatasetService interface {
	GetDataset(ctx context.Context, seriesID string) (model.Dataset, error)
}

// SeriesService provides lookup and paginated listing of series.
type SeriesService interface {
	GetSeries(ctx context.Context, id string) (model.Series, error)
	// ListSeries returns up to limit series; `since` is presumably a paging
	// cursor — confirm against the repository implementation.
	ListSeries(ctx context.Context, limit int, since string) ([]model.Series, error)
}

// SeasonService lists the seasons of a series.
type SeasonService interface {
	ListSeasons(ctx context.Context, seriesID string) ([]model.Season, error)
}

// EpisodeService provides neighbor navigation and listing of episodes.
type EpisodeService interface {
	GetNextEpisode(ctx context.Context, epID string) (model.Episode, error)
	GetPrevEpisode(ctx context.Context, epID string) (model.Episode, error)
	ListEpisodes(ctx context.Context, seasonID string) ([]model.Episode, error)
}
// Compile-time assertion that *service satisfies the composite Service
// interface.
var _ Service = &service{}

// service is the concrete Service implementation backed by the domain
// repositories.
type service struct {
	series  repository.SeriesRepository
	season  repository.SeasonRepository
	episode repository.EpisodeRepository
}
// New builds a service wired to the given domain repositories.
func New(
	series repository.SeriesRepository,
	season repository.SeasonRepository,
	episode repository.EpisodeRepository,
) *service {
	svc := &service{
		episode: episode,
		season:  season,
		series:  series,
	}
	return svc
}
|
package handler
import (
"fmt"
"strings"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
)
// Header names read by auth; they are expected to be populated by the
// upstream reverse proxy.
// NOTE(review): aliasHeader is used as the channel alias despite its
// "X-Original-URI" value — confirm against the proxy configuration.
const (
	aliasHeader     = "X-Original-URI"
	realIpHeader    = "X-Real-IP"
	forwardedHeader = "X-Forwarded-For"
	UriHeader       = "Requested-Uri"
)
// auth is an auth_request-style endpoint: it classifies the original request
// URI and replies with the status code the backing service returns
// (403 results are logged). An unclassifiable URI is rejected with 403.
func (h *Handler) auth(c *gin.Context) {
	// Fixed local misspelling: channelAllias -> channelAlias.
	channelAlias := c.GetHeader(aliasHeader)
	if channelAlias == "" {
		channelAlias = "hz" // default channel when the proxy sent no alias
	}
	// Prefer X-Real-IP, falling back to X-Forwarded-For.
	userIP := c.GetHeader(realIpHeader)
	if userIP == "" {
		userIP = c.GetHeader(forwardedHeader)
	}
	uri := c.GetHeader(UriHeader)

	authQueryType, err := detectAuthQueryType(uri)
	if err != nil {
		// Unknown query type: reject immediately (was a fall-through 403).
		logrus.Errorf("response 403 for ip %v uri %v", userIP, uri)
		c.Status(403)
		return
	}
	var response int
	switch authQueryType {
	case "timeshift":
		// on timeshift we check field user.cacheItem.Arh and if channel code is available
		response, _ = h.services.GetResponseCodeChannel(userIP, channelAlias, true)
	case "streaming":
		// on streaming we check only if channel code is available
		response, _ = h.services.GetResponseCodeChannel(userIP, channelAlias, false)
	case "playlist/program":
		response, _ = h.services.GetResponseCodeArchive(userIP)
	}
	if response == 403 {
		logrus.Errorf("response 403 for ip %v uri %v", userIP, uri)
	}
	c.Status(response)
}
// detectAuthQueryType classifies an URI by its path segments into one of
// "timeshift", "playlist/program" or "streaming"; any other shape is an
// error.
func detectAuthQueryType(uri string) (string, error) {
	segments := strings.Split(uri, "/")
	for idx, segment := range segments {
		switch segment {
		case "timeshift":
			return "timeshift", nil
		case "playlist":
			// "playlist" counts only when immediately followed by "program".
			if idx+1 < len(segments) && segments[idx+1] == "program" {
				return "playlist/program", nil
			}
		case "streaming":
			return "streaming", nil
		}
	}
	return "", fmt.Errorf("unexpected auth query type for %v", uri)
}
|
package base
import (
"context"
"encoding/json"
"strings"
"sync"
"github.com/rs/zerolog/log"
"github.com/elastic/go-elasticsearch/v7"
"github.com/elastic/go-elasticsearch/v7/esapi"
)
// es1 indexes one hard-coded document ({"title" : "Test"}) into the "test"
// index under document id "1", forcing a refresh so it is searchable at once.
// NOTE(review): log.Fatal terminates the process on failure, so the deferred
// Body.Close never runs on that path — presumably acceptable for a smoke
// test; confirm if this is ever called from production code.
func es1(es *elasticsearch.Client) {
	res, err := es.Index(
		"test", // Index name
		strings.NewReader(`{"title" : "Test"}`), // Document body
		es.Index.WithDocumentID("1"), // Document ID
		es.Index.WithRefresh("true"), // Refresh
	)
	if err != nil {
		log.Fatal().Msgf("ERROR: %s", err)
	}
	defer res.Body.Close()
	log.Print(res)
}
// testElasticsearch is a smoke test: it fetches cluster info, decodes it,
// fires one IndexRequest per top-level info key concurrently, then indexes a
// fixed document via es1 and dumps the decoded info as JSON.
func testElasticsearch() {
	// BUG fix: the client-construction error was silently discarded.
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Error().Msg(err.Error())
		return
	}
	info, err := es.Info()
	if err != nil {
		log.Error().Msg(err.Error())
		return
	}
	// BUG fix: info.Body was never closed.
	defer info.Body.Close()
	log.Print(info)
	r := map[string]interface{}{}
	if err := json.NewDecoder(info.Body).Decode(&r); err != nil {
		log.Error().Msg(err.Error())
	}
	var wg sync.WaitGroup
	for k, value := range r {
		log.Debug().Msg(k)
		wg.Add(1)
		go func(k string, value interface{}) {
			defer wg.Done()
			if v1, ok := value.(string); ok {
				log.Debug().Str("key", k).Msg(v1)
			}
			req := esapi.IndexRequest{
				Index: "test",
			}
			res, err := req.Do(context.Background(), es)
			if err != nil {
				// BUG fix: res is nil when Do fails; returning here avoids
				// the nil dereference in the deferred Body.Close below.
				log.Error().Msg(err.Error())
				return
			}
			defer res.Body.Close()
			if res.IsError() {
				log.Print(114, res.Status())
				return
			}
			log.Print(res.Body)
		}(k, value)
	}
	wg.Wait()
	es1(es)
	// BUG fix: the Marshal error was ignored.
	b, err := json.Marshal(r)
	if err != nil {
		log.Error().Msg(err.Error())
		return
	}
	log.Debug().Msg(string(b))
}
|
/* Project Euler - Problems
Main function to execute completed problems. When a problem is done,
add its case here.
*/
package euler
// Return the result of the specified problem
// Problems returns the result of the specified Project Euler problem.
// Numbers without an implementation yield a placeholder message.
// The former trailing `return "Returned?"` was unreachable (every switch
// branch returns) and has been removed.
func Problems(num int) string {
	switch num {
	case 6:
		return Euler006()
	case 13:
		return Euler013()
	case 14:
		return Euler014()
	case 15:
		return Euler015()
	case 16:
		return Euler016()
	case 20:
		return Euler020()
	case 48:
		return Euler048()
	default:
		return "So unsolved, go back to school."
	}
}
|
//go:build tools
// +build tools
package ledger
// import _ "github.com/99designs/gqlgen"
|
package column
import (
"fmt"
"io"
"github.com/vahid-sohrabloo/chconn/v2/internal/helper"
"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter"
)
// stringPos records the [start, end) byte range of one row inside
// StringBase.vals.
type stringPos struct {
	start int
	end   int
}

// StringBase is a column of String ClickHouse data type with generic type
type StringBase[T ~string] struct {
	column
	// numRow is the number of rows in the current block (read or buffered).
	numRow int
	// writerData buffers varint-length-prefixed rows for insert.
	writerData []byte
	// vals holds the raw bytes of every row in the current read block.
	vals []byte
	// pos holds one [start, end) range into vals per row.
	pos []stringPos
}

// NewStringBase creates a new column of String ClickHouse data type with a
// generic string type.
func NewStringBase[T ~string]() *StringBase[T] {
	return &StringBase[T]{}
}
// Data returns every row of the current block as a freshly allocated []T.
func (c *StringBase[T]) Data() []T {
	out := make([]T, 0, len(c.pos))
	for _, p := range c.pos {
		out = append(out, T(c.vals[p.start:p.end]))
	}
	return out
}
// DataBytes gets all the data in the current block as a slice of []byte.
// The returned slices alias the block's internal buffer and are valid only
// for the current block.
func (c *StringBase[T]) DataBytes() [][]byte {
	return c.ReadBytes(nil)
}
// Read reads all the data in the current block and appends it to value.
// value is grown by len(c.pos) elements — reusing spare capacity when there
// is enough — and the new tail is filled row by row (each row copied into a
// fresh string via the T conversion, so the result does not alias vals).
func (c *StringBase[T]) Read(value []T) []T {
	if cap(value)-len(value) >= len(c.pos) {
		// Enough spare capacity: extend in place without reallocating.
		value = (value)[:len(value)+len(c.pos)]
	} else {
		value = append(value, make([]T, len(c.pos))...)
	}
	// val is the freshly appended tail that will receive the rows.
	val := (value)[len(value)-len(c.pos):]
	for i, v := range c.pos {
		val[i] = T(c.vals[v.start:v.end])
	}
	return value
}
// ReadBytes reads all the data as `[]byte` in the current block and appends
// it to the input.
//
// data is valid only in the current block: unlike Read, the appended slices
// alias the column's internal vals buffer and are invalidated on Reset.
func (c *StringBase[T]) ReadBytes(value [][]byte) [][]byte {
	if cap(value)-len(value) >= len(c.pos) {
		// Enough spare capacity: extend in place without reallocating.
		value = (value)[:len(value)+len(c.pos)]
	} else {
		value = append(value, make([][]byte, len(c.pos))...)
	}
	// val is the freshly appended tail that will receive the row views.
	val := (value)[len(value)-len(c.pos):]
	for i, v := range c.pos {
		val[i] = c.vals[v.start:v.end]
	}
	return value
}
// Row returns the value of the given row as T (a copy, safe to keep).
//
// NOTE: Row number starts from zero.
func (c *StringBase[T]) Row(row int) T {
	return T(c.RowBytes(row))
}

// RowBytes returns the raw bytes of the given row.
//
// Data is valid only in the current block (it aliases the internal buffer).
func (c *StringBase[T]) RowBytes(row int) []byte {
	pos := c.pos[row]
	return c.vals[pos.start:pos.end]
}

// Each calls f for every row (index and aliased bytes) in order, stopping
// early when f returns false.
func (c *StringBase[T]) Each(f func(i int, b []byte) bool) {
	for i, p := range c.pos {
		if !f(i, c.vals[p.start:p.end]) {
			return
		}
	}
}
// appendLen appends x to writerData encoded as an unsigned varint
// (7 bits per byte, high bit set on every continuation byte).
// The original kept an unused counter `i`; it had no effect and has been
// removed.
func (c *StringBase[T]) appendLen(x int) {
	for x >= 0x80 {
		c.writerData = append(c.writerData, byte(x)|0x80)
		x >>= 7
	}
	c.writerData = append(c.writerData, byte(x))
}
// Append buffers values for insert: each value is written to writerData as a
// varint length prefix followed by its bytes.
func (c *StringBase[T]) Append(v ...T) {
	for _, v := range v {
		c.appendLen(len(v))
		c.writerData = append(c.writerData, v...)
	}
	c.numRow += len(v)
}

// AppendBytes buffers byte-slice values for insert, using the same
// length-prefixed layout as Append.
func (c *StringBase[T]) AppendBytes(v ...[]byte) {
	for _, v := range v {
		c.appendLen(len(v))
		c.writerData = append(c.writerData, v...)
	}
	c.numRow += len(v)
}
// NumRow returns the number of rows in this block.
func (c *StringBase[T]) NumRow() int {
	return c.numRow
}

// Array returns an Array wrapper for this column.
func (c *StringBase[T]) Array() *Array[T] {
	return NewArray[T](c)
}

// Nullable returns a nullable wrapper for this column.
func (c *StringBase[T]) Nullable() *Nullable[T] {
	return NewNullable[T](c)
}

// LC returns a low-cardinality wrapper for this column.
func (c *StringBase[T]) LC() *LowCardinality[T] {
	return NewLC[T](c)
}

// LowCardinality returns a low-cardinality wrapper for this column
// (alias of LC).
func (c *StringBase[T]) LowCardinality() *LowCardinality[T] {
	return NewLC[T](c)
}
// Reset all status and buffer data.
//
// Reading data does not require a reset after each read. The reset will be
// triggered automatically.
//
// However, writing data requires a reset after each write.
// Buffers are truncated, not freed, so their capacity is reused.
func (c *StringBase[T]) Reset() {
	c.numRow = 0
	c.vals = c.vals[:0]
	c.pos = c.pos[:0]
	c.writerData = c.writerData[:0]
}

// SetWriteBufferSize sets the write buffer size (number of bytes).
// This buffer is only used for writing. By setting it up front you avoid
// repeated reallocation; it only grows, never shrinks, the buffer.
func (c *StringBase[T]) SetWriteBufferSize(b int) {
	if cap(c.writerData) < b {
		c.writerData = make([]byte, 0, b)
	}
}
// ReadRaw reads raw data from the reader. It runs automatically when you
// call `ReadColumns()`.
// Each of the num rows is stored on the wire as a uvarint length followed by
// that many bytes; the bytes are accumulated in vals and the per-row ranges
// in pos.
func (c *StringBase[T]) ReadRaw(num int, r *readerwriter.Reader) error {
	c.Reset()
	c.r = r
	c.numRow = num
	var p stringPos
	for i := 0; i < num; i++ {
		l, err := c.r.Uvarint()
		if err != nil {
			return fmt.Errorf("error read string len: %w", err)
		}
		// Ranges are consecutive: each row starts where the previous ended.
		p.start = p.end
		p.end += int(l)
		c.vals = append(c.vals, make([]byte, l)...)
		if _, err := c.r.Read(c.vals[p.start:p.end]); err != nil {
			return fmt.Errorf("error read string: %w", err)
		}
		c.pos = append(c.pos, p)
	}
	return nil
}
// HeaderReader reads header data from the reader.
// It is used internally.
func (c *StringBase[T]) HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error {
	c.r = r
	return c.readColumn(readColumn, revision)
}

// Validate checks that the server-declared column type (after unwrapping
// SimpleAggregateFunction) is a String type.
func (c *StringBase[T]) Validate() error {
	chType := helper.FilterSimpleAggregate(c.chType)
	if !helper.IsString(chType) {
		return ErrInvalidType{
			column: c,
		}
	}
	return nil
}

// ColumnType returns the ClickHouse type name of this column ("String").
func (c *StringBase[T]) ColumnType() string {
	return helper.StringStr
}
// WriteTo writes the buffered insert data to ClickHouse.
// It is used internally.
func (c *StringBase[T]) WriteTo(w io.Writer) (int64, error) {
	nw, err := w.Write(c.writerData)
	return int64(nw), err
}

// HeaderWriter writes header data to the writer.
// It is used internally. String columns have no extra header, hence the
// empty body.
func (c *StringBase[T]) HeaderWriter(w *readerwriter.Writer) {
}

// appendEmpty appends the zero value of T (the empty string) as one row.
func (c *StringBase[T]) appendEmpty() {
	var emptyValue T
	c.Append(emptyValue)
}

// Elem wraps this column according to the requested nesting: nullable first,
// then low-cardinality, then array levels, returning the outermost wrapper.
func (c *StringBase[T]) Elem(arrayLevel int, nullable, lc bool) ColumnBasic {
	if nullable {
		return c.Nullable().elem(arrayLevel, lc)
	}
	if lc {
		return c.LowCardinality().elem(arrayLevel)
	}
	if arrayLevel > 0 {
		return c.Array().elem(arrayLevel - 1)
	}
	return c
}
|
package main
import (
"context"
"fmt"
"log"
sms "github.com/klaus01/GoMicro_LBSServer/api/sms/proto"
smscode "github.com/klaus01/GoMicro_LBSServer/srv/smscode/proto"
yuntongxun "github.com/klaus01/GoMicro_LBSServer/srv/yuntongxun/proto"
"github.com/klaus01/GoMicro_LBSServer/utils"
"github.com/micro/go-micro/v2"
"github.com/micro/go-micro/v2/client"
"github.com/micro/go-micro/v2/errors"
)
// gServiceName is the go-micro service name this API registers under.
const gServiceName = "go.micro.api.sms"

// Sms api handler.
type Sms struct {
	// client is the go-micro client used to call downstream services.
	client client.Client
}
// SendVerificationCode sends an SMS verification code (发送验证码).
// It verifies the request signature, asks the smscode service to create a
// code for the phone number, then asks the yuntongxun service to deliver it.
// The parameter formerly named `context` shadowed the context package and
// has been renamed.
func (s *Sms) SendVerificationCode(reqCtx context.Context, req *sms.Request, rep *sms.Response) error {
	const method string = "sendVerificationCode"
	const id string = gServiceName + "." + method
	ctx, tr := utils.CreateTracing(reqCtx, gServiceName, method)
	defer tr.Finish()
	if len(req.PhoneNumber) == 0 {
		return errors.BadRequest(id, "缺少手机号")
	}
	if len(req.Time) == 0 {
		return errors.BadRequest(id, "缺少参数 time")
	}
	if len(req.Sign) == 0 {
		return errors.BadRequest(id, "缺少参数 sign")
	}
	// The signature is the MD5 of a fixed pattern around phone number + time.
	sig := fmt.Sprintf("SMS%sCODE%sS", req.PhoneNumber, req.Time)
	if utils.Md5(sig) != req.Sign {
		return errors.BadRequest(id, "sign 错误")
	}
	smscodeClient := smscode.NewSmscodeService("go.micro.srv.smscode", s.client)
	cvcRep, err := smscodeClient.CreateVerificationCode(ctx, &smscode.CreateVerificationCodeRequest{PhoneNumber: req.PhoneNumber})
	if err != nil {
		return err
	}
	yuntongxunClient := yuntongxun.NewYuntongxunService("go.micro.srv.yuntongxun", s.client)
	if _, err := yuntongxunClient.SendVerificationCode(ctx, &yuntongxun.SendVerificationCodeRequest{PhoneNumber: req.PhoneNumber, Code: cvcRep.Code}); err != nil {
		return err
	}
	return nil
}
// main registers the Sms handler with a go-micro service and runs it until
// the service stops or fails.
func main() {
	service := micro.NewService(micro.Name(gServiceName))
	service.Init()
	if err := sms.RegisterSmsHandler(service.Server(), &Sms{service.Client()}); err != nil {
		log.Fatal(err)
	}
	if err := service.Run(); err != nil {
		log.Fatal(err)
	}
}
|
package decoder
import (
"bytes"
"compress/gzip"
"io"
)
// tmplsrc returns raw, uncompressed file data.
// The embedded byte slice is a gzip stream generated at build time; it is
// decompressed on every call. Any corruption of the embedded data panics,
// since it indicates a broken build artifact rather than a runtime error.
func tmplsrc() []byte {
	gz, err := gzip.NewReader(bytes.NewBuffer([]byte{
		0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x00, 0xff, 0xbc, 0x57,
		0xdf, 0x4f, 0xe3, 0x38, 0x10, 0x7e, 0x4e, 0xfe, 0x8a, 0x21, 0x12, 0x90,
		0xb0, 0x28, 0x45, 0x77, 0xab, 0x7d, 0xe0, 0xd4, 0x07, 0x96, 0x63, 0x4f,
		0x7b, 0xd7, 0x2d, 0x27, 0xa0, 0x4f, 0x68, 0x75, 0x72, 0x53, 0x17, 0x4c,
		0x5b, 0x3b, 0x67, 0xbb, 0xdd, 0x45, 0xb9, 0xfe, 0xef, 0x37, 0x63, 0x37,
		0xfd, 0x91, 0x92, 0x20, 0xa0, 0xbb, 0x2f, 0xc5, 0xb5, 0x67, 0xe6, 0xfb,
		0x66, 0xbe, 0x31, 0x9e, 0xe6, 0x2c, 0x1b, 0xb1, 0x3b, 0x0e, 0x45, 0x91,
		0x76, 0xd9, 0x84, 0xbb, 0x8f, 0xf9, 0x3c, 0x0c, 0xc5, 0x24, 0x57, 0xda,
		0x42, 0x1c, 0x06, 0x11, 0xd7, 0x5a, 0x69, 0x13, 0xe1, 0x6a, 0x38, 0xb1,
		0xf4, 0x47, 0x28, 0xfa, 0xbc, 0x13, 0xf6, 0x7e, 0xda, 0x4f, 0x33, 0x35,
		0x69, 0xf5, 0xb9, 0xec, 0x3f, 0xa8, 0x7b, 0x69, 0x94, 0x6c, 0x4d, 0xf8,
		0x1d, 0x7b, 0xa0, 0x85, 0xc9, 0x98, 0x94, 0x5c, 0x47, 0x61, 0x12, 0x86,
		0x45, 0xa1, 0x99, 0x44, 0x14, 0xfb, 0x98, 0x73, 0x03, 0x29, 0x02, 0xd0,
		0xaa, 0x02, 0xfa, 0xe7, 0xf5, 0x65, 0xf7, 0x77, 0x9e, 0xa9, 0x01, 0xd7,
		0x60, 0xac, 0x9e, 0x66, 0x16, 0x8a, 0x30, 0x30, 0xb0, 0x08, 0x94, 0x5e,
		0xfb, 0xbf, 0x21, 0xb2, 0x1b, 0x4e, 0x65, 0x06, 0x5d, 0xfe, 0xad, 0x36,
		0x40, 0xac, 0x41, 0xa8, 0xf4, 0x8a, 0x33, 0x5c, 0x27, 0x70, 0x54, 0x0f,
		0x84, 0x08, 0x9a, 0xdb, 0xa9, 0x96, 0x70, 0x50, 0x6b, 0x54, 0x98, 0xd3,
		0x25, 0x09, 0x04, 0x5d, 0xf0, 0x88, 0x75, 0x32, 0x6f, 0xe6, 0x42, 0x86,
		0x25, 0x9f, 0xad, 0x34, 0x76, 0xc3, 0x6a, 0xc5, 0x20, 0xe6, 0x0d, 0x01,
		0x13, 0xf0, 0x8b, 0x38, 0xb7, 0x1a, 0x8e, 0x2a, 0x76, 0x09, 0x38, 0x85,
		0x7d, 0xb1, 0x4f, 0xdb, 0xc0, 0x53, 0x13, 0x06, 0x62, 0x08, 0x56, 0x8d,
		0x8e, 0xe9, 0x63, 0xc6, 0xc6, 0xc7, 0x64, 0x42, 0x67, 0xc6, 0xd1, 0x8f,
		0x93, 0xdf, 0xdc, 0xc6, 0x5e, 0x1b, 0xa4, 0x18, 0x93, 0x63, 0x49, 0x17,
		0x77, 0xc3, 0x60, 0x0e, 0x7c, 0x6c, 0x38, 0xf8, 0x10, 0xd0, 0x6e, 0x2f,
		0x53, 0xbf, 0xe9, 0xf6, 0x3a, 0x1d, 0x67, 0x7e, 0x44, 0x44, 0x9c, 0xf7,
		0xca, 0xd7, 0x7d, 0xd9, 0xf4, 0xdd, 0x5b, 0xf3, 0xed, 0x7c, 0xbc, 0x3a,
		0x3b, 0xbf, 0x58, 0x07, 0xc3, 0x76, 0x4c, 0x2f, 0x88, 0xfa, 0x30, 0x8e,
		0x7a, 0x92, 0x7f, 0xcf, 0x79, 0x66, 0xf9, 0x00, 0xf6, 0x0d, 0x30, 0x0b,
		0xfb, 0x83, 0x53, 0x5c, 0x21, 0xcf, 0x72, 0xfb, 0xb0, 0x38, 0x8c, 0x8e,
		0x57, 0xe1, 0xd4, 0x88, 0x4b, 0xca, 0x3f, 0x46, 0x9c, 0x04, 0xf7, 0xd3,
		0xbf, 0x95, 0x89, 0x69, 0x61, 0xb5, 0x90, 0x77, 0xb1, 0xcf, 0x3b, 0x49,
		0x90, 0x51, 0x18, 0x06, 0xad, 0x16, 0x9c, 0x6b, 0xce, 0x2c, 0x76, 0xf0,
		0x3d, 0x07, 0xd5, 0x7f, 0xc0, 0x90, 0xc4, 0x51, 0x58, 0x18, 0x28, 0x6e,
		0xe4, 0xa1, 0x45, 0x1c, 0x61, 0x6c, 0xea, 0x0a, 0xe7, 0x93, 0x5b, 0xd5,
		0x66, 0x91, 0x6c, 0x45, 0xca, 0x62, 0x4e, 0xb1, 0x83, 0x19, 0x95, 0x95,
		0x2c, 0x3c, 0x4c, 0x47, 0xa9, 0x1c, 0xd4, 0x0c, 0x7b, 0x60, 0xc4, 0x1f,
		0x5b, 0x48, 0x61, 0xca, 0x21, 0x67, 0x42, 0x1b, 0x0a, 0x2d, 0x07, 0xfc,
		0x3b, 0x99, 0x9f, 0x84, 0xc1, 0xd0, 0x0b, 0x46, 0x2e, 0xd4, 0xe5, 0x20,
		0x24, 0x39, 0xa0, 0x51, 0x30, 0x63, 0xce, 0x77, 0x91, 0x08, 0x6e, 0x34,
		0xe9, 0x88, 0xc7, 0x48, 0xb8, 0xa2, 0xe5, 0x86, 0x98, 0x0d, 0x6a, 0x5e,
		0xad, 0x14, 0xd9, 0xd0, 0xb0, 0xc1, 0xe5, 0xfc, 0xf2, 0xcb, 0x97, 0x33,
		0xef, 0x41, 0xe5, 0x73, 0x09, 0xe1, 0xf9, 0x89, 0xdf, 0x7a, 0x46, 0x58,
		0xfc, 0x57, 0x33, 0x61, 0x5e, 0xdb, 0x68, 0xa9, 0x18, 0xa5, 0x40, 0x75,
		0x0c, 0x9e, 0x6c, 0xd9, 0x86, 0x8e, 0xad, 0xa4, 0xe9, 0x62, 0x90, 0xd6,
		0xc1, 0x13, 0xbd, 0x77, 0x7d, 0x73, 0xf5, 0xb9, 0xfb, 0xc7, 0x46, 0xa6,
		0x2f, 0x6e, 0x3e, 0x50, 0x7a, 0xa1, 0xc9, 0xab, 0xda, 0xb0, 0x2c, 0xaa,
		0xe3, 0x40, 0xfa, 0xb6, 0x2b, 0x36, 0x25, 0xfd, 0xb5, 0x8e, 0xa0, 0x66,
		0xcd, 0xd4, 0x58, 0xc9, 0x34, 0x7c, 0xba, 0x3c, 0x4d, 0x37, 0xba, 0xa9,
		0x0b, 0xf6, 0x36, 0x24, 0xed, 0x5c, 0x76, 0xdf, 0x50, 0x1a, 0x47, 0xf0,
		0x95, 0x25, 0xa1, 0x7c, 0xcd, 0x37, 0x61, 0xb3, 0x7b, 0xd7, 0xf2, 0x44,
		0xa2, 0x7c, 0x6a, 0x86, 0x82, 0x8f, 0x07, 0xfe, 0xad, 0x09, 0x68, 0x17,
		0x99, 0xa3, 0x89, 0xc4, 0xb8, 0xe5, 0x5e, 0xc6, 0xa8, 0x9c, 0xc5, 0x72,
		0x17, 0xfe, 0x83, 0x1c, 0x11, 0xec, 0x10, 0xa2, 0xfd, 0x7f, 0xa3, 0xf9,
		0xfc, 0xd4, 0x35, 0x89, 0xbb, 0x9f, 0x07, 0xb3, 0xb4, 0x28, 0x5c, 0xc4,
		0x65, 0x00, 0x77, 0xe8, 0xc2, 0x0a, 0x83, 0x6e, 0x13, 0x61, 0xc5, 0x8c,
		0xbb, 0x57, 0x6d, 0x11, 0x7e, 0x79, 0xea, 0x37, 0x21, 0x5a, 0x88, 0x5f,
		0x9e, 0x96, 0x17, 0xcf, 0x69, 0x40, 0x8a, 0x5d, 0xfb, 0xfc, 0x66, 0x4f,
		0xb5, 0xea, 0x56, 0xb7, 0x96, 0x4d, 0x4f, 0x28, 0x5c, 0x0e, 0x6a, 0x20,
		0x31, 0x9b, 0x7a, 0xbc, 0xcf, 0xd2, 0xee, 0x1a, 0xec, 0xc3, 0xfb, 0x46,
		0xb8, 0x0f, 0xef, 0x77, 0x0a, 0x38, 0x6d, 0x4c, 0xaf, 0x27, 0x76, 0x9c,
		0xdf, 0xf4, 0x99, 0x04, 0x7b, 0x62, 0xe7, 0x19, 0x0e, 0xc7, 0x8a, 0xd9,
		0x5f, 0x7f, 0xa9, 0xc7, 0xfc, 0xe4, 0x0d, 0x76, 0x0f, 0xda, 0x94, 0xe8,
		0x27, 0x6f, 0xb0, 0x53, 0xd0, 0xbe, 0x52, 0xe3, 0x7a, 0xc4, 0x8f, 0x78,
		0xfa, 0x26, 0xb8, 0xcd, 0xf5, 0x06, 0xf2, 0xd1, 0x12, 0x76, 0x85, 0xea,
		0xc6, 0x39, 0x33, 0xed, 0x97, 0xf7, 0x79, 0x6b, 0x9a, 0x4b, 0xd2, 0xc5,
		0x40, 0x55, 0x43, 0xaa, 0xca, 0xe9, 0x59, 0x16, 0xb7, 0x5f, 0xdf, 0x42,
		0xe3, 0x4c, 0x6b, 0xf6, 0xf8, 0x6a, 0x2e, 0xab, 0xe5, 0x6a, 0xe5, 0x5f,
		0x42, 0x7a, 0x9f, 0xdf, 0xbd, 0xf3, 0x33, 0xd0, 0xda, 0x0b, 0xff, 0xa2,
		0x99, 0xd3, 0x73, 0x73, 0x83, 0xe7, 0xed, 0xd7, 0x97, 0x8c, 0x9e, 0xff,
		0xbc, 0x65, 0xea, 0xdc, 0x9a, 0x1c, 0xff, 0xba, 0xb8, 0xa9, 0xb8, 0xe0,
		0x8f, 0x1a, 0x1a, 0xe7, 0xe3, 0xe8, 0x62, 0xf9, 0x4a, 0xdf, 0x1e, 0x46,
		0x8b, 0x89, 0xcf, 0x8c, 0x45, 0xc6, 0x09, 0x7b, 0xc2, 0x46, 0x3c, 0xde,
		0x22, 0x7e, 0x0c, 0x27, 0x49, 0x75, 0x60, 0x13, 0x96, 0x4f, 0xea, 0xc6,
		0xb4, 0x1f, 0x3b, 0x83, 0x95, 0xb9, 0x95, 0x93, 0xa6, 0x23, 0xff, 0x73,
		0xa7, 0x32, 0x9c, 0x36, 0x18, 0x09, 0xfd, 0x83, 0xc7, 0x33, 0x7c, 0xee,
		0xd3, 0x9e, 0x24, 0xe2, 0xf1, 0x5a, 0xb0, 0xc4, 0x35, 0x2b, 0x96, 0xdf,
		0xbd, 0xd5, 0xdb, 0x63, 0xf6, 0xda, 0x95, 0xe2, 0xe5, 0xc5, 0x3d, 0x20,
		0xfb, 0x67, 0xa7, 0x1e, 0x02, 0x74, 0x9d, 0xd0, 0x06, 0x96, 0xe7, 0x78,
		0x39, 0x62, 0xf7, 0xf5, 0xd8, 0x89, 0x9d, 0x54, 0xee, 0xc8, 0x9c, 0x7e,
		0xe8, 0xfa, 0x0b, 0xf4, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xda, 0xa5,
		0x93, 0x69, 0x57, 0x0f, 0x00, 0x00,
	}))
	if err != nil {
		panic("Decompression failed: " + err.Error())
	}
	defer gz.Close()
	var b bytes.Buffer
	// BUG fix: the io.Copy error was previously ignored; a truncated or
	// corrupt stream now fails loudly instead of returning partial data.
	if _, err := io.Copy(&b, gz); err != nil {
		panic("Decompression failed: " + err.Error())
	}
	return b.Bytes()
}
|
package sinks
import (
"github.com/stretchr/testify/assert"
"io/ioutil"
"os"
"testing"
"time"
)
// TestFileSink_Dump_NoLimits verifies that Dump writes one JSON line per
// record, in order, to the sink's backing file.
func TestFileSink_Dump_NoLimits(t *testing.T) {
	sink := NewFileSink(FileSinkConfig{}, nil)
	person1 := &person{
		Id:   "1",
		Name: "Metuselah",
		Age:  700,
	}
	person2 := &person{
		Id:   "2",
		Name: "Noah",
		Age:  88,
	}
	expected := `{"Id":"1","Name":"Metuselah","Age":700}
{"Id":"2","Name":"Noah","Age":88}
`
	err := sink.Dump(person1, person2)
	assert.Nil(t, err)
	// NOTE(review): the sink appears to flush asynchronously; this sleep is a
	// race-prone synchronization point — consider an explicit flush/sync API.
	time.Sleep(100 * time.Millisecond)
	bytes, err := ioutil.ReadFile(sink.file.Name())
	assert.Nil(t, err)
	assert.NotNil(t, bytes)
	assert.EqualValues(t, expected, string(bytes))
	assert.Nil(t, sink.Close())
	// Fix: report cleanup failure as a test failure instead of panicking,
	// which crashed the whole test binary.
	if err := os.Remove(sink.file.Name()); err != nil {
		t.Fatalf("removing sink file: %v", err)
	}
}
|
package main
import "fmt"
// binary returns an index of num in the sorted slice arr, or -1 when num is
// absent. For duplicate values any matching index may be returned.
//
// The original implementation probed with a manually halved step and could
// drive the index negative (panic) or oscillate forever when the step
// reached 1; it is replaced with a standard bounds-safe binary search that
// preserves the signature and the found/-1 contract.
func binary(arr []int, num int) int {
	lo, hi := 0, len(arr)-1
	for lo <= hi {
		mid := lo + (hi-lo)/2 // avoids overflow of lo+hi
		switch {
		case arr[mid] == num:
			return mid
		case arr[mid] < num:
			lo = mid + 1
		default:
			hi = mid - 1
		}
	}
	return -1
}
// main demonstrates the search on a 100-element slice whose last entry is 20.
func main() {
	haystack := make([]int, 100)
	haystack[99] = 20
	fmt.Println(binary(haystack, 20))
}
|
package main
import (
"flag"
"log"
"math/rand"
"github.com/gobridge-kr/bot-sample/internal"
"github.com/sbstjn/hanu"
)
// version is the bot release tag (not referenced elsewhere in this excerpt).
const version = "0.0.1"

// tokenContainer holds the Slack API token; it is set and frozen in init.
var tokenContainer = bot.NewContainer("")
// init reads the -token flag and freezes it inside tokenContainer.
// NOTE(review): parsing flags in init makes the package harder to test;
// consider moving flag handling into main.
func init() {
	var token string
	flag.StringVar(&token, "token", "", "Slack bot API token")
	flag.Parse()
	tokenContainer.Set(token)
	tokenContainer.Freeze()
}
// main connects to Slack with the frozen token and registers the bot's
// commands: greetings, echo ("… 말해줘/해봐"), and a random yes/no fallback.
func main() {
	token := tokenContainer.Get().(string)
	// gofmt fix: removed the redundant parentheses around the condition.
	if token == "" {
		log.Fatal("API token not provided.\nPlease try with option -h for usage")
	}
	slack, err := hanu.New(token)
	if err != nil {
		log.Fatal(err)
	}
	// Greeting command.
	slack.Command("^안(녕|뇽|냥).*", func(conv hanu.ConversationInterface) {
		user := conv.Message().User()
		conv.Reply("안녕하세요, %s님!", user)
	})
	// Echo command: repeats whatever was asked to be said.
	slack.Command("[야\\s]*<something:string>\\s+(말해줘?|해봐)", func(conv hanu.ConversationInterface) {
		something, _ := conv.String("something")
		conv.Reply(something)
	})
	// Fallback: answer any other message with a random yes/no.
	slack.Command("^.+?$", func(conv hanu.ConversationInterface) {
		yes := rand.Float64() < 0.5
		if yes {
			conv.Reply("네!")
		} else {
			conv.Reply("아니요!")
		}
	})
	slack.Listen()
}
|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package bench
import (
"context"
gosql "database/sql"
"fmt"
"net"
"net/url"
"reflect"
"runtime"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
_ "github.com/go-sql-driver/mysql" // registers the MySQL driver to gosql
_ "github.com/lib/pq" // registers the pg driver to gosql
)
// BenchmarkFn is a function that runs a benchmark using the given SQLRunner.
type BenchmarkFn func(b *testing.B, db *sqlutils.SQLRunner)

// benchmarkCockroach starts an in-process single-node CockroachDB test
// server, creates the "bench" database, and runs f against it.
func benchmarkCockroach(b *testing.B, f BenchmarkFn) {
	s, db, _ := serverutils.StartServer(
		b, base.TestServerArgs{UseDatabase: "bench"})
	// Registered before the Exec below so the server stops even if setup
	// aborts the benchmark.
	defer s.Stopper().Stop(context.TODO())
	if _, err := db.Exec(`CREATE DATABASE bench`); err != nil {
		b.Fatal(err)
	}
	f(b, sqlutils.MakeSQLRunner(db))
}
// benchmarkMultinodeCockroach starts a 3-node CockroachDB test cluster,
// creates the "bench" database, and runs f with a round-robin runner over
// the three connections.
func benchmarkMultinodeCockroach(b *testing.B, f BenchmarkFn) {
	tc := testcluster.StartTestCluster(b, 3,
		base.TestClusterArgs{
			ReplicationMode: base.ReplicationAuto,
			ServerArgs: base.TestServerArgs{
				UseDatabase: "bench",
			},
		})
	// BUG fix: the deferred Stop used to be registered after the Exec below,
	// so a failing setup statement (b.Fatal) leaked the whole cluster. It now
	// mirrors benchmarkCockroach and is registered first.
	defer tc.Stopper().Stop(context.TODO())
	if _, err := tc.Conns[0].Exec(`CREATE DATABASE bench`); err != nil {
		b.Fatal(err)
	}
	f(b, sqlutils.MakeRoundRobinSQLRunner(tc.Conns[0], tc.Conns[1], tc.Conns[2]))
}
// benchmarkPostgres runs f against a local PostgreSQL server over SSL,
// skipping the benchmark when nothing is listening on localhost:5432.
func benchmarkPostgres(b *testing.B, f BenchmarkFn) {
	// Note: the following uses SSL. To run this, make sure your local
	// Postgres server has SSL enabled. To use Cockroach's checked-in
	// testing certificates for Postgres' SSL, first determine the
	// location of your Postgres server's configuration file:
	// ```
	// $ psql -h localhost -p 5432 -c 'SHOW config_file'
	// config_file
	// -----------------------------------------
	// /usr/local/var/postgres/postgresql.conf
	// (1 row)
	//```
	//
	// Now open this file and set the following values:
	// ```
	// $ grep ^ssl /usr/local/var/postgres/postgresql.conf
	// ssl = on # (change requires restart)
	// ssl_cert_file = '$GOPATH/src/github.com/cockroachdb/cockroach/pkg/security/securitytest/test_certs/node.crt' # (change requires restart)
	// ssl_key_file = '$GOPATH/src/github.com/cockroachdb/cockroach/pkg/security/securitytest/test_certs/node.key' # (change requires restart)
	// ssl_ca_file = '$GOPATH/src/github.com/cockroachdb/cockroach/pkg/security/securitytest/test_certs/ca.crt' # (change requires restart)
	// ```
	// Where `$GOPATH/src/github.com/cockroachdb/cockroach`
	// is replaced with your local Cockroach source directory.
	// Be sure to restart Postgres for this to take effect.
	pgURL := url.URL{
		Scheme:   "postgres",
		Host:     "localhost:5432",
		RawQuery: "sslmode=require&dbname=postgres",
	}
	// Probe the port first so an absent server skips rather than fails.
	if conn, err := net.Dial("tcp", pgURL.Host); err != nil {
		skip.IgnoreLintf(b, "unable to connect to postgres server on %s: %s", pgURL.Host, err)
	} else {
		conn.Close()
	}
	db, err := gosql.Open("postgres", pgURL.String())
	if err != nil {
		b.Fatal(err)
	}
	defer db.Close()
	r := sqlutils.MakeSQLRunner(db)
	r.Exec(b, `CREATE SCHEMA IF NOT EXISTS bench`)
	f(b, r)
}
// benchmarkMySQL runs f against a local MySQL server, skipping the benchmark
// when nothing is listening on localhost:3306.
func benchmarkMySQL(b *testing.B, f BenchmarkFn) {
	const addr = "localhost:3306"
	// Probe the port first so an absent server skips rather than fails.
	conn, dialErr := net.Dial("tcp", addr)
	if dialErr != nil {
		skip.IgnoreLintf(b, "unable to connect to mysql server on %s: %s", addr, dialErr)
	} else {
		conn.Close()
	}
	db, err := gosql.Open("mysql", fmt.Sprintf("root@tcp(%s)/", addr))
	if err != nil {
		b.Fatal(err)
	}
	defer db.Close()
	runner := sqlutils.MakeSQLRunner(db)
	runner.Exec(b, `CREATE DATABASE IF NOT EXISTS bench`)
	f(b, runner)
}
// ForEachDB iterates the given benchmark over multiple database engines.
// The sub-benchmark name is derived from each engine function's own symbol
// name via reflection, with the package-qualified "benchmark" prefix
// trimmed (e.g. "Cockroach", "MySQL").
func ForEachDB(b *testing.B, fn BenchmarkFn) {
	for _, dbFn := range []func(*testing.B, BenchmarkFn){
		benchmarkCockroach,
		benchmarkMultinodeCockroach,
		benchmarkPostgres,
		benchmarkMySQL,
	} {
		dbName := runtime.FuncForPC(reflect.ValueOf(dbFn).Pointer()).Name()
		dbName = strings.TrimPrefix(dbName, "github.com/cockroachdb/cockroach/pkg/bench.benchmark")
		b.Run(dbName, func(b *testing.B) {
			dbFn(b, fn)
		})
	}
}
|
/**
* (C) Copyright IBM Corp. 2021.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models
import (
constants "github.com/IBM/appconfiguration-go-sdk/lib/internal/constants"
messages "github.com/IBM/appconfiguration-go-sdk/lib/internal/messages"
utils "github.com/IBM/appconfiguration-go-sdk/lib/internal/utils"
"sort"
"github.com/IBM/appconfiguration-go-sdk/lib/internal/utils/log"
)
// Feature : Feature struct — one feature flag as delivered by the
// App Configuration service.
type Feature struct {
	Name          string        `json:"name"`
	FeatureID     string        `json:"feature_id"`
	DataType      string        `json:"type"`   // e.g. STRING / BOOLEAN / NUMERIC
	Format        string        `json:"format"` // e.g. TEXT / JSON / YAML; may be empty
	EnabledValue  interface{}   `json:"enabled_value"`
	DisabledValue interface{}   `json:"disabled_value"`
	SegmentRules  []SegmentRule `json:"segment_rules"`
	Enabled       bool          `json:"enabled"`
}
// GetFeatureName : Get Feature Name
func (f *Feature) GetFeatureName() string {
	return f.Name
}

// GetDisabledValue : Get Disabled Value.
// YAML-formatted values are type-casted before being returned; every other
// format is returned exactly as stored.
func (f *Feature) GetDisabledValue() interface{} {
	if f.Format == "YAML" {
		return getTypeCastedValue(f.DisabledValue, f.GetFeatureDataType(), f.GetFeatureDataFormat())
	}
	return f.DisabledValue
}

// GetEnabledValue : Get Enabled Value.
// YAML-formatted values are type-casted before being returned; every other
// format is returned exactly as stored.
func (f *Feature) GetEnabledValue() interface{} {
	if f.Format == "YAML" {
		return getTypeCastedValue(f.EnabledValue, f.GetFeatureDataType(), f.GetFeatureDataFormat())
	}
	return f.EnabledValue
}

// GetFeatureID : Get Feature ID
func (f *Feature) GetFeatureID() string {
	return f.FeatureID
}
// GetFeatureDataType : Get Feature Data Type
func (f *Feature) GetFeatureDataType() string {
	return f.DataType
}

// GetFeatureDataFormat : Get Feature Data Format.
// Note: this lazily mutates f.Format for STRING flags (see below).
func (f *Feature) GetFeatureDataFormat() string {
	// Format will be empty string ("") for Boolean & Numeric feature flags
	// If the Format is empty for a String type, we default it to TEXT
	if f.Format == "" && f.DataType == "STRING" {
		f.Format = "TEXT"
	}
	return f.Format
}

// IsEnabled : Is Enabled
func (f *Feature) IsEnabled() bool {
	return f.Enabled
}

// GetSegmentRules : Get Segment Rules
func (f *Feature) GetSegmentRules() []SegmentRule {
	return f.SegmentRules
}
// GetCurrentValue : Get Current Value.
// Evaluates the feature for the given entity and returns the type-casted
// result; returns nil when the entity id is empty or the feature is missing
// mandatory fields.
func (f *Feature) GetCurrentValue(entityID string, entityAttributes map[string]interface{}) interface{} {
	log.Debug(messages.RetrievingFeature)
	if entityID == "" {
		log.Error(messages.SetEntityObjectIDError)
		return nil
	}
	if !f.isFeatureValid() {
		return nil
	}
	val := f.featureEvaluation(entityID, entityAttributes)
	return getTypeCastedValue(val, f.GetFeatureDataType(), f.GetFeatureDataFormat())
}
// isFeatureValid reports whether every mandatory feature field is present.
func (f *Feature) isFeatureValid() bool {
	if f.Name == "" || f.FeatureID == "" || f.DataType == "" {
		return false
	}
	return f.EnabledValue != nil && f.DisabledValue != nil
}
// featureEvaluation evaluates the feature for the given entity.
// On exit it always records a metering evaluation with the matched segment
// (or the default segment id). It returns the disabled value when the
// feature is off; the enabled value when no attributes or no rule match;
// otherwise the first matching segment rule's value (with "$default"
// standing in for the enabled value). Rules are visited in ascending order
// of their declared order field.
func (f *Feature) featureEvaluation(entityID string, entityAttributes map[string]interface{}) interface{} {
	var evaluatedSegmentID string = constants.DefaultSegmentID
	defer func() {
		utils.GetMeteringInstance().RecordEvaluation(f.GetFeatureID(), "", entityID, evaluatedSegmentID)
	}()
	if f.IsEnabled() {
		log.Debug(messages.EvaluatingFeature)
		defer utils.GracefullyHandleError()
		// BUG fix: this guard was `len(entityAttributes) < 0`, which is
		// always false — the fast path for an entity without attributes
		// never executed and segment rules were evaluated needlessly.
		if len(entityAttributes) == 0 {
			log.Debug(f.GetEnabledValue())
			return f.GetEnabledValue()
		}
		if len(f.GetSegmentRules()) > 0 {
			rulesMap := f.parseRules(f.GetSegmentRules())
			// sort the map elements as per ascending order of keys
			var keys []int
			for k := range rulesMap {
				keys = append(keys, k)
			}
			sort.Ints(keys)
			// after sorting, pick up each map element as per keys order
			for _, k := range keys {
				segmentRule := rulesMap[k]
				for _, rule := range segmentRule.GetRules() {
					for _, segmentKey := range rule.Segments {
						if f.evaluateSegment(string(segmentKey), entityAttributes) {
							evaluatedSegmentID = segmentKey
							if segmentRule.GetValue() == "$default" {
								log.Debug(messages.FeatureValue)
								log.Debug(f.GetEnabledValue())
								return f.GetEnabledValue()
							}
							log.Debug(messages.FeatureValue)
							log.Debug(segmentRule.GetValue())
							return segmentRule.GetValue()
						}
					}
				}
			}
		}
		// No rules defined, or none matched.
		return f.GetEnabledValue()
	}
	return f.GetDisabledValue()
}
// parseRules indexes the segment rules by their order value so callers can
// walk them in a deterministic sequence.
func (f *Feature) parseRules(segmentRules []SegmentRule) map[int]SegmentRule {
	log.Debug(messages.ParsingFeatureRules)
	defer utils.GracefullyHandleError()
	ordered := make(map[int]SegmentRule, len(segmentRules))
	for _, segmentRule := range segmentRules {
		ordered[segmentRule.GetOrder()] = segmentRule
	}
	log.Debug(ordered)
	return ordered
}
// evaluateSegment reports whether the cached segment identified by
// segmentKey matches the given entity attributes. Unknown segment keys
// never match.
func (f *Feature) evaluateSegment(segmentKey string, entityAttributes map[string]interface{}) bool {
	log.Debug(messages.EvaluatingSegments)
	segment, ok := GetCacheInstance().SegmentMap[segmentKey]
	if ok {
		return segment.EvaluateRule(entityAttributes)
	}
	return false
}
|
package Surrounded_Regions
// solve captures all 'O' regions not connected to the board's border,
// flipping them to 'X' in place (LeetCode 130, Surrounded Regions).
// Border-connected 'O' cells are discovered by depth-first flood fill and
// kept as-is.
func solve(board [][]byte) {
	if len(board) == 0 {
		return
	}
	rows := len(board)
	cols := len(board[0])
	seen := make([][]bool, rows)
	for r := range board {
		seen[r] = make([]bool, len(board[r]))
	}
	// flood marks every 'O' reachable from (r, c) as border-connected.
	var flood func(r, c int)
	flood = func(r, c int) {
		if r < 0 || c < 0 || r >= rows || c >= len(board[r]) ||
			seen[r][c] || board[r][c] == 'X' {
			return
		}
		seen[r][c] = true
		flood(r-1, c)
		flood(r+1, c)
		flood(r, c-1)
		flood(r, c+1)
	}
	// Seed the fill from every border cell.
	for r := 0; r < rows; r++ {
		flood(r, 0)
		flood(r, cols-1)
	}
	for c := 1; c < cols-1; c++ {
		flood(0, c)
		flood(rows-1, c)
	}
	// Everything not border-connected is surrounded: capture it.
	for r := range board {
		for c := range board[r] {
			if board[r][c] != 'X' && !seen[r][c] {
				board[r][c] = 'X'
			}
		}
	}
}
|
package wire
import (
"errors"
"strings"
"testing"
"github.com/moov-io/base"
"github.com/stretchr/testify/require"
)
// mockSenderDepositoryInstitution creates a valid SenderDepositoryInstitution
// fixture (Wells Fargo's ABA routing number) used by the tests below.
func mockSenderDepositoryInstitution() *SenderDepositoryInstitution {
	sdi := NewSenderDepositoryInstitution()
	sdi.SenderABANumber = "121042882"
	sdi.SenderShortName = "Wells Fargo NA"
	return sdi
}
// TestMockSenderDepositoryInstitution validates mockSenderDepositoryInstitution,
// guarding the shared fixture every other test in this file relies on.
func TestMockSenderDepositoryInstitution(t *testing.T) {
	sdi := mockSenderDepositoryInstitution()
	require.NoError(t, sdi.Validate(), "mockSenderDepositoryInstitution does not validate and will break other tests")
}
// TestSenderABANumberAlphaNumeric validates that a non-numeric
// SenderABANumber is rejected with ErrNonNumeric.
func TestSenderABANumberAlphaNumeric(t *testing.T) {
	sdi := mockSenderDepositoryInstitution()
	sdi.SenderABANumber = "®"
	if err := sdi.Validate(); !base.Match(err, ErrNonNumeric) {
		t.Errorf("%T: %s", err, err)
	}
}
// TestSenderShortNameAlphaNumeric validates that a non-alphanumeric
// SenderShortName is rejected with the corresponding field error.
func TestSenderShortNameAlphaNumeric(t *testing.T) {
	sdi := mockSenderDepositoryInstitution()
	sdi.SenderShortName = "®"
	err := sdi.Validate()
	want := fieldError("SenderShortName", ErrNonAlphanumeric, sdi.SenderShortName).Error()
	require.EqualError(t, err, want)
}
// TestSenderABANumberRequired validates SenderDepositoryInstitution SenderABANumber is required
func TestSenderABANumberRequired(t *testing.T) {
	sdi := mockSenderDepositoryInstitution()
	sdi.SenderABANumber = ""
	want := fieldError("SenderABANumber", ErrFieldRequired, sdi.SenderABANumber).Error()
	require.EqualError(t, sdi.Validate(), want)
}
// TestParseSenderWrongLength parses a wrong Sender record length
func TestParseSenderWrongLength(t *testing.T) {
	// record too short to hold a 9-digit ABA number
	var line = "{3100}0012"
	r := NewReader(strings.NewReader(line))
	r.line = line
	err := r.parseSenderDepositoryInstitution()
	// expectation is built after the parse so parseError sees the reader's state
	require.EqualError(t, err, r.parseError(fieldError("SenderABANumber", ErrValidLength)).Error())
}
// TestParseSenderReaderParseError parses a wrong Sender reader parse error
func TestParseSenderReaderParseError(t *testing.T) {
	// "Z" in the ABA number makes it non-numeric
	var line = "{3100}1210Z2882Wells Fargo NA "
	r := NewReader(strings.NewReader(line))
	r.line = line
	err := r.parseSenderDepositoryInstitution()
	require.EqualError(t, err, r.parseError(fieldError("SenderABANumber", ErrNonNumeric, "1210Z2882")).Error())
	// a full Read over the same input surfaces the same parse error
	_, err = r.Read()
	require.EqualError(t, err, r.parseError(fieldError("SenderABANumber", ErrNonNumeric, "1210Z2882")).Error())
}
// TestSenderDepositoryInstitutionTagError validates a SenderDepositoryInstitution tag
func TestSenderDepositoryInstitutionTagError(t *testing.T) {
	sdi := mockSenderDepositoryInstitution()
	sdi.tag = "{9999}"
	want := fieldError("tag", ErrValidTagForType, sdi.tag).Error()
	require.EqualError(t, sdi.Validate(), want)
}
// TestStringSenderDepositoryInstitutionVariableLength parses using variable length
func TestStringSenderDepositoryInstitutionVariableLength(t *testing.T) {
	var line = "{3100}1*A*"
	r := NewReader(strings.NewReader(line))
	r.line = line
	err := r.parseSenderDepositoryInstitution()
	// require.NoError replaces require.Nil/require.Equal(err, nil): same check,
	// clearer failure output (prints the error instead of a diff against nil).
	require.NoError(t, err)
	// fixed-length record padded past the maximum tag length
	line = "{3100}1 A NNN"
	r = NewReader(strings.NewReader(line))
	r.line = line
	err = r.parseSenderDepositoryInstitution()
	require.ErrorContains(t, err, r.parseError(NewTagMaxLengthErr(errors.New(""))).Error())
	// too many variable-length delimiters
	line = "{3100}1*A***"
	r = NewReader(strings.NewReader(line))
	r.line = line
	err = r.parseSenderDepositoryInstitution()
	require.ErrorContains(t, err, r.parseError(NewTagMaxLengthErr(errors.New(""))).Error())
	// well-formed variable-length record parses cleanly
	line = "{3100}1*A*"
	r = NewReader(strings.NewReader(line))
	r.line = line
	err = r.parseSenderDepositoryInstitution()
	require.NoError(t, err)
}
// TestStringSenderDepositoryInstitutionOptions validates Format() formatted according to the FormatOptions
func TestStringSenderDepositoryInstitutionOptions(t *testing.T) {
	var line = "{3100}1*A*"
	r := NewReader(strings.NewReader(line))
	r.line = line
	err := r.parseSenderDepositoryInstitution()
	// require.NoError replaces require.Equal(err, nil) for clearer failures.
	require.NoError(t, err)
	record := r.currentFEDWireMessage.SenderDepositoryInstitution
	// testify's Equal signature is (t, expected, actual): literals go first.
	require.Equal(t, "{3100}1 A ", record.String())
	require.Equal(t, "{3100}1*A*", record.Format(FormatOptions{VariableLengthFields: true}))
	require.Equal(t, record.String(), record.Format(FormatOptions{VariableLengthFields: false}))
	line = "{3100}1*"
	r = NewReader(strings.NewReader(line))
	r.line = line
	err = r.parseSenderDepositoryInstitution()
	require.NoError(t, err)
	record = r.currentFEDWireMessage.SenderDepositoryInstitution
	require.Equal(t, "{3100}1 ", record.String())
	require.Equal(t, "{3100}1*", record.Format(FormatOptions{VariableLengthFields: true}))
	require.Equal(t, record.String(), record.Format(FormatOptions{VariableLengthFields: false}))
	line = "{3100}111111111*"
	r = NewReader(strings.NewReader(line))
	r.line = line
	err = r.parseSenderDepositoryInstitution()
	require.NoError(t, err)
	record = r.currentFEDWireMessage.SenderDepositoryInstitution
	require.Equal(t, "{3100}111111111 ", record.String())
	require.Equal(t, "{3100}111111111", record.Format(FormatOptions{VariableLengthFields: true}))
	require.Equal(t, record.String(), record.Format(FormatOptions{VariableLengthFields: false}))
}
|
// Package hex implements hexadecimal encoding and decoding.
package hex
import (
"encoding/hex"
"fmt"
"regexp"
"strings"
)
// pat matches one line of hex.Dump output: the 8-character offset column
// followed by the captured run of hex byte columns.
var pat = regexp.MustCompile("[0-9a-z]{8} ([0-9a-z ]+) ")

// Dump takes a byte slice and transforms it into Go-source style
// "0xNN, 0xNN," lines derived from the hex.Dump representation of data.
func Dump(data []byte) string {
	var b strings.Builder
	for _, subs := range pat.FindAllStringSubmatch(hex.Dump(data), -1) {
		for i, sub := range subs {
			// index 0 is the whole match; only the capture groups are wanted
			if i == 0 {
				continue
			}
			b.WriteString(fmt.Sprintf("0x%s,\n", strings.ReplaceAll(strings.TrimSpace(strings.ReplaceAll(sub, " ", " ")), " ", ", 0x")))
		}
	}
	return b.String()
}
|
package api
import (
"context"
"fmt"
"log"
"net/http"
"github.com/edoardo849/bezos/pkg/order"
"github.com/gorilla/mux"
)
const (
	// apiVersion is the version segment under which all API routes are
	// mounted (e.g. /v1/orders).
	apiVersion = "v1"
)
// New creates a new Server wired to the given order service.
// The stopChan is used by the caller to trigger graceful shutdown.
// The provided router is now actually used; previously the parameter was
// silently ignored and a fresh router was created instead, so any routes the
// caller had registered on r were lost.
func New(os order.Service, r *mux.Router, stopChan chan struct{}) *Server {
	// keep backward compatibility with callers that pass nil
	if r == nil {
		r = mux.NewRouter()
	}
	return &Server{
		orderService: os,
		router:       r,
		stopChan:     stopChan,
	}
}
// Server is the server: it glues the order service to the HTTP router and
// shuts down gracefully when stopChan receives or is closed.
type Server struct {
	orderService order.Service // business logic behind the order endpoints
	router       *mux.Router   // route table installed as the http handler
	stopChan     chan struct{} // a receive here triggers graceful shutdown
	http         *http.Server  // set by ServeHTTP
}
// ServeHTTP registers the routes on srv, starts listening, and shuts the
// server down gracefully when stopChan fires. It blocks until
// ListenAndServe returns. (The comment previously said "Run runs the
// server", which named a method that does not exist.)
func (s *Server) ServeHTTP(srv *http.Server) error {
	// Renamed the parameter from `http`, which shadowed the net/http package.
	s.http = srv
	// http://zabana.me/notes/enable-cors-in-go-api.html
	s.registerHandlers()
	s.http.Handler = s.router
	log.Printf("Server listening on %s\n", s.http.Addr)
	go func() {
		<-s.stopChan
		log.Println("Shutting down server")
		// Shutdown's error was previously discarded; surface it in the log.
		if err := s.http.Shutdown(context.Background()); err != nil {
			log.Printf("Shutdown error: %v\n", err)
		}
	}()
	return s.http.ListenAndServe()
}
// registerHandlers mounts every versioned API route onto the server's router.
func (s *Server) registerHandlers() {
	// gorilla/mux provides rich routing; see http://www.gorillatoolkit.org/pkg/mux
	api := s.router.PathPrefix(fmt.Sprintf("/%s", apiVersion)).Subrouter()
	api.HandleFunc("/orders", withBasicAuth(handleOrdersCreate(s.orderService))).Methods("POST")
	api.NotFoundHandler = handle404()
}
|
package main
import (
"fmt"
)
// main demonstrates that an empty slice literal is non-nil: it has a
// backing structure even though it holds no elements.
func main() {
	student, students := []string{}, [][]string{}
	fmt.Println(student)
	fmt.Println(students)
	// false: an empty literal is not the nil slice
	fmt.Println(student == nil)
}
|
package main
import (
"fmt"
)
// seasonGenerator maps a month number (1-12) to its season name; any other
// value yields "none".
func seasonGenerator(month int) string {
	switch {
	case month >= 3 && month <= 5:
		return "Spring"
	case month >= 6 && month <= 8:
		return "Summer"
	case month >= 9 && month <= 11:
		return "Fall"
	case month == 12 || month == 1 || month == 2:
		return "Winter"
	}
	return "none"
}
// main prints the season for an example month.
func main() {
	const exampleMonth = 10
	fmt.Println(seasonGenerator(exampleMonth))
}
|
package mapqueryparam
import (
"encoding/json"
"errors"
"fmt"
"net/url"
"reflect"
"strconv"
"strings"
"time"
)
// EncodeValues takes an input struct and encodes the content into the form
// of a set of query parameters. Input must be a pointer to a struct.
// Same as Encode, typed as url.Values.
func EncodeValues(v interface{}) (url.Values, error) {
	values, err := Encode(v)
	return values, err
}
// Encode takes an input struct and encodes the content into the form of a
// set of query parameters. Input must be a pointer to a struct.
// Same as EncodeValues. nil (or a nil pointer) yields an empty map.
func Encode(v interface{}) (map[string][]string, error) {
	if v == nil {
		return map[string][]string{}, nil
	}
	// unwrap any levels of pointer indirection
	val := reflect.ValueOf(v)
	for val.Kind() == reflect.Ptr {
		if val.IsNil() {
			return map[string][]string{}, nil
		}
		val = val.Elem()
	}
	if val.Kind() != reflect.Struct {
		return nil, errors.New("unable to encode non-struct")
	}
	res := make(map[string][]string)
	if err := encodeFields(val, res); err != nil {
		return res, err
	}
	return res, nil
}
// encodeFields iterates over the fields of the value passed to it, and stores the encoded fields in the results map.
// Embedded (anonymous) fields are flattened recursively.
func encodeFields(val reflect.Value, result map[string][]string) error {
	for i := 0; i < val.NumField(); i++ {
		fTyp := val.Type().Field(i)
		// don't encode unexported fields
		isUnexported := fTyp.PkgPath != ""
		if isUnexported {
			continue
		}
		// don't attempt to encode empty fields
		fVal := val.Field(i)
		if isEmptyValue(fVal) {
			continue
		}
		// iterate over embedded fields
		if fTyp.Anonymous {
			// Unwrap pointer embeds. The previous code used `continue` on a
			// nil pointer inside this loop, which would have spun forever had
			// it ever been reached (isEmptyValue screens single nil pointers
			// today, but pointer-to-pointer embeds could slip past it).
			for fVal.Kind() == reflect.Ptr && !fVal.IsNil() {
				fVal = fVal.Elem()
			}
			if fVal.Kind() == reflect.Ptr {
				// still a nil pointer after unwrapping: nothing to encode
				continue
			}
			err := encodeFields(fVal, result)
			if err != nil {
				return err
			}
			continue
		}
		d, err := encodeField(fVal)
		if err != nil {
			return err
		}
		if len(d) == 0 {
			continue
		}
		fieldTags := getFieldTags(fTyp)
		result[fieldTags[0]] = d
	}
	return nil
}
// getFieldTags returns the identifiers a struct field is known by, in
// priority order: the mqp tag entries, then the first json tag name, and
// finally the raw field name when neither tag is available.
func getFieldTags(t reflect.StructField) []string {
	var res []string
	if tag := t.Tag.Get(mapQueryParameterTagName); len(tag) > 0 {
		for _, part := range strings.Split(tag, ",") {
			if len(part) > 0 {
				res = append(res, part)
			}
		}
		// a non-empty mqp tag wins over json and the field name
		if len(res) > 0 {
			return res
		}
	}
	if tag := t.Tag.Get("json"); len(tag) > 0 {
		jsonTags := strings.Split(tag, ",")
		if len(jsonTags) > 0 && len(jsonTags[0]) > 0 {
			// a json name wins over the field name
			return append(res, jsonTags[0])
		}
	}
	return append(res, t.Name)
}
// encodeField encodes a field of the input struct as a set of parameter strings. Arrays and slices are represented as
// multiple strings. Other values are encoded as a single string
func encodeField(v reflect.Value) ([]string, error) {
	switch v.Kind() {
	case reflect.Array, reflect.Slice:
		out := make([]string, v.Len())
		for i := range out {
			encoded, err := encodeValue(v.Index(i))
			if err != nil {
				return nil, err
			}
			out[i] = encoded
		}
		return out, nil
	case reflect.Interface, reflect.Ptr:
		// unwrap and encode the pointed-to value
		return encodeField(v.Elem())
	}
	encoded, err := encodeValue(v)
	if err != nil {
		return nil, err
	}
	return []string{encoded}, nil
}
// encodeValue encodes a single value as a string. Base types are formatted using `strconv`. Maps and structs are
// encoded as json objects using standard json marshaling. Channels and functions are skipped, as they're not supported.
func encodeValue(v reflect.Value) (string, error) {
switch v.Kind() {
case reflect.String:
return v.String(), nil
case reflect.Bool:
return strconv.FormatBool(v.Bool()), nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(v.Int(), 10), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return strconv.FormatUint(v.Uint(), 10), nil
case reflect.Float32:
return strconv.FormatFloat(v.Float(), 'f', -1, 32), nil
case reflect.Float64:
return strconv.FormatFloat(v.Float(), 'f', -1, 64), nil
case reflect.Complex64:
return strconv.FormatComplex(v.Complex(), 'f', -1, 64), nil
case reflect.Complex128:
return strconv.FormatComplex(v.Complex(), 'f', -1, 128), nil
case reflect.Map, reflect.Struct:
i := v.Interface()
switch t := i.(type) {
case time.Time:
return t.Format(time.RFC3339Nano), nil
default:
b, err := json.Marshal(i)
return string(b), err
}
case reflect.Interface, reflect.Ptr:
return encodeValue(v.Elem())
case reflect.Chan, reflect.Func:
return "", nil
default:
return "", fmt.Errorf("unsupported field kind: %s", v.Kind().String())
}
}
// isEmptyValue validated whether a value is empty/zero/nil. Used to determine if a field should be omitted from the
// encoded result.
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Complex64, reflect.Complex128:
return v.Complex() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflect.Chan, reflect.Func:
return true
case reflect.Struct:
i := v.Interface()
switch t := i.(type) {
case time.Time:
return t.IsZero()
}
}
return false
}
|
package rtsp
import (
"fmt"
"log"
"net"
"os"
"sync"
)
// Server is the RTSP server: it listens on TCPPort and tracks the active
// Pusher for each stream path.
type Server struct {
	SessionLogger
	TCPListener *net.TCPListener // live listener; nil after Stop
	TCPPort     int              // port to listen on (default 554)
	Stoped      bool             // NOTE(review): read/written from multiple goroutines without synchronization — confirm Start/Stop callers serialize access
	pushers     map[string]*Pusher // Path <-> Pusher
	pushersLock sync.RWMutex       // guards pushers
}
// Instance is the process-wide RTSP server singleton: default RTSP port
// 554, starting in the stopped state.
var Instance *Server = &Server{
	SessionLogger: SessionLogger{log.New(os.Stdout, "[RTSPServer]", log.LstdFlags|log.Lshortfile)},
	Stoped:        true,
	TCPPort:       554,
	pushers:       make(map[string]*Pusher),
}

// GetServer returns the singleton server instance.
func GetServer() *Server {
	return Instance
}
// Start resolves and opens the TCP listener on server.TCPPort, then accepts
// connections in a loop until Stop is called, spawning one Session goroutine
// per connection. It blocks for the server's lifetime; it returns a
// resolve/listen error immediately, otherwise the last Accept error seen.
func (server *Server) Start() (err error) {
	var (
		logger   = server.logger
		addr     *net.TCPAddr
		listener *net.TCPListener
	)
	if addr, err = net.ResolveTCPAddr("tcp", fmt.Sprintf(":%d", server.TCPPort)); err != nil {
		return
	}
	if listener, err = net.ListenTCP("tcp", addr); err != nil {
		return
	}
	server.Stoped = false
	server.TCPListener = listener
	logger.Println("rtsp server start on", server.TCPPort)
	// 1 MiB socket buffers per connection to absorb bursty media traffic.
	networkBuffer := 1048576
	for !server.Stoped {
		var (
			conn net.Conn
		)
		// When Stop closes the listener, Accept fails; the error is logged
		// and the loop exits via the Stoped flag on the next iteration.
		if conn, err = server.TCPListener.Accept(); err != nil {
			logger.Println(err)
			continue
		}
		if tcpConn, ok := conn.(*net.TCPConn); ok {
			if err = tcpConn.SetReadBuffer(networkBuffer); err != nil {
				logger.Printf("rtsp server conn set read buffer error, %v", err)
			}
			if err = tcpConn.SetWriteBuffer(networkBuffer); err != nil {
				logger.Printf("rtsp server conn set write buffer error, %v", err)
			}
		}
		// Each connection is handled by its own session goroutine.
		session := NewSession(server, conn)
		go session.Start()
	}
	return
}
// Stop marks the server stopped, closes the listener (which unblocks the
// Accept loop in Start), and drops all registered pushers.
func (server *Server) Stop() {
	server.logger.Println("rtsp server stop on", server.TCPPort)
	server.Stoped = true
	if listener := server.TCPListener; listener != nil {
		listener.Close()
		server.TCPListener = nil
	}
	server.pushersLock.Lock()
	defer server.pushersLock.Unlock()
	server.pushers = make(map[string]*Pusher)
}
// AddPusher registers pusher under its path and starts it, unless a pusher
// already exists for that path. It reports whether the pusher was actually
// added; the previous version returned true unconditionally, which made the
// boolean result meaningless to callers.
func (server *Server) AddPusher(pusher *Pusher) bool {
	server.pushersLock.Lock()
	defer server.pushersLock.Unlock()
	if _, ok := server.pushers[pusher.Path()]; ok {
		// a pusher is already serving this path; nothing added
		return false
	}
	server.pushers[pusher.Path()] = pusher
	go pusher.Start()
	return true
}
// TryAttachToPusher attempts to rebind session onto the existing pusher for
// its path. It returns (1, pusher) on success, (-1, nil) when rebinding
// failed, and (0, nil) when no pusher is registered for the path.
func (server *Server) TryAttachToPusher(session *Session) (int, *Pusher) {
	server.pushersLock.Lock()
	defer server.pushersLock.Unlock()
	existing, ok := server.pushers[session.Path]
	if !ok {
		return 0, nil
	}
	if !existing.RebindSession(session) {
		return -1, nil
	}
	session.logger.Printf("Attached to a pusher")
	return 1, existing
}
// RemovePusher unregisters pusher, but only if the pusher currently mapped
// to its path is the very same one (matched by ID), so a newer pusher that
// reused the path is left untouched.
func (server *Server) RemovePusher(pusher *Pusher) {
	server.pushersLock.Lock()
	defer server.pushersLock.Unlock()
	registered, ok := server.pushers[pusher.Path()]
	if ok && pusher.ID() == registered.ID() {
		delete(server.pushers, pusher.Path())
		server.logger.Printf("%v end, now pusher size[%d]\n", pusher, len(server.pushers))
	}
}
// GetPusher returns the pusher registered for path, or nil if none exists.
func (server *Server) GetPusher(path string) *Pusher {
	server.pushersLock.RLock()
	defer server.pushersLock.RUnlock()
	return server.pushers[path]
}
// GetPushers returns a snapshot copy of the path-to-pusher map, so callers
// can iterate without holding the server's lock.
func (server *Server) GetPushers() map[string]*Pusher {
	server.pushersLock.RLock()
	defer server.pushersLock.RUnlock()
	snapshot := make(map[string]*Pusher, len(server.pushers))
	for path, p := range server.pushers {
		snapshot[path] = p
	}
	return snapshot
}
// GetPusherSize returns the number of currently registered pushers.
func (server *Server) GetPusherSize() int {
	server.pushersLock.RLock()
	defer server.pushersLock.RUnlock()
	return len(server.pushers)
}
|
package main
import (
"bufio"
"container/heap"
"fmt"
"log"
"os"
"sort"
)
// intHeap is a min-heap of ints implementing heap.Interface.
type intHeap []int

func (h intHeap) Len() int           { return len(h) }
func (h intHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

// Push appends x to the backing slice; use heap.Push to preserve heap order.
func (h *intHeap) Push(x interface{}) {
	*h = append(*h, x.(int))
}

// Pop removes and returns the final element; use heap.Pop to get the minimum.
func (h *intHeap) Pop() interface{} {
	last := len(*h) - 1
	v := (*h)[last]
	*h = (*h)[:last]
	return v
}

// notyet reports whether x is absent from the heap's backing slice.
func (h intHeap) notyet(x int) bool {
	for _, v := range h {
		if v == x {
			return false
		}
	}
	return true
}
// notagain reports whether x does not occur in t.
func notagain(t []int, x int) bool {
	for i := 0; i < len(t); i++ {
		if t[i] == x {
			return false
		}
	}
	return true
}
// findMin extends a sequence whose first k terms come from the linear
// congruence m[i] = (b*m[i-1]+c) % r (seeded with a): every later term is
// the smallest non-negative integer absent from the preceding k terms.
// It returns the n-th term (assumes n > k — TODO confirm with callers).
func findMin(n, k, a, b, c, r int) int {
	// generate the k seed terms
	m := []int{a}
	for i := 1; i < k; i++ {
		m = append(m, (b*m[i-1]+c)%r)
	}
	// o is a sorted copy of the seeds, used to find the k+1 smallest
	// non-negative integers that do NOT occur among them
	o := make([]int, k)
	copy(o, m)
	sort.Ints(o)
	h := &intHeap{}
	heap.Init(h)
	// merge-style scan: push each candidate x into the heap unless it
	// appears in o; stop after k+1 candidates have been collected
	var x, y int
	for i := 0; i <= k; {
		if y >= k || x < o[y] {
			heap.Push(h, x)
			x++
			i++
		} else {
			if x == o[y] {
				x++
			}
			y++
		}
	}
	// repeatedly take the heap minimum as the next term; the value sliding
	// out of the k-wide window (m[len(m)-k]) becomes a candidate again, but
	// only if it is in neither the heap nor the remaining window
	for len(m)+1 < n {
		p := heap.Pop(h).(int)
		if h.notyet(m[len(m)-k]) && notagain(m[len(m)-k+1:len(m)], m[len(m)-k]) {
			heap.Push(h, m[len(m)-k])
		}
		m = append(m, p)
	}
	// the heap minimum at this point is the n-th term
	return heap.Pop(h).(int)
}
// main reads comma-separated "n,k,a,b,c,r" lines from the file named by the
// first argument and prints findMin for each line.
func main() {
	// previously an out-of-range os.Args[1] access would panic
	if len(os.Args) < 2 {
		log.Fatal("usage: program <input-file>")
	}
	var n, k, a, b, c, r int
	data, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer data.Close()
	scanner := bufio.NewScanner(data)
	for scanner.Scan() {
		fmt.Sscanf(scanner.Text(), "%d,%d,%d,%d,%d,%d", &n, &k, &a, &b, &c, &r)
		fmt.Println(findMin(n, k, a, b, c, r))
	}
	// previously a read error would silently truncate the input
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
|
// Package ingest provides the Ingest interface.
package ingest
import (
"github.com/littlebunch/gnutdata-bfpd-api/ds"
)
// Counts holds counts for documents loaded during an ingest
// process.
type Counts struct {
	Foods     int `json:"foods"`     // food documents loaded
	Servings  int `json:"servings"`  // serving documents loaded
	Nutrients int `json:"nutrients"` // nutrient documents loaded
	Other     int `json:"other"`     // any other document types loaded
}
// Ingest wraps the basic methods used for loading different
// Food Data Central documents, i.e. Branded Foods, Standard Release Legacy,
// Nutrients, etc.
type Ingest interface {
	// ProcessFiles loads the documents found at path into dc.
	ProcessFiles(path string, dc ds.DataSource) error
}
|
package main
import (
"fmt"
)
// sequenceEquation solves p(p(y)) = x for every x in 1..len(px), where px
// is a permutation of 1..len(px); the i-th result is the y satisfying
// px[px[y-1]-1] == i+1.
func sequenceEquation(px []int) []int {
	// inv maps each value back to its 1-based position in px
	inv := make(map[int]int, len(px))
	for i, v := range px {
		inv[v] = i + 1
	}
	// applying the inverse twice undoes p(p(·))
	py := make([]int, 0, len(px))
	for x := 1; x <= len(px); x++ {
		py = append(py, inv[inv[x]])
	}
	return py
}
// main runs sequenceEquation on a sample permutation and prints the result.
func main() {
	input := []int{2, 3, 1}
	fmt.Println(sequenceEquation(input))
}
|
package main
import "fmt"
// Contact pairs a greeting phrase with a person's name.
type Contact struct {
	greeting string // greeting phrase, e.g. "Good to see you"
	name     string // person's name
}
// FUNCTIONS ARE TYPES: in Go a function value can be passed around like any
// other argument. Greet demonstrates this by accepting the output function
// (myWassa, any func(string)) as a parameter and calling it with the two
// messages produced by CreateMessage.
func Greet(person Contact, myWassa func(string)) {
	greetingMsg, nameMsg := CreateMessage(person.name, person.greeting, "howdy")
	myWassa(greetingMsg)
	myWassa(nameMsg)
}
// CreateMessage builds two messages for name. Note it uses the SECOND
// variadic greeting (greeting[1]), so callers must supply at least two
// greetings or it panics.
func CreateMessage(name string, greeting ...string) (string, string) {
	return greeting[1] + " " + name, "\nHey, " + name + "\n"
}
// STEP 2: a candidate callback to pass into Greet — prints without a
// trailing newline.
func myPrint(s string) {
	fmt.Printf("%s", s)
}
// STEP 2: a second candidate callback — prints with a trailing newline.
func myPrintln(s string) {
	fmt.Printf("%s\n", s)
}
// STEP 3: pass functions as arguments, just like any other value.
func main() {
	tim := Contact{"Good to see you", "Tim"}
	Greet(tim, myPrint)
	jenny := Contact{"Glad you're in class,", "Jenny"}
	Greet(jenny, myPrint)
	// fields can also be set one at a time on a zero-value Contact
	var julian Contact
	julian.greeting = "We're learning great things,"
	julian.name = "Julian"
	Greet(julian, myPrintln)
}
|
package ellipsis
import (
"github.com/ajduncan/vulcan/internal/vulcan"
"github.com/ajduncan/vulcan/pkg/service"
)
// RunEllipsisService starts the "ellipsis" vulcan service on the address
// built from BEACON_HOST and BEACON_PORT (defaults 127.0.0.1:8003).
func RunEllipsisService() {
	host := vulcan.Getenv("BEACON_HOST", "127.0.0.1")
	port := vulcan.Getenv("BEACON_PORT", "8003")
	svc := service.NewVulcanService("ellipsis", host+":"+port)
	svc.RunVulcanServer()
}
|
package tasks
import (
"github.com/jsm/gode/worker/tasknames"
)
// TaskMap correlates task names to their respective functions
var TaskMap = map[string]interface{}{
	tasknames.Test: test, // worker availability probe; see test below
}
// test is the handler behind tasknames.Test: it always succeeds with a
// fixed reply so a caller can verify the worker is reachable.
func test() (string, error) {
	const reply = "tested"
	return reply, nil
}
|
package v2
import (
"encoding/hex"
"errors"
"log"
"net/http"
"net/url"
"github.com/labstack/echo/v4"
"github.com/traPtitech/trap-collection-server/src/domain/values"
"github.com/traPtitech/trap-collection-server/src/handler/v2/openapi"
"github.com/traPtitech/trap-collection-server/src/service"
)
// GameFile handles the /games/{gameID}/files endpoints, delegating all
// business logic to the GameFileV2 service.
type GameFile struct {
	gameFileService service.GameFileV2
}
// NewGameFile constructs a GameFile handler backed by the given service.
func NewGameFile(gameFileService service.GameFileV2) *GameFile {
	handler := &GameFile{gameFileService: gameFileService}
	return handler
}
// ゲームファイル一覧の取得
// (GET /games/{gameID}/files)
// GetGameFiles returns metadata for every file attached to the game.
func (gameFile GameFile) GetGameFiles(c echo.Context, gameID openapi.GameIDInPath) error {
	files, err := gameFile.gameFileService.GetGameFiles(c.Request().Context(), values.NewGameIDFromUUID(gameID))
	if errors.Is(err, service.ErrInvalidGameID) {
		return echo.NewHTTPError(http.StatusNotFound, "invalid gameID")
	}
	if err != nil {
		log.Printf("error: failed to get game files: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to get game files")
	}
	resFiles := make([]openapi.GameFile, 0, len(files))
	for _, file := range files {
		// map the domain file-type enum onto its API representation
		var fileType openapi.GameFileType
		switch file.GetFileType() {
		case values.GameFileTypeJar:
			fileType = openapi.Jar
		case values.GameFileTypeWindows:
			fileType = openapi.Win32
		case values.GameFileTypeMac:
			fileType = openapi.Darwin
		default:
			log.Printf("error: unknown game file type: %v\n", file.GetFileType())
			return echo.NewHTTPError(http.StatusInternalServerError, "unknown game file type")
		}
		// use the openapi-typed conversions like PostGameFile/GetGameFileMeta
		// do, instead of raw string conversions, for consistency
		resFiles = append(resFiles, openapi.GameFile{
			Id:         openapi.GameFileID(file.GetID()),
			Type:       fileType,
			EntryPoint: openapi.GameFileEntryPoint(file.GetEntryPoint()),
			Md5:        openapi.GameFileMd5(hex.EncodeToString(file.GetHash())),
			CreatedAt:  file.GetCreatedAt(),
		})
	}
	return c.JSON(http.StatusOK, resFiles)
}
// ゲームファイルの作成
// (POST /games/{gameID}/files)
// PostGameFile accepts a multipart form ("content" file plus "entryPoint"
// and "type" values), stores the file via the service, and echoes the saved
// metadata back with 201 Created.
func (gameFile GameFile) PostGameFile(c echo.Context, gameID openapi.GameIDInPath) error {
	headerFile, err := c.FormFile("content")
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, "invalid file")
	}
	headerEntryPoint := c.FormValue("entryPoint")
	headerFileType := c.FormValue("type")
	if headerEntryPoint == "" {
		return echo.NewHTTPError(http.StatusBadRequest, "entry point is empty")
	}
	if headerFileType == "" {
		return echo.NewHTTPError(http.StatusBadRequest, "file type is empty")
	}
	file, err := headerFile.Open()
	if err != nil {
		log.Printf("error: failed to open file: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to open file")
	}
	defer file.Close()
	entryPoint := values.NewGameFileEntryPoint(headerEntryPoint)
	// map the API file-type string onto the domain enum; anything else is a
	// client error
	var fileType values.GameFileType
	switch openapi.GameFileType(headerFileType) {
	case openapi.Jar:
		fileType = values.GameFileTypeJar
	case openapi.Win32:
		fileType = values.GameFileTypeWindows
	case openapi.Darwin:
		fileType = values.GameFileTypeMac
	default:
		return echo.NewHTTPError(http.StatusBadRequest, "file type is unknown")
	}
	savedFile, err := gameFile.gameFileService.SaveGameFile(c.Request().Context(), file, values.NewGameIDFromUUID(gameID), fileType, entryPoint)
	if errors.Is(err, service.ErrInvalidGameID) {
		return echo.NewHTTPError(http.StatusNotFound, "invalid gameID")
	}
	if err != nil {
		log.Printf("error: failed to save game file: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to save game file")
	}
	// echo back the stored metadata, including the MD5 computed by the service
	return c.JSON(http.StatusCreated, openapi.GameFile{
		Id:         openapi.GameFileID(savedFile.GetID()),
		Type:       openapi.GameFileType(headerFileType),
		EntryPoint: openapi.GameFileEntryPoint(savedFile.GetEntryPoint()),
		Md5:        openapi.GameFileMd5(hex.EncodeToString(savedFile.GetHash())),
		CreatedAt:  savedFile.GetCreatedAt(),
	})
}
// ゲームファイルのバイナリの取得
// (GET /games/{gameID}/files/{gameFileID})
// GetGameFile redirects the client (303 See Other) to a temporary URL from
// which the file's binary can be downloaded.
func (gameFile GameFile) GetGameFile(c echo.Context, gameID openapi.GameIDInPath, gameFileID openapi.GameFileIDInPath) error {
	tmpURL, err := gameFile.gameFileService.GetGameFile(c.Request().Context(), values.NewGameIDFromUUID(gameID), values.NewGameFileIDFromUUID(gameFileID))
	switch {
	case errors.Is(err, service.ErrInvalidGameID):
		return echo.NewHTTPError(http.StatusNotFound, "invalid gameID")
	case errors.Is(err, service.ErrInvalidGameFileID):
		return echo.NewHTTPError(http.StatusNotFound, "invalid gameFileID")
	case err != nil:
		log.Printf("error: failed to get game file: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to get game file")
	}
	return c.Redirect(http.StatusSeeOther, (*url.URL)(tmpURL).String())
}
// ゲームファイルのメタ情報の取得
// (GET /games/{gameID}/files/{gameFileID}/meta)
// GetGameFileMeta returns the stored metadata (type, entry point, MD5,
// creation time) for a single game file as JSON.
func (gameFile GameFile) GetGameFileMeta(ctx echo.Context, gameID openapi.GameIDInPath, gameFileID openapi.GameFileIDInPath) error {
	file, err := gameFile.gameFileService.GetGameFileMeta(ctx.Request().Context(), values.NewGameIDFromUUID(gameID), values.NewGameFileIDFromUUID(gameFileID))
	if errors.Is(err, service.ErrInvalidGameID) {
		return echo.NewHTTPError(http.StatusNotFound, "invalid gameID")
	}
	if errors.Is(err, service.ErrInvalidGameFileID) {
		return echo.NewHTTPError(http.StatusNotFound, "invalid gameFileID")
	}
	if err != nil {
		log.Printf("error: failed to get game file meta: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to get game file meta")
	}
	// map the domain file-type enum onto its API representation
	var fileType openapi.GameFileType
	switch file.GetFileType() {
	case values.GameFileTypeJar:
		fileType = openapi.Jar
	case values.GameFileTypeWindows:
		fileType = openapi.Win32
	case values.GameFileTypeMac:
		fileType = openapi.Darwin
	default:
		log.Printf("error: unknown game file type: %v\n", file.GetFileType())
		return echo.NewHTTPError(http.StatusInternalServerError, "unknown game file type")
	}
	return ctx.JSON(http.StatusOK, openapi.GameFile{
		Id:         openapi.GameFileID(file.GetID()),
		Type:       fileType,
		EntryPoint: openapi.GameFileEntryPoint(file.GetEntryPoint()),
		Md5:        openapi.GameFileMd5(hex.EncodeToString(file.GetHash())),
		CreatedAt:  file.GetCreatedAt(),
	})
}
|
package main
import (
"fmt"
)
// Saiyan is a named character with a power level.
type Saiyan struct {
	Name  string
	Power int
}
// main builds a slice of Saiyan pointers and prints their power levels.
func main() {
	// extractPowers takes []*Saiyan; the previous code built []Saiyan and
	// passed &saiyans (a *[]Saiyan), which does not compile.
	saiyans := make([]*Saiyan, 0, 10)
	saiyans = append(saiyans, &Saiyan{"Goku", 9000})
	fmt.Println(extractPowers(saiyans))
}
// extractPowers returns the Power of each Saiyan, in input order.
func extractPowers(Saiyans []*Saiyan) []int {
	powers := make([]int, 0, len(Saiyans))
	for _, saiyan := range Saiyans {
		// the previous code wrote powers[index] with an undeclared `index`
		// into a zero-length slice; append is the correct way to fill it
		powers = append(powers, saiyan.Power)
	}
	return powers
}
|
package scheduler
import "logserver/slaver/common"
// Scheduler coordinates job events and tracks per-job work state for the
// log-server slaver.
type Scheduler struct {
	logCount     chan int                       // NOTE(review): purpose not visible here — presumably log line counts; confirm with the code that uses it
	JobEventChan chan *common.JobEvent          // incoming job events
	JobWorkTable map[string]*common.JobWorkInfo // active job work info, keyed by job name
}

// Gscheduler is the global scheduler instance; initialized elsewhere.
var Gscheduler *Scheduler
|
package e2e
import (
"fmt"
"os"
"os/user"
"path/filepath"
"testing"
appset "github.com/openshift/client-go/apps/clientset/versioned"
buildset "github.com/openshift/client-go/build/clientset/versioned"
imageset "github.com/openshift/client-go/image/clientset/versioned"
projectset "github.com/openshift/client-go/project/clientset/versioned"
routeset "github.com/openshift/client-go/route/clientset/versioned"
templateset "github.com/openshift/client-go/template/clientset/versioned"
kubeset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// Shared config and typed clientsets, lazily initialized by setupClients
// and reused across all e2e tests in this package.
var (
	kubeConfig     *rest.Config
	kubeClient     *kubeset.Clientset
	buildClient    *buildset.Clientset
	appClient      *appset.Clientset
	projectClient  *projectset.Clientset
	templateClient *templateset.Clientset
	imageClient    *imageset.Clientset
	routeClient    *routeset.Clientset
)
// getConfig resolves a kubernetes client configuration, trying in order:
// the $KUBECONFIG file, the in-cluster config, then ~/.kube/config.
func getConfig() (*rest.Config, error) {
	// an explicitly configured kubeconfig location wins
	if kc := os.Getenv("KUBECONFIG"); len(kc) > 0 {
		return clientcmd.BuildConfigFromFlags("", kc)
	}
	// next, see whether we are running inside a cluster
	if c, err := rest.InClusterConfig(); err == nil {
		return c, nil
	}
	// finally, fall back to the default file in the user's home directory
	if usr, err := user.Current(); err == nil {
		defaultPath := filepath.Join(usr.HomeDir, ".kube", "config")
		if c, err := clientcmd.BuildConfigFromFlags("", defaultPath); err == nil {
			return c, nil
		}
	}
	return nil, fmt.Errorf("could not locate a kubeconfig")
}
// setupClients lazily initializes the shared kube config and each typed
// clientset exactly once; any construction failure aborts the test.
func setupClients(t *testing.T) {
	var err error
	if kubeConfig == nil {
		if kubeConfig, err = getConfig(); err != nil {
			t.Fatalf("%#v", err)
		}
	}
	if kubeClient == nil {
		if kubeClient, err = kubeset.NewForConfig(kubeConfig); err != nil {
			t.Fatalf("%#v", err)
		}
	}
	if buildClient == nil {
		if buildClient, err = buildset.NewForConfig(kubeConfig); err != nil {
			t.Fatalf("%#v", err)
		}
	}
	if projectClient == nil {
		if projectClient, err = projectset.NewForConfig(kubeConfig); err != nil {
			t.Fatalf("%#v", err)
		}
	}
	if templateClient == nil {
		if templateClient, err = templateset.NewForConfig(kubeConfig); err != nil {
			t.Fatalf("%#v", err)
		}
	}
	if imageClient == nil {
		if imageClient, err = imageset.NewForConfig(kubeConfig); err != nil {
			t.Fatalf("%#v", err)
		}
	}
	if routeClient == nil {
		if routeClient, err = routeset.NewForConfig(kubeConfig); err != nil {
			t.Fatalf("%#v", err)
		}
	}
	if appClient == nil {
		if appClient, err = appset.NewForConfig(kubeConfig); err != nil {
			t.Fatalf("%#v", err)
		}
	}
}
|
// Work in progress
package optimga
//TODO type Roulette struct {
// Size int
//}
// Roulette selection: selects one parent, with the best individual having the
// greatest chance of being chosen. Underperforming individuals still have a
// small chance of being selected, to preserve diversity.
// TODO: local normalisation of the fitness for negative fitness values
//func (t *Roulette) Select(parents *Pop, offspring *Pop) {}
|
package main
import "fmt"
// main prints the decimal, binary, hexadecimal, and quoted UTF-8 forms of
// the code points 60 through 121.
func main() {
	for i := 60; i < 122; i++ {
		// the format string previously ended in "\nt", which printed a stray
		// 't' at the start of every following line
		fmt.Printf("decimal : %d\tbinary : %b\thexa : %x\t utf-8 : %q \n", i, i, i, i)
	}
}
|
package marshal
import (
"bytes"
"encoding/json"
)
// PureMarshal works like json.Marshal but leaves <, >, and & unescaped
// (SetEscapeHTML(false)).
func PureMarshal(t interface{}) ([]byte, error) {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetEscapeHTML(false)
	if err := enc.Encode(t); err != nil {
		return buf.Bytes(), err
	}
	// Encoder.Encode appends a trailing newline that json.Marshal does not
	// produce; strip it so PureMarshal is a drop-in Marshal replacement.
	return bytes.TrimSuffix(buf.Bytes(), []byte("\n")), nil
}
// PureMarshalIndent is PureMarshal followed by json.Indent using the given
// prefix and indent strings.
func PureMarshalIndent(t interface{}, prefix, indent string) ([]byte, error) {
	raw, err := PureMarshal(t)
	if err != nil {
		return nil, err
	}
	var out bytes.Buffer
	if err := json.Indent(&out, raw, prefix, indent); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}
|
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pipelinetotaskrun
import (
"fmt"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
)
// linkedTask is a singly-linked-list node used to chain PipelineTasks into
// their execution order.
type linkedTask struct {
	task *v1beta1.PipelineTask // the task at this position
	next *linkedTask           // the task that runs after this one, if any
}
// putTasksInOrder sorts tasks into execution order from their runAfter
// declarations. Only a single linear chain is supported: no fan-in (more
// than one runAfter entry), no fan-out (two tasks after the same task), and
// no parallel roots.
func putTasksInOrder(tasks []v1beta1.PipelineTask) ([]v1beta1.PipelineTask, error) {
	seen := map[string]*linkedTask{}
	var root *linkedTask
	// index every task by name so runAfter references can be linked
	for i := 0; i < len(tasks); i++ {
		seen[tasks[i].Name] = &linkedTask{task: &tasks[i]}
	}
	for _, task := range tasks {
		if len(task.RunAfter) > 1 {
			return nil, fmt.Errorf("fan in not yet supported but %s has more than one runAfter", task.Name)
		}
		l := seen[task.Name]
		if len(task.RunAfter) == 0 {
			// a task with no predecessor is the chain root; two roots mean parallelism
			if root != nil {
				return nil, fmt.Errorf("parallel tasks not yet supported by %s and %s are trying to run in parallel", task.Name, root.task.Name)
			}
			root = l
		} else {
			before, ok := seen[task.RunAfter[0]]
			if !ok {
				return nil, fmt.Errorf("task %s trying to run after task %s which is not present", task.Name, task.RunAfter[0])
			}
			// previously a second successor silently overwrote the first,
			// which only surfaced later as a confusing "not completely
			// connected" error; report the fan-out explicitly instead
			if before.next != nil {
				return nil, fmt.Errorf("parallel tasks not yet supported by %s and %s are trying to run in parallel", task.Name, before.next.task.Name)
			}
			before.next = l
		}
	}
	if root == nil {
		return nil, fmt.Errorf("invalid sequence, there was no starting task (probably a loop?)")
	}
	// walk the chain from the root to build the ordered result
	ordered := []v1beta1.PipelineTask{*root.task}
	for curr := root.next; curr != nil; curr = curr.next {
		ordered = append(ordered, *curr.task)
	}
	if len(ordered) < len(tasks) {
		return nil, fmt.Errorf("sequence was not completely connected, gap after %s", ordered[len(ordered)-1].Name)
	}
	return ordered, nil
}
|
package redisstream
import (
"net"
"strconv"
"strings"
"log"
"sort"
"time"
"github.com/pkg/errors"
_redis "github.com/go-redis/redis"
"openreplay/backend/pkg/queue/types"
)
// idsInfo accumulates the message IDs (and their millisecond timestamps)
// that were delivered but not yet acknowledged for one stream.
type idsInfo struct{
	id []string // redis stream message IDs, in delivery order
	ts []int64  // millisecond timestamp of each ID, parallel to id
}

// streamPendingIDsMap maps a stream name to its un-acked message info.
type streamPendingIDsMap map[string]*idsInfo
// Consumer reads messages for a consumer group from one or more redis
// streams and dispatches each one to messageHandler.
type Consumer struct {
	redis          *_redis.Client
	streams        []string // stream names followed by one XREADGROUP id (">") per stream
	group          string   // consumer-group name (also used as the consumer name)
	messageHandler types.MessageHandler
	idsPending     streamPendingIDsMap // un-acked IDs per stream; only filled when autoCommit is false
	lastTs         int64               // timestamp of the most recently handled message
	autoCommit     bool                // when true, each message is XACKed right after handling
}
// NewConsumer creates a redis-streams consumer: it ensures the consumer
// group exists on every stream (creating stream and group if needed) and
// prepares per-stream bookkeeping for pending (un-acked) message IDs.
// Acknowledgement is automatic by default.
func NewConsumer(group string, streams []string, messageHandler types.MessageHandler) *Consumer {
	redis := getRedisClient()
	for _, stream := range streams {
		// MKSTREAM creates the stream if missing; an already-existing group
		// is fine, any other error is fatal
		err := redis.XGroupCreateMkStream(stream, group, "0").Err()
		if err != nil && err.Error() != "BUSYGROUP Consumer Group name already exists" {
			log.Fatalln(err)
		}
	}
	idsPending := make(streamPendingIDsMap)
	streamsCount := len(streams)
	for i := 0; i < streamsCount; i++ {
		// XREADGROUP expects the stream names followed by one ID per stream,
		// so one ">" is appended for each original stream name.
		// ">" is for never-delivered messages.
		// Otherwise - never acknowledged only
		// TODO: understand why in case of "0" it eats 100% cpu
		streams = append(streams, ">")
		idsPending[streams[i]] = new(idsInfo)
	}
	return &Consumer{
		redis:          redis,
		messageHandler: messageHandler,
		streams:        streams,
		group:          group,
		autoCommit:     true,
		idsPending:     idsPending,
	}
}
// READ_COUNT is the maximum number of messages fetched per XReadGroup call.
const READ_COUNT = 10

// ConsumeNext blocks up to 200ms waiting for the next batch of messages,
// decodes each one (sessionID + value fields) and hands it to the message
// handler. A read timeout or an empty read is not an error. With auto-commit
// enabled every message is XAck'd immediately; otherwise its ID is queued for
// a later Commit/CommitBack.
func (c *Consumer) ConsumeNext() error {
	// MBTODO: read in go routine, send messages to channel
	res, err := c.redis.XReadGroup(&_redis.XReadGroupArgs{
		Group:    c.group,
		Consumer: c.group,
		Streams:  c.streams,
		Count:    int64(READ_COUNT),
		Block:    200 * time.Millisecond,
	}).Result()
	if err != nil {
		// A block timeout or an empty result set simply means "nothing yet".
		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
			return nil
		}
		if err == _redis.Nil {
			return nil
		}
		return err
	}
	for _, r := range res {
		for _, m := range r.Messages {
			sessionIDString, ok := m.Values["sessionID"].(string)
			if !ok {
				// Fixed copy-paste: this cast is for sessionID, not value.
				return errors.Errorf("Can not cast sessionID for messageID %v", m.ID)
			}
			sessionID, err := strconv.ParseUint(sessionIDString, 10, 64)
			if err != nil {
				// Bug fix: report the raw string; sessionID is zero on parse failure.
				return errors.Wrapf(err, "Can not parse sessionID '%v' for messageID %v", sessionIDString, m.ID)
			}
			valueString, ok := m.Values["value"].(string)
			if !ok {
				return errors.Errorf("Can not cast value for messageID %v", m.ID)
			}
			// Redis stream IDs have the form "<ms-timestamp>-<sequence>".
			idParts := strings.Split(m.ID, "-")
			if len(idParts) != 2 {
				// Robustness: previously a malformed ID caused an index panic.
				return errors.Errorf("Unexpected messageID format %v", m.ID)
			}
			ts, _ := strconv.ParseUint(idParts[0], 10, 64)
			idx, _ := strconv.ParseUint(idParts[1], 10, 64)
			if idx > 0x1FFF {
				return errors.New("Too many messages per ms in redis")
			}
			c.messageHandler(sessionID, []byte(valueString), &types.Meta{
				Topic:     r.Stream,
				Timestamp: int64(ts),
				// 13 bits of sequence => up to 8192 messages/ms; the shifted
				// ms timestamp keeps the ID unique for roughly 69 years.
				ID: ts<<13 | (idx & 0x1FFF),
			})
			if c.autoCommit {
				if err = c.redis.XAck(r.Stream, c.group, m.ID).Err(); err != nil {
					return errors.Wrapf(err, "Acknowledgment error for messageID %v", m.ID)
				}
			} else {
				c.lastTs = int64(ts)
				c.idsPending[r.Stream].id = append(c.idsPending[r.Stream].id, m.ID)
				c.idsPending[r.Stream].ts = append(c.idsPending[r.Stream].ts, int64(ts))
			}
		}
	}
	return nil
}
// Commit acknowledges every pending message on every stream and resets the
// pending lists. Streams with nothing pending are skipped.
func (c *Consumer) Commit() error {
	for stream, info := range c.idsPending {
		if len(info.id) == 0 {
			continue
		}
		if err := c.redis.XAck(stream, c.group, info.id...).Err(); err != nil {
			// Bug fix: Wrapf already appends err to the message; passing err
			// again as a %v argument duplicated it in the output.
			return errors.Wrapf(err, "Redisstreams: acknowledgment error on commit for stream %v", stream)
		}
		c.idsPending[stream].id = nil
		c.idsPending[stream].ts = nil
	}
	return nil
}
// CommitBack acknowledges only the pending messages whose timestamp is at
// least `gap` milliseconds older than the last consumed message, keeping the
// newer ones pending. No-op until at least one message has been consumed.
func (c *Consumer) CommitBack(gap int64) error {
	if c.lastTs == 0 {
		return nil
	}
	maxTs := c.lastTs - gap
	for stream, info := range c.idsPending {
		if len(info.id) == 0 {
			continue
		}
		// ts is appended in consumption order, so it is sorted; find the first
		// index strictly newer than maxTs — everything before it gets acked.
		maxI := sort.Search(len(info.ts), func(i int) bool {
			return info.ts[i] > maxTs
		})
		if maxI == 0 {
			// Robustness: avoid calling XAck with an empty ID list.
			continue
		}
		if err := c.redis.XAck(stream, c.group, info.id[:maxI]...).Err(); err != nil {
			// Bug fix: Wrapf already appends err; don't duplicate it via %v.
			return errors.Wrapf(err, "Redisstreams: acknowledgment error on commit for stream %v", stream)
		}
		c.idsPending[stream].id = info.id[maxI:]
		c.idsPending[stream].ts = info.ts[maxI:]
	}
	return nil
}
// DisableAutoCommit is currently a deliberate no-op: the disabling line is
// commented out, so auto-commit stays enabled regardless of this call.
// NOTE(review): confirm whether the manual Commit/CommitBack flow should be
// re-enabled by restoring the assignment below.
func (c *Consumer) DisableAutoCommit() {
	//c.autoCommit = false
}
// Close is a no-op: the consumer does not own the shared redis client, so
// there is nothing to release here.
func (c *Consumer) Close() {
	// noop
} |
/*
* Copyright 2019 Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package util
import (
"fmt"
"github.com/atomix/atomix-operator/pkg/apis/agent/v1alpha1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// newBenchmarkControllerLabels returns the selector labels attached to all
// benchmark coordinator resources (app, owning cluster, node type).
func newBenchmarkControllerLabels(benchmark *v1alpha1.AtomixBenchmark) map[string]string {
	return map[string]string{
		AppKey:     AtomixApp,
		ClusterKey: benchmark.Spec.Cluster,
		TypeKey:    BenchCoordinatorType,
	}
}
// getBenchmarkControllerResourceName derives the name of a coordinator-owned
// resource by suffixing the benchmark name with the given resource suffix.
func getBenchmarkControllerResourceName(benchmark *v1alpha1.AtomixBenchmark, resource string) string {
	return benchmark.Name + "-" + resource
}
// GetBenchmarkControllerServiceName returns the name of the coordinator's headless service.
func GetBenchmarkControllerServiceName(benchmark *v1alpha1.AtomixBenchmark) string {
	return getBenchmarkControllerResourceName(benchmark, ServiceSuffix)
}
// GetBenchmarkControllerInitConfigMapName returns the name of the coordinator's init-script ConfigMap.
func GetBenchmarkControllerInitConfigMapName(benchmark *v1alpha1.AtomixBenchmark) string {
	return getBenchmarkControllerResourceName(benchmark, InitSuffix)
}
// GetBenchmarkControllerSystemConfigMapName returns the name of the coordinator's Atomix config ConfigMap.
func GetBenchmarkControllerSystemConfigMapName(benchmark *v1alpha1.AtomixBenchmark) string {
	return getBenchmarkControllerResourceName(benchmark, ConfigSuffix)
}
// GetBenchmarkControllerPodName returns the name of the coordinator pod,
// which is simply the benchmark's own name.
func GetBenchmarkControllerPodName(benchmark *v1alpha1.AtomixBenchmark) string {
	return benchmark.Name
}
// NewBenchmarkControllerInitConfigMap builds the ConfigMap carrying the
// init-container script that generates the coordinator's Atomix properties.
func NewBenchmarkControllerInitConfigMap(benchmark *v1alpha1.AtomixBenchmark) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      GetBenchmarkControllerInitConfigMapName(benchmark),
			Namespace: benchmark.Namespace,
			Labels:    newBenchmarkControllerLabels(benchmark),
		},
		Data: map[string]string{
			"create_config.sh": newBenchmarkControllerInitConfigMapScript(benchmark),
		},
	}
}
// newBenchmarkControllerInitConfigMapScript returns a new script for generating
// an Atomix configuration. The script emits the node's identity, the DNS name
// of the cluster's management service, and the fixed intra-cluster port.
func newBenchmarkControllerInitConfigMapScript(benchmark *v1alpha1.AtomixBenchmark) string {
	// Fixed go vet "composites" warning: use keyed fields for NamespacedName
	// instead of relying on positional field order.
	managementService := getManagementServiceDnsName(types.NamespacedName{
		Namespace: benchmark.Namespace,
		Name:      benchmark.Spec.Cluster,
	})
	return fmt.Sprintf(`
#!/usr/bin/env bash
HOST=$(hostname -s)
function create_config() {
echo "atomix.service=%s"
echo "atomix.node.id=$HOST"
echo "atomix.node.host=%s"
echo "atomix.node.port=5679"
}
create_config`, managementService, GetBenchmarkControllerServiceName(benchmark))
}
// NewBenchmarkControllerSystemConfigMap builds the ConfigMap holding the
// coordinator's atomix.conf cluster configuration.
func NewBenchmarkControllerSystemConfigMap(benchmark *v1alpha1.AtomixBenchmark) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      GetBenchmarkControllerSystemConfigMapName(benchmark),
			Namespace: benchmark.Namespace,
			Labels:    newBenchmarkControllerLabels(benchmark),
		},
		Data: map[string]string{
			"atomix.conf": newBenchmarkControllerConfig(benchmark),
		},
	}
}
// newBenchmarkControllerConfig returns a new configuration string for a
// benchmark coordinator node. The benchmark parameter is currently unused but
// kept for signature consistency with the other config builders.
func newBenchmarkControllerConfig(benchmark *v1alpha1.AtomixBenchmark) string {
	// The template contains no verbs and received no arguments, so the
	// previous fmt.Sprintf call was a no-op; return the literal directly.
	return `
cluster {
node: ${atomix.node}
discovery {
type: dns
service: ${atomix.service},
}
}`
}
// NewBenchmarkControllerPod builds the coordinator pod: an init container that
// generates the Atomix properties plus the benchmark container itself, with
// the init-script, user-config and system-config volumes mounted.
func NewBenchmarkControllerPod(benchmark *v1alpha1.AtomixBenchmark) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      GetBenchmarkControllerPodName(benchmark),
			Namespace: benchmark.Namespace,
			Labels:    newBenchmarkControllerLabels(benchmark),
		},
		Spec: corev1.PodSpec{
			// The coordinator is a single node, hence replicas = 1 for init.
			InitContainers: newInitContainers(1),
			Containers:     newBenchmarkContainers(benchmark.Spec.Version, benchmark.Spec.Env, benchmark.Spec.Resources),
			Volumes: []corev1.Volume{
				newInitScriptsVolume(GetBenchmarkControllerInitConfigMapName(benchmark)),
				newUserConfigVolume(GetBenchmarkControllerSystemConfigMapName(benchmark)),
				newSystemConfigVolume(),
			},
		},
	}
}
// NewBenchmarkControllerService builds the coordinator's headless service
// (ClusterIP "None") exposing the API port 5678 and the intra-cluster node
// port 5679. Not-ready endpoints are published so peers can discover nodes
// before they pass readiness.
func NewBenchmarkControllerService(benchmark *v1alpha1.AtomixBenchmark) *corev1.Service {
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      GetBenchmarkControllerServiceName(benchmark),
			Namespace: benchmark.Namespace,
			Labels:    newBenchmarkControllerLabels(benchmark),
			Annotations: map[string]string{
				// Legacy annotation equivalent of PublishNotReadyAddresses.
				"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
			},
		},
		Spec: corev1.ServiceSpec{
			Ports: []corev1.ServicePort{
				{
					Name: benchmark.Name + "-api",
					Port: 5678,
				},
				{
					Name: benchmark.Name + "-node",
					Port: 5679,
				},
			},
			PublishNotReadyAddresses: true,
			ClusterIP:                "None",
			Selector:                 newBenchmarkControllerLabels(benchmark),
		},
	}
}
// newBenchmarkWorkerLabels returns the selector labels attached to all
// benchmark worker resources (app, owning cluster, node type).
func newBenchmarkWorkerLabels(benchmark *v1alpha1.AtomixBenchmark) map[string]string {
	return map[string]string{
		AppKey:     AtomixApp,
		ClusterKey: benchmark.Spec.Cluster,
		TypeKey:    BenchWorkerType,
	}
}
// getBenchmarkWorkerResourceName derives the name of a worker-owned resource
// by suffixing the benchmark name with the given resource suffix.
func getBenchmarkWorkerResourceName(benchmark *v1alpha1.AtomixBenchmark, resource string) string {
	return benchmark.Name + "-" + resource
}
// GetBenchmarkWorkerServiceName returns the name of the workers' headless service.
func GetBenchmarkWorkerServiceName(benchmark *v1alpha1.AtomixBenchmark) string {
	return getBenchmarkWorkerResourceName(benchmark, ServiceSuffix)
}
// GetBenchmarkWorkerInitConfigMapName returns the name of the workers' init-script ConfigMap.
func GetBenchmarkWorkerInitConfigMapName(benchmark *v1alpha1.AtomixBenchmark) string {
	return getBenchmarkWorkerResourceName(benchmark, InitSuffix)
}
// GetBenchmarkWorkerSystemConfigMapName returns the name of the workers' Atomix config ConfigMap.
func GetBenchmarkWorkerSystemConfigMapName(benchmark *v1alpha1.AtomixBenchmark) string {
	return getBenchmarkWorkerResourceName(benchmark, ConfigSuffix)
}
// GetBenchmarkWorkerStatefulSetName returns the name of the worker
// StatefulSet, which is simply the benchmark's own name.
func GetBenchmarkWorkerStatefulSetName(benchmark *v1alpha1.AtomixBenchmark) string {
	return benchmark.Name
}
// NewBenchmarkWorkerInitConfigMap builds the ConfigMap carrying the
// init-container script that generates each worker's Atomix properties.
func NewBenchmarkWorkerInitConfigMap(benchmark *v1alpha1.AtomixBenchmark) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      GetBenchmarkWorkerInitConfigMapName(benchmark),
			Namespace: benchmark.Namespace,
			Labels:    newBenchmarkWorkerLabels(benchmark),
		},
		Data: map[string]string{
			// Fixed go vet "composites" warning: keyed NamespacedName fields
			// instead of positional initialization.
			"create_config.sh": newInitConfigMapScript(types.NamespacedName{
				Namespace: benchmark.Namespace,
				Name:      benchmark.Name,
			}),
		},
	}
}
// NewBenchmarkWorkerSystemConfigMap builds the ConfigMap holding the workers'
// atomix.conf cluster configuration.
func NewBenchmarkWorkerSystemConfigMap(benchmark *v1alpha1.AtomixBenchmark) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      GetBenchmarkWorkerSystemConfigMapName(benchmark),
			Namespace: benchmark.Namespace,
			Labels:    newBenchmarkWorkerLabels(benchmark),
		},
		Data: map[string]string{
			"atomix.conf": newBenchmarkWorkerConfig(benchmark),
		},
	}
}
// newBenchmarkWorkerConfig returns a new configuration string for a benchmark
// worker node. The benchmark parameter is currently unused but kept for
// signature consistency with the other config builders.
func newBenchmarkWorkerConfig(benchmark *v1alpha1.AtomixBenchmark) string {
	// The template contains no verbs and received no arguments, so the
	// previous fmt.Sprintf call was a no-op; return the literal directly.
	return `
cluster {
node: ${atomix.node}
discovery {
type: dns
service: ${atomix.service},
}
}`
}
// NewBenchmarkWorkerStatefulSet builds the StatefulSet that runs
// benchmark.Spec.Workers worker pods in parallel, each with the same init
// container and volume layout as the coordinator pod.
func NewBenchmarkWorkerStatefulSet(benchmark *v1alpha1.AtomixBenchmark) *appsv1.StatefulSet {
	return &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      GetBenchmarkWorkerStatefulSetName(benchmark),
			Namespace: benchmark.Namespace,
			Labels:    newBenchmarkWorkerLabels(benchmark),
		},
		Spec: appsv1.StatefulSetSpec{
			ServiceName: GetBenchmarkWorkerServiceName(benchmark),
			Replicas:    &benchmark.Spec.Workers,
			Selector: &metav1.LabelSelector{
				MatchLabels: newBenchmarkWorkerLabels(benchmark),
			},
			// Start all workers at once rather than one-by-one.
			PodManagementPolicy: appsv1.ParallelPodManagement,
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: newBenchmarkWorkerLabels(benchmark),
				},
				Spec: corev1.PodSpec{
					InitContainers: newInitContainers(benchmark.Spec.Workers),
					Containers:     newBenchmarkContainers(benchmark.Spec.Version, benchmark.Spec.Env, benchmark.Spec.Resources),
					Volumes: []corev1.Volume{
						newInitScriptsVolume(GetBenchmarkWorkerInitConfigMapName(benchmark)),
						newUserConfigVolume(GetBenchmarkWorkerSystemConfigMapName(benchmark)),
						newSystemConfigVolume(),
					},
				},
			},
		},
	}
}
// NewBenchmarkWorkerService builds the workers' headless service (ClusterIP
// "None") exposing only the intra-cluster node port 5679; not-ready endpoints
// are published so peers can discover workers before readiness.
func NewBenchmarkWorkerService(benchmark *v1alpha1.AtomixBenchmark) *corev1.Service {
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      GetBenchmarkWorkerServiceName(benchmark),
			Namespace: benchmark.Namespace,
			Labels:    newBenchmarkWorkerLabels(benchmark),
			Annotations: map[string]string{
				// Legacy annotation equivalent of PublishNotReadyAddresses.
				"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
			},
		},
		Spec: corev1.ServiceSpec{
			Ports: []corev1.ServicePort{
				{
					Name: benchmark.Name + "-node",
					Port: 5679,
				},
			},
			PublishNotReadyAddresses: true,
			ClusterIP:                "None",
			Selector:                 newBenchmarkWorkerLabels(benchmark),
		},
	}
}
|
package stringutil
import "sync"
// StringSet is an alias for Set, kept for backward compatibility.
type StringSet = Set

// Set is a lazily-initialized collection of unique strings. The embedded
// sync.Once guards only the one-time creation of the underlying map;
// concurrent Add calls still race on the map itself.
type Set struct {
	sync.Once
	Set map[string]struct{}
}

// Add inserts every given value into the set; duplicates are ignored.
// Calling Add with no arguments is a no-op.
func (s *Set) Add(vs ...string) {
	if len(vs) == 0 {
		return
	}
	s.Do(func() {
		if s.Set == nil {
			s.Set = make(map[string]struct{}, len(vs))
		}
	})
	for _, value := range vs {
		s.Set[value] = struct{}{}
	}
}

// Strings returns the members of the set in unspecified order,
// or nil when the set is empty.
func (s *Set) Strings() []string {
	if len(s.Set) == 0 {
		return nil
	}
	members := make([]string, 0, len(s.Set))
	for member := range s.Set {
		members = append(members, member)
	}
	return members
}
|
package ui
import "image"
// Point is a absolute position
type Point image.Point
// In returns true if `p` is inside of `rect`
func (p *Point) In(rect image.Rectangle) bool {
return image.Point(*p).In(rect)
}
|
package factory
import (
"context"
"github.com/loft-sh/devspace/pkg/devspace/analyze"
"github.com/loft-sh/devspace/pkg/devspace/build"
"github.com/loft-sh/devspace/pkg/devspace/config/loader"
"github.com/loft-sh/devspace/pkg/devspace/config/localcache"
"github.com/loft-sh/devspace/pkg/devspace/config/versions/latest"
"github.com/loft-sh/devspace/pkg/devspace/configure"
devspacecontext "github.com/loft-sh/devspace/pkg/devspace/context"
"github.com/loft-sh/devspace/pkg/devspace/dependency"
"github.com/loft-sh/devspace/pkg/devspace/deploy"
"github.com/loft-sh/devspace/pkg/devspace/docker"
"github.com/loft-sh/devspace/pkg/devspace/helm"
"github.com/loft-sh/devspace/pkg/devspace/helm/types"
"github.com/loft-sh/devspace/pkg/devspace/kubectl"
"github.com/loft-sh/devspace/pkg/devspace/plugin"
"github.com/loft-sh/devspace/pkg/util/kubeconfig"
"github.com/loft-sh/devspace/pkg/util/log"
)
// Factory is the main interface for various client creations
type Factory interface {
	// NewConfigLoader creates a new config loader
	NewConfigLoader(configPath string) (loader.ConfigLoader, error)
	// NewConfigureManager creates a new configure manager
	NewConfigureManager(config *latest.Config, generated localcache.Cache, log log.Logger) configure.Manager
	// NewKubeDefaultClient creates a new kube client
	NewKubeDefaultClient() (kubectl.Client, error)
	// NewKubeClientFromContext creates a kube client for the given kube context and namespace
	NewKubeClientFromContext(context, namespace string) (kubectl.Client, error)
	// NewHelmClient creates a new helm client
	NewHelmClient(log log.Logger) (types.Client, error)
	// NewDependencyManager creates a new dependency manager
	NewDependencyManager(ctx devspacecontext.Context, configOptions *loader.ConfigOptions) dependency.Manager
	// NewDockerClient creates a new docker API client
	NewDockerClient(ctx context.Context, log log.Logger) (docker.Client, error)
	// NewDockerClientWithMinikube creates a docker client, optionally preferring the minikube docker daemon
	NewDockerClientWithMinikube(ctx context.Context, client kubectl.Client, preferMinikube bool, log log.Logger) (docker.Client, error)
	// NewBuildController creates a new build controller
	NewBuildController() build.Controller
	// NewDeployController creates a new deploy controller
	NewDeployController() deploy.Controller
	// NewAnalyzer creates a new analyzer
	NewAnalyzer(client kubectl.Client, log log.Logger) analyze.Analyzer
	// NewKubeConfigLoader creates a new kube config loader
	NewKubeConfigLoader() kubeconfig.Loader
	// NewPluginManager creates a new plugin manager
	NewPluginManager(log log.Logger) plugin.Interface
	// GetLog retrieves the log instance
	GetLog() log.Logger
}
// DefaultFactoryImpl is the default factory implementation; it is stateless,
// delegating every constructor to the corresponding package.
type DefaultFactoryImpl struct{}

// DefaultFactory returns the default factory implementation
func DefaultFactory() Factory {
	return &DefaultFactoryImpl{}
}
// NewPluginManager implements Factory by delegating to plugin.NewClient.
func (f *DefaultFactoryImpl) NewPluginManager(log log.Logger) plugin.Interface {
	return plugin.NewClient(log)
}
// NewAnalyzer implements Factory by delegating to analyze.NewAnalyzer.
func (f *DefaultFactoryImpl) NewAnalyzer(client kubectl.Client, log log.Logger) analyze.Analyzer {
	return analyze.NewAnalyzer(client, log)
}
// NewBuildController implements Factory by delegating to build.NewController.
func (f *DefaultFactoryImpl) NewBuildController() build.Controller {
	return build.NewController()
}
// NewDeployController implements Factory by delegating to deploy.NewController.
func (f *DefaultFactoryImpl) NewDeployController() deploy.Controller {
	return deploy.NewController()
}
// NewKubeConfigLoader implements Factory by delegating to kubeconfig.NewLoader.
func (f *DefaultFactoryImpl) NewKubeConfigLoader() kubeconfig.Loader {
	return kubeconfig.NewLoader()
}
// GetLog implements Factory by returning the shared logger instance.
func (f *DefaultFactoryImpl) GetLog() log.Logger {
	return log.GetInstance()
}
// NewDependencyManager implements Factory by delegating to dependency.NewManager.
func (f *DefaultFactoryImpl) NewDependencyManager(ctx devspacecontext.Context, configOptions *loader.ConfigOptions) dependency.Manager {
	return dependency.NewManager(ctx, configOptions)
}
// NewConfigLoader implements Factory by delegating to loader.NewConfigLoader.
func (f *DefaultFactoryImpl) NewConfigLoader(configPath string) (loader.ConfigLoader, error) {
	return loader.NewConfigLoader(configPath)
}
// NewConfigureManager implements Factory; the factory passes itself so the
// configure manager can create further clients through it.
func (f *DefaultFactoryImpl) NewConfigureManager(config *latest.Config, generated localcache.Cache, log log.Logger) configure.Manager {
	return configure.NewManager(f, config, generated, log)
}
// NewDockerClient implements Factory by delegating to docker.NewClient.
func (f *DefaultFactoryImpl) NewDockerClient(ctx context.Context, log log.Logger) (docker.Client, error) {
	return docker.NewClient(ctx, log)
}
// NewDockerClientWithMinikube implements Factory by delegating to
// docker.NewClientWithMinikube.
func (f *DefaultFactoryImpl) NewDockerClientWithMinikube(ctx context.Context, kubectlClient kubectl.Client, preferMinikube bool, log log.Logger) (docker.Client, error) {
	return docker.NewClientWithMinikube(ctx, kubectlClient, preferMinikube, log)
}
// NewKubeDefaultClient implements Factory by delegating to kubectl.NewDefaultClient.
func (f *DefaultFactoryImpl) NewKubeDefaultClient() (kubectl.Client, error) {
	return kubectl.NewDefaultClient()
}
// NewKubeClientFromContext implements Factory: it builds a kube client for the
// given kube context and namespace and propagates the resolved
// context/namespace to the plugin system so plugins target the same cluster.
func (f *DefaultFactoryImpl) NewKubeClientFromContext(context, namespace string) (kubectl.Client, error) {
	kubeLoader := f.NewKubeConfigLoader()
	// NOTE(review): the third argument (false) presumably disables switching
	// the active kube context — confirm against kubectl.NewClientFromContext.
	client, err := kubectl.NewClientFromContext(context, namespace, false, kubeLoader)
	if err != nil {
		return nil, err
	}
	plugin.SetPluginKubeContext(client.CurrentContext(), client.Namespace())
	return client, nil
}
// NewHelmClient implements Factory by delegating to helm.NewClient.
func (f *DefaultFactoryImpl) NewHelmClient(log log.Logger) (types.Client, error) {
	return helm.NewClient(log)
}
|
package garbagecollection
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
"github.com/hashicorp/go-multierror"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/gardener/test-infra/pkg/util/s3"
argov1 "github.com/argoproj/argo/v2/pkg/apis/workflow/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/gardener/test-infra/pkg/testmachinery"
)
// GCWorkflowArtifacts collects all outputs of a workflow by traversing through nodes and collecting output artifacts from the s3 storage.
// These artifacts are then deleted from the s3 storage.
// A transient deletion error triggers a requeue after 30 seconds.
func GCWorkflowArtifacts(log logr.Logger, s3Client s3.Client, wf *argov1.Workflow) (reconcile.Result, error) {
	// Without a configured s3 client there is nothing to clean up.
	if s3Client == nil {
		log.V(3).Info("skip garbage collection of artifacts")
		return reconcile.Result{}, nil
	}
	for _, node := range wf.Status.Nodes {
		if node.Outputs == nil {
			continue
		}
		for _, artifact := range node.Outputs.Artifacts {
			log.V(5).Info(fmt.Sprintf("Processing artifact %s", artifact.Name))
			if artifact.S3 != nil {
				// NOTE(review): the bucket argument is empty — presumably the
				// client is already bound to a bucket; confirm s3.Client semantics.
				err := s3Client.RemoveObject("", artifact.S3.Key)
				if err != nil {
					log.Error(err, "unable to delete object from object storage", "artifact", artifact.S3.Key)
					// do not retry deletion if the key does not exist in s3 anymore
					// maybe use const from aws lib -> need to change to aws lib
					if err.Error() != "The specified key does not exist." {
						return reconcile.Result{Requeue: true, RequeueAfter: 30 * time.Second}, err
					}
				}
				// NOTE(review): "object deleted" is also logged when the key was
				// already gone (treated as success above).
				log.V(5).Info("object deleted", "artifact", artifact.S3.Key)
			}
		}
	}
	return reconcile.Result{}, nil
}
// CleanWorkflowPods deletes all pods of a completed workflow.
// cleanup pods to remove workload from the api server and etcd.
// logs are still accessible through "archiveLogs" option in argo.
// Deletion failures are collected and returned as one combined error;
// a nil *multierror.Error safely yields nil from ErrorOrNil.
func CleanWorkflowPods(c client.Client, wf *argov1.Workflow) error {
	var result *multierror.Error
	// Only act when pod cleanup is enabled in the testmachinery configuration.
	if testmachinery.CleanWorkflowPods() {
		for nodeName, node := range wf.Status.Nodes {
			// Only pod-type nodes map to actual pods; the node name doubles as the pod name.
			if node.Type == argov1.NodeTypePod {
				if err := deletePod(c, testmachinery.GetNamespace(), nodeName); err != nil {
					result = multierror.Append(result, fmt.Errorf("unable delete pod %s: %s", nodeName, err.Error()))
				}
			}
		}
	}
	return result.ErrorOrNil()
}
// deletePod removes the pod identified by namespace/name via the controller
// runtime client. Only the object's metadata is required for deletion.
func deletePod(c client.Client, namespace, name string) error {
	target := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
	}
	return c.Delete(context.TODO(), &target)
}
|
package util
import (
"github.com/360EntSecGroup-Skylar/excelize"
"log"
"strconv"
)
// ReadExcel reads an excel file and returns a map keyed by name with the
// student number as value.
// Note: some people habitually leave the student number blank, so keying by
// number would break later processing — hence name is used as the key.
// NOTE(review): duplicate names overwrite earlier entries — confirm acceptable.
func ReadExcel(name string) map[string]string {
	f, err := excelize.OpenFile(name)
	if err != nil {
		log.Fatalf("打开excel文件失败:%s", err.Error())
	}
	userList := make(map[string]string)
	// Read cell values row by row from the worksheet until the name column is empty.
	var i = 1
	for {
		s := strconv.Itoa(i)
		number := f.GetCellValue("Sheet1", "A"+s)
		name := f.GetCellValue("Sheet1", "B"+s)
		// Stop at the first row without a name; the number is optional, so a
		// row may carry a name with no number.
		if name == "" {
			break
		}
		userList[name] = number
		i++
	}
	return userList
}
|
package graphql_endpoint
// Config is the top-level TOML configuration for the graphql endpoint.
type Config struct {
	Elastic elasticSearch `toml:"elastic"`
	Graphql graphqlServer `toml:"graphql"`
}

// elasticSearch holds the connection settings for the backing Elasticsearch.
type elasticSearch struct {
	Url string // Elasticsearch base URL
}

// graphqlServer holds the settings for the GraphQL HTTP server.
type graphqlServer struct {
	Port int // TCP port the GraphQL server listens on
}
|
/*
Copyright 2019 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/google/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/GoogleContainerTools/skaffold/v2/integration/skaffold"
"github.com/GoogleContainerTools/skaffold/v2/testutil"
)
const (
	// emptydir is the test project that exercises copying an empty directory;
	// the directory itself is created on demand by the tests.
	emptydir = "testdata/empty-dir"
)
// Note: `custom-buildx` is not included as it depends on having a
// `skaffold-builder` builder configured and a registry to push to.
// TODO: remove nolint once we've reenabled integration tests
//
//nolint:golint,unused
var tests = []struct {
	description string   // subtest name
	dir         string   // directory containing the skaffold project under test
	args        []string // extra arguments passed to `skaffold run`
	deployments []string // deployments expected to stabilize
	pods        []string // pods expected to become ready
	env         []string // extra environment variables for the skaffold process
	targetLog   string   // log line expected when tailing; empty skips the tail tests
}{
	{
		description: "copying-empty-directory",
		dir:         emptydir,
		pods:        []string{"empty-dir"},
		targetLog:   "Hello world!",
	},
	{
		description: "getting-started",
		dir:         "examples/getting-started",
		pods:        []string{"getting-started"},
		targetLog:   "Hello world!",
	},
	{
		description: "nodejs",
		dir:         "examples/nodejs",
		deployments: []string{"node"},
	},
	{
		description: "structure-tests",
		dir:         "examples/structure-tests",
		pods:        []string{"getting-started"},
	},
	{
		description: "custom-tests",
		dir:         "examples/custom-tests",
		pods:        []string{"custom-test"},
	},
	{
		description: "microservices",
		dir:         "examples/microservices",
		// See https://github.com/GoogleContainerTools/skaffold/issues/2372
		args:        []string{"--status-check=false"},
		deployments: []string{"leeroy-app", "leeroy-web"},
	},
	{
		description: "multi-config-microservices",
		dir:         "examples/multi-config-microservices",
		deployments: []string{"leeroy-app", "leeroy-web"},
	},
	{
		description: "remote-multi-config-microservices",
		dir:         "examples/remote-multi-config-microservices",
		deployments: []string{"leeroy-app", "leeroy-web"},
	},
	{
		description: "envTagger",
		dir:         "examples/tagging-with-environment-variables",
		pods:        []string{"getting-started"},
		env:         []string{"FOO=foo"},
	},
	{
		description: "bazel",
		dir:         "examples/bazel",
		pods:        []string{"bazel"},
	},
	{
		description: "jib",
		dir:         "testdata/jib",
		deployments: []string{"web"},
	},
	{
		description: "jib gradle",
		dir:         "examples/jib-gradle",
		deployments: []string{"web"},
	},
	{
		description: "profiles",
		dir:         "examples/profiles",
		args:        []string{"-p", "minikube-profile"},
		pods:        []string{"hello-service"},
	},
	{
		description: "multiple deployers",
		dir:         "testdata/deploy-multiple",
		pods:        []string{"deploy-kubectl", "deploy-kustomize"},
	},
	{
		description: "custom builder",
		dir:         "examples/custom",
		pods:        []string{"getting-started-custom"},
	},
	// TODO(#8811): Enable this test when issue is solve.
	// {
	// 	description: "buildpacks Go",
	// 	dir:         "examples/buildpacks",
	// 	deployments: []string{"web"},
	// },
	// TODO(#8811): Enable this test when issue is solve.
	// {
	// 	description: "buildpacks NodeJS",
	// 	dir:         "examples/buildpacks-node",
	// 	deployments: []string{"web"},
	// },
	// TODO(#8811): Enable this test when issue is solve.
	// {
	// 	description: "buildpacks Python",
	// 	dir:         "examples/buildpacks-python",
	// 	deployments: []string{"web"},
	// },
	// TODO(#8811): Enable this test when issue is solve.
	// {
	// 	description: "buildpacks Java",
	// 	dir:         "examples/buildpacks-java",
	// 	deployments: []string{"web"},
	// },
	{
		description: "kustomize",
		dir:         "examples/getting-started-kustomize",
		deployments: []string{"skaffold-kustomize-dev"},
		targetLog:   "Hello world!",
	},
	{
		description: "helm",
		dir:         "examples/helm-deployment",
		deployments: []string{"skaffold-helm"},
		targetLog:   "Hello world!",
	},
	{
		description: "multiple renderers mixed in",
		dir:         "examples/multiple-renderers",
		deployments: []string{"frontend", "backend", "go-guestbook-mongodb"},
	},
	{
		description: "multiple renderers mixed in",
		dir:         "examples/multiple-renderers",
		args:        []string{"-p", "mix-deploy"},
		deployments: []string{"frontend", "backend", "go-guestbook-mongodb"},
	},
}
// TestRun runs `skaffold run` for every example project, waits for the
// declared pods/deployments, then deletes the deployment again.
func TestRun(t *testing.T) {
	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			MarkIntegrationTest(t, CanRunWithoutGcp)
			ns, client := SetupNamespace(t)
			// Disable the artifact cache so every run actually builds.
			args := append(test.args, "--cache-artifacts=false")
			if test.dir == emptydir {
				// The empty directory cannot be checked into git; create it on the fly.
				err := os.MkdirAll(filepath.Join(test.dir, "emptydir"), 0755)
				t.Log("Creating empty directory")
				if err != nil {
					t.Errorf("Error creating empty dir: %s", err)
				}
			}
			skaffold.Run(args...).InDir(test.dir).InNs(ns.Name).WithEnv(test.env).RunOrFail(t)
			client.WaitForPodsReady(test.pods...)
			client.WaitForDeploymentsToStabilize(test.deployments...)
			skaffold.Delete().InDir(test.dir).InNs(ns.Name).WithEnv(test.env).RunOrFail(t)
		})
	}
}
// TestRunTail runs `skaffold run --tail` for every example that declares a
// target log line and waits for that line to appear in the streamed output.
func TestRunTail(t *testing.T) {
	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			MarkIntegrationTest(t, CanRunWithoutGcp)
			// Only examples with an expected log line are meaningful to tail.
			if test.targetLog == "" {
				t.SkipNow()
			}
			if test.dir == emptydir {
				// The empty directory cannot be checked into git; create it on the fly.
				err := os.MkdirAll(filepath.Join(test.dir, "emptydir"), 0755)
				t.Log("Creating empty directory")
				if err != nil {
					t.Errorf("Error creating empty dir: %s", err)
				}
			}
			ns, _ := SetupNamespace(t)
			args := append(test.args, "--tail")
			out := skaffold.Run(args...).InDir(test.dir).InNs(ns.Name).WithEnv(test.env).RunLive(t)
			WaitForLogs(t, out, test.targetLog)
			skaffold.Delete().InDir(test.dir).InNs(ns.Name).WithEnv(test.env).RunOrFail(t)
		})
	}
}
// TestRunTailDefaultNamespace is like TestRunTail but deploys into the
// cluster's default namespace (no SetupNamespace scoping for the run).
func TestRunTailDefaultNamespace(t *testing.T) {
	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			MarkIntegrationTest(t, CanRunWithoutGcp)
			// Only examples with an expected log line are meaningful to tail.
			if test.targetLog == "" {
				t.SkipNow()
			}
			if test.dir == emptydir {
				// The empty directory cannot be checked into git; create it on the fly.
				err := os.MkdirAll(filepath.Join(test.dir, "emptydir"), 0755)
				t.Log("Creating empty directory")
				if err != nil {
					t.Errorf("Error creating empty dir: %s", err)
				}
			}
			args := append(test.args, "--tail")
			out := skaffold.Run(args...).InDir(test.dir).WithEnv(test.env).RunLive(t)
			// Deferred so the deployment is removed even if the log wait fails.
			defer skaffold.Delete().InDir(test.dir).WithEnv(test.env).RunOrFail(t)
			WaitForLogs(t, out, test.targetLog)
		})
	}
}
// TestRunTailTolerateFailuresUntilDeadline verifies that with
// --tolerate-failures-until-deadline a deployment that initially crashes is
// tolerated until it recovers: the test expects both the failure log and the
// eventual success log.
func TestRunTailTolerateFailuresUntilDeadline(t *testing.T) {
	var tsts = []struct {
		description  string
		dir          string
		args         []string
		deployments  []string
		env          []string
		targetLogOne string // log line emitted while the container is still failing
		targetLogTwo string // log line emitted after the container recovers
	}{
		{
			description:  "status-check-tolerance",
			dir:          "testdata/status-check-tolerance",
			args:         []string{"--tolerate-failures-until-deadline"},
			deployments:  []string{"tolerance-check"},
			targetLogOne: "container will exit with error",
			targetLogTwo: "Hello world!",
			// The test image keeps failing until this wall-clock time (10s out).
			env: []string{fmt.Sprintf("STOP_FAILING_TIME=%d", time.Now().Unix()+10)},
		},
	}
	for _, test := range tsts {
		t.Run(test.description, func(t *testing.T) {
			MarkIntegrationTest(t, CanRunWithoutGcp)
			if test.targetLogOne == "" || test.targetLogTwo == "" {
				t.SkipNow()
			}
			ns, _ := SetupNamespace(t)
			args := append(test.args, "--tail")
			out := skaffold.Run(args...).InDir(test.dir).InNs(ns.Name).WithEnv(test.env).RunLive(t)
			defer skaffold.Delete().InDir(test.dir).InNs(ns.Name).WithEnv(test.env).Run(t)
			WaitForLogs(t, out, test.targetLogOne)
			WaitForLogs(t, out, test.targetLogTwo)
		})
	}
}
// TestRunRenderOnly verifies that `skaffold run --render-only` writes the
// rendered manifests to the requested output file without deploying.
func TestRunRenderOnly(t *testing.T) {
	MarkIntegrationTest(t, CanRunWithoutGcp)
	testutil.Run(t, "write rendered manifest to provided filepath", func(tu *testutil.T) {
		tmpDir := tu.NewTempDir()
		renderPath := tmpDir.Path("output.yaml")
		test := struct {
			description string
			renderPath  string
			args        []string
			dir         string
			pods        []string
		}{
			args: []string{"--digest-source=local", "--render-only", "--render-output", renderPath},
			dir:  "examples/getting-started",
			pods: []string{"getting-started"},
		}
		skaffold.Run(test.args...).InDir(test.dir).RunOrFail(t)
		// The rendered manifest must name the expected resource.
		dat, err := os.ReadFile(renderPath)
		tu.CheckNoError(err)
		tu.CheckMatches("name: getting-started", string(dat))
	})
}
// TestRunGCPOnly runs examples that require GCP infrastructure (Cloud Build,
// Kaniko). Tests flagged skipCrossPlatform are skipped on the arm/hybrid GKE
// clusters because their builders only produce linux/amd64 images.
func TestRunGCPOnly(t *testing.T) {
	tests := []struct {
		description       string
		dir               string
		args              []string
		deployments       []string
		pods              []string
		skipCrossPlatform bool // true when the builder cannot produce non-amd64 images
	}{
		{
			description: "Google Cloud Build",
			dir:         "examples/google-cloud-build",
			pods:        []string{"getting-started"},
		},
		{
			description: "Google Cloud Build with sub folder",
			dir:         "testdata/gcb-sub-folder",
			pods:        []string{"getting-started"},
		},
		{
			description: "Google Cloud Build with location",
			dir:         "testdata/gcb-with-location",
			pods:        []string{"getting-started"},
		},
		{
			description: "Google Cloud Build with source artifact dependencies",
			dir:         "testdata/multi-config-pods",
			args:        []string{"-p", "gcb"},
			pods:        []string{"module1", "module2"},
		},
		{
			description: "Google Cloud Build with Kaniko",
			dir:         "examples/gcb-kaniko",
			pods:        []string{"getting-started-kaniko"},
			// building machines on gcb are linux/amd64, kaniko doesn't support cross-platform builds.
			skipCrossPlatform: true,
		},
		{
			description: "kaniko",
			dir:         "examples/kaniko",
			pods:        []string{"getting-started-kaniko"},
		},
		{
			description: "kaniko with target",
			dir:         "testdata/kaniko-target",
			pods:        []string{"getting-started-kaniko"},
		},
		{
			description: "kaniko with sub folder",
			dir:         "testdata/kaniko-sub-folder",
			pods:        []string{"getting-started-kaniko"},
		},
		{
			description: "kaniko microservices",
			dir:         "testdata/kaniko-microservices",
			deployments: []string{"leeroy-app", "leeroy-web"},
		},
		{
			description: "jib in googlecloudbuild",
			dir:         "testdata/jib",
			args:        []string{"-p", "gcb"},
			deployments: []string{"web"},
		},
		{
			description: "jib gradle in googlecloudbuild",
			dir:         "examples/jib-gradle",
			args:        []string{"-p", "gcb"},
			deployments: []string{"web"},
		},
		{
			description: "buildpacks on Cloud Build",
			dir:         "examples/buildpacks",
			args:        []string{"-p", "gcb"},
			deployments: []string{"web"},
			// buildpacks doesn't support arm64 builds.
			skipCrossPlatform: true,
		},
	}
	for _, test := range tests {
		// Skip amd64-only builders on the arm and hybrid clusters.
		if (os.Getenv("GKE_CLUSTER_NAME") == "integration-tests-arm" || os.Getenv("GKE_CLUSTER_NAME") == "integration-tests-hybrid") && test.skipCrossPlatform {
			continue
		}
		t.Run(test.description, func(t *testing.T) {
			MarkIntegrationTest(t, NeedsGcp)
			ns, client := SetupNamespace(t)
			// A unique tag avoids cache hits from previous runs.
			test.args = append(test.args, "--tag", uuid.New().String())
			skaffold.Run(test.args...).InDir(test.dir).InNs(ns.Name).RunOrFail(t)
			client.WaitForPodsReady(test.pods...)
			client.WaitForDeploymentsToStabilize(test.deployments...)
			skaffold.Delete().InDir(test.dir).InNs(ns.Name).RunOrFail(t)
		})
	}
}
// TestRunIdempotent verifies that two `skaffold run` invocations with the same
// custom run-id are idempotent: the first creates resources, the second finds
// everything cached and changes nothing.
func TestRunIdempotent(t *testing.T) {
	MarkIntegrationTest(t, CanRunWithoutGcp)
	ns, _ := SetupNamespace(t)
	// The first `skaffold run` creates resources (deployment.apps/leeroy-web, service/leeroy-app, deployment.apps/leeroy-app)
	out := skaffold.Run("-l", "skaffold.dev/run-id=notunique").InDir("examples/microservices").InNs(ns.Name).RunOrFailOutput(t)
	firstOut := string(out)
	if strings.Count(firstOut, "created") == 0 {
		t.Errorf("resources should have been created: %s", firstOut)
	}
	// Because we use the same custom `run-id`, the second `skaffold run` is idempotent:
	// + It has nothing to rebuild
	// + It leaves all resources unchanged
	out = skaffold.Run("-l", "skaffold.dev/run-id=notunique").InDir("examples/microservices").InNs(ns.Name).RunOrFailOutput(t)
	secondOut := string(out)
	if strings.Count(secondOut, "created") != 0 {
		t.Errorf("no resource should have been created: %s", secondOut)
	}
	if !strings.Contains(secondOut, "leeroy-web: Found") || !strings.Contains(secondOut, "leeroy-app: Found") {
		t.Errorf("both artifacts should be in cache: %s", secondOut)
	}
}
// TestRunUnstableChecked ensures `skaffold run` fails, and explains why,
// when the deployed workload never stabilizes.
func TestRunUnstableChecked(t *testing.T) {
	MarkIntegrationTest(t, CanRunWithoutGcp)
	ns, _ := SetupNamespace(t)

	output, err := skaffold.Run().InDir("testdata/unstable-deployment").InNs(ns.Name).RunWithCombinedOutput(t)
	switch {
	case err == nil:
		t.Errorf("expected to see an error since the deployment is not stable: %s", output)
	case !strings.Contains(string(output), "unstable-deployment failed"):
		t.Errorf("failed without saying the reason: %s", output)
	}
}
// TestRunUnstableNotChecked ensures `skaffold run` succeeds on an unstable
// deployment when the status check is explicitly disabled.
func TestRunUnstableNotChecked(t *testing.T) {
	MarkIntegrationTest(t, CanRunWithoutGcp)
	namespace, _ := SetupNamespace(t)
	run := skaffold.Run("--status-check=false").InDir("testdata/unstable-deployment").InNs(namespace.Name)
	run.RunOrFail(t)
}
// TestRunTailPod streams logs from the "pod" profile and waits for the
// expected counter lines to appear.
func TestRunTailPod(t *testing.T) {
	MarkIntegrationTest(t, CanRunWithoutGcp)
	ns, _ := SetupNamespace(t)
	logs := skaffold.Run("--tail", "-p", "pod").InDir("testdata/hello").InNs(ns.Name).RunLive(t)
	expected := []string{"Hello world! 0", "Hello world! 1", "Hello world! 2"}
	WaitForLogs(t, logs, expected...)
}
// TestRunTailDeployment streams logs from the "deployment" profile and
// waits for the expected counter lines to appear.
func TestRunTailDeployment(t *testing.T) {
	MarkIntegrationTest(t, CanRunWithoutGcp)
	ns, _ := SetupNamespace(t)
	logs := skaffold.Run("--tail", "-p", "deployment").InDir("testdata/hello").InNs(ns.Name).RunLive(t)
	expected := []string{"Hello world! 0", "Hello world! 1", "Hello world! 2"}
	WaitForLogs(t, logs, expected...)
}
// TestRunTest verifies that skaffold custom tests run (or are skipped)
// according to the --skip-tests flag, by watching for the marker file the
// custom test writes.
func TestRunTest(t *testing.T) {
	tests := []struct {
		description  string
		testDir      string
		testFile     string // marker file written by the custom test; removed after each case
		args         []string
		skipTests    bool // whether the run is expected to skip the custom test
		expectedText string
	}{
		{
			description:  "Run test",
			testDir:      "testdata/custom-test",
			testFile:     "testdata/custom-test/runtest",
			args:         []string{"--profile", "custom"},
			skipTests:    false,
			expectedText: "foo\n",
		},
		{
			description:  "Run test with skip test false",
			testDir:      "testdata/custom-test",
			testFile:     "testdata/custom-test/runtest",
			args:         []string{"--profile", "custom", "--skip-tests=false"},
			skipTests:    false,
			expectedText: "foo\n",
		},
		{
			description: "Run test with skip test true",
			testDir:     "testdata/custom-test",
			testFile:    "testdata/custom-test/runtest",
			args:        []string{"--profile", "custom", "--skip-tests=True"},
			skipTests:   true,
		},
	}
	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			MarkIntegrationTest(t, CanRunWithoutGcp)
			defer os.Remove(test.testFile)
			// Run skaffold build first to fail quickly on a build failure
			skaffold.Build().InDir(test.testDir).RunOrFail(t)
			ns, client := SetupNamespace(t)
			skaffold.Run(test.args...).InDir(test.testDir).InNs(ns.Name).RunBackground(t)
			client.WaitForPodsReady("custom-test-example")
			// Poll for up to a minute: when skipping, the marker file must
			// never appear; otherwise wait until it contains the expected text.
			err := wait.PollImmediate(time.Millisecond*500, 1*time.Minute, func() (bool, error) {
				_, e := os.Stat(test.testFile)
				if test.skipTests {
					if !os.IsNotExist(e) {
						t.Fatalf("Tests are not skipped.")
					}
					return true, nil
				}
				out, e := os.ReadFile(test.testFile)
				failNowIfError(t, e)
				return string(out) == test.expectedText, nil
			})
			failNowIfError(t, err)
		})
	}
}
// TestRunNoOptFlags tests to ensure that flags that don't require a value to be passed work when no value is passed
// TestRunNoOptFlags tests to ensure that flags that don't require a value
// to be passed work when no value is passed.
func TestRunNoOptFlags(t *testing.T) {
	tc := struct {
		description string
		dir         string
		targetLog   string
		pods        []string
		args        []string
	}{
		description: "getting-started",
		dir:         "testdata/getting-started",
		pods:        []string{"getting-started"},
		targetLog:   "Hello world!",
		args: []string{
			"--port-forward",
			"--status-check",
		},
	}
	t.Run(tc.description, func(t *testing.T) {
		MarkIntegrationTest(t, CanRunWithoutGcp)
		ns, _ := SetupNamespace(t)
		// Copy before appending so the fixture's args slice is not mutated.
		runArgs := append(append([]string{}, tc.args...), "--tail")
		logs := skaffold.Run(runArgs...).InDir(tc.dir).InNs(ns.Name).RunLive(t)
		defer skaffold.Delete().InDir(tc.dir).InNs(ns.Name).RunOrFail(t)
		WaitForLogs(t, logs, tc.targetLog)
	})
}
// TestRunKubectlDefaultNamespace verifies that a deploy resolves the
// default namespace from an environment variable.
func TestRunKubectlDefaultNamespace(t *testing.T) {
	tests := []struct {
		description       string
		namespaceToCreate string
		projectDir        string
		podName           string
		envVariable       string
	}{
		{
			description:       "run with defaultNamespace when namespace exists in cluster",
			namespaceToCreate: "namespace-test",
			projectDir:        "testdata/kubectl-with-default-namespace",
			podName:           "getting-started",
			envVariable:       "ENV1",
		},
	}
	for _, tc := range tests {
		testutil.Run(t, tc.description, func(t *testutil.T) {
			MarkIntegrationTest(t.T, CanRunWithoutGcp)
			ns, client := SetupNamespace(t.T)
			// Point the env var at the generated namespace so the config
			// under projectDir resolves it as the default namespace.
			t.Setenv(tc.envVariable, ns.Name)
			skaffold.Run().InDir(tc.projectDir).RunOrFail(t.T)
			t.CheckNotNil(client.GetPod(tc.podName))
		})
	}
}
|
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bits"
"gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
)
// Stat implements Linux syscall stat(2).
//
// stat(2) is fstatat(2) relative to the working directory with no flags
// (the final symlink is followed).
func Stat(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	var (
		path = args[0].Pointer()
		buf  = args[1].Pointer()
	)
	return 0, nil, fstatat(t, linux.AT_FDCWD, path, buf, 0 /* flags */)
}
// Lstat implements Linux syscall lstat(2).
//
// lstat(2) is fstatat(2) relative to the working directory with
// AT_SYMLINK_NOFOLLOW, so a final symlink is stat'd itself.
func Lstat(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	var (
		path = args[0].Pointer()
		buf  = args[1].Pointer()
	)
	return 0, nil, fstatat(t, linux.AT_FDCWD, path, buf, linux.AT_SYMLINK_NOFOLLOW)
}
// Newfstatat implements Linux syscall newfstatat, which backs fstatat(2).
// It forwards the caller-supplied dirfd and flags directly to fstatat.
func Newfstatat(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	var (
		dirfd = args[0].Int()
		path  = args[1].Pointer()
		buf   = args[2].Pointer()
		flags = args[3].Int()
	)
	return 0, nil, fstatat(t, dirfd, path, buf, flags)
}
// fstatat implements the path-resolution and stat logic shared by stat(2),
// lstat(2), and newfstatat(2).
//
// dirfd is the directory to resolve a relative path against (or AT_FDCWD);
// flags may contain AT_EMPTY_PATH and/or AT_SYMLINK_NOFOLLOW. The result is
// converted to a struct stat and copied out to statAddr.
func fstatat(t *kernel.Task, dirfd int32, pathAddr, statAddr hostarch.Addr, flags int32) error {
	// Reject any flag bits other than the two supported ones.
	if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 {
		return linuxerr.EINVAL
	}
	opts := vfs.StatOptions{
		Mask: linux.STATX_BASIC_STATS,
	}
	path, err := copyInPath(t, pathAddr)
	if err != nil {
		return err
	}
	root := t.FSContext().RootDirectory()
	defer root.DecRef(t)
	start := root
	if !path.Absolute {
		// An empty relative path is only permitted with AT_EMPTY_PATH.
		if !path.HasComponents() && flags&linux.AT_EMPTY_PATH == 0 {
			return linuxerr.ENOENT
		}
		if dirfd == linux.AT_FDCWD {
			start = t.FSContext().WorkingDirectory()
			defer start.DecRef(t)
		} else {
			dirfile := t.GetFile(dirfd)
			if dirfile == nil {
				return linuxerr.EBADF
			}
			if !path.HasComponents() {
				// Use FileDescription.Stat() instead of
				// VirtualFilesystem.StatAt() for fstatat(fd, ""), since the
				// former may be able to use opened file state to expedite the
				// Stat.
				statx, err := dirfile.Stat(t, opts)
				dirfile.DecRef(t)
				if err != nil {
					return err
				}
				var stat linux.Stat
				convertStatxToUserStat(t, &statx, &stat)
				_, err = stat.CopyOut(t, statAddr)
				return err
			}
			// Walk from dirfd's dentry; take our own reference on it so it
			// remains valid after the file description is released below.
			start = dirfile.VirtualDentry()
			start.IncRef()
			defer start.DecRef(t)
			dirfile.DecRef(t)
		}
	}
	statx, err := t.Kernel().VFS().StatAt(t, t.Credentials(), &vfs.PathOperation{
		Root:               root,
		Start:              start,
		Path:               path,
		FollowFinalSymlink: flags&linux.AT_SYMLINK_NOFOLLOW == 0,
	}, &opts)
	if err != nil {
		return err
	}
	var stat linux.Stat
	convertStatxToUserStat(t, &statx, &stat)
	_, err = stat.CopyOut(t, statAddr)
	return err
}
// timespecFromStatxTimestamp converts a statx timestamp to the legacy
// struct timespec representation used by stat(2) results.
func timespecFromStatxTimestamp(ts linux.StatxTimestamp) linux.Timespec {
	var out linux.Timespec
	out.Sec = ts.Sec
	out.Nsec = int64(ts.Nsec)
	return out
}
// Fstat implements Linux syscall fstat(2): stat the open file description
// referenced by fd and copy the result out to the user buffer.
func Fstat(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := args[0].Int()
	bufAddr := args[1].Pointer()

	f := t.GetFile(fd)
	if f == nil {
		// fd does not refer to an open file description.
		return 0, nil, linuxerr.EBADF
	}
	defer f.DecRef(t)

	sx, err := f.Stat(t, vfs.StatOptions{Mask: linux.STATX_BASIC_STATS})
	if err != nil {
		return 0, nil, err
	}
	var st linux.Stat
	convertStatxToUserStat(t, &sx, &st)
	_, err = st.CopyOut(t, bufAddr)
	return 0, nil, err
}
// Statx implements Linux syscall statx(2).
//
// Like fstatat, it resolves the path relative to dirfd (honoring
// AT_EMPTY_PATH and AT_SYMLINK_NOFOLLOW) but lets the caller choose the
// field mask and sync type, and copies out a struct statx.
func Statx(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	dirfd := args[0].Int()
	pathAddr := args[1].Pointer()
	flags := args[2].Int()
	mask := args[3].Uint()
	statxAddr := args[4].Pointer()
	// TODO(b/270247637): gVisor does not yet support automount, so
	// AT_NO_AUTOMOUNT flag is a no-op.
	flags &= ^linux.AT_NO_AUTOMOUNT
	if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW|linux.AT_STATX_SYNC_TYPE) != 0 {
		return 0, nil, linuxerr.EINVAL
	}
	// Make sure that only one sync type option is set.
	syncType := uint32(flags & linux.AT_STATX_SYNC_TYPE)
	if syncType != 0 && !bits.IsPowerOfTwo32(syncType) {
		return 0, nil, linuxerr.EINVAL
	}
	// Reserved mask bits must not be requested.
	if mask&linux.STATX__RESERVED != 0 {
		return 0, nil, linuxerr.EINVAL
	}
	opts := vfs.StatOptions{
		Mask: mask,
		Sync: uint32(flags & linux.AT_STATX_SYNC_TYPE),
	}
	path, err := copyInPath(t, pathAddr)
	if err != nil {
		return 0, nil, err
	}
	root := t.FSContext().RootDirectory()
	defer root.DecRef(t)
	start := root
	if !path.Absolute {
		// An empty relative path is only permitted with AT_EMPTY_PATH.
		if !path.HasComponents() && flags&linux.AT_EMPTY_PATH == 0 {
			return 0, nil, linuxerr.ENOENT
		}
		if dirfd == linux.AT_FDCWD {
			start = t.FSContext().WorkingDirectory()
			defer start.DecRef(t)
		} else {
			dirfile := t.GetFile(dirfd)
			if dirfile == nil {
				return 0, nil, linuxerr.EBADF
			}
			if !path.HasComponents() {
				// Use FileDescription.Stat() instead of
				// VirtualFilesystem.StatAt() for statx(fd, ""), since the
				// former may be able to use opened file state to expedite the
				// Stat.
				statx, err := dirfile.Stat(t, opts)
				dirfile.DecRef(t)
				if err != nil {
					return 0, nil, err
				}
				userifyStatx(t, &statx)
				_, err = statx.CopyOut(t, statxAddr)
				return 0, nil, err
			}
			// Walk from dirfd's dentry; take our own reference on it so it
			// remains valid after the file description is released below.
			start = dirfile.VirtualDentry()
			start.IncRef()
			defer start.DecRef(t)
			dirfile.DecRef(t)
		}
	}
	statx, err := t.Kernel().VFS().StatAt(t, t.Credentials(), &vfs.PathOperation{
		Root:               root,
		Start:              start,
		Path:               path,
		FollowFinalSymlink: flags&linux.AT_SYMLINK_NOFOLLOW == 0,
	}, &opts)
	if err != nil {
		return 0, nil, err
	}
	userifyStatx(t, &statx)
	_, err = statx.CopyOut(t, statxAddr)
	return 0, nil, err
}
// userifyStatx translates the kernel UID/GID owners in statx into the
// values visible in the calling task's user namespace, substituting the
// overflow IDs when the owner is unmapped.
func userifyStatx(t *kernel.Task, statx *linux.Statx) {
	userns := t.UserNamespace()
	statx.UID = uint32(auth.KUID(statx.UID).In(userns).OrOverflow())
	statx.GID = uint32(auth.KGID(statx.GID).In(userns).OrOverflow())
}
// Statfs implements Linux syscall statfs(2): stat the filesystem containing
// the named path and copy the result out to the user buffer.
func Statfs(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	pathAddr := args[0].Pointer()
	bufAddr := args[1].Pointer()

	p, err := copyInPath(t, pathAddr)
	if err != nil {
		return 0, nil, err
	}
	// Resolve relative to the working directory, following a final symlink;
	// an empty path is not allowed for statfs(2).
	tpop, err := getTaskPathOperation(t, linux.AT_FDCWD, p, disallowEmptyPath, followFinalSymlink)
	if err != nil {
		return 0, nil, err
	}
	defer tpop.Release(t)

	fsStat, err := t.Kernel().VFS().StatFSAt(t, t.Credentials(), &tpop.pop)
	if err != nil {
		return 0, nil, err
	}
	_, err = fsStat.CopyOut(t, bufAddr)
	return 0, nil, err
}
// Fstatfs implements Linux syscall fstatfs(2): stat the filesystem of the
// file referenced by fd and copy the result out to the user buffer.
func Fstatfs(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := args[0].Int()
	bufAddr := args[1].Pointer()

	// An empty path with allowEmptyPath resolves to the file behind fd.
	tpop, err := getTaskPathOperation(t, fd, fspath.Path{}, allowEmptyPath, nofollowFinalSymlink)
	if err != nil {
		return 0, nil, err
	}
	defer tpop.Release(t)

	fsStat, err := t.Kernel().VFS().StatFSAt(t, t.Credentials(), &tpop.pop)
	if err != nil {
		return 0, nil, err
	}
	_, err = fsStat.CopyOut(t, bufAddr)
	return 0, nil, err
}
|
//
// Copyright (c) SAS Institute Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Simple, incomplete red-black tree implementation meant only to rebuild the
// directory tree of a CDF file.
package redblack
// New returns an empty Tree that orders its nodes with the given
// comparison function.
func New(less LessFunc) *Tree {
	return &Tree{Less: less}
}
// Insert adds item to the tree, rebalancing as needed, and increments the
// item count. Items for which Less is false in both directions (equal)
// descend to the left.
func (t *Tree) Insert(item interface{}) {
	node := &Node{Item: item, Less: t.Less}
	t.Root = t.Root.insert(node)
	t.Count++
}
// Nodes returns every node in the tree via an iterative pre-order
// traversal; it returns nil for an empty tree.
func (t *Tree) Nodes() []*Node {
	if t.Root == nil {
		return nil
	}
	out := make([]*Node, 0, t.Count)
	// Explicit stack instead of recursion.
	pending := []*Node{t.Root}
	for len(pending) > 0 {
		top := len(pending) - 1
		n := pending[top]
		pending = pending[:top]
		out = append(out, n)
		if left := n.Children[0]; left != nil {
			pending = append(pending, left)
		}
		if right := n.Children[1]; right != nil {
			pending = append(pending, right)
		}
	}
	return out
}
// LessFunc reports whether item i orders before item j.
type LessFunc func(i, j interface{}) bool
// Tree is a simple red-black binary search tree ordered by Less.
type Tree struct {
	Root  *Node    // root node; nil when the tree is empty
	Less  LessFunc // ordering function copied into each inserted node
	Count uint     // number of items inserted so far
}
// Node is a single red-black tree node.
type Node struct {
	Item     interface{} // stored value
	Less     LessFunc    // ordering function, shared with the owning Tree
	Red      bool        // node color; false means black
	Children [2]*Node    // left (0) and right (1) subtrees
}
// isRed reports whether n is a red node; a nil node counts as black.
func (n *Node) isRed() bool {
	return n != nil && n.Red
}
// rotate promotes n's (1-dir) child to the root of this subtree, making n
// its dir child, and returns the new root. n is recolored red and the new
// root black.
func (n *Node) rotate(dir int) *Node {
	pivot := n.Children[1-dir]
	n.Children[1-dir], pivot.Children[dir] = pivot.Children[dir], n
	n.Red, pivot.Red = true, false
	return pivot
}
// insert adds a into the subtree rooted at n and returns the (possibly
// new) subtree root after red-black rebalancing.
func (n *Node) insert(a *Node) *Node {
	if n == nil {
		// Empty position: the new node becomes the subtree.
		return a
	}
	// Descend right when n.Item < a.Item, otherwise left.
	dir := 0
	if n.Less(n.Item, a.Item) {
		dir = 1
	}
	n.Children[dir] = n.Children[dir].insert(a)
	if !n.Children[dir].isRed() {
		// No red child on the insertion side: no violation to fix.
		return n
	} else if n.Children[1-dir].isRed() {
		// Both children red: flip colors (push redness up to n).
		n.Red = true
		n.Children[0].Red = false
		n.Children[1].Red = false
		return n
	} else if n.Children[dir].Children[dir].isRed() {
		// Red child with red outer grandchild: single rotation.
		return n.rotate(1 - dir)
	} else if n.Children[dir].Children[1-dir].isRed() {
		// Red child with red inner grandchild: double rotation.
		n.Children[dir] = n.Children[dir].rotate(dir)
		return n.rotate(1 - dir)
	} else {
		return n
	}
}
|
package main
import (
"fmt"
"sort"
)
/*
Given an integer array with all positive numbers and no duplicates, find the number of possible combinations that add up to a positive integer target.
Example:
nums = [1, 2, 3]
target = 4
The possible combination ways are:
(1, 1, 1, 1)
(1, 1, 2)
(1, 2, 1)
(1, 3)
(2, 1, 1)
(2, 2)
(3, 1)
Note that different sequences are counted as different combinations.
Therefore the output is 7.
Follow up:
What if negative numbers are allowed in the given array?
How does it change the problem?
What limitation we need to add to the question to allow negative numbers?
Credits:
Special thanks to @pbrother for adding this problem and creating all test cases.
dp[i]=sum(dp[i-x],x in nums)
*/
// combinationSum4 counts the ordered sequences drawn from nums (with
// repetition) whose elements sum to target, via bottom-up DP:
// dp[i] = sum over n in nums of dp[i-n].
func combinationSum4(nums []int, target int) int {
	if len(nums) == 0 {
		return 0
	}
	dp := make([]int, target+1)
	sort.Ints(nums)
	dp[0] = 1 // one way to reach sum 0: the empty sequence
	for sum := 1; sum <= target; sum++ {
		for _, v := range nums {
			if v <= sum {
				dp[sum] += dp[sum-v]
			}
		}
	}
	fmt.Println(dp) // debug print kept to preserve observable behavior
	return dp[target]
}
// combinationSum41 counts target-summing sequences by delegating to the
// recursive backtracking helper solve.
func combinationSum41(nums []int, target int) int {
	var count int
	path := []int{}
	solve(nums, 0, target, &count, &path)
	return count
}
// solve recursively explores sequences built from nums starting at index j,
// incrementing *num and printing *ret each time the remaining target hits
// zero.
//
// NOTE(review): each loop iteration recurses twice — once keeping start
// index j (reuse nums[i]) and once with j+1 — which looks like it
// over-explores; verify its counts against combinationSum4 before relying
// on this variant.
func solve(nums []int, j int, target int, num *int, ret *[]int) {
	if target == 0 {
		// Found one valid sequence: record and print it.
		*num += 1
		fmt.Println(*ret)
		return
	}
	if target < 0 {
		// Overshot the target; abandon this branch.
		return
	}
	for i := j; i < len(nums); i++ {
		*ret = append(*ret, nums[i])
		solve(nums, j, target-nums[i], num, ret)
		*ret = (*ret)[:len(*ret)-1]
		solve(nums, j+1, target, num, ret)
	}
}
// combinationSum4DP is a second dynamic-programming variant of
// combinationSum4 that iterates all dp indices from 0 and skips sorting.
//
// Renamed from combinationSum4: this file previously declared
// combinationSum4 twice, which is a compile error in Go.
func combinationSum4DP(nums []int, target int) int {
	dp := make([]int, target+1)
	dp[0] = 1 // the empty sequence reaches sum 0
	for i := 0; i < len(dp); i++ {
		for j := 0; j < len(nums); j++ {
			if i >= nums[j] {
				dp[i] += dp[i-nums[j]]
			}
		}
	}
	return dp[target]
}
// combinationSum44 counts ordered sequences from nums summing to target;
// it is the same DP as combinationSum4, kept as a duplicate practice
// solution.
func combinationSum44(nums []int, target int) int {
	if len(nums) == 0 {
		return 0
	}
	ways := make([]int, target+1)
	sort.Ints(nums)
	ways[0] = 1 // empty sequence
	for total := 1; total <= target; total++ {
		for _, candidate := range nums {
			if total-candidate >= 0 {
				ways[total] += ways[total-candidate]
			}
		}
	}
	fmt.Println(ways) // debug print kept to preserve observable behavior
	return ways[target]
}
// combinationSum41BT counts ordered combinations by collecting every
// distinct sequence through the backtracking helper bt and returning how
// many were found.
//
// Renamed from combinationSum41: this file previously declared
// combinationSum41 twice, which is a compile error in Go.
func combinationSum41BT(nums []int, target int) int {
	ret := make([][]int, 0)
	res := make([]int, 0)
	bt(nums, target, 0, &res, &ret)
	fmt.Println(ret) // debug print of every collected sequence
	return len(ret)
}
// bt enumerates every ordered sequence of elements from nums whose sum is
// exactly target, appending a copy of each complete sequence to *ret.
// res holds the current partial sequence; sum is its running total.
func bt(nums []int, target int, sum int, res *[]int, ret *[][]int) {
	if sum > target {
		return // overshot; prune this branch
	}
	if sum == target {
		fmt.Println(res)
		// Snapshot the current path: res is mutated by the caller's loop.
		snapshot := make([]int, len(*res))
		copy(snapshot, *res)
		*ret = append(*ret, snapshot)
		return
	}
	for _, v := range nums {
		*res = append(*res, v)
		bt(nums, target, sum+v, res, ret)
		*res = (*res)[:len(*res)-1]
	}
}
// main runs both DP counters on the same input; they implement the same
// recurrence and should print the same count.
func main() {
	fmt.Println(combinationSum4([]int{4, 2, 1}, 32))
	fmt.Println(combinationSum44([]int{4, 2, 1}, 32))
}
|
package controller
import (
"context"
"employees/models"
"github.com/gin-gonic/gin"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"log"
"net/http"
"strconv"
)
var collection *mongo.Collection
var ctx context.Context
// EmployeeCollection wires the package-level handle to the "persons"
// collection of the given database and initializes the shared context.
// It must run before any of the handlers below, which use both globals.
func EmployeeCollection(c *mongo.Database) {
	collection = c.Collection("persons")
	ctx = context.TODO()
}
// GetAllPersonsFromDB responds with every person document in the
// collection.
func GetAllPersonsFromDB(c *gin.Context) {
	persons := []models.Person{}
	cursor, err := collection.Find(ctx, bson.M{})
	if err != nil {
		log.Printf("Error while getting all persons, Reason: %v\n", err)
		c.JSON(http.StatusInternalServerError, gin.H{
			"status":  http.StatusInternalServerError,
			"message": "Something went wrong",
		})
		return
	}
	// Close the cursor when done to release its server-side resources
	// (previously leaked).
	defer cursor.Close(ctx)
	for cursor.Next(ctx) {
		var person models.Person
		// Surface decode failures instead of silently appending zero values.
		if err := cursor.Decode(&person); err != nil {
			log.Printf("Error while decoding person, Reason: %v\n", err)
			c.JSON(http.StatusInternalServerError, gin.H{
				"status":  http.StatusInternalServerError,
				"message": "Something went wrong",
			})
			return
		}
		persons = append(persons, person)
	}
	// Report iteration errors (network/server failures mid-scan).
	if err := cursor.Err(); err != nil {
		log.Printf("Error while iterating persons, Reason: %v\n", err)
		c.JSON(http.StatusInternalServerError, gin.H{
			"status":  http.StatusInternalServerError,
			"message": "Something went wrong",
		})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"status":  http.StatusOK,
		"message": "All Persons",
		"data":    persons,
	})
}
// AddPersonInDB decodes a person from the request body and inserts it into
// the collection.
func AddPersonInDB(c *gin.Context) {
	var person models.Person
	// Previously the bind error was ignored, silently inserting a
	// zero-value person on malformed input. ShouldBindJSON lets us send
	// our own 400 response.
	if err := c.ShouldBindJSON(&person); err != nil {
		log.Printf("Error while binding person, Reason: %v\n", err)
		c.JSON(http.StatusBadRequest, gin.H{
			"status":  http.StatusBadRequest,
			"message": "Invalid request body",
		})
		return
	}
	newPerson := models.Person{
		Id:      person.Id,
		Name:    person.Name,
		Active:  person.Active,
		Address: person.Address,
	}
	_, err := collection.InsertOne(ctx, newPerson)
	if err != nil {
		log.Printf("Error while inserting new person into db, Reason: %v\n", err)
		c.JSON(http.StatusInternalServerError, gin.H{
			"status":  http.StatusInternalServerError,
			"message": "Something went wrong",
		})
		return
	}
	c.JSON(http.StatusCreated, gin.H{
		"status":  http.StatusCreated,
		"message": "Person added Successfully",
	})
}
// FindOnePersonInDB looks up a single person by the numeric :id path
// parameter.
func FindOnePersonInDB(c *gin.Context) {
	personId, convErr := strconv.Atoi(c.Param("id"))
	if convErr != nil {
		// A non-numeric id is a client error: 400, not 500 as before.
		log.Printf("Error, Reason: %v\n", convErr)
		c.JSON(http.StatusBadRequest, gin.H{
			"status":  http.StatusBadRequest,
			"message": "Can't convert parameter into integer",
		})
		return
	}
	person := models.Person{}
	err := collection.FindOne(ctx, bson.M{"id": personId}).Decode(&person)
	if err != nil {
		log.Printf("Error while getting a person, Reason: %v\n", err)
		c.JSON(http.StatusNotFound, gin.H{
			"status":  http.StatusNotFound,
			"message": "Person not found",
		})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"status":  http.StatusOK,
		"message": "Found the person",
		"data":    person,
	})
}
// UpdatePersonInDB replaces the mutable fields of the person identified by
// the numeric :id path parameter.
func UpdatePersonInDB(c *gin.Context) {
	personId, convErr := strconv.Atoi(c.Param("id"))
	if convErr != nil {
		// A non-numeric id is a client error: 400, not 500 as before.
		log.Printf("Error, Reason: %v\n", convErr)
		c.JSON(http.StatusBadRequest, gin.H{
			"status":  http.StatusBadRequest,
			"message": "Can't convert parameter into integer",
		})
		return
	}
	// Confirm the document exists before attempting the update.
	findErr := collection.FindOne(ctx, bson.M{"id": personId}).Err()
	if findErr != nil {
		log.Printf("Error while getting a person, Reason: %v\n", findErr)
		c.JSON(http.StatusNotFound, gin.H{
			"status":  http.StatusNotFound,
			"message": "Person not found",
		})
		return
	}
	var person models.Person
	// Previously the bind error was ignored, blanking the document's
	// fields on malformed input.
	if err := c.ShouldBindJSON(&person); err != nil {
		log.Printf("Error while binding person, Reason: %v\n", err)
		c.JSON(http.StatusBadRequest, gin.H{
			"status":  http.StatusBadRequest,
			"message": "Invalid request body",
		})
		return
	}
	update := bson.M{
		"$set": bson.M{
			"name":    person.Name,
			"active":  person.Active,
			"address": person.Address,
		},
	}
	_, err := collection.UpdateOne(ctx, bson.M{"id": personId}, update)
	if err != nil {
		log.Printf("Error, Reason: %v\n", err)
		c.JSON(http.StatusInternalServerError, gin.H{
			"status":  http.StatusInternalServerError,
			"message": "Something went wrong",
		})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"status":  http.StatusOK,
		"message": "Person Updated Successfully",
	})
}
// DeletePersonFromDB removes the person identified by the numeric :id path
// parameter.
func DeletePersonFromDB(c *gin.Context) {
	personId, convErr := strconv.Atoi(c.Param("id"))
	if convErr != nil {
		// A non-numeric id is a client error: 400, not 500 as before.
		log.Printf("Error, Reason: %v\n", convErr)
		c.JSON(http.StatusBadRequest, gin.H{
			"status":  http.StatusBadRequest,
			"message": "Can't convert parameter into integer",
		})
		return
	}
	result, err := collection.DeleteOne(ctx, bson.M{"id": personId})
	if err != nil {
		log.Printf("Error while deleting a person, Reason: %v\n", err)
		c.JSON(http.StatusInternalServerError, gin.H{
			"status":  http.StatusInternalServerError,
			"message": "Something went wrong",
		})
		return
	}
	// DeleteOne succeeds even when no document matched; previously this
	// reported success for nonexistent ids.
	if result.DeletedCount == 0 {
		c.JSON(http.StatusNotFound, gin.H{
			"status":  http.StatusNotFound,
			"message": "Person not found",
		})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"status":  http.StatusOK,
		"message": "Person deleted successfully",
	})
}
|
package main
import (
"context"
"grpc/payload"
"log"
"net"
"google.golang.org/grpc"
)
// errorHandel terminates the process with a fatal log entry when err is
// non-nil; it is a no-op for a nil error.
func errorHandel(msg string, err error) {
	if err == nil {
		return
	}
	log.Fatalf("[x] error %s : %v", msg, err)
}
// main starts a gRPC server on :9000 serving a stubbed sign-in handler
// that answers every request with fixed credentials.
func main() {
	lis, err := net.Listen("tcp", ":9000")
	errorHandel("can't listen tcp connection", err)
	grpcServer := grpc.NewServer()
	server := payload.SignInService{}
	// Stub handler: log the request, reply with hard-coded user id / JWT.
	server.Auth = func(ctx context.Context, in *payload.SignInRequest) (*payload.SignInResponse, error) {
		log.Printf(`incoming request auth from email: %s password %s`, in.Email, in.Password)
		return &payload.SignInResponse{
			UserId: "123",
			Jwt:    "456",
		}, nil
	}
	payload.RegisterSignInService(grpcServer, &server)
	log.Println("server start at :9000")
	// Serve blocks until the listener fails or the server is stopped.
	err = grpcServer.Serve(lis)
	errorHandel("can't serve server", err)
}
|
package model
import (
"time"
"code.gitea.io/sdk/gitea"
"github.com/google/go-github/v47/github"
"github.com/xanzy/go-gitlab"
)
// User is the normalized account model shared by the supported git hosting
// providers (Gitea, GitLab, GitHub).
type User struct {
	Common
	Login        string    `json:"login,omitempty"`         // login name
	AvatarURL    string    `json:"avatar_url,omitempty"`    // avatar image URL
	Name         string    `json:"name,omitempty"`          // display name
	Blog         string    `json:"blog,omitempty"`          // website link
	Email        string    `json:"email,omitempty"`         // email address
	Hireable     bool      `json:"hireable,omitempty"`      // "available for hire" flag (GitHub only)
	Bio          string    `json:"bio,omitempty"`           // short biography
	Token        string    `json:"-"`                       // auth token, never serialized
	TokenExpired time.Time `json:"token_expired,omitempty"` // token expiry time
	SuperAdmin   bool      `json:"super_admin,omitempty"`   // super administrator flag
}
// NewUserFromGitea maps a Gitea API user onto the internal User model,
// falling back to the login name when no full name is set.
func NewUserFromGitea(gu *gitea.User) User {
	u := User{
		Login:     gu.UserName,
		AvatarURL: gu.AvatarURL,
		Name:      gu.FullName,
		Blog:      gu.Website,
		Email:     gu.Email,
		Bio:       gu.Description,
	}
	u.ID = uint64(gu.ID)
	if u.Name == "" {
		u.Name = u.Login
	}
	return u
}
// NewUserFromGitlab maps a GitLab API user onto the internal User model,
// falling back to the login name when no display name is set.
func NewUserFromGitlab(gu *gitlab.User) User {
	u := User{
		Login:     gu.Username,
		AvatarURL: gu.AvatarURL,
		Name:      gu.Name,
		Blog:      gu.WebsiteURL,
		Email:     gu.Email,
		Bio:       gu.Bio,
	}
	u.ID = uint64(gu.ID)
	if u.Name == "" {
		u.Name = u.Login
	}
	return u
}
// NewUserFromGitHub maps a GitHub API user onto the internal User model,
// falling back to the login name when the display name is empty.
func NewUserFromGitHub(gu *github.User) User {
	u := User{
		Login:     gu.GetLogin(),
		AvatarURL: gu.GetAvatarURL(),
		Name:      gu.GetName(),
		Blog:      gu.GetBlog(),
		Email:     gu.GetEmail(),
		Hireable:  gu.GetHireable(),
		Bio:       gu.GetBio(),
	}
	u.ID = uint64(gu.GetID())
	// GitHub users may have no display name set.
	if u.Name == "" {
		u.Name = u.Login
	}
	return u
}
|
package client
import (
"bytes"
"encoding/json"
"fmt"
"github.com/commitdev/kafka-connect/config"
"github.com/commitdev/kafka-connect/pkg/utils"
"io/ioutil"
"log"
"net/http"
"net/url"
)
// KafkaConnectClient implements functions interacting with kafka connect
// configurations.
type KafkaConnectClient interface {
	// List returns the names of all deployed connectors.
	List() ([]string, error)
	// Create deploys a new connector from the given configuration.
	Create(*config.KafkaConnectorConfigWrapper) error
	// Get fetches the named connector's configuration.
	Get(name string) (*KafkaConnectGetResponse, error)
	// GetStatus fetches the named connector's runtime status.
	GetStatus(name string) (*KafkaConnectGetStatusResponse, error)
	// Update replaces the named connector's configuration.
	Update(name string, config *config.KafkaConnectorConfigInstance) error
	// Delete removes the named connector.
	Delete(name string) error
}
// kafkaConnectHTTPClient is the REST-backed implementation of
// KafkaConnectClient; endpoints are resolved against base.
type kafkaConnectHTTPClient struct {
	base   *url.URL         // base address of the Kafka Connect REST API
	client utils.HttpClient // HTTP client carrying auth credentials
}
// NewKafkaConnectClient creates a new kafka connect client for the REST API
// at address, authenticating with the given user and password.
func NewKafkaConnectClient(address string, user string, password string) (KafkaConnectClient, error) {
	baseURL, err := url.Parse(address)
	if err != nil {
		return nil, err
	}
	c := &kafkaConnectHTTPClient{
		base:   baseURL,
		client: utils.NewHttpClient(user, password),
	}
	return c, nil
}
// List retrieves the names of all connectors via GET /connectors.
func (c *kafkaConnectHTTPClient) List() ([]string, error) {
	u, _ := url.Parse("/connectors")
	req, _ := http.NewRequest("GET", c.base.ResolveReference(u).String(), nil)
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	names := make([]string, 0)
	decodeErr := json.NewDecoder(resp.Body).Decode(&names)
	return names, decodeErr
}
// Create deploys a new connector by POSTing its configuration to
// /connectors. Non-2xx responses are logged (preserving the original
// best-effort behavior); transport and encoding errors are returned.
func (c *kafkaConnectHTTPClient) Create(config *config.KafkaConnectorConfigWrapper) error {
	u, _ := url.Parse("/connectors")
	endpoint := c.base.ResolveReference(u).String()
	payload, err := json.Marshal(config)
	if err != nil {
		return err
	}
	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(payload))
	if err != nil {
		return err
	}
	resp, err := c.client.Do(req)
	if err != nil {
		// Previously resp.StatusCode was read before this check, causing a
		// nil-pointer panic on any transport error.
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 && resp.StatusCode != 201 {
		message, _ := ioutil.ReadAll(resp.Body)
		log.Printf("Status: %v Message: %s", resp.StatusCode, string(message))
	}
	return nil
}
// KafkaConnectGetResponse is the payload returned by GET /connectors/{name}.
type KafkaConnectGetResponse struct {
	Name   string      `json:"name"`
	Config interface{} `json:"config"`
	Tasks  []struct {
		Connector string `json:"connector"`
		Task      int    `json:"task"`
	} `json:"tasks"`
	Type string `json:"type"`
}
// NotFound is the error returned when the Connect API reports that a
// connector does not exist (HTTP 404).
type NotFound struct {
	msg string
}

// NewNotFoundError builds a NotFound error carrying msg.
func NewNotFoundError(msg string) *NotFound {
	return &NotFound{msg: msg}
}

// Error implements the error interface.
func (e *NotFound) Error() string {
	return e.msg
}
// Get fetches the named connector's configuration via
// GET /connectors/{name}. It returns a NotFound error on 404 and
// propagates JSON decode failures (previously swallowed: the decode error
// was assigned and then discarded).
func (c *kafkaConnectHTTPClient) Get(name string) (*KafkaConnectGetResponse, error) {
	u, _ := url.Parse(fmt.Sprintf("/connectors/%s", name))
	endpoint := c.base.ResolveReference(u).String()
	req, _ := http.NewRequest("GET", endpoint, nil)
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}
	// Close the body on every path, including the 404 return below
	// (previously leaked on 404).
	defer resp.Body.Close()
	if resp.StatusCode == 404 {
		return nil, NewNotFoundError("This config does not exist.")
	}
	result := KafkaConnectGetResponse{}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return nil, err
	}
	return &result, nil
}
// KafkaConnectGetStatusResponse is the payload returned by
// GET /connectors/{name}/status.
type KafkaConnectGetStatusResponse struct {
	Name      string `json:"name"`
	Connector struct {
		State    string `json:"state"`
		WorkerID string `json:"worker_id"`
	} `json:"connector"`
	Tasks []struct {
		ID       int    `json:"id"`
		State    string `json:"state"`
		WorkerID string `json:"worker_id"`
		Trace    string `json:"trace,omitempty"` // stack trace when a task has failed
	} `json:"tasks"`
}
// GetStatus fetches the named connector's runtime status via
// GET /connectors/{name}/status. It returns a NotFound error on 404 and
// propagates JSON decode failures (previously swallowed).
func (c *kafkaConnectHTTPClient) GetStatus(name string) (*KafkaConnectGetStatusResponse, error) {
	u, _ := url.Parse(fmt.Sprintf("/connectors/%s/status", name))
	endpoint := c.base.ResolveReference(u).String()
	req, _ := http.NewRequest("GET", endpoint, nil)
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}
	// Close the body on every path, including the 404 return below
	// (previously leaked on 404).
	defer resp.Body.Close()
	if resp.StatusCode == 404 {
		return nil, NewNotFoundError("This config does not exist.")
	}
	result := KafkaConnectGetStatusResponse{}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return nil, err
	}
	return &result, nil
}
// Update replaces the named connector's configuration via
// PUT /connectors/{name}/config. It returns a NotFound error on 404;
// other non-2xx statuses are treated as success, matching the original
// behavior.
func (c *kafkaConnectHTTPClient) Update(name string, config *config.KafkaConnectorConfigInstance) error {
	u, _ := url.Parse(fmt.Sprintf("/connectors/%s/config", name))
	endpoint := c.base.ResolveReference(u).String()
	payload, err := json.Marshal(config)
	if err != nil {
		// Previously ignored, which would PUT an empty body.
		return err
	}
	req, _ := http.NewRequest("PUT", endpoint, bytes.NewBuffer(payload))
	resp, err := c.client.Do(req)
	if err != nil {
		return err
	}
	// Close the body so the underlying connection can be reused
	// (previously leaked).
	defer resp.Body.Close()
	if resp.StatusCode == 404 {
		return NewNotFoundError("This config does not exist.")
	}
	return nil
}
// Delete removes the named connector via DELETE /connectors/{name}.
// It returns a NotFound error on 404.
func (c *kafkaConnectHTTPClient) Delete(name string) error {
	u, _ := url.Parse(fmt.Sprintf("/connectors/%s", name))
	endpoint := c.base.ResolveReference(u).String()
	req, _ := http.NewRequest("DELETE", endpoint, nil)
	resp, err := c.client.Do(req)
	if err != nil {
		return err
	}
	// Close the body so the underlying connection can be reused
	// (previously leaked).
	defer resp.Body.Close()
	if resp.StatusCode == 404 {
		return NewNotFoundError("This config does not exist.")
	}
	return nil
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package useractions
// AttributeTestScenario describes the test scenario that the user action is running in.
const AttributeTestScenario string = "TestScenario"

// AttributeFeature describes the feature that the user action is using.
const AttributeFeature string = "Feature"

// Attribute keys used to represent DUT environment.
const (
	AttributeDeviceMode    string = "DeviceMode"
	AttributeDeviceRegion  string = "DeviceRegion"
	AttributeKeyboardType  string = "KeyboardType"
	AttributeBoardName     string = "BoardName"
	AttributeIncognitoMode string = "IncognitoMode"
	AttributeUserMode      string = "UserMode"
	AttributeInputMethod   string = "InputMethod"
	AttributeInputField    string = "InputField"
	AttributeFloatVK       string = "FloatVK"
)

// Available attribute values of device mode.
const (
	DeviceModeClamshell string = "Clamshell"
	DeviceModeTablet    string = "Tablet"
	DeviceModeUnknown   string = "Unknown"
)

// Available attribute values of keyboard type.
const (
	KeyboardTypePhysicalKeyboard string = "Physical Keyboard"
	KeyboardTypeTabletVK         string = "Tablet Virtual Keyboard"
	KeyboardTypeA11yVK           string = "A11y Virtual Keyboard"
	KeyboardTypeUnknown          string = "Unknown"
)

// ActionTag is a string type to represent tag type of UserAction.
type ActionTag string

// Action tags to indicate interested products / teams.
const (
	ActionTagEssentialInputs ActionTag = "Essential Inputs"
	ActionTagARC             ActionTag = "ARC++"
	ActionTagOSSettings      ActionTag = "OS Settings"
	ActionTagIMESettings     ActionTag = "IME Settings"
	ActionTagIMEShelf        ActionTag = "IME Shelf"
)

// E14s feature definition: values recorded under AttributeFeature.
const (
	FeatureIMEManagement       string = "IME Management"
	FeatureIMESpecific         string = "IME Specific Feature"
	FeaturePKTyping            string = "PK Typing Input"
	FeatureDeadKeys            string = "Dead Keys"
	FeatureVKTyping            string = "VK Typing Input"
	FeatureVKAutoShift         string = "VK AutoShift"
	FeatureVoiceInput          string = "Voice Input"
	FeatureHandWriting         string = "Handwriting"
	FeatureFloatVK             string = "Float VK"
	FeatureGlideTyping         string = "Glide Typing"
	FeatureEmoji               string = "Emoji"
	FeatureEmojiPicker         string = "Emoji Picker"
	FeatureEmojiSuggestion     string = "Emoji Suggestion"
	FeatureGrammarCheck        string = "Grammar Check"
	FeatureMultiPaste          string = "Multi-Paste"
	FeatureMultiwordSuggestion string = "Multiword Suggestion"
	FeatureAutoCorrection      string = "Auto-Correction"
	FeatureAutoCapitalization  string = "Auto-Capitalization"
	FeatureLongpressDiacritics string = "Longpress Diacritics"
)
|
package cmd
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"github.com/puppetlabs/wash/cmd/internal/server"
cmdutil "github.com/puppetlabs/wash/cmd/util"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// Create an executable file at the given path that invokes the given wash subcommand.
func writeAlias(path, subcommand string) error {
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0750)
if err != nil {
return err
}
_, err = f.WriteString("#!/bin/sh\nexec wash " + subcommand + " \"$@\"")
f.Close()
return err
}
// runShell builds a temporary directory of wash alias executables,
// prepends it to PATH, and runs the user's login shell with mountpath as
// its working directory, returning the shell's exit code.
func runShell(cachedir, mountpath string) exitCode {
	// Create a temporary run space with wash and aliases. Add it to the PATH.
	runpath, err := ioutil.TempDir(cachedir, "run")
	if err != nil {
		cmdutil.ErrPrintf("Error creating temporary run location in %v: %v\n", cachedir, err)
		return exitCode{1}
	}
	defer os.RemoveAll(runpath)
	// Symlink the current wash binary into the run space so the aliases
	// resolve `wash` regardless of the user's PATH.
	washPath, err := os.Executable()
	if err != nil {
		cmdutil.ErrPrintf("Error finding wash executable: %v\n", err)
		return exitCode{1}
	}
	newWashPath := filepath.Join(runpath, "wash")
	if err := os.Symlink(washPath, newWashPath); err != nil {
		cmdutil.ErrPrintf("Error linking wash executable to %v: %v\n", newWashPath, err)
		return exitCode{1}
	}
	// Executable file can't override shell built-ins, so use wexec instead of exec.
	// List also isn't very feature complete so we don't override ls.
	// These are executables instead of aliases because putting alias declarations at the beginning
	// of stdin for the command doesn't work right.
	aliases := map[string]string{
		"wclear":   "clear",
		"wexec":    "exec",
		"find":     "find",
		"help":     "help",
		"whistory": "history",
		"list":     "list",
		"meta":     "meta",
		"tail":     "tail",
	}
	for name, subcommand := range aliases {
		if err := writeAlias(filepath.Join(runpath, name), subcommand); err != nil {
			cmdutil.ErrPrintf("Error creating alias %v for subcommand %v: %v\n", name, subcommand, err)
			return exitCode{1}
		}
	}
	// Prepend the run space so the aliases shadow same-named binaries.
	pathEnv := os.Getenv("PATH")
	if err := os.Setenv("PATH", runpath+string(os.PathListSeparator)+pathEnv); err != nil {
		cmdutil.ErrPrintf("Error adding wash executables to PATH: %v\n", err)
		return exitCode{1}
	}
	// Run the default system shell.
	sh := os.Getenv("SHELL")
	if sh == "" {
		sh = "/bin/sh"
	}
	comm := exec.Command(sh)
	comm.Stdin = os.Stdin
	comm.Stdout = os.Stdout
	comm.Stderr = os.Stderr
	comm.Dir = mountpath
	if runErr := comm.Run(); runErr != nil {
		// Propagate the shell's own exit code when it exited non-zero.
		if exitErr, ok := runErr.(*exec.ExitError); ok {
			return exitCode{exitErr.ExitCode()}
		}
		cmdutil.ErrPrintf("%v\n", runErr)
		return exitCode{1}
	}
	return exitCode{0}
}
// rootMain starts the wash server on a temporary mountpoint inside the
// user's cache directory, drops the user into their shell there via
// runShell, then stops the server and returns the shell's exit code.
func rootMain(cmd *cobra.Command, args []string) exitCode {
	// Configure logrus to emit simple text
	log.SetFormatter(&log.TextFormatter{DisableTimestamp: true})

	base, err := os.UserCacheDir()
	if err != nil {
		cmdutil.ErrPrintf("Unable to get user cache dir: %v\n", err)
		return exitCode{1}
	}
	cacheDir := filepath.Join(base, "wash")
	// ensure cache directory exists
	if err := os.MkdirAll(cacheDir, 0750); err != nil {
		cmdutil.ErrPrintf("Unable to create cache dir %v: %v\n", cacheDir, err)
		return exitCode{1}
	}

	mountDir, err := ioutil.TempDir(cacheDir, "mnt")
	if err != nil {
		cmdutil.ErrPrintf("Unable to create temporary mountpoint in %v: %v\n", cacheDir, err)
		return exitCode{1}
	}
	defer os.RemoveAll(mountDir)

	// TODO: instead of running a server in-process, can we start one in a separate process that can
	// be shared between multiple invocations of `wash`?
	srv := server.New(mountDir, serverOptsFromFlags())
	if err := srv.Start(); err != nil {
		cmdutil.ErrPrintf("Unable to start server: %v\n", err)
		return exitCode{1}
	}

	fmt.Println(`Welcome to Wash!
Wash includes several built-in commands: wclear, wexec, find, list, meta, tail. Try 'help'.
Commands run with wash can be seen via 'whistory', and logs for those commands with 'whistory <id>'.`)

	shellExit := runShell(cacheDir, mountDir)

	srv.Stop()
	fmt.Println("Goodbye!")
	return shellExit
}
|
package main
import "fmt"
// checkValidString reports whether s — composed of '(', ')' and '*' runes,
// where each '*' may stand for '(', ')' or the empty string — admits at
// least one interpretation as a balanced parenthesis sequence.
//
// It tracks the lowest and highest possible counts of currently-open
// parentheses while scanning left to right.
func checkValidString(s string) bool {
	lo, hi := 0, 0
	for _, r := range s {
		if r == '(' {
			lo++
			hi++
		} else if r == ')' {
			lo--
			hi--
		} else if r == '*' {
			// '*' can close, open, or vanish: widen the interval both ways.
			lo--
			hi++
		}
		if hi < 0 {
			// Even treating every '*' as '(' there are too many ')'.
			return false
		}
		if lo < 0 {
			// The open count can never actually be negative.
			lo = 0
		}
	}
	// Valid iff some interpretation ends with zero open parentheses.
	return lo <= 0 && 0 <= hi
}
func main() {
fmt.Println(checkValidString("))((((()*()*(())())(()))((*()(*((*(*()))()(())*()()))*)*()))()()(())()(()))()))))")
}
|
package pel_test
import (
"github.com/reiver/go-pel"
"image"
"math"
"math/rand"
"time"
"testing"
)
// TestRGBA_At_alpha255 checks that a fully-opaque pel.RGBA pixel, viewed as
// an image.Image, returns its own color (scaled to the 16-bit range used by
// color.Color.RGBA) at its coordinate and the zero color everywhere else.
func TestRGBA_At_alpha255(t *testing.T) {
	randomness := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))

	for testNumber := 0; testNumber < 10; testNumber++ {
		// Pick a random, possibly negative, coordinate for the pixel.
		var x, y int
		{
			x = randomness.Int()
			if 0 == randomness.Int()%2 {
				x = -x
			}
			y = randomness.Int()
			if 0 == randomness.Int()%2 {
				y = -y
			}
		}

		// Random color with full alpha.
		var r, b, g, a uint8
		{
			r = uint8(randomness.Intn(256))
			g = uint8(randomness.Intn(256))
			b = uint8(randomness.Intn(256))
			a = 255
		}

		var pixel pel.RGBA = pel.RGBA{
			X: x,
			Y: y,
			R: r,
			G: g,
			B: b,
			A: a,
		}
		var img image.Image = pixel

		// At the pixel's own coordinate, At must return the pixel's color
		// scaled from 8-bit to 16-bit channels.
		{
			eR := uint32(r) * (0xffff / 0xff)
			eG := uint32(g) * (0xffff / 0xff)
			eB := uint32(b) * (0xffff / 0xff)
			eA := uint32(a) * (0xffff / 0xff)
			aR, aG, aB, aA := img.At(x, y).RGBA()
			if eR != aR || eG != aG || eB != aB || eA != aA {
				t.Errorf("For test #%d, the actual color was not what was expected.", testNumber)
				t.Logf("(x,y)=(%d,%d)", x, y)
				t.Logf("rgba(%d,%d,%d,%d)", r, g, b, a)
				t.Logf("EXPECTED (r,g,b,a)=(%d,%d,%d,%d) ⤳ rgba(%d,%d,%d,%d)", eR, eG, eB, eA, ((eR * 0xff) / 0xffff), ((eG * 0xff) / 0xffff), ((eB * 0xff) / 0xffff), ((eA * 0xff) / 0xffff))
				t.Logf("ACTUAL (r,g,b,a)=(%d,%d,%d,%d) ⤳ rgba(%d,%d,%d,%d)", aR, aG, aB, aA, ((aR * 0xff) / 0xffff), ((aG * 0xff) / 0xffff), ((aB * 0xff) / 0xffff), ((aA * 0xff) / 0xffff))
				continue
			}
		}

		// Any other coordinate must be fully transparent black.
		for subTestNumber := 0; subTestNumber < 30; subTestNumber++ {
			var xx, yy int
			{
				xx = randomness.Int()
				if 0 == randomness.Int()%2 {
					xx = -xx
				}
				yy = randomness.Int()
				if 0 == randomness.Int()%2 {
					yy = -yy
				}
				// Make sure we don't accidentally probe the pixel itself.
				if x == xx && y == yy {
					xx--
					yy--
				}
			}

			var eR, eG, eB, eA uint32 = 0, 0, 0, 0
			aR, aG, aB, aA := img.At(xx, yy).RGBA()
			if eR != aR || eG != aG || eB != aB || eA != aA {
				// BUG FIX: previously these diagnostics reported the pixel's
				// own (x,y) and omitted the sub-test index, making failures
				// impossible to localize; report the probed (xx,yy) instead.
				t.Errorf("For test #%d, sub-test #%d, the actual color was not what was expected.", testNumber, subTestNumber)
				t.Logf("probed (xx,yy)=(%d,%d)", xx, yy)
				t.Logf("pixel (x,y)=(%d,%d) rgba(%d,%d,%d,%d)", x, y, r, g, b, a)
				t.Logf("EXPECTED (r,g,b,a)=(%d,%d,%d,%d) ⤳ rgba(%d,%d,%d,%d)", eR, eG, eB, eA, ((eR * 0xff) / 0xffff), ((eG * 0xff) / 0xffff), ((eB * 0xff) / 0xffff), ((eA * 0xff) / 0xffff))
				t.Logf("ACTUAL (r,g,b,a)=(%d,%d,%d,%d) ⤳ rgba(%d,%d,%d,%d)", aR, aG, aB, aA, ((aR * 0xff) / 0xffff), ((aG * 0xff) / 0xffff), ((aB * 0xff) / 0xffff), ((aA * 0xff) / 0xffff))
			}
		}
	}
}
// TestRGBA_At_alpha127 checks a half-transparent pel.RGBA pixel: the RGB
// channels are expected premultiplied by alpha (127/255) before scaling to
// 16 bits, while the reported alpha is full (0xffff) — presumably how pel
// represents already-premultiplied colors; confirm against pel's At docs.
func TestRGBA_At_alpha127(t *testing.T) {
	randomness := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))

	for testNumber := 0; testNumber < 10; testNumber++ {
		// Pick a random, possibly negative, coordinate for the pixel.
		var x, y int
		{
			x = randomness.Int()
			if 0 == randomness.Int()%2 {
				x = -x
			}
			y = randomness.Int()
			if 0 == randomness.Int()%2 {
				y = -y
			}
		}

		// Random color with half alpha.
		var r, b, g, a uint8
		{
			r = uint8(randomness.Intn(256))
			g = uint8(randomness.Intn(256))
			b = uint8(randomness.Intn(256))
			a = 127
		}

		var pixel pel.RGBA = pel.RGBA{
			X: x,
			Y: y,
			R: r,
			G: g,
			B: b,
			A: a,
		}
		var img image.Image = pixel

		// At the pixel's own coordinate, expect alpha-premultiplied RGB
		// scaled to 16 bits, with a fully-opaque reported alpha.
		{
			eR := uint32(math.Round((float64(r)*127.0)/255.0)) * (0xffff / 0xff)
			eG := uint32(math.Round((float64(g)*127.0)/255.0)) * (0xffff / 0xff)
			eB := uint32(math.Round((float64(b)*127.0)/255.0)) * (0xffff / 0xff)
			eA := uint32(0xffff)
			aR, aG, aB, aA := img.At(x, y).RGBA()
			if eR != aR || eG != aG || eB != aB || eA != aA {
				t.Errorf("For test #%d, the actual color was not what was expected.", testNumber)
				t.Logf("(x,y)=(%d,%d)", x, y)
				t.Logf("rgba(%d,%d,%d,%d)", r, g, b, a)
				t.Logf("EXPECTED (r,g,b,a)=(%d,%d,%d,%d) ⤳ rgba(%d,%d,%d,%d)", eR, eG, eB, eA, ((eR * 0xff) / 0xffff), ((eG * 0xff) / 0xffff), ((eB * 0xff) / 0xffff), ((eA * 0xff) / 0xffff))
				t.Logf("ACTUAL (r,g,b,a)=(%d,%d,%d,%d) ⤳ rgba(%d,%d,%d,%d)", aR, aG, aB, aA, ((aR * 0xff) / 0xffff), ((aG * 0xff) / 0xffff), ((aB * 0xff) / 0xffff), ((aA * 0xff) / 0xffff))
				continue
			}
		}

		// Any other coordinate must be fully transparent black.
		for subTestNumber := 0; subTestNumber < 30; subTestNumber++ {
			var xx, yy int
			{
				xx = randomness.Int()
				if 0 == randomness.Int()%2 {
					xx = -xx
				}
				yy = randomness.Int()
				if 0 == randomness.Int()%2 {
					yy = -yy
				}
				// Make sure we don't accidentally probe the pixel itself.
				if x == xx && y == yy {
					xx--
					yy--
				}
			}

			var eR, eG, eB, eA uint32 = 0, 0, 0, 0
			aR, aG, aB, aA := img.At(xx, yy).RGBA()
			if eR != aR || eG != aG || eB != aB || eA != aA {
				// BUG FIX: previously these diagnostics reported the pixel's
				// own (x,y) and omitted the sub-test index, making failures
				// impossible to localize; report the probed (xx,yy) instead.
				t.Errorf("For test #%d, sub-test #%d, the actual color was not what was expected.", testNumber, subTestNumber)
				t.Logf("probed (xx,yy)=(%d,%d)", xx, yy)
				t.Logf("pixel (x,y)=(%d,%d) rgba(%d,%d,%d,%d)", x, y, r, g, b, a)
				t.Logf("EXPECTED (r,g,b,a)=(%d,%d,%d,%d) ⤳ rgba(%d,%d,%d,%d)", eR, eG, eB, eA, ((eR * 0xff) / 0xffff), ((eG * 0xff) / 0xffff), ((eB * 0xff) / 0xffff), ((eA * 0xff) / 0xffff))
				t.Logf("ACTUAL (r,g,b,a)=(%d,%d,%d,%d) ⤳ rgba(%d,%d,%d,%d)", aR, aG, aB, aA, ((aR * 0xff) / 0xffff), ((aG * 0xff) / 0xffff), ((aB * 0xff) / 0xffff), ((aA * 0xff) / 0xffff))
			}
		}
	}
}
|
package main
import "fmt"
// main prints the numbers 0 through 14 twice: first with an ordinary for
// loop, then with an explicit label and goto building the equivalent loop.
func main() {
	for n := 0; n < 15; n++ {
		fmt.Printf("%d in for loop\n", n)
	}

	counter := 0
LOOP:
	if counter < 15 {
		fmt.Printf("%d in goto\n", counter)
		counter++
		goto LOOP
	}
}
|
package nebula
import (
"bytes"
"encoding/binary"
"errors"
"math"
"net"
"testing"
"time"
"github.com/rcrowley/go-metrics"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/firewall"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert"
)
// TestNewFirewall checks the initial state of a freshly constructed
// Firewall and, in particular, that the conntrack timer wheel is sized by
// the largest of the three timeouts regardless of argument order.
func TestNewFirewall(t *testing.T) {
	l := test.NewLogger()
	c := &cert.NebulaCertificate{}
	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c)
	conntrack := fw.Conntrack
	assert.NotNil(t, conntrack)
	assert.NotNil(t, conntrack.Conns)
	assert.NotNil(t, conntrack.TimerWheel)
	assert.NotNil(t, fw.InRules)
	assert.NotNil(t, fw.OutRules)
	assert.Equal(t, time.Second, fw.TCPTimeout)
	assert.Equal(t, time.Minute, fw.UDPTimeout)
	assert.Equal(t, time.Hour, fw.DefaultTimeout)
	assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
	assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)

	// Every permutation of (second, minute, hour) must yield the same wheel,
	// since sizing depends only on the maximum timeout.
	// BUG FIX: these assertions previously kept inspecting the first
	// firewall's conntrack (the local was never updated), so the permutation
	// checks were vacuous; refresh conntrack after each NewFirewall call.
	fw = NewFirewall(l, time.Second, time.Hour, time.Minute, c)
	conntrack = fw.Conntrack
	assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
	assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)

	fw = NewFirewall(l, time.Hour, time.Second, time.Minute, c)
	conntrack = fw.Conntrack
	assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
	assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)

	fw = NewFirewall(l, time.Hour, time.Minute, time.Second, c)
	conntrack = fw.Conntrack
	assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
	assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)

	fw = NewFirewall(l, time.Minute, time.Hour, time.Second, c)
	conntrack = fw.Conntrack
	assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
	assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)

	fw = NewFirewall(l, time.Minute, time.Second, time.Hour, c)
	conntrack = fw.Conntrack
	assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
	assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
}
// TestFirewall_AddRule exercises Firewall.AddRule across every selector kind
// (empty/any, groups, hosts, remote CIDR, local CIDR, ca_name, ca_sha), the
// "any" collapsing behavior, and the error paths for bad proto/port ranges.
func TestFirewall_AddRule(t *testing.T) {
	l := test.NewLogger()
	ob := &bytes.Buffer{}
	l.SetOutput(ob)

	c := &cert.NebulaCertificate{}
	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c)
	assert.NotNil(t, fw.InRules)
	assert.NotNil(t, fw.OutRules)

	_, ti, _ := net.ParseCIDR("1.2.3.4/32")

	assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", nil, nil, "", ""))
	// An empty rule is any
	assert.True(t, fw.InRules.TCP[1].Any.Any)
	assert.Empty(t, fw.InRules.TCP[1].Any.Groups)
	assert.Empty(t, fw.InRules.TCP[1].Any.Hosts)

	// A single group selector: rule is no longer "any" and records the group.
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", ""))
	assert.False(t, fw.InRules.UDP[1].Any.Any)
	assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0], "g1")
	assert.Empty(t, fw.InRules.UDP[1].Any.Hosts)

	// A single host selector.
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
	assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", nil, nil, "", ""))
	assert.False(t, fw.InRules.ICMP[1].Any.Any)
	assert.Empty(t, fw.InRules.ICMP[1].Any.Groups)
	assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1")

	// Remote CIDR selector on an outbound any-proto rule.
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, nil, "", ""))
	assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
	assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
	assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
	assert.NotNil(t, fw.OutRules.AnyProto[1].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))

	// Local CIDR selector.
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", nil, ti, "", ""))
	assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
	assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
	assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
	assert.NotNil(t, fw.OutRules.AnyProto[1].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP)))

	// ca_name and ca_sha selectors are recorded on the per-port rule itself.
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "ca-name", ""))
	assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name")

	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", "ca-sha"))
	assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha")

	// Set any and clear fields
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, ti, "", ""))
	assert.Equal(t, []string{"g1", "g2"}, fw.OutRules.AnyProto[0].Any.Groups[0])
	assert.Contains(t, fw.OutRules.AnyProto[0].Any.Hosts, "h1")
	assert.NotNil(t, fw.OutRules.AnyProto[0].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
	assert.NotNil(t, fw.OutRules.AnyProto[0].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP)))

	// run twice just to make sure
	//TODO: these ANY rules should clear the CA firewall portion
	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, nil, "", ""))
	assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
	assert.Empty(t, fw.OutRules.AnyProto[0].Any.Groups)
	assert.Empty(t, fw.OutRules.AnyProto[0].Any.Hosts)

	// A host of "any" alone also collapses the rule to "any".
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, nil, "", ""))
	assert.True(t, fw.OutRules.AnyProto[0].Any.Any)

	// A 0.0.0.0/0 cidr is treated the same as "any".
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
	_, anyIp, _ := net.ParseCIDR("0.0.0.0/0")
	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, nil, "", ""))
	assert.True(t, fw.OutRules.AnyProto[0].Any.Any)

	// Test error conditions: an unknown protocol and an inverted port range.
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
	assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", nil, nil, "", ""))
	assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", nil, nil, "", ""))
}
// TestFirewall_Drop walks the basic Drop decision paths: no matching rule,
// inbound allow, conntrack-driven outbound allow, remote-IP validation, and
// group matching gated by ca_sha / ca_name constraints.
func TestFirewall_Drop(t *testing.T) {
	l := test.NewLogger()
	ob := &bytes.Buffer{}
	l.SetOutput(ob)

	// A UDP packet between matching local/remote VPN IPs.
	p := firewall.Packet{
		LocalIP:    iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
		RemoteIP:   iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
		LocalPort:  10,
		RemotePort: 90,
		Protocol:   firewall.ProtoUDP,
		Fragment:   false,
	}

	ipNet := net.IPNet{
		IP:   net.IPv4(1, 2, 3, 4),
		Mask: net.IPMask{255, 255, 255, 0},
	}
	// Peer certificate carrying the group and issuer the rules below key on.
	c := cert.NebulaCertificate{
		Details: cert.NebulaCertificateDetails{
			Name:           "host1",
			Ips:            []*net.IPNet{&ipNet},
			Groups:         []string{"default-group"},
			InvertedGroups: map[string]struct{}{"default-group": {}},
			Issuer:         "signer-shasum",
		},
	}
	h := HostInfo{
		ConnectionState: &ConnectionState{
			peerCert: &c,
		},
		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
	}
	h.CreateRemoteCIDR(&c)

	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
	cp := cert.NewCAPool()

	// Drop outbound
	assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
	// Allow inbound
	resetConntrack(fw)
	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
	// Allow outbound because conntrack
	assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))

	// test remote mismatch
	oldRemote := p.RemoteIP
	p.RemoteIP = iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 10))
	assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrInvalidRemoteIP)
	p.RemoteIP = oldRemote

	// ensure signer doesn't get in the way of group checks
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum"))
	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum-bad"))
	assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)

	// test caSha doesn't drop on match
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum-bad"))
	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum"))
	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))

	// ensure ca name doesn't get in the way of group checks
	cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good", ""))
	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good-bad", ""))
	assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)

	// test caName doesn't drop on match
	cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good-bad", ""))
	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good", ""))
	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
}
// BenchmarkFirewallTable_match measures FirewallTable.match across its early
// rejection paths (wrong proto, wrong port) and each successful selector
// path (group, host name, remote IP, local IP), with and without a
// wildcard-port rule.
func BenchmarkFirewallTable_match(b *testing.B) {
	ft := FirewallTable{
		TCP: firewallPort{},
	}

	_, n, _ := net.ParseCIDR("172.1.1.1/32")
	_ = ft.TCP.addRule(10, 10, []string{"good-group"}, "good-host", n, n, "", "")
	_ = ft.TCP.addRule(10, 10, []string{"good-group2"}, "good-host", n, n, "", "")
	_ = ft.TCP.addRule(10, 10, []string{"good-group3"}, "good-host", n, n, "", "")
	_ = ft.TCP.addRule(10, 10, []string{"good-group4"}, "good-host", n, n, "", "")
	// NOTE(review): this element is a single comma-joined string, not two
	// separate groups — presumably deliberate to exercise multi-group rule
	// storage, but confirm it wasn't meant to be "good-group", "good-group1".
	_ = ft.TCP.addRule(10, 10, []string{"good-group, good-group1"}, "good-host", n, n, "", "")
	cp := cert.NewCAPool()

	// Protocol mismatch rejects before any selector work.
	b.Run("fail on proto", func(b *testing.B) {
		c := &cert.NebulaCertificate{}
		for n := 0; n < b.N; n++ {
			ft.match(firewall.Packet{Protocol: firewall.ProtoUDP}, true, c, cp)
		}
	})

	// Port mismatch rejects next.
	b.Run("fail on port", func(b *testing.B) {
		c := &cert.NebulaCertificate{}
		for n := 0; n < b.N; n++ {
			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 1}, true, c, cp)
		}
	})

	// Worst case: right proto/port but no selector matches.
	b.Run("fail all group, name, and cidr", func(b *testing.B) {
		_, ip, _ := net.ParseCIDR("9.254.254.254/32")
		c := &cert.NebulaCertificate{
			Details: cert.NebulaCertificateDetails{
				InvertedGroups: map[string]struct{}{"nope": {}},
				Name:           "nope",
				Ips:            []*net.IPNet{ip},
			},
		}
		for n := 0; n < b.N; n++ {
			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
		}
	})

	// Match on the certificate's group.
	b.Run("pass on group", func(b *testing.B) {
		c := &cert.NebulaCertificate{
			Details: cert.NebulaCertificateDetails{
				InvertedGroups: map[string]struct{}{"good-group": {}},
				Name:           "nope",
			},
		}
		for n := 0; n < b.N; n++ {
			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
		}
	})

	// Match on the certificate's host name.
	b.Run("pass on name", func(b *testing.B) {
		c := &cert.NebulaCertificate{
			Details: cert.NebulaCertificateDetails{
				InvertedGroups: map[string]struct{}{"nope": {}},
				Name:           "good-host",
			},
		}
		for n := 0; n < b.N; n++ {
			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
		}
	})

	// Match on the packet's remote IP against the rule CIDR.
	b.Run("pass on ip", func(b *testing.B) {
		ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
		c := &cert.NebulaCertificate{
			Details: cert.NebulaCertificateDetails{
				InvertedGroups: map[string]struct{}{"nope": {}},
				Name:           "good-host",
			},
		}
		for n := 0; n < b.N; n++ {
			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, RemoteIP: ip}, true, c, cp)
		}
	})

	// Match on the packet's local IP against the rule's local CIDR.
	b.Run("pass on local ip", func(b *testing.B) {
		ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
		c := &cert.NebulaCertificate{
			Details: cert.NebulaCertificateDetails{
				InvertedGroups: map[string]struct{}{"nope": {}},
				Name:           "good-host",
			},
		}
		for n := 0; n < b.N; n++ {
			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, LocalIP: ip}, true, c, cp)
		}
	})

	// Add a wildcard-port (0,0) rule and re-run the IP matches on a port
	// with no exact rule, forcing the any-port lookup path.
	_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, n, "", "")

	b.Run("pass on ip with any port", func(b *testing.B) {
		ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
		c := &cert.NebulaCertificate{
			Details: cert.NebulaCertificateDetails{
				InvertedGroups: map[string]struct{}{"nope": {}},
				Name:           "good-host",
			},
		}
		for n := 0; n < b.N; n++ {
			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp)
		}
	})

	b.Run("pass on local ip with any port", func(b *testing.B) {
		ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
		c := &cert.NebulaCertificate{
			Details: cert.NebulaCertificateDetails{
				InvertedGroups: map[string]struct{}{"nope": {}},
				Name:           "good-host",
			},
		}
		for n := 0; n < b.N; n++ {
			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: ip}, true, c, cp)
		}
	})
}
// TestFirewall_Drop2 ensures a rule listing multiple groups only matches a
// peer certificate that carries ALL of those groups.
func TestFirewall_Drop2(t *testing.T) {
	l := test.NewLogger()
	ob := &bytes.Buffer{}
	l.SetOutput(ob)

	p := firewall.Packet{
		LocalIP:    iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
		RemoteIP:   iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
		LocalPort:  10,
		RemotePort: 90,
		Protocol:   firewall.ProtoUDP,
		Fragment:   false,
	}

	ipNet := net.IPNet{
		IP:   net.IPv4(1, 2, 3, 4),
		Mask: net.IPMask{255, 255, 255, 0},
	}
	// Peer carrying both required groups.
	c := cert.NebulaCertificate{
		Details: cert.NebulaCertificateDetails{
			Name:           "host1",
			Ips:            []*net.IPNet{&ipNet},
			InvertedGroups: map[string]struct{}{"default-group": {}, "test-group": {}},
		},
	}
	h := HostInfo{
		ConnectionState: &ConnectionState{
			peerCert: &c,
		},
		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
	}
	h.CreateRemoteCIDR(&c)

	// Peer carrying only one of the required groups.
	c1 := cert.NebulaCertificate{
		Details: cert.NebulaCertificateDetails{
			Name:           "host1",
			Ips:            []*net.IPNet{&ipNet},
			InvertedGroups: map[string]struct{}{"default-group": {}, "test-group-not": {}},
		},
	}
	h1 := HostInfo{
		ConnectionState: &ConnectionState{
			peerCert: &c1,
		},
	}
	h1.CreateRemoteCIDR(&c1)

	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", nil, nil, "", ""))
	cp := cert.NewCAPool()

	// h1/c1 lacks the proper groups
	// BUG FIX: assert.Error treated ErrNoMatchingRule as a message argument
	// and only verified the error was non-nil; assert.Equal pins the exact
	// sentinel, matching the sibling Drop tests.
	assert.Equal(t, fw.Drop([]byte{}, p, true, &h1, cp, nil), ErrNoMatchingRule)
	// c has the proper groups
	resetConntrack(fw)
	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
}
// TestFirewall_Drop3 verifies that rules keyed on host name and on ca_sha
// are independent ways to match: a peer passes if EITHER rule matches, and
// is dropped only when neither does.
func TestFirewall_Drop3(t *testing.T) {
	l := test.NewLogger()
	ob := &bytes.Buffer{}
	l.SetOutput(ob)

	p := firewall.Packet{
		LocalIP:    iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
		RemoteIP:   iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
		LocalPort:  1,
		RemotePort: 1,
		Protocol:   firewall.ProtoUDP,
		Fragment:   false,
	}

	ipNet := net.IPNet{
		IP:   net.IPv4(1, 2, 3, 4),
		Mask: net.IPMask{255, 255, 255, 0},
	}
	// Certificate used only to construct the firewall itself.
	c := cert.NebulaCertificate{
		Details: cert.NebulaCertificateDetails{
			Name: "host-owner",
			Ips:  []*net.IPNet{&ipNet},
		},
	}

	// c1: name matches the host rule, issuer does not match the ca_sha rule.
	c1 := cert.NebulaCertificate{
		Details: cert.NebulaCertificateDetails{
			Name:   "host1",
			Ips:    []*net.IPNet{&ipNet},
			Issuer: "signer-sha-bad",
		},
	}
	h1 := HostInfo{
		ConnectionState: &ConnectionState{
			peerCert: &c1,
		},
		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
	}
	h1.CreateRemoteCIDR(&c1)

	// c2: issuer matches the ca_sha rule, name does not match the host rule.
	c2 := cert.NebulaCertificate{
		Details: cert.NebulaCertificateDetails{
			Name:   "host2",
			Ips:    []*net.IPNet{&ipNet},
			Issuer: "signer-sha",
		},
	}
	h2 := HostInfo{
		ConnectionState: &ConnectionState{
			peerCert: &c2,
		},
		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
	}
	h2.CreateRemoteCIDR(&c2)

	// c3: neither name nor issuer matches.
	c3 := cert.NebulaCertificate{
		Details: cert.NebulaCertificateDetails{
			Name:   "host3",
			Ips:    []*net.IPNet{&ipNet},
			Issuer: "signer-sha-bad",
		},
	}
	h3 := HostInfo{
		ConnectionState: &ConnectionState{
			peerCert: &c3,
		},
		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
	}
	h3.CreateRemoteCIDR(&c3)

	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", nil, nil, "", ""))
	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", nil, nil, "", "signer-sha"))
	cp := cert.NewCAPool()

	// c1 should pass because host match
	assert.NoError(t, fw.Drop([]byte{}, p, true, &h1, cp, nil))
	// c2 should pass because ca sha match
	resetConntrack(fw)
	assert.NoError(t, fw.Drop([]byte{}, p, true, &h2, cp, nil))
	// c3 should fail because no match
	resetConntrack(fw)
	assert.Equal(t, fw.Drop([]byte{}, p, true, &h3, cp, nil), ErrNoMatchingRule)
}
// TestFirewall_DropConntrackReload checks that existing conntrack entries
// carried over into a reloaded firewall (bumped rulesVersion) are re-checked
// against the new ruleset: they survive when the new rules still allow the
// flow and are dropped when they don't.
func TestFirewall_DropConntrackReload(t *testing.T) {
	l := test.NewLogger()
	ob := &bytes.Buffer{}
	l.SetOutput(ob)

	p := firewall.Packet{
		LocalIP:    iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
		RemoteIP:   iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
		LocalPort:  10,
		RemotePort: 90,
		Protocol:   firewall.ProtoUDP,
		Fragment:   false,
	}

	ipNet := net.IPNet{
		IP:   net.IPv4(1, 2, 3, 4),
		Mask: net.IPMask{255, 255, 255, 0},
	}
	c := cert.NebulaCertificate{
		Details: cert.NebulaCertificateDetails{
			Name:           "host1",
			Ips:            []*net.IPNet{&ipNet},
			Groups:         []string{"default-group"},
			InvertedGroups: map[string]struct{}{"default-group": {}},
			Issuer:         "signer-shasum",
		},
	}
	h := HostInfo{
		ConnectionState: &ConnectionState{
			peerCert: &c,
		},
		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
	}
	h.CreateRemoteCIDR(&c)

	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
	cp := cert.NewCAPool()

	// Drop outbound
	assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
	// Allow inbound
	resetConntrack(fw)
	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
	// Allow outbound because conntrack
	assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))

	// Simulate a reload: new firewall, old conntrack, bumped rules version.
	oldFw := fw
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", nil, nil, "", ""))
	fw.Conntrack = oldFw.Conntrack
	fw.rulesVersion = oldFw.rulesVersion + 1

	// Allow outbound because conntrack and new rules allow port 10
	assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))

	// Second reload whose ruleset no longer covers the tracked flow.
	oldFw = fw
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", nil, nil, "", ""))
	fw.Conntrack = oldFw.Conntrack
	fw.rulesVersion = oldFw.rulesVersion + 1

	// Drop outbound because conntrack doesn't match new ruleset
	assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
}
// BenchmarkLookup measures the cost of finding whether any group-set in a
// list is fully contained in a membership map, in a best case (the first
// set matches) and a worst case (only the last set matches).
func BenchmarkLookup(b *testing.B) {
	// ml scans the candidate sets in order and returns as soon as one set sg
	// has every element present in m.
	ml := func(m map[string]struct{}, a [][]string) {
		for n := 0; n < b.N; n++ {
			for _, sg := range a {
				found := false
				for _, g := range sg {
					if _, ok := m[g]; !ok {
						// One missing element disqualifies the whole set.
						found = false
						break
					}

					found = true
				}

				if found {
					return
				}
			}
		}
	}

	// Best case: the very first set is fully contained in the map.
	b.Run("array to map best", func(b *testing.B) {
		m := map[string]struct{}{
			"1ne": {},
			"2wo": {},
			"3hr": {},
			"4ou": {},
			"5iv": {},
			"6ix": {},
		}

		a := [][]string{
			{"1ne", "2wo", "3hr", "4ou", "5iv", "6ix"},
			{"one", "2wo", "3hr", "4ou", "5iv", "6ix"},
			{"one", "two", "3hr", "4ou", "5iv", "6ix"},
			{"one", "two", "thr", "4ou", "5iv", "6ix"},
			{"one", "two", "thr", "fou", "5iv", "6ix"},
			{"one", "two", "thr", "fou", "fiv", "6ix"},
			{"one", "two", "thr", "fou", "fiv", "six"},
		}

		for n := 0; n < b.N; n++ {
			ml(m, a)
		}
	})

	// Worst case: only the final set matches, so every prior set is scanned.
	b.Run("array to map worst", func(b *testing.B) {
		m := map[string]struct{}{
			"one": {},
			"two": {},
			"thr": {},
			"fou": {},
			"fiv": {},
			"six": {},
		}

		a := [][]string{
			{"1ne", "2wo", "3hr", "4ou", "5iv", "6ix"},
			{"one", "2wo", "3hr", "4ou", "5iv", "6ix"},
			{"one", "two", "3hr", "4ou", "5iv", "6ix"},
			{"one", "two", "thr", "4ou", "5iv", "6ix"},
			{"one", "two", "thr", "fou", "5iv", "6ix"},
			{"one", "two", "thr", "fou", "fiv", "6ix"},
			{"one", "two", "thr", "fou", "fiv", "six"},
		}

		for n := 0; n < b.N; n++ {
			ml(m, a)
		}
	})

	//TODO: only way array lookup in array will help is if both are sorted, then maybe it's faster
}
// Test_parsePort covers parsePort's error messages and parsed values:
// single ports, "start-end" ranges (whitespace tolerated), and the "any"
// keyword — which, like a range starting at 0, collapses to (0, 0).
func Test_parsePort(t *testing.T) {
	// Empty and whitespace-only inputs are not numbers.
	_, _, err := parsePort("")
	assert.EqualError(t, err, "was not a number; ``")

	_, _, err = parsePort("  ")
	assert.EqualError(t, err, "was not a number; `  `")

	// A dash with missing endpoints is a malformed range.
	_, _, err = parsePort("-")
	assert.EqualError(t, err, "appears to be a range but could not be parsed; `-`")

	_, _, err = parsePort(" - ")
	assert.EqualError(t, err, "appears to be a range but could not be parsed; ` - `")

	// Non-numeric endpoints report which side failed.
	_, _, err = parsePort("a-b")
	assert.EqualError(t, err, "beginning range was not a number; `a`")

	_, _, err = parsePort("1-b")
	assert.EqualError(t, err, "ending range was not a number; `b`")

	// Whitespace around range endpoints is accepted.
	s, e, err := parsePort(" 1 - 2    ")
	assert.Equal(t, int32(1), s)
	assert.Equal(t, int32(2), e)
	assert.Nil(t, err)

	// A range starting at 0 collapses to the any-port sentinel (0, 0).
	s, e, err = parsePort("0-1")
	assert.Equal(t, int32(0), s)
	assert.Equal(t, int32(0), e)
	assert.Nil(t, err)

	// A single port yields an equal start and end.
	s, e, err = parsePort("9919")
	assert.Equal(t, int32(9919), s)
	assert.Equal(t, int32(9919), e)
	assert.Nil(t, err)

	// "any" also yields the (0, 0) sentinel.
	s, e, err = parsePort("any")
	assert.Equal(t, int32(0), s)
	assert.Equal(t, int32(0), e)
	assert.Nil(t, err)
}
// TestNewFirewallFromConfig walks the validation errors produced while
// parsing firewall rules out of config: malformed rule lists, conflicting
// or missing selectors, and unparseable port/code/proto/cidr values.
func TestNewFirewallFromConfig(t *testing.T) {
	l := test.NewLogger()
	// Test a bad rule definition
	c := &cert.NebulaCertificate{}
	conf := config.NewC(l)
	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": "asdf"}
	_, err := NewFirewallFromConfig(l, c, conf)
	assert.EqualError(t, err, "firewall.outbound failed to parse, should be an array of rules")

	// Test both port and code
	conf = config.NewC(l)
	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "code": "2"}}}
	_, err = NewFirewallFromConfig(l, c, conf)
	assert.EqualError(t, err, "firewall.outbound rule #0; only one of port or code should be provided")

	// Test missing host, group, cidr, ca_name and ca_sha
	conf = config.NewC(l)
	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{}}}
	_, err = NewFirewallFromConfig(l, c, conf)
	assert.EqualError(t, err, "firewall.outbound rule #0; at least one of host, group, cidr, local_cidr, ca_name, or ca_sha must be provided")

	// Test code/port error
	conf = config.NewC(l)
	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "a", "host": "testh"}}}
	_, err = NewFirewallFromConfig(l, c, conf)
	assert.EqualError(t, err, "firewall.outbound rule #0; code was not a number; `a`")

	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "a", "host": "testh"}}}
	_, err = NewFirewallFromConfig(l, c, conf)
	assert.EqualError(t, err, "firewall.outbound rule #0; port was not a number; `a`")

	// Test proto error
	conf = config.NewC(l)
	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "host": "testh"}}}
	_, err = NewFirewallFromConfig(l, c, conf)
	assert.EqualError(t, err, "firewall.outbound rule #0; proto was not understood; ``")

	// Test cidr parse error
	conf = config.NewC(l)
	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "cidr": "testh", "proto": "any"}}}
	_, err = NewFirewallFromConfig(l, c, conf)
	assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; invalid CIDR address: testh")

	// Test local_cidr parse error
	conf = config.NewC(l)
	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "local_cidr": "testh", "proto": "any"}}}
	_, err = NewFirewallFromConfig(l, c, conf)
	assert.EqualError(t, err, "firewall.outbound rule #0; local_cidr did not parse; invalid CIDR address: testh")

	// Test both group and groups
	conf = config.NewC(l)
	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a", "groups": []string{"b", "c"}}}}
	_, err = NewFirewallFromConfig(l, c, conf)
	assert.EqualError(t, err, "firewall.inbound rule #0; only one of group or groups should be defined, both provided")
}
func TestAddFirewallRulesFromConfig(t *testing.T) {
l := test.NewLogger()
// Test adding tcp rule
conf := config.NewC(l)
mf := &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
// Test adding udp rule
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
// Test adding icmp rule
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
// Test adding any rule
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
// Test adding rule with cidr
cidr := &net.IPNet{IP: net.ParseIP("10.0.0.0").To4(), Mask: net.IPv4Mask(255, 0, 0, 0)}
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "cidr": cidr.String()}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: cidr, localIp: nil}, mf.lastCall)
// Test adding rule with local_cidr
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "local_cidr": cidr.String()}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: cidr}, mf.lastCall)
// Test adding rule with ca_sha
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: nil, caSha: "12312313123"}, mf.lastCall)
// Test adding rule with ca_name
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: nil, caName: "root01"}, mf.lastCall)
// Test single group
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil, localIp: nil}, mf.lastCall)
// Test single groups
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil, localIp: nil}, mf.lastCall)
// Test multiple AND groups
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: nil, localIp: nil}, mf.lastCall)
// Test Add error
conf = config.NewC(l)
mf = &mockFirewall{}
mf.nextCallReturn = errors.New("test error")
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
assert.EqualError(t, AddFirewallRulesFromConfig(l, true, conf, mf), "firewall.inbound rule #0; `test error`")
}
// TestTCPRTTTracking builds a raw packet buffer with maximum-size IP and TCP
// headers (IHL = 15 words = 60 bytes each, so the TCP header starts at byte
// 60) and drives setTCPRTTTracking/checkTCPRTT through ack-number edge cases,
// including sequence-number wraparound.
func TestTCPRTTTracking(t *testing.T) {
	b := make([]byte, 200)
	// Max ip IHL (60 bytes) and tcp IHL (60 bytes)
	b[0] = 15          // IP header-length field
	b[60+12] = 15 << 4 // TCP data-offset field (upper nibble)
	f := Firewall{
		metricTCPRTT: metrics.GetOrRegisterHistogram("nope", nil, metrics.NewExpDecaySample(1028, 0.015)),
	}
	// Set SEQ to 1 (bytes 64-67 hold the TCP sequence number)
	binary.BigEndian.PutUint32(b[60+4:60+8], 1)
	c := &conn{}
	setTCPRTTTracking(c, b)
	assert.Equal(t, uint32(1), c.Seq)
	// Bad ack - no ack flag (bytes 68-71 hold the TCP ack number)
	binary.BigEndian.PutUint32(b[60+8:60+12], 80)
	assert.False(t, f.checkTCPRTT(c, b))
	// Bad ack, number is too low
	binary.BigEndian.PutUint32(b[60+8:60+12], 0)
	b[60+13] = uint8(0x10) // set the ACK flag for this and all later checks
	assert.False(t, f.checkTCPRTT(c, b))
	// Good ack
	binary.BigEndian.PutUint32(b[60+8:60+12], 80)
	assert.True(t, f.checkTCPRTT(c, b))
	assert.Equal(t, uint32(0), c.Seq) // a successful check resets tracking
	// Set SEQ to 1
	binary.BigEndian.PutUint32(b[60+4:60+8], 1)
	c = &conn{}
	setTCPRTTTracking(c, b)
	assert.Equal(t, uint32(1), c.Seq)
	// Good acks
	binary.BigEndian.PutUint32(b[60+8:60+12], 81)
	assert.True(t, f.checkTCPRTT(c, b))
	assert.Equal(t, uint32(0), c.Seq)
	// Set SEQ to max uint32 - 20 (exercise wraparound territory)
	binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0)-20)
	c = &conn{}
	setTCPRTTTracking(c, b)
	assert.Equal(t, ^uint32(0)-20, c.Seq)
	// Good acks (the ack has wrapped past zero)
	binary.BigEndian.PutUint32(b[60+8:60+12], 81)
	assert.True(t, f.checkTCPRTT(c, b))
	assert.Equal(t, uint32(0), c.Seq)
	// Set SEQ to max uint32 / 2
	binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0)/2)
	c = &conn{}
	setTCPRTTTracking(c, b)
	assert.Equal(t, ^uint32(0)/2, c.Seq)
	// Below
	binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2-1)
	assert.False(t, f.checkTCPRTT(c, b))
	assert.Equal(t, ^uint32(0)/2, c.Seq)
	// Halfway below
	binary.BigEndian.PutUint32(b[60+8:60+12], uint32(0))
	assert.False(t, f.checkTCPRTT(c, b))
	assert.Equal(t, ^uint32(0)/2, c.Seq)
	// Halfway above is ok
	binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0))
	assert.True(t, f.checkTCPRTT(c, b))
	assert.Equal(t, uint32(0), c.Seq)
	// Set SEQ to max uint32
	binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0))
	c = &conn{}
	setTCPRTTTracking(c, b)
	assert.Equal(t, ^uint32(0), c.Seq)
	// Halfway + 1 above
	binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2+1)
	assert.False(t, f.checkTCPRTT(c, b))
	assert.Equal(t, ^uint32(0), c.Seq)
	// Halfway above
	binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2)
	assert.True(t, f.checkTCPRTT(c, b))
	assert.Equal(t, uint32(0), c.Seq)
}
// TestFirewall_convertRule covers convertRule's handling of the legacy
// "group" key: a one-element array is accepted with a warning, a multi-element
// array is an error, and a plain string passes through unchanged.
func TestFirewall_convertRule(t *testing.T) {
	l := test.NewLogger()
	ob := &bytes.Buffer{}
	l.SetOutput(ob) // capture log output so the warning can be asserted on
	// Ensure group array of 1 is converted and a warning is printed
	c := map[interface{}]interface{}{
		"group": []interface{}{"group1"},
	}
	r, err := convertRule(l, c, "test", 1)
	assert.Contains(t, ob.String(), "test rule #1; group was an array with a single value, converting to simple value")
	assert.Nil(t, err)
	assert.Equal(t, "group1", r.Group)
	// Ensure group array of > 1 is errord
	ob.Reset()
	c = map[interface{}]interface{}{
		"group": []interface{}{"group1", "group2"},
	}
	r, err = convertRule(l, c, "test", 1)
	assert.Equal(t, "", ob.String())
	// NOTE(review): assert.Error's extra argument is only a failure message,
	// not a check of the error text; assert.EqualError would actually verify
	// the message — confirm the produced error string before tightening this.
	assert.Error(t, err, "group should contain a single value, an array with more than one entry was provided")
	// Make sure a well formed group is alright
	ob.Reset()
	c = map[interface{}]interface{}{
		"group": "group1",
	}
	r, err = convertRule(l, c, "test", 1)
	assert.Nil(t, err)
	assert.Equal(t, "group1", r.Group)
}
// addRuleCall captures the full argument list of one mockFirewall.AddRule
// invocation so tests can assert on exactly what was passed through.
type addRuleCall struct {
	incoming  bool
	proto     uint8
	startPort int32
	endPort   int32
	groups    []string
	host      string
	ip        *net.IPNet
	localIp   *net.IPNet
	caName    string
	caSha     string
}
// mockFirewall records the most recent AddRule call and can be primed to
// fail exactly once via nextCallReturn.
type mockFirewall struct {
	lastCall       addRuleCall
	nextCallReturn error
}
// AddRule records every argument of the call in lastCall and returns the
// error queued in nextCallReturn, consuming it so it fires only once.
func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error {
	ret := mf.nextCallReturn
	mf.nextCallReturn = nil
	mf.lastCall = addRuleCall{
		incoming:  incoming,
		proto:     proto,
		startPort: startPort,
		endPort:   endPort,
		groups:    groups,
		host:      host,
		ip:        ip,
		localIp:   localIp,
		caName:    caName,
		caSha:     caSha,
	}
	return ret
}
// resetConntrack empties the firewall's connection-tracking table under its lock.
func resetConntrack(fw *Firewall) {
	fw.Conntrack.Lock()
	defer fw.Conntrack.Unlock()
	fw.Conntrack.Conns = make(map[firewall.Packet]*conn)
}
|
package app
import (
"bytes"
"context"
"io"
"net/http"
"testing"
"testing/fstest"
"time"
"github.com/dikaeinstein/downloader/pkg/hash"
"github.com/dikaeinstein/godl/internal/pkg/downloader"
"github.com/dikaeinstein/godl/pkg/fsys"
"github.com/dikaeinstein/godl/test"
)
// fakeHashVerifier is a no-op hash verifier: Verify always succeeds, so the
// download test does not need a real checksum for its fixture payload.
type fakeHashVerifier struct{}

// Verify trivially accepts any content/hash pair.
func (fakeHashVerifier) Verify(_ io.Reader, _ string) error {
	return nil
}
// TestDownloadRelease downloads a Go release through a stubbed HTTP client
// into an in-memory filesystem and checks the archive lands under the
// expected filename.
func TestDownloadRelease(t *testing.T) {
	// Serve a fixed payload so no network access is needed.
	fakeRoundTripper := func(req *http.Request) *http.Response {
		testData := bytes.NewBufferString("This is test data")
		return &http.Response{
			StatusCode:    http.StatusOK,
			Body:          io.NopCloser(testData),
			ContentLength: int64(testData.Len()),
		}
	}
	testClient := test.NewTestClient(test.RoundTripFunc(fakeRoundTripper))
	imFS := fsys.NewInMemFS(make(fstest.MapFS))
	dl := &downloader.Downloader{
		BaseURL:      "https://storage.googleapis.com/golang/",
		Client:       testClient,
		DownloadDir:  ".",
		FS:           imFS,
		Hasher:       hash.FakeHasher{},
		HashVerifier: fakeHashVerifier{},
	}
	d := Download{dl, 5 * time.Second}
	err := d.Run(context.Background(), "1.12")
	if err != nil {
		t.Fatalf("Error downloading go binary: %v", err)
	}
	entries, err := imFS.ReadDir(".")
	// Fix: the original used t.Error and fell through, panicking on
	// entries[0] when ReadDir failed or returned nothing.
	if err != nil {
		t.Fatal(err)
	}
	if len(entries) == 0 {
		t.Fatal("no file was written to the in-memory filesystem")
	}
	expected := "go1.12.darwin-amd64.tar.gz"
	if entries[0].Name() != expected {
		t.Errorf("downloaded filename does not match. want %s; got: %s",
			expected, entries[0].Name())
	}
}
|
package errors
import "errors"
var (
	// NOTE(review): ALL_CAPS underscore names violate Go naming conventions
	// (idiomatic would be ErrAssetNameInvalid, etc.), but these identifiers
	// are exported and renaming would break external callers.

	// ERR_ASSET_NAME_INVALID reports an asset name that is too long.
	ERR_ASSET_NAME_INVALID = errors.New("asset name invalid, too long")
	// ERR_ASSET_PRECISION_INVALID reports an out-of-range asset precision.
	ERR_ASSET_PRECISION_INVALID = errors.New("asset precision invalid")
	// ERR_ASSET_AMOUNT_INVALID reports an invalid asset amount.
	ERR_ASSET_AMOUNT_INVALID = errors.New("asset amount invalid")
	// ERR_ASSET_CHECK_OWNER_INVALID reports an owner check failure.
	ERR_ASSET_CHECK_OWNER_INVALID = errors.New("asset owner invalid")
)
|
package main
import "fmt"
func main() {
// indentation for illustration
n1 := Node{data:1}
n2 := Node{data:2}
n3 := Node{data:3}
n6 := Node{data:31}
n7 := Node{data:32}
n4 := Node{data:4}
n5 := Node{data:5}
n1.addChild(&n2)
n1.addChild(&n3)
n3.addChild(&n6)
n3.addChild(&n7)
n1.addChild(&n4)
n1.addChild(&n5)
t := Tree{root: &n1}
n1.removeChild(5)
// test for breadth print
fmt.Println("print breadth first")
t.traverseBF(func(n *Node) {
fmt.Printf("%v\n", n)
})
fmt.Println("")
// test execute function on every node
fmt.Println("add 10 breadth first")
t.traverseBF(func(n *Node) {
(*n).data += 10
fmt.Printf("%v\n", n)
})
fmt.Println("")
// test for depth first
fmt.Println("print depth first")
t.traverseDF(func(n *Node) {
fmt.Printf("%v\n", n)
})
fmt.Println("")
fmt.Println("print depth first recursive")
t.traverseDFrec(func(n *Node) {
fmt.Printf("%v\n", n)
})
fmt.Println("")
// test execute function on every node
fmt.Println("minus 10 depth first")
t.traverseDF(func(n *Node) {
(*n).data -= 10
fmt.Printf("%v\n", n)
})
fmt.Println("")
var emptyTree Tree
// test empty tree breadth first
fmt.Println("print breadth first on empty tree")
emptyTree.traverseBF(func(n *Node) {
fmt.Printf("%v\n", n)
})
fmt.Println("")
// test empty tree depth first
fmt.Println("print depth first on empty tree")
emptyTree.traverseBF(func(n *Node) {
fmt.Printf("%v\n", n)
})
fmt.Println("")
} |
package sessionresolver
import (
"encoding/json"
)
// codec is the codec we use to serialize and deserialize resolver state.
type codec interface {
	// Encode encodes v as a stream of bytes.
	Encode(v interface{}) ([]byte, error)
	// Decode decodes the byte stream b into v.
	Decode(b []byte, v interface{}) error
}
// getCodec always returns a usable codec: the one configured on the
// Resolver if any, otherwise the package default.
func (r *Resolver) getCodec() codec {
	if r.codec == nil {
		return &defaultCodec{}
	}
	return r.codec
}
// defaultCodec is the JSON-backed codec used when none is configured.
type defaultCodec struct{}

// Encode serializes v to JSON.
func (*defaultCodec) Encode(v interface{}) ([]byte, error) {
	return json.Marshal(v)
}

// Decode deserializes the JSON bytes b into v.
func (*defaultCodec) Decode(b []byte, v interface{}) error {
	return json.Unmarshal(b, v)
}
|
package decorator
import (
"io"
"testing"
"github.com/docker/libtrust"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/storage"
"github.com/docker/distribution/storagedriver/inmemory"
"github.com/docker/distribution/testutil"
)
// TestRegistryDecorator wraps an in-memory registry with a counting
// decorator, exercises the registry end to end, and checks each object kind
// was decorated the expected number of times.
func TestRegistryDecorator(t *testing.T) {
	// Initialize the expected decorations. Call counting is a horrible way to
	// test this but should keep this code from being atrocious.
	expected := map[string]int{
		"repository":      1,
		"manifestservice": 1,
		"layerservice":    1,
		"layer":           4, // two uploads + two fetches in checkExerciseRegistry
		"layerupload":     4, // two Upload calls + two Resume calls
	}
	decorated := map[string]int{}
	decorator := Func(func(v interface{}) interface{} {
		switch v := v.(type) {
		case storage.Repository:
			t.Logf("decorate repository: %T", v)
			decorated["repository"]++
		case storage.ManifestService:
			t.Logf("decorate manifestservice: %T", v)
			decorated["manifestservice"]++
		case storage.LayerService:
			t.Logf("decorate layerservice: %T", v)
			decorated["layerservice"]++
		case storage.Layer:
			t.Logf("decorate layer: %T", v)
			decorated["layer"]++
		case storage.LayerUpload:
			t.Logf("decorate layerupload: %T", v)
			decorated["layerupload"]++
		default:
			t.Fatalf("unexpected object decorated: %v", v)
		}
		return v
	})
	registry := storage.NewRegistryWithDriver(inmemory.New())
	registry = DecorateRegistry(registry, decorator)
	// Now take the registry through a number of operations
	checkExerciseRegistry(t, registry)
	for component, calls := range expected {
		if decorated[component] != calls {
			t.Fatalf("%v was not decorated expected number of times: %d != %d", component, decorated[component], calls)
		}
	}
}
// checkExerciseRegistry takes the registry through all of its operations,
// carrying out generic checks: it uploads two random layers (via the
// resumable path), then signs, pushes and re-fetches a manifest that
// references them.
func checkExerciseRegistry(t *testing.T, registry storage.Registry) {
	name := "foo/bar"
	tag := "thetag"
	repository := registry.Repository(name)
	m := manifest.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name: name,
		Tag:  tag,
	}
	layers := repository.Layers()
	for i := 0; i < 2; i++ {
		rs, ds, err := testutil.CreateRandomTarFile()
		if err != nil {
			t.Fatalf("error creating test layer: %v", err)
		}
		dgst := digest.Digest(ds)
		upload, err := layers.Upload()
		if err != nil {
			t.Fatalf("error creating layer upload: %v", err)
		}
		// Use the resumes, as well!
		upload, err = layers.Resume(upload.UUID())
		if err != nil {
			t.Fatalf("error resuming layer upload: %v", err)
		}
		// Fix: the copy error was silently discarded.
		if _, err := io.Copy(upload, rs); err != nil {
			t.Fatalf("unexpected error copying layer data: %v", err)
		}
		if _, err := upload.Finish(dgst); err != nil {
			t.Fatalf("unexpected error finishing upload: %v", err)
		}
		m.FSLayers = append(m.FSLayers, manifest.FSLayer{
			BlobSum: dgst,
		})
		// Then fetch the layers
		if _, err := layers.Fetch(dgst); err != nil {
			t.Fatalf("error fetching layer: %v", err)
		}
	}
	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		t.Fatalf("unexpected error generating key: %v", err)
	}
	sm, err := manifest.Sign(&m, pk)
	if err != nil {
		t.Fatalf("unexpected error signing manifest: %v", err)
	}
	manifests := repository.Manifests()
	if err := manifests.Put(tag, sm); err != nil {
		t.Fatalf("unexpected error putting the manifest: %v", err)
	}
	fetched, err := manifests.Get(tag)
	if err != nil {
		t.Fatalf("unexpected error fetching manifest: %v", err)
	}
	// Fix: the original compared fetched.Tag to itself (always equal, so the
	// check could never fail) and printed err, which is nil at this point.
	if fetched.Tag != tag {
		t.Fatalf("retrieved unexpected manifest: got tag %q, want %q", fetched.Tag, tag)
	}
}
|
package types
import (
"fmt"
)
// CheckMapType checks the type of m[k] is t, and returns the value if yes,
// or an error if not.
//
// If m is nil, return an error.
//
// The function uses VerifyType to verify the type, that's, VerifyType(m[k], t),
// so for t, see VerifyType.
func CheckMapType(m map[string]interface{}, k, t string) (interface{}, error) {
	if m == nil {
		return nil, fmt.Errorf("the map is nil")
	}
	value := m[k]
	if value == nil {
		return nil, fmt.Errorf("the value of the key[%s] in map is nil", k)
	}
	if !VerifyType(value, t) {
		// Fix: report the expected type name t. The original indexed
		// stype2type by the map key k, which printed the wrong (typically
		// empty) type name in the error.
		return nil, fmt.Errorf("the value of the key[%s] in map is not %s",
			k, t)
	}
	return value, nil
}
// MapBool checks that m[key] has type bool and returns its value, or an
// error otherwise (including when m is nil).
func MapBool(m map[string]interface{}, key string) (bool, error) {
	value, err := CheckMapType(m, key, "bool")
	if err != nil {
		return false, err
	}
	return value.(bool), nil
}
// MapString checks that m[key] has type string and returns its value, or an
// error otherwise (including when m is nil).
func MapString(m map[string]interface{}, key string) (string, error) {
	value, err := CheckMapType(m, key, "string")
	if err != nil {
		return "", err
	}
	return value.(string), nil
}
// MapInt checks that m[key] has type int and returns its value, or an error
// otherwise (including when m is nil).
func MapInt(m map[string]interface{}, key string) (int, error) {
	value, err := CheckMapType(m, key, "int")
	if err != nil {
		return 0, err
	}
	return value.(int), nil
}
// MapInt8 checks that m[key] has type int8 and returns its value, or an
// error otherwise (including when m is nil).
func MapInt8(m map[string]interface{}, key string) (int8, error) {
	value, err := CheckMapType(m, key, "int8")
	if err != nil {
		return 0, err
	}
	return value.(int8), nil
}
// MapInt16 checks that m[key] has type int16 and returns its value, or an
// error otherwise (including when m is nil).
func MapInt16(m map[string]interface{}, key string) (int16, error) {
	value, err := CheckMapType(m, key, "int16")
	if err != nil {
		return 0, err
	}
	return value.(int16), nil
}
// MapInt32 checks that m[key] has type int32 and returns its value, or an
// error otherwise (including when m is nil).
func MapInt32(m map[string]interface{}, key string) (int32, error) {
	value, err := CheckMapType(m, key, "int32")
	if err != nil {
		return 0, err
	}
	return value.(int32), nil
}
// MapInt64 checks that m[key] has type int64 and returns its value, or an
// error otherwise (including when m is nil).
func MapInt64(m map[string]interface{}, key string) (int64, error) {
	value, err := CheckMapType(m, key, "int64")
	if err != nil {
		return 0, err
	}
	return value.(int64), nil
}
// MapUint checks that m[key] has type uint and returns its value, or an
// error otherwise (including when m is nil).
func MapUint(m map[string]interface{}, key string) (uint, error) {
	value, err := CheckMapType(m, key, "uint")
	if err != nil {
		return 0, err
	}
	return value.(uint), nil
}
// MapUint8 checks that m[key] has type uint8 and returns its value, or an
// error otherwise (including when m is nil).
func MapUint8(m map[string]interface{}, key string) (uint8, error) {
	value, err := CheckMapType(m, key, "uint8")
	if err != nil {
		return 0, err
	}
	return value.(uint8), nil
}
// MapUint16 checks that m[key] has type uint16 and returns its value, or an
// error otherwise (including when m is nil).
func MapUint16(m map[string]interface{}, key string) (uint16, error) {
	value, err := CheckMapType(m, key, "uint16")
	if err != nil {
		return 0, err
	}
	return value.(uint16), nil
}
// MapUint32 checks that m[key] has type uint32 and returns its value, or an
// error otherwise (including when m is nil).
func MapUint32(m map[string]interface{}, key string) (uint32, error) {
	value, err := CheckMapType(m, key, "uint32")
	if err != nil {
		return 0, err
	}
	return value.(uint32), nil
}
// MapUint64 checks that m[key] has type uint64 and returns its value, or an
// error otherwise (including when m is nil).
func MapUint64(m map[string]interface{}, key string) (uint64, error) {
	value, err := CheckMapType(m, key, "uint64")
	if err != nil {
		return 0, err
	}
	return value.(uint64), nil
}
// MapFloat32 checks that m[key] has type float32 and returns its value, or
// an error otherwise (including when m is nil).
func MapFloat32(m map[string]interface{}, key string) (float32, error) {
	value, err := CheckMapType(m, key, "float32")
	if err != nil {
		return 0, err
	}
	return value.(float32), nil
}
// MapFloat64 checks that m[key] has type float64 and returns its value, or
// an error otherwise (including when m is nil).
func MapFloat64(m map[string]interface{}, key string) (float64, error) {
	value, err := CheckMapType(m, key, "float64")
	if err != nil {
		return 0, err
	}
	return value.(float64), nil
}
// MapComplex64 checks that m[key] has type complex64 and returns its value,
// or an error otherwise (including when m is nil).
func MapComplex64(m map[string]interface{}, key string) (complex64, error) {
	value, err := CheckMapType(m, key, "complex64")
	if err != nil {
		return 0, err
	}
	return value.(complex64), nil
}
// MapComplex128 checks that m[key] has type complex128 and returns its
// value, or an error otherwise (including when m is nil).
func MapComplex128(m map[string]interface{}, key string) (complex128, error) {
	value, err := CheckMapType(m, key, "complex128")
	if err != nil {
		return 0, err
	}
	return value.(complex128), nil
}
// MapMap checks that m[key] has type map[string]interface{} and returns its
// value, or an error otherwise (including when m is nil).
func MapMap(m map[string]interface{}, key string) (map[string]interface{}, error) {
	value, err := CheckMapType(m, key, "string2interface")
	if err != nil {
		return nil, err
	}
	return value.(map[string]interface{}), nil
}
|
// +build windows darwin linux,!arm
package sensor
import (
"math/rand"
"time"
)
const (
	// pulseDelay matches the trigger pulse width of a real HC-SR04 driver;
	// it is not referenced by this simulated implementation but kept for
	// parity with the hardware build.
	pulseDelay = 10 * time.Microsecond
)
// DistanceSensor is the interface for an ultrasound distance sensor.
type DistanceSensor interface {
	// Distance reports one distance reading; units are implementation-defined.
	Distance() (float64, error)
}
// HCSRO4Sensor is a simulated HC-SR04 ultrasonic sensor used on platforms
// without GPIO support (see the build tags at the top of the file).
type HCSRO4Sensor struct {
	// echo and trigger are the GPIO pin numbers for the echo/trigger lines.
	echo, trigger uint8
}
// NewHCSRO4Sensor builds a simulated sensor bound to the given trigger and
// echo GPIO pins.
func NewHCSRO4Sensor(trigger, echo uint8) *HCSRO4Sensor {
	// Fix: the original discarded both arguments, leaving the pin fields zero.
	return &HCSRO4Sensor{trigger: trigger, echo: echo}
}
// Distance returns a pseudo-random value in [0, 1) standing in for a real
// measurement on unsupported platforms; it never fails.
func (sensor *HCSRO4Sensor) Distance() (float64, error) {
	// NOTE(review): re-seeding the global generator on every call is wasteful
	// (and rand.Seed is deprecated in modern Go); seeding once at startup
	// would suffice — confirm nothing depends on the per-call reseed.
	rand.Seed(time.Now().UTC().UnixNano())
	return rand.Float64(), nil
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aliyun
import (
"fmt"
"strings"
"time"
"yunion.io/x/jsonutils"
"yunion.io/x/pkg/errors"
"yunion.io/x/pkg/utils"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/multicloud"
)
// SElasticSearch models an Aliyun Elasticsearch instance as returned by the
// openapi /openapi/instances endpoints; fields mirror the API response and
// are populated by resp.Unmarshal.
type SElasticSearch struct {
	multicloud.SVirtualResourceBase
	multicloud.SBillingBase
	// region is the owning region, set after unmarshal (not part of the API payload).
	region *SRegion
	Tags   []struct {
		TagKey   string
		TagValue string
	}
	AdvancedDedicateMaster bool
	AdvancedSetting        struct {
		// NOTE(review): unexported — standard JSON unmarshaling cannot
		// populate this field; confirm jsonutils behavior or export it.
		gcName string
	}
	AliwsDicts []struct {
		FileSize   int
		Name       string
		SourceType string
		Type       string
	}
	ClientNodeConfiguration struct {
		Amount   int
		Disk     int
		DiskType string
		Spec     string
	}
	InstanceCategory string
	CreatedAt        time.Time
	DedicateMaster   bool
	Description      string
	DictList         []struct {
		FileSize   int
		Name       string
		SourceType string
		Type       string
	}
	Domain                       string
	ElasticDataNodeConfiguration struct {
		Amount   int
		Disk     int
		DiskType string
		Spec     string
	}
	EnableKibanaPrivateNetwork bool
	EnableKibanaPublicNetwork  bool
	EnablePublic               bool
	EsConfig                   map[string]string
	EsIPBlacklist              []string
	EsIPWhitelist              []string
	EsVersion                  string
	ExtendConfigs              []struct {
		ConfigType      string
		Value           string
		MaintainEndTime string
		AliVersion      string
	}
	HaveClientNode      bool
	HaveKibana          bool
	InstanceId          string
	KibanaConfiguration struct {
		Amount int
		Spec   string
	}
	KibanaDomain             string
	KibanaIPWhitelist        []string
	KibanaPort               int
	KibanaPrivateIPWhitelist []string
	MasterConfiguration      struct {
		Amount   int
		Disk     int
		DiskType string
		Spec     string
	}
	NetworkConfig struct {
		Type      string
		VpcId     string
		VsArea    string
		VswitchId string
	}
	NodeAmount int
	NodeSpec   struct {
		Disk           int
		DiskEncryption bool
		DiskType       string
		Spec           string
	}
	PaymentType               string
	Port                      int
	PrivateNetworkIpWhiteList []string
	Protocol                  string
	PublicDomain              string
	PublicIpWhitelist         []string
	PublicPort                int
	ResourceGroupId           string
	Status                    string
	SynonymsDicts             []struct {
		FileSize   int
		Name       string
		SourceType string
		Type       string
	}
	UpdatedAt     time.Time
	VpcInstanceId string
	WarmNode      bool
	WarmNodeConfiguration struct {
		Amount         int
		Disk           int
		DiskEncryption bool
		DiskType       string
		Spec           string
	}
	ZoneCount int
	ZoneInfos []struct {
		// NOTE(review): both fields are unexported and thus likely never
		// populated by unmarshal — verify before relying on ZoneInfos.
		status string
		zoneId string
	}
}
// GetTags returns the user tags of the instance, skipping provider-reserved
// keys (prefixed "aliyun" or "acs:") and empty keys. The error is always nil.
func (self *SElasticSearch) GetTags() (map[string]string, error) {
	tags := map[string]string{}
	for _, t := range self.Tags {
		reserved := strings.HasPrefix(t.TagKey, "aliyun") || strings.HasPrefix(t.TagKey, "acs:")
		if reserved || len(t.TagKey) == 0 {
			continue
		}
		tags[t.TagKey] = t.TagValue
	}
	return tags, nil
}
// GetSysTags returns only the provider-reserved tags, i.e. those whose key
// starts with "aliyun" or "acs:" (empty keys are skipped).
func (self *SElasticSearch) GetSysTags() map[string]string {
	tags := map[string]string{}
	for _, t := range self.Tags {
		reserved := strings.HasPrefix(t.TagKey, "aliyun") || strings.HasPrefix(t.TagKey, "acs:")
		if reserved && len(t.TagKey) > 0 {
			tags[t.TagKey] = t.TagValue
		}
	}
	return tags
}
// SetTags is not supported for this resource and always returns
// ErrNotImplemented.
func (self *SElasticSearch) SetTags(tags map[string]string, replace bool) error {
	return errors.Wrap(cloudprovider.ErrNotImplemented, "SetTags")
}
// GetId returns the cloud instance ID.
func (self *SElasticSearch) GetId() string {
	return self.InstanceId
}
// GetGlobalId returns the globally unique ID (same as the instance ID).
func (self *SElasticSearch) GetGlobalId() string {
	return self.InstanceId
}
// GetName returns the instance description when set, otherwise the
// instance ID.
func (self *SElasticSearch) GetName() string {
	if self.Description == "" {
		return self.InstanceId
	}
	return self.Description
}
// GetDiskSizeGb returns the data-node disk size in GB.
func (self *SElasticSearch) GetDiskSizeGb() int {
	return self.NodeSpec.Disk
}
// GetStorageType returns the data-node disk type.
func (self *SElasticSearch) GetStorageType() string {
	return self.NodeSpec.DiskType
}
// GetCategory returns the instance category reported by the API.
func (self *SElasticSearch) GetCategory() string {
	return self.InstanceCategory
}
// GetVersion returns EsVersion with any "_suffix" stripped; strings.Split
// always yields at least one element, so indexing [0] is safe.
func (self *SElasticSearch) GetVersion() string {
	return strings.Split(self.EsVersion, "_")[0]
}
// GetVpcId returns the VPC the instance is attached to.
func (self *SElasticSearch) GetVpcId() string {
	return self.NetworkConfig.VpcId
}
// GetNetworkId returns the vswitch (subnet) ID.
func (self *SElasticSearch) GetNetworkId() string {
	return self.NetworkConfig.VswitchId
}
// GetZoneId returns the availability zone (the API's VsArea field).
func (self *SElasticSearch) GetZoneId() string {
	return self.NetworkConfig.VsArea
}
// IsMultiAz reports whether the instance spans more than one zone.
func (self *SElasticSearch) IsMultiAz() bool {
	return self.ZoneCount > 1
}
// GetVcpuCount looks up the vCPU count for the node spec in the esSpec
// table; 0 when the spec is unknown.
func (self *SElasticSearch) GetVcpuCount() int {
	if spec, ok := esSpec[self.NodeSpec.Spec]; ok {
		return spec.VcpuCount
	}
	return 0
}
// GetVmemSizeGb looks up the memory size (GB) for the node spec in the
// esSpec table; 0 when the spec is unknown.
func (self *SElasticSearch) GetVmemSizeGb() int {
	if spec, ok := esSpec[self.NodeSpec.Spec]; ok {
		return spec.VmemSizeGb
	}
	return 0
}
// GetInstanceType returns the node spec string (the instance type).
func (self *SElasticSearch) GetInstanceType() string {
	return self.NodeSpec.Spec
}
// Refresh re-reads this instance from the API and merges the result in place.
func (self *SElasticSearch) Refresh() error {
	es, err := self.region.GetElasitcSearch(self.InstanceId)
	if err != nil {
		return err
	}
	return jsonutils.Update(self, es)
}
// GetCreatedAt returns the creation timestamp reported by the API.
func (self *SElasticSearch) GetCreatedAt() time.Time {
	return self.CreatedAt
}
// GetBillingType returns the payment type (e.g. prepaid/postpaid) as-is.
func (self *SElasticSearch) GetBillingType() string {
	return self.PaymentType
}
// GetProjectId returns the Aliyun resource group ID used as the project.
func (self *SElasticSearch) GetProjectId() string {
	return self.ResourceGroupId
}
// GetStatus maps the Aliyun instance status to the API status constants,
// passing unknown statuses through unchanged.
func (self *SElasticSearch) GetStatus() string {
	switch self.Status {
	case "active":
		return api.ELASTIC_SEARCH_STATUS_AVAILABLE
	case "activating":
		// NOTE(review): the constant name ELASITC_... is misspelled at its
		// definition site (outside this file); fix it there, not here.
		return api.ELASITC_SEARCH_STATUS_CREATING
	case "inactive":
		return api.ELASTIC_SEARCH_STATUS_UNAVAILABLE
	case "invalid":
		return api.ELASTIC_SEARCH_STATUS_DELETING
	default:
		return self.Status
	}
}
// GetIElasticSearchs lists every ElasticSearch instance in the region as
// cloudprovider interfaces, paging through the API 100 entries at a time.
func (self *SRegion) GetIElasticSearchs() ([]cloudprovider.ICloudElasticSearch, error) {
	ret := []SElasticSearch{}
	for {
		part, total, err := self.GetElasticSearchs(100, len(ret)/100+1)
		if err != nil {
			return nil, errors.Wrapf(err, "GetElasticSearchs")
		}
		ret = append(ret, part...)
		// Fix: also stop on an empty page. If the API ever reports a total
		// larger than what it actually returns, the original loop requested
		// the same page forever.
		if len(ret) >= total || len(part) == 0 {
			break
		}
	}
	result := []cloudprovider.ICloudElasticSearch{}
	for i := range ret {
		ret[i].region = self
		result = append(result, &ret[i])
	}
	return result, nil
}
// GetIElasticSearchById fetches a single instance by ID and returns it as a
// cloudprovider interface.
func (self *SRegion) GetIElasticSearchById(id string) (cloudprovider.ICloudElasticSearch, error) {
	es, err := self.GetElasitcSearch(id)
	if err != nil {
		return nil, err
	}
	return es, nil
}
// GetElasticSearchs fetches one page of ElasticSearch instances.
// size is clamped to [1, 100] (out-of-range values become 100) and page is
// clamped to >= 1. It returns the page, the total count reported via the
// X-Total-Count header, and any request/decoding error.
func (self *SRegion) GetElasticSearchs(size, page int) ([]SElasticSearch, int, error) {
	if size < 1 || size > 100 {
		size = 100
	}
	if page < 1 {
		page = 1
	}
	params := map[string]string{
		"PathPattern": "/openapi/instances",
		"size":        fmt.Sprintf("%d", size),
		"page":        fmt.Sprintf("%d", page),
	}
	resp, err := self.esRequest("ListInstance", params)
	if err != nil {
		return nil, 0, errors.Wrapf(err, "ListInstance")
	}
	ret := []SElasticSearch{}
	err = resp.Unmarshal(&ret, "Result")
	if err != nil {
		return nil, 0, errors.Wrapf(err, "resp.Unmarshal")
	}
	// Total is carried in a response header, not the body; a missing header
	// yields 0 here (error deliberately ignored).
	totalCount, _ := resp.Int("Headers", "X-Total-Count")
	return ret, int(totalCount), nil
}
// GetElasitcSearch fetches one instance by ID.
// NOTE(review): the name misspells "Elastic" but is kept because callers in
// this file (Refresh, GetIElasticSearchById, Delete) rely on it.
func (self *SRegion) GetElasitcSearch(id string) (*SElasticSearch, error) {
	if len(id) == 0 {
		return nil, errors.Wrapf(cloudprovider.ErrNotFound, "empty id")
	}
	params := map[string]string{
		"PathPattern": fmt.Sprintf("/openapi/instances/%s", id),
	}
	resp, err := self.esRequest("DescribeInstance", params)
	if err != nil {
		return nil, errors.Wrapf(err, "DescribeInstance")
	}
	ret := SElasticSearch{region: self}
	err = resp.Unmarshal(&ret, "Result")
	if err != nil {
		return nil, errors.Wrapf(err, "resp.Unmarshal")
	}
	return &ret, nil
}
// Delete removes this instance via the region API.
func (self *SElasticSearch) Delete() error {
	return self.region.DeleteElasticSearch(self.InstanceId)
}
// DeleteElasticSearch immediately deletes the instance with the given ID,
// using a fresh client token for idempotency on the provider side.
func (self *SRegion) DeleteElasticSearch(id string) error {
	params := map[string]string{
		"clientToken": utils.GenRequestId(20),
		"deleteType":  "immediate",
		"PathPattern": fmt.Sprintf("/openapi/instances/%s", id),
	}
	_, err := self.esRequest("DeleteInstance", params)
	// errors.Wrapf returns nil when err is nil, so success passes through.
	return errors.Wrapf(err, "DeleteInstance")
}
|
package protocol
import (
"CMDB/api/conf"
hostAPI "CMDB/api/pkg/host/http"
"context"
"fmt"
"github.com/infraboard/mcube/http/middleware/cors"
"github.com/infraboard/mcube/logger"
"github.com/infraboard/mcube/logger/zap"
"github.com/julienschmidt/httprouter"
"net/http"
"time"
)
// NewHTTPService builds the HTTP service: a router wrapped in a permissive
// CORS handler, served with conservative timeouts from the app config.
func NewHTTPService() *HTTPService {
	r := httprouter.New()
	// Fix: removed the dead `cors.Default().Handler(r)` call — its return
	// value was discarded, and the server below already wraps the router
	// with cors.AllowAll().
	server := &http.Server{
		ReadHeaderTimeout: 60 * time.Second,
		ReadTimeout:       60 * time.Second,
		WriteTimeout:      60 * time.Second,
		IdleTimeout:       60 * time.Second,
		MaxHeaderBytes:    1 << 20, // 1M
		Addr:              conf.C().APP.Addr(),
		Handler:           cors.AllowAll().Handler(r),
	}
	return &HTTPService{
		r:      r,
		server: server,
		l:      zap.L().Named("API"),
		c:      conf.C(),
	}
}
// HTTPService bundles the router, logger, config and the underlying
// http.Server for the API service.
type HTTPService struct {
	r      *httprouter.Router
	l      logger.Logger
	c      *conf.Config
	server *http.Server
}
// Start registers the sub-service routes and blocks serving HTTP.
// It returns nil on a clean shutdown (http.ErrServerClosed) and an error
// for any other listener failure.
func (s *HTTPService) Start() error {
	// mount sub-service routes
	hostAPI.RegistAPI(s.r)
	s.l.Infof("HTTP服务启动成功, 监听地址: %s", s.server.Addr)
	if err := s.server.ListenAndServe(); err != nil {
		if err == http.ErrServerClosed {
			// Fix: a graceful shutdown is not a startup failure; the original
			// logged "stopped" but still returned a "start service error".
			s.l.Info("service is stopped")
			return nil
		}
		return fmt.Errorf("start service error, %s", err.Error())
	}
	return nil
}
// Stop gracefully shuts the server down, waiting at most 30 seconds for
// in-flight requests to finish.
func (s *HTTPService) Stop() error {
	s.l.Info("start graceful shutdown")
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// graceful HTTP shutdown
	if err := s.server.Shutdown(ctx); err != nil {
		// NOTE(review): the shutdown error is logged but swallowed — the
		// method still returns nil; confirm callers don't need the error.
		s.l.Errorf("graceful shutdown timeout, force exit")
	}
	return nil
}
|
package subcast
import (
"io/ioutil"
"text/scanner"
"subc/compile/arch/amd64"
"subc/parse"
"subc/scan"
"subc/types"
)
// Fuzz is a go-fuzz entry point: it scans, parses and type-checks the input
// as a SubC program, returning 1 when the input is interesting (fully valid)
// and 0 otherwise.
func Fuzz(data []byte) int {
	r := scan.StringReader(scanner.Position{
		Filename: "fuzz",
		Line:     1,
		Column:   1,
	}, string(data), false)
	// Fix: the local was named "scanner", shadowing the imported
	// text/scanner package for the rest of the function.
	sc := scan.New(scan.DefaultConfig, "fuzz", r)
	prog, err := parse.Parse(parse.Config{}, sc)
	if err != nil {
		return 0
	}
	emitter := amd64.NewEmitter(ioutil.Discard)
	_, err = types.Check(types.Config{
		Sizes: emitter.Sizes,
	}, prog)
	if err != nil {
		return 0
	}
	return 1
}
|
package gerror
import (
"encoding/json"
"errors"
"fmt"
"testing"
)
var (
	// baseError is a plain stdlib error reused as the wrap target across the
	// benchmarks and tests below.
	baseError = errors.New("test")
)
// Benchmark_New measures the cost of constructing a fresh error with New.
func Benchmark_New(b *testing.B) {
	for i := 0; i < b.N; i++ {
		New("test")
	}
}
// Benchmark_Newf measures the cost of the formatting constructor Newf.
func Benchmark_Newf(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Newf("%s", "test")
	}
}
// Benchmark_Wrap measures wrapping an existing error via WithCause.
func Benchmark_Wrap(b *testing.B) {
	for i := 0; i < b.N; i++ {
		WithCause(baseError, "test")
	}
}
// Benchmark_Wrapf measures wrapping with a formatted message via WithCausef.
func Benchmark_Wrapf(b *testing.B) {
	for i := 0; i < b.N; i++ {
		WithCausef(baseError, "%s", "test")
	}
}
// Benchmark_Stack measures rendering a 3-level error chain with %+v
// (stack formatting); the formatted string itself is discarded.
func Benchmark_Stack(b *testing.B) {
	err := New("1")
	err = WithCause(err, "2")
	err = WithCause(err, "3")
	for i := 0; i < b.N; i++ {
		fmt.Sprintf("%+v", err)
	}
}
// Benchmark_Error measures Error() on a 3-level error chain.
func Benchmark_Error(b *testing.B) {
	err := New("1")
	err = WithCause(err, "2")
	err = WithCause(err, "3")
	for i := 0; i < b.N; i++ {
		err.Error()
	}
}
// Test_New is a print-only smoke test for New and Newf (no assertions).
func Test_New(t *testing.T) {
	err1 := New("test")
	fmt.Println(err1.Error())
	err2 := Newf("%v", "test")
	fmt.Println(err2.Error())
}
// Test_Wrap1 smoke-tests wrapping a plain stdlib error (no assertions).
func Test_Wrap1(t *testing.T) {
	err := errors.New("1")
	err = WithCause(err, "2")
	err = WithCause(err, "3")
	fmt.Println(err.Error())
}
// Test_Wrap2 smoke-tests wrapping a package-native error (no assertions).
func Test_Wrap2(t *testing.T) {
	err := New("1")
	err = WithCause(err, "2")
	err = WithCause(err, "3")
	fmt.Println(err.Error())
}
// Test_Wrap3 smoke-tests wrapping with an empty message (no assertions).
func Test_Wrap3(t *testing.T) {
	err := New("1")
	err = WithCause(err, "")
	fmt.Println(err.Error())
}
// Test_Stack1 prints %+v of a plain stdlib error (no stack expected).
func Test_Stack1(t *testing.T) {
	err := errors.New("1")
	fmt.Printf("%+v\n", err)
}
func Test_Stack2(t *testing.T) {
err := errors.New("1")
err = WithCause(err, "2")
err = WithCause(err, "3")
fmt.Printf("%+v\n", err)
}
func Test_Stack3(t *testing.T) {
err := New("1")
fmt.Printf("%+v\n", err)
}
func Test_Stack4(t *testing.T) {
err := New("1")
err = WithCause(err, "2")
err = WithCause(err, "3")
fmt.Printf("%+v\n", err)
}
func Test_Stack5(t *testing.T) {
err := &runtimeException{
msg: "hello",
}
fmt.Printf("%+v\n", err)
}
func Test_Json(t *testing.T) {
err := WithCause(New("1"), "2")
b, _ := json.Marshal(map[string]interface{}{"error": err})
fmt.Println(string(b))
}
func Test_Null(t *testing.T) {
var err error
fmt.Printf("%+v\n", err)
fmt.Printf("%v\n", err)
fmt.Printf("%-v\n", err)
b, _ := json.Marshal(map[string]interface{}{"error": err})
fmt.Println(string(b))
}
func Test_Cause1(t *testing.T) {
err := errors.New("1")
err = WithCause(err, "2")
err = WithCause(err, "3")
e := RootCause(err)
fmt.Printf("%+v\n", e)
}
func Test_Cause2(t *testing.T) {
err := New("1")
err = WithCause(err, "2")
err = WithCause(err, "3")
e := RootCause(err)
fmt.Printf("%+v\n", e)
}
|
package utils
// Version is the version string reported by this application.
var Version = "v1.0"
|
package reports
import (
"context"
"database/sql"
"time"
"github.com/pganalyze/collector/input/postgres"
"github.com/pganalyze/collector/output/pganalyze_collector"
"github.com/pganalyze/collector/state"
"github.com/pganalyze/collector/util"
"google.golang.org/protobuf/types/known/timestamppb"
)
// BloatReport - Report on table and index bloat
type BloatReport struct {
	ReportRunID string                   // unique identifier of this report run
	CollectedAt time.Time                // when the statistics were collected
	Data        state.PostgresBloatStats // collected table and index bloat statistics
}

// RunID - Returns the ID of this report run
func (report BloatReport) RunID() string {
	return report.ReportRunID
}

// ReportType - Returns the type of the report as a string
func (report BloatReport) ReportType() string {
	return "bloat"
}
// Run executes the bloat report: it collects table and index bloat
// statistics over the given connection (honoring the configured system
// type and schema-ignore regexp) and stores them in report.Data.
func (report *BloatReport) Run(ctx context.Context, server *state.Server, logger *util.Logger, connection *sql.DB) (err error) {
	systemType := server.Config.SystemType
	// err is the named result, so a bare return propagates success and
	// failure alike; the original's extra "if err != nil { return }" was
	// redundant.
	report.Data, err = postgres.GetBloatStats(ctx, logger, connection, systemType, server.Config.IgnoreSchemaRegexp)
	return
}
// Result of the report, assembled as a protobuf Report message.
func (report *BloatReport) Result() *pganalyze_collector.Report {
	var r pganalyze_collector.Report
	var data pganalyze_collector.BloatReportData
	r.ReportRunId = report.ReportRunID
	r.ReportType = "bloat"
	r.CollectedAt = timestamppb.New(report.CollectedAt)
	// A single database reference at index 0; every relation/index
	// reference below points at it via DatabaseIdx: 0.
	data.DatabaseReferences = append(data.DatabaseReferences, &pganalyze_collector.DatabaseReference{Name: report.Data.DatabaseName})
	// Statistics and references are appended in lockstep: each statistic's
	// RelationIdx/IndexIdx is the slot its reference is about to occupy.
	for _, relation := range report.Data.Relations {
		data.RelationBloatStatistics = append(data.RelationBloatStatistics, &pganalyze_collector.RelationBloatStatistic{RelationIdx: int32(len(data.RelationReferences)), BloatLookupMethod: pganalyze_collector.BloatLookupMethod_ESTIMATE_FAST, TotalBytes: relation.TotalBytes, BloatBytes: relation.BloatBytes})
		data.RelationReferences = append(data.RelationReferences, &pganalyze_collector.RelationReference{DatabaseIdx: 0, SchemaName: relation.SchemaName, RelationName: relation.RelationName})
	}
	for _, index := range report.Data.Indices {
		data.IndexBloatStatistics = append(data.IndexBloatStatistics, &pganalyze_collector.IndexBloatStatistic{IndexIdx: int32(len(data.IndexReferences)), TotalBytes: index.TotalBytes, BloatBytes: index.BloatBytes})
		data.IndexReferences = append(data.IndexReferences, &pganalyze_collector.IndexReference{DatabaseIdx: 0, SchemaName: index.SchemaName, IndexName: index.IndexName})
	}
	r.Data = &pganalyze_collector.Report_BloatReportData{BloatReportData: &data}
	return &r
}
|
package dashrates
import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"time"
)
// BitbnsAPI implements the RateAPI interface and contains info necessary for
// calling to the public Bitbns price ticker API.
type BitbnsAPI struct {
	BaseAPIURL          string // scheme and host of the Bitbns API
	PriceTickerEndpoint string // path of the price ticker endpoint
}
// NewBitbnsAPI is a constructor for BitbnsAPI, pointing at the public
// Bitbns ticker endpoint.
func NewBitbnsAPI() *BitbnsAPI {
	api := &BitbnsAPI{}
	api.BaseAPIURL = "https://bitbns.com"
	api.PriceTickerEndpoint = "/order/getTickerWithVolume/"
	return api
}

// DisplayName returns the exchange display name. It is part of the RateAPI
// interface implementation.
func (a *BitbnsAPI) DisplayName() string {
	return "Bitbns"
}
// FetchRate gets the Dash exchange rate from the Bitbns API.
//
// This is part of the RateAPI interface implementation. It returns an
// error on transport failure, a non-200 response, or malformed JSON.
func (a *BitbnsAPI) FetchRate() (*RateInfo, error) {
	resp, err := http.Get(a.BaseAPIURL + a.PriceTickerEndpoint)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Reject non-200 responses: without this check an error page would be
	// fed to the JSON decoder (or silently decode to zero-valued rates).
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("bitbns: unexpected HTTP status %s", resp.Status)
	}
	now := time.Now()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	// parse json and extract Dash rate
	var res bitbnsPriceResp
	err = json.Unmarshal(body, &res)
	if err != nil {
		return nil, err
	}
	ri := RateInfo{
		BaseCurrency:    "DASH",
		QuoteCurrency:   "USD",
		LastPrice:       res.Dashusdt.LastTradedPrice,
		BaseAssetVolume: 0,
		FetchTime:       now,
	}
	return &ri, nil
}
// bitbnsPriceResp is used in parsing the Bitbns API response only.
// Only the DASHUSDT ticker entry is decoded.
type bitbnsPriceResp struct {
	Dashusdt struct {
		HighestBuyBid   float64  `json:"highest_buy_bid"`
		LowestSellBid   float64  `json:"lowest_sell_bid"`
		LastTradedPrice float64  `json:"last_traded_price"`
		YesPrice        float64  `json:"yes_price"`
		InrPrice        float64  `json:"inr_price"`
		Volume          struct{} // volume payload is intentionally dropped
	} `json:"DASHUSDT"`
}
|
// Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package migration
import (
"context"
"fmt"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/lease"
"github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/logtags"
)
// SQLDeps are the dependencies of migrations which perform actions at the
// SQL layer.
type SQLDeps struct {
	DB               *kv.DB                   // KV client for direct key-value access
	Codec            keys.SQLCodec            // codec for SQL key encoding/decoding
	Settings         *cluster.Settings        // cluster-wide settings
	LeaseManager     *lease.Manager           // descriptor lease manager
	InternalExecutor sqlutil.InternalExecutor // executor for internal SQL statements
}

// SQLMigrationFn is used to perform sql-level migrations. It may be run from
// any tenant.
type SQLMigrationFn func(context.Context, clusterversion.ClusterVersion, SQLDeps) error

// SQLMigration is an implementation of Migration for SQL-level migrations.
type SQLMigration struct {
	migration                // embedded common metadata (description, cluster version)
	fn        SQLMigrationFn // migration body invoked by Run
}
// NewSQLMigration constructs a SQLMigration that runs fn when the cluster
// reaches version cv.
func NewSQLMigration(
	description string, cv clusterversion.ClusterVersion, fn SQLMigrationFn,
) *SQLMigration {
	m := &SQLMigration{fn: fn}
	m.migration = migration{description: description, cv: cv}
	return m
}
// Run kickstarts the actual migration process for SQL-level migrations.
// It tags the context with the migration's version for logging before
// invoking the migration function. (The named result of the original was
// never used, so it is dropped.)
func (m *SQLMigration) Run(
	ctx context.Context, cv clusterversion.ClusterVersion, d SQLDeps,
) error {
	ctx = logtags.AddTag(ctx, fmt.Sprintf("migration=%s", cv), nil)
	return m.fn(ctx, cv, d)
}
|
// Copyright 2014 mqant Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gate
import (
"github.com/liangdas/mqant/network"
"time"
"bufio"
"github.com/liangdas/mqant/module"
"github.com/liangdas/mqant/conf"
)
// Gate is the connection gateway module: it accepts websocket and/or TCP
// connections and bridges them to the rest of the application.
type Gate struct {
	module.BaseModule
	MaxConnNum          int    // maximum number of simultaneous connections
	MaxMsgLen           uint32 // maximum length of a single message
	MinStorageHeartbeat int64  // minimum heartbeat interval for session persistence
	// websocket
	WSAddr      string
	HTTPTimeout time.Duration
	// tcp
	TCPAddr string
	//tls
	Tls      bool
	CertFile string
	KeyFile  string
	// internal collaborators, wired up in OnInit
	handler      GateHandler    // RPC-facing gate handler
	agentLearner AgentLearner   // receives agent lifecycle notifications
	storage      StorageHandler // session persistence backend
}
// SetStorageHandler installs the session-persistence backend.
func (gate *Gate) SetStorageHandler(storage StorageHandler) error {
	gate.storage = storage
	return nil
}

// GetStorageHandler returns the installed session-persistence backend.
func (gate *Gate) GetStorageHandler() (storage StorageHandler) {
	return gate.storage
}
// OnInit reads the gate's configuration and registers the gate handler's
// RPC endpoints. The required settings (MaxConnNum, MaxMsgLen, WSAddr,
// HTTPTimeout, TCPAddr) are read with direct type assertions and will
// panic if absent or of the wrong JSON type; the remaining settings are
// optional with defaults.
func (gate *Gate) OnInit(subclass module.Module, app module.App, settings *conf.ModuleSettings) {
	gate.BaseModule.OnInit(subclass, app, settings) // this call is required
	gate.MaxConnNum = int(settings.Settings["MaxConnNum"].(float64))
	gate.MaxMsgLen = uint32(settings.Settings["MaxMsgLen"].(float64))
	gate.WSAddr = settings.Settings["WSAddr"].(string)
	gate.HTTPTimeout = time.Second * time.Duration(settings.Settings["HTTPTimeout"].(float64))
	gate.TCPAddr = settings.Settings["TCPAddr"].(string)
	// Optional TLS settings fall back to zero values when absent.
	if Tls, ok := settings.Settings["Tls"]; ok {
		gate.Tls = Tls.(bool)
	} else {
		gate.Tls = false
	}
	if CertFile, ok := settings.Settings["CertFile"]; ok {
		gate.CertFile = CertFile.(string)
	} else {
		gate.CertFile = ""
	}
	if KeyFile, ok := settings.Settings["KeyFile"]; ok {
		gate.KeyFile = KeyFile.(string)
	} else {
		gate.KeyFile = ""
	}
	// Minimum session-persistence heartbeat; defaults to 60 when unset.
	if MinHBStorage, ok := settings.Settings["MinHBStorage"]; ok {
		gate.MinStorageHeartbeat = int64(MinHBStorage.(float64))
	} else {
		gate.MinStorageHeartbeat = 60
	}
	// The gate handler doubles as the agent learner and is exposed over RPC.
	handler := NewGateHandler(gate)
	gate.agentLearner = handler
	gate.handler = handler
	gate.GetServer().RegisterGO("Update", gate.handler.Update)
	gate.GetServer().RegisterGO("Bind", gate.handler.Bind)
	gate.GetServer().RegisterGO("UnBind", gate.handler.UnBind)
	gate.GetServer().RegisterGO("Push", gate.handler.Push)
	gate.GetServer().RegisterGO("Set", gate.handler.Set)
	gate.GetServer().RegisterGO("Remove", gate.handler.Remove)
	gate.GetServer().RegisterGO("Send", gate.handler.Send)
	gate.GetServer().RegisterGO("Close", gate.handler.Close)
}
// Run starts the configured websocket and/or TCP listeners (each address
// is optional: an empty address disables that listener), blocks until
// closeSig fires, and then shuts both listeners down.
func (gate *Gate) Run(closeSig chan bool) {
	var wsServer *network.WSServer
	if gate.WSAddr != "" {
		wsServer = new(network.WSServer)
		wsServer.Addr = gate.WSAddr
		wsServer.MaxConnNum = gate.MaxConnNum
		wsServer.MaxMsgLen = gate.MaxMsgLen
		wsServer.HTTPTimeout = gate.HTTPTimeout
		wsServer.Tls = gate.Tls
		wsServer.CertFile = gate.CertFile
		wsServer.KeyFile = gate.KeyFile
		// Each accepted websocket connection is wrapped in a buffered agent.
		wsServer.NewAgent = func(conn *network.WSConn) network.Agent {
			a := &agent{
				conn:    conn,
				gate:    gate,
				r:       bufio.NewReader(conn),
				w:       bufio.NewWriter(conn),
				isclose: false,
			}
			return a
		}
	}
	var tcpServer *network.TCPServer
	if gate.TCPAddr != "" {
		tcpServer = new(network.TCPServer)
		tcpServer.Addr = gate.TCPAddr
		tcpServer.MaxConnNum = gate.MaxConnNum
		tcpServer.Tls = gate.Tls
		tcpServer.CertFile = gate.CertFile
		tcpServer.KeyFile = gate.KeyFile
		// Each accepted TCP connection is wrapped in a buffered agent.
		tcpServer.NewAgent = func(conn *network.TCPConn) network.Agent {
			a := &agent{
				conn:    conn,
				gate:    gate,
				r:       bufio.NewReader(conn),
				w:       bufio.NewWriter(conn),
				isclose: false,
			}
			return a
		}
	}
	if wsServer != nil {
		wsServer.Start()
	}
	if tcpServer != nil {
		tcpServer.Start()
	}
	// Block until the module is asked to stop, then close the listeners.
	<-closeSig
	if wsServer != nil {
		wsServer.Close()
	}
	if tcpServer != nil {
		tcpServer.Close()
	}
}

// OnDestroy tears the module down.
func (gate *Gate) OnDestroy() {
	gate.BaseModule.OnDestroy() // this call is required
}
|
package server
import (
"chaplapp/provider"
"chaplapp/renderer"
"chaplapp/strategy"
"log"
"net/http"
)
// handler serves the planned assignment list as a minimal HTML page.
type handler struct {
	meetingProvider  provider.MeetingProvider        // source of meetings passed to the strategy
	chairmanProvider provider.ChairmanProvider       // source of chairmen passed to the strategy
	strategy         strategy.PlanningStrategy       // produces the assignments served by ServeHTTP
	renderer         renderer.AssignmentListRenderer // renders assignments to HTML
}
// ServeHTTP plans the assignments and renders them inside a bare HTML
// shell. On planning failure it answers 500 instead of terminating the
// whole process (the original called log.Fatal, which exits the program
// on a single failed request).
func (this handler) ServeHTTP(response http.ResponseWriter, _ *http.Request) {
	// "err" instead of "error": the original shadowed the builtin type name.
	assignments, err := this.strategy.Plan(this.meetingProvider, this.chairmanProvider)
	if err != nil {
		log.Println("Error rendering assignments:", err)
		http.Error(response, "internal server error", http.StatusInternalServerError)
		return
	}
	response.Write([]byte("<html><body>"))
	response.Write([]byte(this.renderer.Render(assignments)))
	response.Write([]byte("</body></html>"))
}
// NewHttpHandler wires the providers, planning strategy, and renderer into
// an http.Handler that serves the rendered assignment list.
func NewHttpHandler(meetingProvider provider.MeetingProvider,
	chairmanProvider provider.ChairmanProvider,
	strategy strategy.PlanningStrategy,
	renderer renderer.AssignmentListRenderer) http.Handler {
	return handler{
		meetingProvider:  meetingProvider,
		chairmanProvider: chairmanProvider,
		strategy:         strategy,
		renderer:         renderer,
	}
}
|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package batcheval
import (
"context"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
// init registers RefreshRange as the read-only evaluator for
// roachpb.RefreshRange requests.
func init() {
	RegisterReadOnlyCommand(roachpb.RefreshRange, DefaultDeclareKeys, RefreshRange)
}

// RefreshRange checks whether the key range specified has any values written in
// the interval [args.RefreshFrom, header.Timestamp]. It returns an error if
// any committed value or foreign intent in that interval is found, and an
// empty Result otherwise.
func RefreshRange(
	ctx context.Context, reader storage.Reader, cArgs CommandArgs, resp roachpb.Response,
) (result.Result, error) {
	args := cArgs.Args.(*roachpb.RefreshRangeRequest)
	h := cArgs.Header
	if h.Txn == nil {
		return result.Result{}, errors.AssertionFailedf("no transaction specified to %s", args.Method())
	}
	// We're going to refresh up to the transaction's read timestamp.
	if h.Timestamp != h.Txn.WriteTimestamp {
		// We're expecting the read and write timestamp to have converged before the
		// Refresh request was sent.
		log.Fatalf(ctx, "expected provisional commit ts %s == read ts %s. txn: %s", h.Timestamp,
			h.Txn.WriteTimestamp, h.Txn)
	}
	refreshTo := h.Timestamp
	refreshFrom := args.RefreshFrom
	if refreshFrom.IsEmpty() {
		return result.Result{}, errors.AssertionFailedf("empty RefreshFrom: %s", args)
	}
	// Iterate over values until we discover any value written at or after the
	// original timestamp, but before or at the current timestamp. Note that we
	// iterate inconsistently, meaning that intents - including our own - are
	// collected separately and the callback is only invoked on the latest
	// committed version. Note also that we include tombstones, which must be
	// considered as updates on refresh.
	log.VEventf(ctx, 2, "refresh %s @[%s-%s]", args.Span(), refreshFrom, refreshTo)
	intents, err := storage.MVCCIterate(
		ctx, reader, args.Key, args.EndKey, refreshTo,
		storage.MVCCScanOptions{
			Inconsistent: true,
			Tombstones:   true,
		},
		func(kv roachpb.KeyValue) error {
			if ts := kv.Value.Timestamp; refreshFrom.LessEq(ts) {
				return errors.Errorf("encountered recently written key %s @%s", kv.Key, ts)
			}
			return nil
		})
	if err != nil {
		return result.Result{}, err
	}
	// Check if any intents which are not owned by this transaction were written
	// at or beneath the refresh timestamp.
	for _, i := range intents {
		// Ignore our own intents.
		if i.Txn.ID == h.Txn.ID {
			continue
		}
		// Return an error if an intent was written to the span.
		return result.Result{}, errors.Errorf("encountered recently written intent %s @%s",
			i.Key, i.Txn.WriteTimestamp)
	}
	return result.Result{}, nil
}
|
package model
import (
"fmt"
"github.com/grafana/tempo/pkg/tempopb"
"github.com/gogo/protobuf/proto"
)
// CurrentEncoding is a string representing the encoding that all new blocks should be created with
// "" = tempopb.Trace
// "v1" = tempopb.TraceBytes
const CurrentEncoding = "v1"

// TracePBEncoding is a string that represents the original TracePBEncoding. Pass this if you know that the
// bytes are encoded *tracepb.Trace
const TracePBEncoding = ""

// allEncodings lists every encoding Unmarshal/marshal understand; it is
// used for testing.
var allEncodings = []string{
	CurrentEncoding,
	TracePBEncoding,
}
// Unmarshal converts a byte slice of the passed encoding into a
// *tempopb.Trace. Supported encodings are TracePBEncoding ("") and
// CurrentEncoding ("v1"); anything else is an error. The named constants
// replace the original's duplicated string literals so the switch stays in
// sync with the declarations above.
func Unmarshal(obj []byte, dataEncoding string) (*tempopb.Trace, error) {
	trace := &tempopb.Trace{}
	switch dataEncoding {
	case TracePBEncoding: // bytes are a marshalled tempopb.Trace
		if err := proto.Unmarshal(obj, trace); err != nil {
			return nil, err
		}
	case CurrentEncoding: // bytes are a tempopb.TraceBytes wrapping traces
		traceBytes := &tempopb.TraceBytes{}
		if err := proto.Unmarshal(obj, traceBytes); err != nil {
			return nil, err
		}
		// Merge the batches of every wrapped trace into one trace.
		for _, innerBytes := range traceBytes.Traces {
			innerTrace := &tempopb.Trace{}
			if err := proto.Unmarshal(innerBytes, innerTrace); err != nil {
				return nil, err
			}
			trace.Batches = append(trace.Batches, innerTrace.Batches...)
		}
	default:
		return nil, fmt.Errorf("unrecognized dataEncoding in Unmarshal %s", dataEncoding)
	}
	return trace, nil
}
// marshal converts a tempopb.Trace into a byte slice encoded using dataEncoding
// nolint:interfacer
func marshal(trace *tempopb.Trace, dataEncoding string) ([]byte, error) {
	switch dataEncoding {
	case TracePBEncoding: // "": marshal the trace directly
		return proto.Marshal(trace)
	case CurrentEncoding: // "v1": wrap the marshalled trace in a TraceBytes
		traceBytes := &tempopb.TraceBytes{}
		buf, err := proto.Marshal(trace)
		if err != nil {
			return nil, err
		}
		traceBytes.Traces = append(traceBytes.Traces, buf)
		return proto.Marshal(traceBytes)
	default:
		// Fixed copy/paste: the original error message said "in Unmarshal".
		return nil, fmt.Errorf("unrecognized dataEncoding in marshal %s", dataEncoding)
	}
}
|
package main
import "fmt"
// switchOnType prints the dynamic type category of x: "int", "string", or
// "unknown" for anything else (including rune/int32).
func switchOnType(x interface{}) {
	label := "unknown"
	switch x.(type) {
	case int:
		label = "int"
	case string:
		label = "string"
	}
	fmt.Println(label)
}
// main exercises switchOnType with an int, a string, and a rune (the rune
// falls through to the default branch).
func main() {
	for _, v := range []interface{}{5, "blebleble", 'f'} {
		switchOnType(v)
	}
}
|
// Copyright 2018 The adeia authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package adeia_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
"github.com/seibert-media/adeia"
"github.com/seibert-media/adeia/domain"
"github.com/seibert-media/adeia/mocks"
)
// TestSyncer hooks the ginkgo suite into the standard go test runner.
func TestSyncer(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Syncer Suite")
}

// Specs for Syncer.Sync: fetching domains, converting them to ingresses,
// applying the result, and error propagation from each stage.
var _ = Describe("Syncer", func() {
	var (
		fetcher   *mocks.IngressFetcher
		applier   *mocks.IngressApplier
		converter *mocks.IngressCreator
		syncer    *adeia.Syncer
	)
	// Fresh mocks and a freshly wired syncer before every spec.
	BeforeEach(func() {
		fetcher = &mocks.IngressFetcher{}
		applier = &mocks.IngressApplier{}
		converter = &mocks.IngressCreator{}
		syncer = &adeia.Syncer{
			Applier: applier,
			Fetcher: fetcher,
			Creator: converter,
		}
	})
	Describe("Sync", func() {
		It("calls Ingress fetcher", func() {
			Expect(fetcher.FetchCallCount()).To(Equal(0))
			Expect(syncer.Sync()).To(BeNil())
			Expect(fetcher.FetchCallCount()).To(Equal(1))
		})
		It("return error when fetch fails", func() {
			fetcher.FetchReturns(nil, errors.New("Failed"))
			Expect(syncer.Sync()).NotTo(BeNil())
		})
		It("calls applier", func() {
			Expect(applier.ApplyCallCount()).To(Equal(0))
			syncer.Sync()
			Expect(applier.ApplyCallCount()).To(Equal(1))
		})
		It("does not apply if fetch fails", func() {
			fetcher.FetchReturns(nil, errors.New("Failed"))
			Expect(applier.ApplyCallCount()).To(Equal(0))
			syncer.Sync()
			Expect(applier.ApplyCallCount()).To(Equal(0))
		})
		It("gives the fetched domains to apply", func() {
			list := []domain.Domain{"A", "B"}
			fetcher.FetchReturns(list, nil)
			syncer.Sync()
			Expect(converter.CreateArgsForCall(0)).To(Equal(list))
		})
		It("returns apply error", func() {
			applier.ApplyReturns(errors.New("Failed"))
			Expect(syncer.Sync()).NotTo(BeNil())
		})
	})
})
|
package dev
import (
devspacecontext "github.com/loft-sh/devspace/pkg/devspace/context"
"github.com/loft-sh/devspace/pkg/devspace/pipeline/types"
"github.com/loft-sh/devspace/pkg/devspace/server"
"github.com/loft-sh/devspace/pkg/util/log"
"github.com/mgutz/ansi"
"github.com/sirupsen/logrus"
)
// UI starts the DevSpace UI server on localhost (on the given port when
// non-zero, otherwise an automatically chosen one) and optionally prints
// the UI address. The server is closed when the context is done.
// NOTE(review): when server.NewServer fails this only logs a warning and
// returns (nil, nil), so callers checking just the error receive a nil
// *server.Server — confirm this best-effort behavior is intended.
func UI(ctx devspacecontext.Context, port int, showUI bool, pipeline types.Pipeline) (*server.Server, error) {
	var defaultPort *int
	if port != 0 {
		defaultPort = &port
	}
	// Create server
	uiLogger := log.GetFileLogger("ui")
	serv, err := server.NewServer(ctx.WithLogger(uiLogger), "localhost", false, defaultPort, pipeline)
	if err != nil {
		ctx.Log().Warnf("Couldn't start UI server: %v", err)
	} else {
		// Start server
		go func() {
			_ = serv.ListenAndServe()
		}()
		// Shut the server down once the surrounding context is cancelled.
		go func() {
			<-ctx.Context().Done()
			_ = serv.Server.Close()
		}()
		if showUI {
			ctx.Log().WriteString(logrus.InfoLevel, "\n#########################################################\n")
			ctx.Log().Infof("DevSpace UI available at: %s", ansi.Color("http://"+serv.Server.Addr, "white+b"))
			ctx.Log().WriteString(logrus.InfoLevel, "#########################################################\n\n")
		}
	}
	return serv, nil
}
|
package selector
import (
"fmt"
"strings"
"github.com/layer5io/meshery/models/pattern/core"
)
const (
	// CoreResource marks Meshery core pattern resources.
	CoreResource = "pattern.meshery.io/core"
	// MeshResource marks service-mesh workload pattern resources.
	MeshResource = "pattern.meshery.io/mesh/workload"
	// K8sResource marks native Kubernetes pattern resources.
	K8sResource = "pattern.meshery.io/k8s"
)

// Helpers abstracts the environment lookups the Selector depends on.
type Helpers interface {
	GetServiceMesh() (name string, version string)
	GetAPIVersionForKind(kind string) string
}

// Selector resolves pattern resources using the supplied Helpers.
type Selector struct {
	helpers Helpers
}

// New returns a Selector backed by the given helpers.
func New(helpers Helpers) *Selector {
	return &Selector{
		helpers: helpers,
	}
}
// GetAnnotationsForWorkload derives annotation key/value pairs from the
// workload's OAM metadata. The "@type" entry (with '/' replaced by '.')
// prefixes every other metadata key and is itself excluded from the
// result. Without an "@type" entry the result is empty.
func GetAnnotationsForWorkload(w core.WorkloadCapability) map[string]string {
	annotations := map[string]string{}
	metadata := w.OAMDefinition.Spec.Metadata
	typ, ok := metadata["@type"]
	if !ok {
		return annotations
	}
	prefix := strings.ReplaceAll(typ, "/", ".")
	for key, value := range metadata {
		if key == "@type" {
			continue
		}
		annotations[fmt.Sprintf("%s.%s", prefix, key)] = value
	}
	return annotations
}
// generateDefinitionKey builds the registry key for an OAM definition of
// the given kind and name. It factors out the format string the original
// duplicated in both generators.
func generateDefinitionKey(kind, name string) string {
	return fmt.Sprintf(
		"/meshery/registry/definition/%s/%s/%s",
		"core.oam.dev/v1alpha1",
		kind,
		name,
	)
}

// generateWorkloadKey returns the registry key for a WorkloadDefinition.
func generateWorkloadKey(name string) string {
	return generateDefinitionKey("WorkloadDefinition", name)
}

// generateTraitKey returns the registry key for a TraitDefinition.
func generateTraitKey(name string) string {
	return generateDefinitionKey("TraitDefinition", name)
}
|
package main
import (
"fmt"
)
// main demonstrates rotating two copies of the same slice left and right
// by three positions. Note the original array is printed before the
// (in-place) shifts mutate it.
func main() {
	array1 := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	array2 := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	fmt.Printf("Original array :%v", array1)
	left := leftShiftArray(array1, 3)
	right := rightShiftArray(array2, 3)
	fmt.Println("")
	fmt.Printf("Left shifted :%v", left)
	fmt.Println("")
	fmt.Printf("Right shifted :%v", right)
}
// leftShiftArray rotates array left by shift positions in place and
// returns it. shift may exceed len(array) (the effective rotation is
// shift % len); empty slices and non-positive shifts are returned
// unchanged. The original panicked on a positive shift of an empty slice
// and performed the rotation in O(n*shift); this version is O(n).
func leftShiftArray(array []int, shift int) []int {
	n := len(array)
	if n == 0 || shift <= 0 {
		return array
	}
	shift %= n
	if shift == 0 {
		return array
	}
	// Save the prefix, slide the remainder down, restore the prefix at the
	// tail. copy handles the overlapping slide correctly.
	head := make([]int, shift)
	copy(head, array[:shift])
	copy(array, array[shift:])
	copy(array[n-shift:], head)
	return array
}
// rightShiftArray rotates array right by shift positions in place and
// returns it. shift may exceed len(array) (the effective rotation is
// shift % len); empty slices and non-positive shifts are returned
// unchanged. The original panicked on a positive shift of an empty slice
// and performed the rotation in O(n*shift); this version is O(n).
func rightShiftArray(array []int, shift int) []int {
	n := len(array)
	if n == 0 || shift <= 0 {
		return array
	}
	shift %= n
	if shift == 0 {
		return array
	}
	// Save the tail, slide the prefix up, restore the tail at the front.
	// copy handles the overlapping slide correctly.
	tail := make([]int, shift)
	copy(tail, array[n-shift:])
	copy(array[shift:], array[:n-shift])
	copy(array, tail)
	return array
}
|
package gohotdraw
import (
_ "fmt"
)
// DrawingView is the visual surface a Drawing is displayed on. It tracks
// the current figure selection, locates handles under a point, and
// delegates painting to an update strategy.
type DrawingView interface {
	GetDrawing() Drawing
	SetDrawing(drawing Drawing)
	GetGraphics() Graphics
	SetGraphics(g Graphics)
	SetEditor(editor DrawingEditor)
	Add(figure Figure) Figure
	Remove(figure Figure) Figure
	// Selection management and change notification.
	AddFigureSelectionListener(l FigureSelectionListener)
	RemoveFigureSelectionListener(l FigureSelectionListener)
	FireSelectionChanged()
	IsFigureSelected(figure Figure) bool
	AddToSelection(figure Figure)
	RemoveFromSelection(figure Figure)
	ClearSelection()
	AddAllToSelection(figures *Set)
	GetSelection() *Set
	ToggleSelection(figure Figure)
	FindHandle(p *Point) Handle
	SetUpdateStrategy(strategy Painter)
	// Paints the drawing view. The actual drawing is delegated to
	// the current update strategy.
	Repaint()
	// Draws the contents of the drawing view.
	// The view has three layers: background, drawing, handles.
	// The layers are drawn in back to front order.
	Draw(g Graphics)
	//drawDrawing(g Graphics)
}

// StandardDrawingView is the default DrawingView implementation.
type StandardDrawingView struct {
	drawing            Drawing       // drawing currently displayed
	eventHandler       *EventHandler // registered as the drawing's figure listener
	editor             DrawingEditor // owning editor
	updateStrategy     Painter       // painter Repaint delegates to
	graphics           Graphics      // target graphics context
	selection          *Set          // currently selected figures
	selectionListeners *Set          // registered FigureSelectionListeners
	selectionHandles   *Set          // lazily built handle cache; nil means stale
}
// NewStandardDrawingView creates a view with an attached event handler, a
// simple update strategy, and empty selection state.
func NewStandardDrawingView() *StandardDrawingView {
	view := &StandardDrawingView{
		updateStrategy:     &SimpleUpdateStrategy{},
		selection:          NewSet(),
		selectionListeners: NewSet(),
	}
	// The event handler needs a back-reference to the view it serves.
	view.eventHandler = NewEventHandler(view)
	return view
}
// GetDrawing returns the drawing currently displayed by this view.
func (this *StandardDrawingView) GetDrawing() Drawing {
	return this.drawing
}

// SetDrawing replaces the displayed drawing, registers the view's event
// handler on the new drawing, and repaints.
func (this *StandardDrawingView) SetDrawing(drawing Drawing) {
	if this.drawing != nil {
		//TODO this.ClearSelection()
		//TODO this.drawing.RemoveFigureListener(this)
	}
	this.drawing = drawing
	if this.drawing != nil {
		this.drawing.AddFigureListener(this.eventHandler)
	}
	//TODO this.CheckMinimumSize()
	this.Repaint()
}

// SetEditor sets the owning editor.
func (this *StandardDrawingView) SetEditor(editor DrawingEditor) {
	this.editor = editor
}

// SetGraphics sets the graphics context used for painting.
func (this *StandardDrawingView) SetGraphics(g Graphics) {
	this.graphics = g
}

// GetGraphics returns the graphics context used for painting.
func (this *StandardDrawingView) GetGraphics() Graphics {
	return this.graphics
}

// SetUpdateStrategy sets the painter that Repaint delegates to.
func (this *StandardDrawingView) SetUpdateStrategy(p Painter) {
	this.updateStrategy = p
}

// Add adds figure to the underlying drawing.
func (this *StandardDrawingView) Add(figure Figure) Figure {
	return this.drawing.Add(figure)
}

// Remove removes figure from the underlying drawing.
func (this *StandardDrawingView) Remove(figure Figure) Figure {
	return this.drawing.Remove(figure)
}

// GetSelection returns a copy of the current selection set.
func (this *StandardDrawingView) GetSelection() *Set {
	return this.selection.Clone()
}

// IsFigureSelected reports whether figure is currently selected.
func (this *StandardDrawingView) IsFigureSelected(figure Figure) bool {
	return this.selection.Contains(figure)
}
// AddToSelection selects figure if it is not selected yet, invalidating
// the cached handle set and notifying selection listeners.
func (this *StandardDrawingView) AddToSelection(figure Figure) {
	if this.IsFigureSelected(figure) {
		return
	}
	this.selection.Push(figure)
	this.selectionHandles = nil
	this.FireSelectionChanged()
}

// AddAllToSelection selects every figure of the given set.
func (this *StandardDrawingView) AddAllToSelection(figures *Set) {
	for f := range figures.Iter() {
		this.AddToSelection(f.(Figure))
	}
}
// RemoveFromSelection deselects figure, invalidating the cached handle set
// and notifying listeners. Two fixes versus the original: (1) the guard
// was inverted (!IsFigureSelected), so a figure that WAS selected was
// never actually removed; (2) after Delete(i) the next element slides into
// slot i, so the index is decremented to avoid skipping it.
func (this *StandardDrawingView) RemoveFromSelection(figure Figure) {
	if this.IsFigureSelected(figure) {
		for i := 0; i < this.selection.Len(); i++ {
			currentFigure := this.selection.At(i).(Figure)
			if currentFigure == figure {
				this.selection.Delete(i)
				i--
			}
		}
		this.selectionHandles = nil
		this.FireSelectionChanged()
	}
}
// ClearSelection empties the selection and notifies listeners.
// NOTE(review): the early-return guard tests selectionHandles (the lazily
// built handle cache) rather than the selection itself; if the cache was
// never computed, a non-empty selection is left untouched — confirm this
// is intended.
func (this *StandardDrawingView) ClearSelection() {
	if this.selectionHandles == nil {
		return
	}
	this.selection = NewSet()
	this.selectionHandles = nil
	this.FireSelectionChanged()
}
// ToggleSelection inverts the selection state of figure.
// NOTE(review): AddToSelection/RemoveFromSelection already fire the
// selection-changed event, so listeners are notified twice per toggle —
// confirm the trailing FireSelectionChanged is intentional.
func (this *StandardDrawingView) ToggleSelection(figure Figure) {
	if this.IsFigureSelected(figure) {
		this.RemoveFromSelection(figure)
	} else {
		this.AddToSelection(figure)
	}
	this.FireSelectionChanged()
}
// AddFigureSelectionListener registers l for selection-change events.
func (this *StandardDrawingView) AddFigureSelectionListener(l FigureSelectionListener) {
	this.selectionListeners.Add(l)
}

// RemoveFigureSelectionListener unregisters the first occurrence of l.
func (this *StandardDrawingView) RemoveFigureSelectionListener(l FigureSelectionListener) {
	for i := 0; i < this.selectionListeners.Len(); i++ {
		if this.selectionListeners.At(i).(FigureSelectionListener) == l {
			this.selectionListeners.Delete(i)
			return
		}
	}
}
// FireSelectionChanged notifies every registered listener that this view's
// selection changed.
func (this *StandardDrawingView) FireSelectionChanged() {
	listeners := this.selectionListeners
	for i := 0; i < listeners.Len(); i++ {
		listeners.At(i).(FigureSelectionListener).FigureSelectionChanged(this)
	}
}
// GetSelectionHandles returns the handles of every selected figure,
// building and caching the set on first use. Methods that mutate the
// selection reset the cache to nil to force a rebuild here.
func (this *StandardDrawingView) GetSelectionHandles() *Set {
	if this.selectionHandles == nil {
		this.selectionHandles = NewSet()
		selectedFigures := this.GetSelection()
		for f := 0; f < selectedFigures.Len(); f++ {
			currentFigure := selectedFigures.At(f).(Figure)
			currentHandles := currentFigure.GetHandles()
			for h := 0; h < currentHandles.Len(); h++ {
				this.selectionHandles.Push(currentHandles.At(h))
			}
		}
	}
	return this.selectionHandles
}
// FindHandle returns the first selection handle containing p, or nil when
// no handle is under the point.
func (this *StandardDrawingView) FindHandle(p *Point) Handle {
	handles := this.GetSelectionHandles()
	for i := 0; i < handles.Len(); i++ {
		if h := handles.At(i).(Handle); HandleContainsPoint(h, p) {
			return h
		}
	}
	return nil
}
// drawHandles renders every handle of the current selection.
func (this *StandardDrawingView) drawHandles(g Graphics) {
	handles := this.GetSelectionHandles()
	for i := 0; i < handles.Len(); i++ {
		DrawHandle(handles.At(i).(Handle), g)
	}
}
// Draw paints the view's layers in back-to-front order: background, then
// the drawing itself (foreground), then the selection handles. (The old
// comment also listed a separate "drawing" layer that does not exist.)
func (this *StandardDrawingView) Draw(g Graphics) {
	this.drawBackground(g)
	this.drawForeground(g)
	this.drawHandles(g)
}

// drawBackground fills the whole window with white.
func (this *StandardDrawingView) drawBackground(g Graphics) {
	width := g.GetWindowSize().Width
	height := g.GetWindowSize().Height
	//fmt.Printf("height: %v, width: %v\n", width, height)
	g.SetFGColor(White)
	//fmt.Println("paint background")
	g.DrawRectFromRect(&Rectangle{0, 0, width, height})
}
//Draws the drawing of the view
func (this *StandardDrawingView) drawForeground(g Graphics) {
	this.drawing.Draw(g)
}

// Repaint delegates painting to the update strategy; it is a no-op until
// both a graphics context and an update strategy are set.
func (this *StandardDrawingView) Repaint() {
	if this.graphics != nil {
		if this.updateStrategy != nil {
			this.updateStrategy.Draw(this.graphics, this)
		}
	}
}
|
package models
import (
"github.com/inimbir/onpu-data-grabber/app/clients"
)
// HashTag represents a single hashtag value handled by the grabber.
type HashTag struct {
	Value string
}

// NewHashtag returns an empty HashTag.
func NewHashtag() *HashTag {
	return &HashTag{}
}

// hashTagsChainOfResponsibilities is the fixed processing pipeline applied
// to hashtags: IsValid -> IsNotExists -> PrepareData -> CheckSimilarity ->
// ExtractHashtags -> SaveHashTagData.
var hashTagsChainOfResponsibilities = &IsValidHandler{
	next: &IsNotExistsHandler{
		next: &PrepareDataHandler{
			next: &CheckSimilarityHandler{
				next: &ExtractHashtagsHandler{
					next: &SaveHashTagDataHandler{}}}}}}

// GetHandlers returns the head of the hashtag processing chain.
func (tag HashTag) GetHandlers() HashTagHandler {
	return hashTagsChainOfResponsibilities
}

// Exists reports whether the hashtag is already stored.
// NOTE(review): the persistence lookup is still a TODO, so this currently
// always returns true.
func (tag HashTag) Exists() bool {
	//@todo
	//return clients.GetMongoDb().Exists(tag)
	return true
}

//func (tag Group) Update() (bool, error) {
//	return clients.GetMongoDb().Update(tag)
//}

// GetCriterias returns the confirmed-tweet hashtags stored for the group.
func (tag HashTag) GetCriterias(group string) (criterias []string, err error) {
	return clients.GetMongoDb().GetHashTagsByGroup(group, clients.ConfirmedTweets)
}
|
package main
import (
"fmt"
"os"
"strconv"
"strings"
"encoding/csv"
)
// Letters is the initial lineup of dancing programs (AoC 2017 day 16).
const Letters = "abcdefghijklmnop"

// LetterCount is the number of dancing programs.
const LetterCount = len(Letters)

// Dances is the total number of whole dances to simulate for part 2.
const Dances = 1000000000

// firstPosition is the virtual start of the line; spin moves this cursor
// instead of shifting every element.
var firstPosition = 0

// letterMap maps a program name to its absolute position.
var letterMap = make(map[string]int)

// positionMap maps an absolute position to the program standing there.
var positionMap = make(map[int]string)

// init seeds both maps with the initial lineup.
func init() {
	for i, c := range Letters {
		letterMap[string(c)] = i
		positionMap[i] = string(c)
	}
}

// main solves both parts: a single dance for part 1, then keeps dancing
// until the lineup returns to the start (a cycle), and finishes part 2
// with only Dances % cycleLength additional dances.
func main() {
	printResults()
	instructions := readInput()
	part1(instructions)
	fmt.Println("Part 1 answer:")
	printResults()
	cyclesAt := 0
	for i := 2; ; i++ {
		part1(instructions)
		if danceResults() == Letters {
			fmt.Println("It's a cycle at iteration", i)
			cyclesAt = i
			break
		}
	}
	// Only the remainder after full cycles affects the final lineup.
	for j := 1; j <= Dances%cyclesAt; j++ {
		part1(instructions)
	}
	fmt.Println("Part 2 answer:")
	printResults()
}
// part1 applies one full pass of dance instructions to the global state.
// Instruction formats: sN (spin N), xA/B (exchange by position), pA/B
// (partner by name). Atoi errors are ignored, treating bad operands as 0.
func part1(instructions []string) {
	for _, instruction := range instructions {
		operation := instruction[0]
		operands := instruction[1:]
		switch operation {
		case 's':
			{
				spinSize, _ := strconv.Atoi(operands)
				spin(spinSize)
			}
		case 'x':
			{
				positions := strings.Split(operands, "/")
				position1, _ := strconv.Atoi(positions[0])
				position2, _ := strconv.Atoi(positions[1])
				exchange(position1, position2)
			}
		case 'p':
			{
				partners := strings.Split(operands, "/")
				partner(partners[0], partners[1])
			}
		}
	}
}
// danceResults reconstructs the current program order as a string, reading
// absolute positions starting from the virtual first position.
func danceResults() string {
	var b strings.Builder
	for i := 0; i < LetterCount; i++ {
		b.WriteString(positionMap[(i+firstPosition)%LetterCount])
	}
	return b.String()
}

// printResults prints the current program order.
func printResults() {
	fmt.Println(danceResults())
}
// spin rotates every program right by spinSize by moving the virtual line
// start backwards instead of touching the maps.
func spin(spinSize int) {
	firstPosition = (firstPosition + LetterCount - spinSize) % LetterCount
}

// exchange swaps the programs standing at the two logical positions,
// keeping both maps consistent.
func exchange(position1 int, position2 int) {
	i := (position1 + firstPosition) % LetterCount
	j := (position2 + firstPosition) % LetterCount
	positionMap[i], positionMap[j] = positionMap[j], positionMap[i]
	letterMap[positionMap[i]] = i
	letterMap[positionMap[j]] = j
}

// partner swaps the two named programs wherever they stand.
func partner(letter1 string, letter2 string) {
	letterMap[letter1], letterMap[letter2] = letterMap[letter2], letterMap[letter1]
	positionMap[letterMap[letter1]] = letter1
	positionMap[letterMap[letter2]] = letter2
}
// readInput reads the single comma-separated line of dance moves from
// stdin and returns the individual instructions. Read errors are ignored.
func readInput() []string {
	instructions, _ := csv.NewReader(os.Stdin).Read()
	return instructions
}
package cli
import (
"github.com/thePhilGuy/grove/git"
urfaveCli "gopkg.in/urfave/cli.v1"
)
// Initialize builds the grove CLI application with its commands wired to
// the git package actions.
func Initialize() *urfaveCli.App {
	app := urfaveCli.NewApp()
	app.Name = "grove"
	app.Usage = "Work across multiple git repositories"
	app.Version = "0.0.1"

	branchSubcommands := []urfaveCli.Command{
		{
			Name:    "list",
			Aliases: []string{"ls", "l"},
			Usage:   "list branches in current repository",
			Action:  git.ListBranches,
		},
	}
	app.Commands = []urfaveCli.Command{
		{
			Name:   "check",
			Usage:  "check if the current directory is a git repository",
			Action: git.CheckRepository,
		},
		{
			Name:        "init",
			Usage:       "initializes a grove repository",
			Description: "Initializes a git repository in the current directory if there isn't one, then adds it to the current grove branch (default master).",
			Action:      git.InitializeRepository,
		},
		{
			Name:        "branch",
			Usage:       "list, create, or delete branches",
			Subcommands: branchSubcommands,
		},
	}
	return app
}
|
package base
import (
"jean/rtda/jvmstack"
)
// Branch transfers control by setting the frame's next PC to the current
// thread PC plus the signed branch offset.
func Branch(frame *jvmstack.Frame, offset int) {
	frame.SetNextPC(frame.Thread().PC() + offset)
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package arc
import (
"context"
"regexp"
"strings"
"time"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/testing"
)
const (
// RemovableMediaUUID is the (fake) UUID of the removable media volume for
// testing. It is defined in
// Chromium's components/arc/volume_mounter/arc_volume_mounter_bridge.cc.
RemovableMediaUUID = "00000000000000000000000000000000DEADBEEF"
// MyFilesUUID is the UUID of the ARC MyFiles volume. It is defined in
// Chromium's components/arc/volume_mounter/arc_volume_mounter_bridge.cc.
MyFilesUUID = "0000000000000000000000000000CAFEF00D2019"
// VolumeProviderContentURIPrefix is the prefix of the URIs of files served by
// ArcVolumeProvider.
VolumeProviderContentURIPrefix = "content://org.chromium.arc.volumeprovider/"
// stubVolumeIDRegex is regex for volume IDs of StubVolumes (MyFiles,
// removable media) in ARC.
stubVolumeIDRegex = `(stub:)?[0-9]+`
// sdCardVolumeIDRegex is regex for the volume ID of the sdcard volume in ARC.
sdCardVolumeIDRegex = `emulated(;0)?`
)
// waitForARCVolumeStatusAndGetVolumeID waits until a volume whose ID, state,
// and UUID match the given regular expressions appears inside ARC, and
// returns that volume's ID. It polls the output of "adb sm list-volumes" and
// returns the first whitespace-delimited token of the first matching line.
func waitForARCVolumeStatusAndGetVolumeID(ctx context.Context, a *ARC, id, state, uuid string) (string, error) {
	// Regular expression that matches the output line for the specified
	// volume. Each line of "adb sm list-volumes" output is of the form:
	//   <volume id><space(s)><mount status><space(s)><volume UUID>
	// Examples:
	//   emulated;0 mounted null
	//   1821167369 ejecting 00000000000000000000000000000000DEADBEEF
	//   stub:18446744073709551614 unmounted 0000000000000000000000000000CAFEF00D2019
	re := regexp.MustCompile(id + `\s+` + state + `\s+` + uuid)
	var volumeID string
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		out, err := a.Command(ctx, "sm", "list-volumes").Output(testexec.DumpLogOnError)
		if err != nil {
			// A failing sm invocation will not fix itself; stop polling.
			return testing.PollBreak(errors.Wrap(err, "sm command failed"))
		}
		if matchedLine := re.Find(out); matchedLine != nil {
			// The volume ID is everything up to the first space.
			volumeID = strings.Split(string(matchedLine), " ")[0]
			return nil
		}
		return errors.Errorf("no matching volume found for ID %q, state %q, UUID %q", id, state, uuid)
	}, &testing.PollOptions{Timeout: 30 * time.Second}); err != nil {
		return "", err
	}
	return volumeID, nil
}
// waitForARCVolumeMount waits for a volume to reach the "mounted" state
// inside ARC via the sm command. Checking mountinfo alone is not sufficient
// because the FUSE layer in Android R+ needs extra time to become ready
// after /storage/<UUID> has become a mountpoint.
func waitForARCVolumeMount(ctx context.Context, a *ARC, id, uuid string) error {
	if _, err := waitForARCVolumeStatusAndGetVolumeID(ctx, a, id, "mounted", uuid); err != nil {
		return err
	}
	return nil
}
// WaitForARCRemovableMediaVolumeMount waits until the (fake) removable media
// volume used for testing is mounted inside ARC.
func WaitForARCRemovableMediaVolumeMount(ctx context.Context, a *ARC) error {
	testing.ContextLog(ctx, "Waiting for the removable volume to be mounted in ARC")
	err := waitForARCVolumeMount(ctx, a, stubVolumeIDRegex, RemovableMediaUUID)
	return err
}
// WaitForARCMyFilesVolumeMount waits until the MyFiles volume is mounted
// inside ARC.
func WaitForARCMyFilesVolumeMount(ctx context.Context, a *ARC) error {
	testing.ContextLog(ctx, "Waiting for the MyFiles volume to be mounted in ARC")
	err := waitForARCVolumeMount(ctx, a, stubVolumeIDRegex, MyFilesUUID)
	return err
}
// WaitForARCSDCardVolumeMount waits until the sdcard volume is mounted
// inside ARC. The sdcard volume reports "null" as its UUID.
func WaitForARCSDCardVolumeMount(ctx context.Context, a *ARC) error {
	testing.ContextLog(ctx, "Waiting for the sdcard volume to be mounted in ARC")
	err := waitForARCVolumeMount(ctx, a, sdCardVolumeIDRegex, "null")
	return err
}
// waitForARCVolumeUnmount waits until the given volume reports the
// "unmounted" state inside ARC.
func waitForARCVolumeUnmount(ctx context.Context, a *ARC, id, uuid string) error {
	if _, err := waitForARCVolumeStatusAndGetVolumeID(ctx, a, id, "unmounted", uuid); err != nil {
		return err
	}
	return nil
}
// WaitForARCRemovableMediaVolumeUnmount waits until the (fake) removable
// media test volume is unmounted inside ARC.
func WaitForARCRemovableMediaVolumeUnmount(ctx context.Context, a *ARC) error {
	testing.ContextLog(ctx, "Waiting for the removable volume to be unmounted in ARC")
	err := waitForARCVolumeUnmount(ctx, a, stubVolumeIDRegex, RemovableMediaUUID)
	return err
}
// WaitForARCMyFilesVolumeUnmount waits until the MyFiles volume is unmounted
// inside ARC.
func WaitForARCMyFilesVolumeUnmount(ctx context.Context, a *ARC) error {
	testing.ContextLog(ctx, "Waiting for the MyFiles volume to be unmounted in ARC")
	err := waitForARCVolumeUnmount(ctx, a, stubVolumeIDRegex, MyFilesUUID)
	return err
}
// WaitForARCSDCardVolumeUnmount waits until the sdcard volume is unmounted
// inside ARC. The sdcard volume reports "null" as its UUID.
func WaitForARCSDCardVolumeUnmount(ctx context.Context, a *ARC) error {
	testing.ContextLog(ctx, "Waiting for the sdcard volume to be unmounted in ARC")
	err := waitForARCVolumeUnmount(ctx, a, sdCardVolumeIDRegex, "null")
	return err
}
// WaitForARCMyFilesVolumeMountIfARCVMEnabled waits for the MyFiles volume to
// be mounted inside ARC when ARCVM is enabled, and is a no-op otherwise.
// Tests that read or write ARC's Download folder need this because Downloads
// integration in ARCVM depends on the MyFiles mount.
func WaitForARCMyFilesVolumeMountIfARCVMEnabled(ctx context.Context, a *ARC) error {
	vmEnabled, err := VMEnabled()
	if err != nil {
		return errors.Wrap(err, "failed to check whether ARCVM is enabled")
	}
	if vmEnabled {
		return WaitForARCMyFilesVolumeMount(ctx, a)
	}
	return nil
}
// MyFilesVolumeID returns the volume ID of the MyFiles volume, first waiting
// for the volume to be mounted if it is not mounted yet.
func MyFilesVolumeID(ctx context.Context, a *ARC) (string, error) {
	id, err := waitForARCVolumeStatusAndGetVolumeID(ctx, a, stubVolumeIDRegex, "mounted", MyFilesUUID)
	return id, err
}
// SDCardVolumeID returns the volume ID of the sdcard volume
// (/storage/emulated/0). Although the ID itself is constant, this waits for
// the volume to be mounted first so the returned ID is guaranteed usable
// inside ARC.
func SDCardVolumeID(ctx context.Context, a *ARC) (string, error) {
	id, err := waitForARCVolumeStatusAndGetVolumeID(ctx, a, sdCardVolumeIDRegex, "mounted", "null")
	return id, err
}
|
package main
import (
"bytes"
"fmt"
)
// comma inserts a comma every three digits counting from the right, e.g.
// "1234567" -> "1,234,567". An empty input yields an empty output; the
// original implementation panicked on "" by slicing s[0:3] past the end.
func comma(s string) string {
	n := len(s)
	if n == 0 {
		return ""
	}
	// Width of the leading group: 1..3 characters.
	head := n % 3
	if head == 0 {
		head = 3
	}
	var buf bytes.Buffer
	buf.WriteString(s[:head])
	for i := head; i < n; i += 3 {
		buf.WriteString(",")
		buf.WriteString(s[i : i+3])
	}
	return buf.String()
}
// main formats a sample number with thousands separators and prints it.
func main() {
	formatted := comma("1234567")
	fmt.Print(formatted)
}
|
package main
import "fmt"
// employee holds payroll state for a single worker.
type employee struct {
	salary float32 // current salary
}

// giveRaise returns the salary increased by the given fraction (e.g. 0.8
// means +80%). It does not modify the stored salary.
func (em *employee) giveRaise(per float32) float32 {
	factor := 1.0 + per
	return em.salary * factor
}
// main applies an 80% raise to two sample employees (one addressed by value,
// one by pointer) and prints the results.
func main() {
	byValue := employee{3000.0}
	fmt.Println(byValue.giveRaise(0.8))
	byPointer := &employee{18000.0}
	fmt.Println(byPointer.giveRaise(0.8))
}
|
package main
import (
"designPattern/AAA_singleton/emperor2"
)
// main fetches the emperor singleton, mutates it, then fetches it three more
// times to show that every reference observes the same mutated state.
func main() {
	first := emperor2.GetInstance()
	first.Name = "haha"
	first.Say()
	for i := 0; i < 3; i++ {
		instance := emperor2.GetInstance()
		instance.Say()
	}
}
|
package download
import (
"net/http"
"spider/utils/context"
)
// Downloader fetches a spider request and produces its response;
// implementations own the transport details.
type Downloader interface {
	Download(*context.Request) (*context.Response, error)
}
// PageDownloader is an http.Client-backed Downloader for web pages.
type PageDownloader struct {
	client *http.Client // underlying HTTP client; never nil after NewPageDownloader
}
// NewPageDownloader wraps the given HTTP client in a PageDownloader; a nil
// client is replaced by a default http.Client.
func NewPageDownloader(client *http.Client) *PageDownloader {
	if client != nil {
		return &PageDownloader{client}
	}
	return &PageDownloader{client: &http.Client{}}
}
// Download performs the request with the wrapped client. POST requests (any
// of the spellings "POST"/"post"/"Post") submit the request's form body;
// every other method, including GET, falls back to a plain GET fetch.
func (d *PageDownloader) Download(req *context.Request) (*context.Response, error) {
	url := req.GetUrl()
	body := req.GetPostBody()
	switch req.GetMethod() {
	case "POST", "post", "Post":
		httpResp, err := d.client.PostForm(url, body)
		if err != nil {
			return nil, err
		}
		return context.NewResponse(req, httpResp), nil
	default:
		// "GET" in any recognized casing and all other methods end up here.
		httpResp, err := d.client.Get(url)
		if err != nil {
			return nil, err
		}
		return context.NewResponse(req, httpResp), nil
	}
}
|
package main
import (
"flag"
"zenhack.net/go/tempest/internal/common/types"
"zenhack.net/go/tempest/internal/server/database"
"zenhack.net/go/tempest/internal/server/tokenutil"
"zenhack.net/go/util"
)
// Command-line flags describing the credential to attach to the new account.
var (
	typ = flag.String("type", "", "credential type to use")
	scopedID = flag.String("id", "", "type-specific credential id")
	roleStr = flag.String("role", string(types.RoleUser), "role the user should have")
)
// main creates a new account with a random 128-bit ID, assigns it the role
// and login credential described by the flags, and commits the result to the
// database. Any failure aborts via util.Chkfatal.
func main() {
	// Generated up front; the account ID does not depend on any flag.
	accountID := types.AccountID(tokenutil.Gen128Base64())
	flag.Parse()
	role := types.Role(*roleStr)
	if !role.IsValid() {
		panic("Invalid role: " + role)
	}
	db, err := database.Open()
	util.Chkfatal(err)
	defer db.Close()
	tx, err := db.Begin()
	util.Chkfatal(err)
	// Undoes partial work on early exit; presumably a no-op after a
	// successful Commit — verify against the database package's contract.
	defer tx.Rollback()
	util.Chkfatal(tx.AddAccount(database.NewAccount{
		ID:   accountID,
		Role: role,
	}))
	util.Chkfatal(tx.AddCredential(database.NewCredential{
		AccountID: accountID,
		Login:     true,
		Credential: types.Credential{
			Type:     types.CredentialType(*typ),
			ScopedID: *scopedID,
		},
	}))
	util.Chkfatal(tx.Commit())
}
|
package main
import (
"encoding/csv"
"fmt"
"github.com/tebeka/selenium/chrome"
"log"
"os"
"strings"
"time"
"github.com/tebeka/selenium"
"gopkg.in/gomail.v2"
)
const (
port = 8066
)
var webDriver selenium.WebDriver
var service *selenium.Service
const urlBeijing = "https://www.che168.com/beijing/list/#pvareaid=104646"
var (
writer *csv.Writer
dateTime string
)
// main wires the crawl pipeline: open the CSV sink, boot headless Chrome,
// then walk the listing pages.
func main() {
	SetupWriter()
	StartChrome()
	StartCrawler()
}
// StartChrome launches headless Chrome through chromedriver and navigates to
// the Beijing listing page, leaving the live session in the package-level
// webDriver for the crawler to use. Fatal setup failures panic.
func StartChrome() {
	var err error
	opts := []selenium.ServiceOption{}
	caps := selenium.Capabilities{
		"browserName": "chrome",
	}
	// Disable image loading to speed up page rendering.
	imagCaps := map[string]interface{}{
		"profile.managed_default_content_settings.images": 2,
	}
	chromeCaps := chrome.Capabilities{
		Prefs: imagCaps,
		Path:  "",
		Args: []string{
			"--headless", // run Chrome in headless mode
			"--no-sandbox",
			"--user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/604.4.7 (KHTML, like Gecko) Version/11.0.2 Safari/604.4.7", // spoof a desktop user agent to evade anti-crawler checks
		},
	}
	caps.AddChrome(chromeCaps)
	// Start chromedriver on the configured port.
	// NOTE(review): a failure here is only logged; the NewRemote call below
	// will then fail and panic — consider aborting early instead.
	service, err = selenium.NewChromeDriverService("./chromedriver83.0.4103.39", port, opts...)
	if err != nil {
		log.Printf("Error starting the ChromeDriver server: %v", err)
	}
	// Attach a WebDriver session to the running chromedriver.
	webDriver, err = selenium.NewRemote(caps, fmt.Sprintf("http://localhost:%d/wd/hub", port))
	if err != nil {
		panic(err)
	}
	// Site-specific quirk: without this cookie the target site serves its
	// mobile layout on Linux. The AddCookie error is intentionally ignored.
	webDriver.AddCookie(&selenium.Cookie{
		Name:  "defaultJumpDomain",
		Value: "www",
	})
	// Navigate to the target listing page.
	err = webDriver.Get(urlBeijing)
	if err != nil {
		panic(fmt.Sprintf("Failed to load page: %s\n", err))
	}
	log.Println(webDriver.Title())
}
// StartCrawler walks the used-car listing pages: for each list item it opens
// the detail page, scrapes model/price/mileage/registration date/location/
// store (falling back to alternate XPaths for pages with a different layout
// and to "暂无" when a field is missing), appends a CSV row, then pages
// forward via the "next" button until that button disappears. Finally the
// finished CSV is emailed out.
func StartCrawler() {
	log.Println("Start Crawling at ", time.Now().Format("2006-01-02 15:04:05"))
	pageIndex := 0
	for {
		listContainer, err := webDriver.FindElement(selenium.ByXPATH, "//*[@id=\"viewlist_ul\"]")
		if err != nil {
			panic(err)
		}
		lists, err := listContainer.FindElements(selenium.ByClassName, "carinfo")
		if err != nil {
			panic(err)
		}
		log.Println("数据量:", len(lists))
		pageIndex++
		log.Printf("正在抓取第%d页数据...\n", pageIndex)
		for i := 0; i < len(lists); i++ {
			var urlElem selenium.WebElement
			// Page 1 uses a different li offset (i+13 instead of i+1) —
			// presumably to skip promo entries at the top; verify on site.
			if pageIndex == 1 {
				urlElem, err = webDriver.FindElement(selenium.ByXPATH, fmt.Sprintf("//*[@id='viewlist_ul']/li[%d]/a", i+13))
			} else {
				urlElem, err = webDriver.FindElement(selenium.ByXPATH, fmt.Sprintf("//*[@id='viewlist_ul']/li[%d]/a", i+1))
			}
			if err != nil {
				break
			}
			url, err := urlElem.GetAttribute("href")
			if err != nil {
				break
			}
			// Open the detail page for this car.
			webDriver.Get(url)
			title, _ := webDriver.Title()
			log.Printf("当前页面标题:%s\n", title)
			// Car model: missing element degrades to the placeholder "暂无".
			modelElem, err := webDriver.FindElement(selenium.ByXPATH, "/html/body/div[5]/div[2]/div[1]/h2")
			var model string
			if err != nil {
				log.Println(err)
				model = "暂无"
			} else {
				model, _ = modelElem.Text()
			}
			log.Printf("model=[%s]\n", model)
			// Price, suffixed with 万 (10,000 CNY units).
			priceElem, err := webDriver.FindElement(selenium.ByXPATH, "/html/body/div[5]/div[2]/div[2]/div/ins")
			var price string
			if err != nil {
				log.Println(err)
				price = "暂无"
			} else {
				price, _ = priceElem.Text()
				price = fmt.Sprintf("%s万", price)
			}
			log.Printf("price=[%s]\n", price)
			// Mileage: try the primary XPath, then an alternate page layout.
			milesElem, err := webDriver.FindElement(selenium.ByXPATH, "/html/body/div[5]/div[2]/div[4]/ul/li[1]/span")
			var miles string
			if err != nil {
				log.Println(err)
				milesElem, err := webDriver.FindElement(selenium.ByXPATH, "/html/body/div[5]/div[2]/div[3]/ul/li[1]/span")
				if err != nil {
					log.Println(err)
					miles = "暂无"
				} else {
					miles, _ = milesElem.Text()
				}
			} else {
				miles, _ = milesElem.Text()
			}
			log.Printf("miles=[%s]\n", miles)
			// First registration date, with the same layout fallback.
			timeElem, err := webDriver.FindElement(selenium.ByXPATH, "/html/body/div[5]/div[2]/div[4]/ul/li[2]/span")
			var date string
			if err != nil {
				log.Println(err)
				timeElem, err := webDriver.FindElement(selenium.ByXPATH, "/html/body/div[5]/div[2]/div[3]/ul/li[2]/span")
				if err != nil {
					log.Println(err)
					date = "暂无"
				} else {
					date, _ = timeElem.Text()
				}
			} else {
				date, _ = timeElem.Text()
			}
			log.Printf("time=[%s]\n", date)
			// Location, with the same layout fallback.
			positionElem, err := webDriver.FindElement(selenium.ByXPATH, "/html/body/div[5]/div[2]/div[4]/ul/li[4]/span")
			var position string
			if err != nil {
				log.Println(err)
				positionElem, err := webDriver.FindElement(selenium.ByXPATH, "/html/body/div[5]/div[2]/div[3]/ul/li[4]/span")
				if err != nil {
					log.Println(err)
					position = "暂无"
				} else {
					position, _ = positionElem.Text()
				}
			} else {
				position, _ = positionElem.Text()
			}
			log.Printf("position=[%s]\n", position)
			// Store name, stripped of boilerplate labels.
			storeElem, err := webDriver.FindElement(selenium.ByXPATH, "/html/body/div[5]/div[2]/div[1]/div/div/div")
			var store string
			if err != nil {
				log.Println(err)
				store = "暂无"
			} else {
				store, _ = storeElem.Text()
				store = strings.Replace(store, "商家|", "", -1)
				if strings.Contains(store, "金牌店铺") {
					store = strings.Replace(store, "金牌店铺", "", -1)
				}
			}
			log.Printf("store=[%s]\n", store)
			// Persist the row immediately so a crash loses at most one car.
			writer.Write([]string{model, miles, date, price, position, store})
			writer.Flush()
			// Return to the listing page for the next item.
			webDriver.Back()
		}
		log.Printf("第%d页数据已经抓取完毕,开始下一页...\n", pageIndex)
		// No "next" button means we reached the last page.
		nextButton, err := webDriver.FindElement(selenium.ByClassName, "page-item-next")
		if err != nil {
			log.Println("所有数据抓取完毕!")
			break
		}
		nextButton.Click()
	}
	log.Println("Crawling Finished at ", time.Now().Format("2006-01-02 15:04:05"))
	sendResult(dateTime)
}
// SetupWriter creates data/<timestamp>.csv, writes a UTF-8 BOM so
// spreadsheet apps detect the encoding of the Chinese headers, and installs
// a package-level csv.Writer preloaded with the header row. The file handle
// is deliberately kept open for the whole run.
func SetupWriter() {
	// Go's reference-time layout (famously the language's "birth" timestamp).
	// NOTE(review): the timestamp contains ':' which is not a legal filename
	// character on Windows — confirm this only runs on Unix-like systems.
	dateTime = time.Now().Format("2006-01-02 15:04:05")
	os.Mkdir("data", os.ModePerm)
	csvFile, err := os.Create(fmt.Sprintf("data/%s.csv", dateTime))
	if err != nil {
		panic(err)
	}
	// UTF-8 byte order mark.
	csvFile.WriteString("\xEF\xBB\xBF")
	writer = csv.NewWriter(csvFile)
	writer.Write([]string{"车型", "行驶里程", "首次上牌", "价格", "所在地", "门店"})
}
// sendResult emails data/<fileName>.csv as an attachment to the hard-coded
// recipients via the 163.com SMTP relay. Failures are logged, not returned.
func sendResult(fileName string) {
	email := gomail.NewMessage()
	email.SetAddressHeader("From", "re**ng@163.com", "张**")
	email.SetHeader("To", email.FormatAddress("li**yang@163.com", "李**"))
	email.SetHeader("Cc", email.FormatAddress("zhang**tao@163.net", "张**"))
	email.SetHeader("Subject", "二手车之家-北京-二手车信息")
	email.SetBody("text/plain;charset=UTF-8", "本周抓取到的二手车信息数据,请注意查收!\n")
	email.Attach(fmt.Sprintf("data/%s.csv", fileName))
	dialer := &gomail.Dialer{
		Host: "smtp.163.com",
		Port: 25,
		// SECURITY(review): SMTP credentials are hard-coded in source;
		// move them to environment variables or a config file.
		Username: "gitxuzan@126.com",
		Password: "2725",
		SSL: false,
	}
	if err := dialer.DialAndSend(email); err != nil {
		log.Println("邮件发送失败!err: ", err)
		return
	}
	log.Println("邮件发送成功!")
}
|
package main
import (
"fmt"
"time"
)
// main fires a producer and a consumer goroutine over a buffered channel and
// gives them a second to run before the process exits.
func main() {
	ch := make(chan int, 1024)
	fmt.Println("Sending value to channel")
	go send(ch, 42)
	fmt.Println("Receiving from channel")
	go receive(ch)
	// Crude synchronization: sleep long enough for both goroutines to run.
	time.Sleep(time.Second * 1)
}
// send pushes two fixed values followed by val into the channel, in order.
func send(ch chan int, val int) {
	for _, v := range []int{10, 20, val} {
		ch <- v
	}
}
// receive drains the channel, printing every value, and returns once the
// channel is closed and empty.
func receive(ch chan int) {
	for {
		val, ok := <-ch
		if !ok {
			return
		}
		fmt.Printf("Value Received=%d in receive function\n", val)
	}
}
|
package vector
import "math"
// degreesToRadias is the degrees→radians conversion factor (π/180).
// NOTE(review): the name has a typo ("Radias"); it is unexported, so fixing
// it is a package-local rename.
var (
	degreesToRadias = math.Pi / 180
)
// NewVector creates a vector from the given component values.
func NewVector(vals ...float64) Vector {
	return Vector{vals: vals}
}

// Vector is an n-dimensional vector backed by a slice of components.
type Vector struct {
	vals []float64
}
// GetVals returns the vector's component slice.
// NOTE(review): the slice is returned without copying, so callers can mutate
// the vector through it — confirm that is intended.
func (v1 Vector) GetVals() []float64 {
	return v1.vals
}
// convertDegreesToRadians converts an angle in degrees to radians.
func convertDegreesToRadians(d float64) float64 {
	radians := d * degreesToRadias
	return radians
}
// Add returns the component-wise sum of the two vectors; it panics when the
// dimensions differ.
func (v1 Vector) Add(v2 Vector) Vector {
	if len(v1.vals) != len(v2.vals) {
		panic("Cannot add vectors that don't represent equal dimensions")
	}
	sum := Vector{vals: make([]float64, len(v1.vals))}
	for i := range v1.vals {
		sum.vals[i] = v1.vals[i] + v2.vals[i]
	}
	return sum
}
// Subtract returns the component-wise difference v1 - v2; it panics when the
// dimensions differ.
func (v1 Vector) Subtract(v2 Vector) Vector {
	if len(v1.vals) != len(v2.vals) {
		panic("Cannot subtract vectors that don't represent equal dimensions")
	}
	diff := Vector{vals: make([]float64, len(v1.vals))}
	for i := range v1.vals {
		diff.vals[i] = v1.vals[i] - v2.vals[i]
	}
	return diff
}
// Multiply returns the component-wise (Hadamard) product of the two vectors;
// it panics when the dimensions differ.
func (v1 Vector) Multiply(v2 Vector) Vector {
	if len(v1.vals) != len(v2.vals) {
		// Fixed copy-paste: the panic message previously said "subtract".
		panic("Cannot multiply vectors that don't represent equal dimensions")
	}
	v := Vector{vals: make([]float64, len(v1.vals))}
	for i, val := range v1.vals {
		v.vals[i] = val * v2.vals[i]
	}
	return v
}
// Scale returns a copy of the vector with every component multiplied by
// scalar.
func (v1 Vector) Scale(scalar float64) Vector {
	scaled := make([]float64, len(v1.vals))
	for i := range v1.vals {
		scaled[i] = v1.vals[i] * scalar
	}
	return Vector{vals: scaled}
}
// CrossProduct is intended to compute the cross product of two vectors.
// NOTE(review): unimplemented stub — it always returns the zero Vector and
// ignores both operands. The cross product is only defined as a vector for
// 3D inputs; the 2D case would be a scalar, so this API needs rethinking.
func (v1 Vector) CrossProduct(v2 Vector) Vector {
	// magnitude of v1 * magnitude of v2 sin(angle between them)
	// don't know how to implement for 2D
	return Vector{}
}
// DotProduct returns the inner product of the two vectors; it panics when
// the dimensions differ.
func (v1 Vector) DotProduct(v2 Vector) float64 {
	if len(v1.vals) != len(v2.vals) {
		panic("Cannot take dot product vectors that don't represent equal dimensions")
	}
	sum := 0.0
	for i := range v1.vals {
		sum += v1.vals[i] * v2.vals[i]
	}
	return sum
}
// RotateAboutTail rotates a 2D vector about its tail (the origin) by the
// given clockwise angle (radians) and returns the rotated vector; it panics
// for non-2D vectors.
func (v1 Vector) RotateAboutTail(clockWiseAngleInRadians float64) Vector {
	if len(v1.vals) != 2 {
		panic("Rotate only implemented for 2D vectors")
	}
	// The standard rotation matrix rotates counter-clockwise, so negate the
	// angle to rotate clockwise.
	cos := math.Cos(-clockWiseAngleInRadians)
	sin := math.Sin(-clockWiseAngleInRadians)
	x1 := v1.vals[0]
	y1 := v1.vals[1]
	// [x2]   [cos -sin] [x1]
	// [y2] = [sin  cos] [y1]
	x2 := (cos * x1) - (sin * y1)
	y2 := (sin * x1) + (cos * y1)
	return Vector{vals: []float64{x2, y2}}
}
// AsUnitVector maps each component to its sign (-1, 0, or +1).
// NOTE(review): despite the name, this is NOT normalization — e.g. (3, 4)
// yields (1, 1) with magnitude √2, not 1. Confirm whether callers want this
// sign vector or true normalization (v.Scale(1/|v|)).
func (v1 Vector) AsUnitVector() (unitV Vector) {
	for _, val := range v1.vals {
		var v float64
		if val > 0 {
			v = 1
		} else if val < 0 {
			v = -1
		}
		// else remains as 0
		unitV.vals = append(unitV.vals, v)
	}
	return
}
// Abs returns a vector whose components are the absolute values of the
// receiver's components.
func (v1 Vector) Abs() Vector {
	out := make([]float64, len(v1.vals))
	for i := range v1.vals {
		out[i] = math.Abs(v1.vals[i])
	}
	return Vector{vals: out}
}
|
package pkg
// InvalidBlockError is raised when we find an invalid block.
type InvalidBlockError struct {
	message string // human-readable description, returned by Error()
	block Block // the block that failed validation
}
// NewInvalidBlockError returns an error that formats as the given text and
// records the offending block. Fixed: the block argument was previously
// accepted but silently dropped, leaving the block field always zero.
func NewInvalidBlockError(message string, block *Block) *InvalidBlockError {
	e := &InvalidBlockError{
		message: message,
	}
	if block != nil {
		e.block = *block
	}
	return e
}
// Error returns the stored message, satisfying the error interface.
func (e *InvalidBlockError) Error() string {
	return e.message
}
// BlockNotFoundError is raised when we can't find an index in the block.
type BlockNotFoundError struct {
	message string // human-readable description, returned by Error()
	block Block // the block that was searched
}
// NewBlockNotFoundError returns an error that formats as the given text.
// NOTE(review): the index argument is ignored and the struct's block field
// is never populated — either store the index (needs a field) or drop the
// parameter.
func NewBlockNotFoundError(message string, index int) *BlockNotFoundError {
	return &BlockNotFoundError{
		message: message,
	}
}
// Error returns the stored message, satisfying the error interface.
func (e *BlockNotFoundError) Error() string {
	return e.message
}
// EnvNotFoundError is an error type presumably raised when an environment
// entry cannot be found. (The original comment was copy-pasted from the
// block errors above — confirm intended semantics with callers.)
type EnvNotFoundError struct {
	message string // human-readable description, returned by Error()
}
// NewEnvNotFoundError returns an error that formats as the given text.
func NewEnvNotFoundError(message string) *EnvNotFoundError {
	err := EnvNotFoundError{message: message}
	return &err
}
// Error returns the stored message, satisfying the error interface.
func (e *EnvNotFoundError) Error() string {
	return e.message
}
|
package main
import (
"syscall"
"unsafe"
)
// High-resolution timing via the Windows performance counter API in
// kernel32.dll; the DLL and procs are resolved lazily on first call.
var (
	modkernel32 = syscall.NewLazyDLL("kernel32.dll")
	queryPerformanceFrequencyProc = modkernel32.NewProc("QueryPerformanceFrequency")
	queryPerformanceCounterProc = modkernel32.NewProc("QueryPerformanceCounter")
)
// QPC returns the current raw tick count from QueryPerformanceCounter
// (the original comment claimed time.Duration; the value is int64 ticks —
// divide by QPCFrequency() to get seconds). The syscall's return values are
// ignored, so on failure the result stays 0.
func QPC() int64 {
	var now int64
	syscall.Syscall(queryPerformanceCounterProc.Addr(), 1, uintptr(unsafe.Pointer(&now)), 0, 0)
	return now
}
// QPCFrequency returns the performance-counter frequency in ticks per
// second. It panics if QueryPerformanceFrequency reports failure
// (first return value 0).
func QPCFrequency() int64 {
	var freq int64
	r1, _, _ := syscall.Syscall(queryPerformanceFrequencyProc.Addr(), 1, uintptr(unsafe.Pointer(&freq)), 0, 0)
	if r1 == 0 {
		panic("call failed")
	}
	return freq
}
|
// Copyright (c) 2020 StackRox Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
package pipeconn
import (
"context"
"encoding/binary"
"io"
"net"
"sync"
"sync/atomic"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// u32BE encodes v as 4 big-endian bytes.
func u32BE(v uint32) []byte {
	out := make([]byte, 4)
	binary.BigEndian.PutUint32(out, v)
	return out
}
// TestNetwork verifies that the pipe address reports the package's Network
// constant.
func TestNetwork(t *testing.T) {
	t.Parallel()
	assert.Equal(t, Network, pipeAddr.Network())
}
// TestPipeListener_Connections exercises 10 concurrent client/server pairs
// over one pipe listener. Each side folds its own index plus the peer's into
// a sum; the sums are cross-checked per connection and the atomic totals
// compared at the end, so a cross-wired, dropped, or corrupted connection
// surfaces as a mismatch.
func TestPipeListener_Connections(t *testing.T) {
	t.Parallel()
	lis, dialCtx := NewPipeListener()
	var clientSum uint32
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			conn, err := dialCtx(context.Background())
			require.NoError(t, err)
			sum := uint32(idx)
			// 1) client -> server: the client's index.
			_, err = conn.Write(u32BE(sum))
			assert.NoError(t, err)
			var buf [4]byte
			// 2) server -> client: the server side's index.
			_, err = io.ReadFull(conn, buf[:])
			assert.NoError(t, err)
			sum += binary.BigEndian.Uint32(buf[:])
			// 3) client -> server: the combined sum for verification.
			_, err = conn.Write(u32BE(sum))
			assert.NoError(t, err)
			assert.NoError(t, conn.Close())
			atomic.AddUint32(&clientSum, sum)
		}(i)
	}
	var serverSum uint32
	for i := 0; i < 10; i++ {
		conn, err := lis.Accept()
		require.NoError(t, err)
		wg.Add(1)
		go func(idx int, conn net.Conn) {
			defer wg.Done()
			sum := uint32(idx)
			var buf [4]byte
			_, err := io.ReadFull(conn, buf[:])
			assert.NoError(t, err)
			sum += binary.BigEndian.Uint32(buf[:])
			_, err = conn.Write(u32BE(uint32(idx)))
			assert.NoError(t, err)
			_, err = io.ReadFull(conn, buf[:])
			assert.NoError(t, err)
			// The client's reported sum must match our independent one.
			assert.Equal(t, sum, binary.BigEndian.Uint32(buf[:]))
			atomic.AddUint32(&serverSum, sum)
			// The client closed its end; the next read must be a clean EOF.
			n, err := io.ReadFull(conn, buf[:])
			assert.Zero(t, n)
			assert.Equal(t, io.EOF, err)
		}(i, conn)
	}
	wg.Wait()
	assert.Equal(t, serverSum, clientSum)
}
// TestPipeListener_Close checks the listener's terminal behavior: after
// Close, both Accept and dialing fail with ErrClosed, and a second Close
// reports ErrAlreadyClosed.
func TestPipeListener_Close(t *testing.T) {
	t.Parallel()
	lis, dialCtx := NewPipeListener()
	assert.NoError(t, lis.Close())
	conn, err := lis.Accept()
	assert.Nil(t, conn)
	assert.Equal(t, ErrClosed, err)
	conn, err = dialCtx(context.Background())
	assert.Nil(t, conn)
	assert.Equal(t, ErrClosed, err)
	assert.Equal(t, ErrAlreadyClosed, lis.Close())
}
|
package main
import (
	"fmt"
	"log"
	"net/http"
)
// html is the static landing page served verbatim for every request.
const html = `
<html>
<head>
</head>
<body>
<h3>Congrats, GO app was successfully configured!</h3>
The environment was configured using this installation script: <a target="_blank" href="https://github.com/bykovme/webgolangdo">github.com/bykovme/webgolangdo</a>
<br>
Find more interesting stuff here: <a target="_blank" href="https://bykov.tech/">bykov.tech</a>
</body>
</html>
`
// handler writes the static landing page to every request. Uses Fprint
// rather than Fprintf so the page content is never interpreted as a format
// string (flagged by go vet's printf check).
func handler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, html)
}
// main serves the landing page on :8080. ListenAndServe only returns on
// failure; the error was previously discarded, so a port conflict exited
// silently — now it is fatal and logged.
func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
|
package kgo
import (
"archive/tar"
"compress/gzip"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"mime"
"net/http"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"syscall"
)
// GetExt returns the file extension in lower case without the leading dot;
// it returns "" when the path has no extension.
func (kf *LkkFile) GetExt(path string) string {
	ext := filepath.Ext(path)
	if ext == "" {
		return ext
	}
	return strings.ToLower(ext[1:])
}
// GetContents reads the whole file at path and returns its bytes.
func (kf *LkkFile) GetContents(path string) ([]byte, error) {
	return ioutil.ReadFile(path)
}
// PutContents writes data to fpath (mode 0644), creating any missing parent
// directories (mode 0755) first.
func (kf *LkkFile) PutContents(fpath string, data []byte) error {
	dir := path.Dir(fpath)
	if dir != "" {
		if mkErr := os.MkdirAll(dir, 0755); mkErr != nil {
			return mkErr
		}
	}
	return ioutil.WriteFile(fpath, data, 0644)
}
// GetMime returns the MIME type of path. With fast=true it maps the file
// extension via the mime package; otherwise it sniffs the first 512 bytes
// with http.DetectContentType. Returns "" when the file cannot be read.
func (kf *LkkFile) GetMime(path string, fast bool) string {
	var res string
	if fast {
		res = mime.TypeByExtension(filepath.Ext(path))
	} else {
		srcFile, err := os.Open(path)
		if err != nil {
			return res
		}
		// Fixed: the opened file was previously never closed (fd leak).
		defer srcFile.Close()
		buffer := make([]byte, 512)
		if _, err = srcFile.Read(buffer); err != nil {
			return res
		}
		res = http.DetectContentType(buffer)
	}
	return res
}
// FileSize returns the size of path in bytes, or -1 when the file does not
// exist or cannot be accessed.
func (kf *LkkFile) FileSize(path string) int64 {
	info, err := os.Stat(path)
	if err != nil {
		return -1
	}
	return info.Size()
}
// DirSize returns the combined size in bytes of all non-directory entries
// under path, recursively. A walk error aborts the traversal and the
// partial sum is returned. Note: filepath.Walk is comparatively slow.
func (kf *LkkFile) DirSize(path string) int64 {
	var total int64
	_ = filepath.Walk(path, func(_ string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if !info.IsDir() {
			total += info.Size()
		}
		return nil
	})
	return total
}
// IsExist reports whether the path (file or directory) exists.
func (kf *LkkFile) IsExist(path string) bool {
	if _, err := os.Stat(path); err != nil {
		return os.IsExist(err)
	}
	return true
}
// IsWritable reports whether the path is writable by the current process.
// NOTE(review): the access mode reuses syscall.O_RDWR (value 2), which only
// happens to coincide with the POSIX W_OK constant — confirm on all target
// platforms.
func (kf *LkkFile) IsWritable(path string) bool {
	return syscall.Access(path, syscall.O_RDWR) == nil
}
// IsReadable reports whether the path is readable.
// NOTE(review): syscall.O_RDONLY is 0, i.e. F_OK — this actually tests bare
// existence, not read permission; consider using an explicit R_OK value.
func (kf *LkkFile) IsReadable(path string) bool {
	return syscall.Access(path, syscall.O_RDONLY) == nil
}
// IsExecutable reports whether file exists, is a regular file, and has any
// of the execute permission bits set.
func (kf *LkkFile) IsExecutable(file string) bool {
	info, err := os.Stat(file)
	if err != nil {
		return false
	}
	return info.Mode().IsRegular() && info.Mode()&0111 != 0
}
// IsFile reports whether path exists and is a regular file (symlinks and
// directories are excluded).
func (kf *LkkFile) IsFile(path string) bool {
	info, statErr := os.Stat(path)
	return statErr == nil && info.Mode().IsRegular()
}
// IsLink reports whether path exists and is a symbolic link. Lstat is used
// so the link itself is inspected, not its target.
func (kf *LkkFile) IsLink(path string) bool {
	info, err := os.Lstat(path)
	return err == nil && info.Mode()&os.ModeSymlink != 0
}
// IsDir reports whether path exists and is a directory.
func (kf *LkkFile) IsDir(path string) bool {
	info, err := os.Lstat(path)
	if err != nil {
		// Any Lstat failure (including non-existence) means "not a dir".
		return false
	}
	return info.IsDir()
}
// IsBinary reports whether path exists and its contents look binary; the
// classification is delegated to KConv.IsBinary and the whole file is read.
func (kf *LkkFile) IsBinary(path string) bool {
	data, err := kf.GetContents(path)
	return err == nil && KConv.IsBinary(string(data))
}
// IsImg reports whether the path's extension is a recognized image type.
func (kf *LkkFile) IsImg(path string) bool {
	switch kf.GetExt(path) {
	case "jpg", "jpeg", "bmp", "gif", "png", "svg", "ico":
		return true
	}
	return false
}
// Mkdir creates the directory filename with the given mode, including any
// missing parents (os.MkdirAll semantics; no error if it already exists).
func (kf *LkkFile) Mkdir(filename string, mode os.FileMode) error {
	return os.MkdirAll(filename, mode)
}
// AbsPath returns an absolute form of path; the path need not exist. When
// the working directory cannot be resolved (filepath.Abs consults
// os.Getwd), the path is rooted at "/" and cleaned instead.
func (kf *LkkFile) AbsPath(path string) string {
	if abs, err := filepath.Abs(path); err == nil {
		return abs
	}
	return filepath.Clean(filepath.Join(`/`, path))
}
// RealPath returns the cleaned absolute path for path, which must exist; it
// returns "" when the path does not exist or the working directory cannot
// be resolved.
func (kf *LkkFile) RealPath(path string) string {
	full := path
	if !filepath.IsAbs(full) {
		wd, err := os.Getwd()
		if err != nil {
			return ""
		}
		full = filepath.Clean(wd + `/` + full)
	}
	if _, err := os.Stat(full); err != nil {
		return ""
	}
	return full
}
// Touch creates (or truncates) the file at path with exactly size bytes,
// creating parent directories as needed; returns false on any failure.
// Fixed: the original guard was "size > 1", so Touch(path, 1) produced an
// empty file instead of a 1-byte one.
func (kf *LkkFile) Touch(path string, size int64) bool {
	// Ensure the parent directory exists.
	destDir := filepath.Dir(path)
	if destDir != "" && !kf.IsDir(destDir) {
		if err := os.MkdirAll(destDir, 0766); err != nil {
			return false
		}
	}
	fd, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		return false
	}
	defer fd.Close()
	if size > 0 {
		// Seek to the final byte and write one zero to extend the file.
		_, _ = fd.Seek(size-1, 0)
		_, _ = fd.Write([]byte{0})
	}
	return true
}
// Rename renames (moves) a file or directory; a thin wrapper around
// os.Rename, sharing its cross-device limitations.
func (kf *LkkFile) Rename(oldname, newname string) error {
	return os.Rename(oldname, newname)
}
// Unlink removes the named file (or empty directory), mirroring os.Remove.
func (kf *LkkFile) Unlink(filename string) error {
	return os.Remove(filename)
}
// CopyFile copies the regular file source to dest and returns the number of
// bytes copied. cover controls behavior when dest already exists:
// FILE_COVER_ALLOW overwrites, FILE_COVER_IGNORE returns (0, nil), and
// FILE_COVER_DENY returns an error. Files up to 1MiB are copied through a
// small buffer; larger files use io.Copy and also inherit the source mode.
func (kf *LkkFile) CopyFile(source string, dest string, cover LkkFileCover) (int64, error) {
	if source == dest {
		return 0, nil
	}
	sourceStat, err := os.Stat(source)
	if err != nil {
		return 0, err
	} else if !sourceStat.Mode().IsRegular() {
		return 0, fmt.Errorf("%s is not a regular file", source)
	}
	if cover != FILE_COVER_ALLOW {
		if _, err := os.Stat(dest); err == nil {
			if cover == FILE_COVER_IGNORE {
				return 0, nil
			} else if cover == FILE_COVER_DENY {
				return 0, fmt.Errorf("File %s already exists", dest)
			}
		}
	}
	// Fixed: the Open error was previously discarded, risking a nil
	// dereference on the subsequent Read; the handle is now also closed.
	sourceFile, err := os.Open(source)
	if err != nil {
		return 0, err
	}
	defer sourceFile.Close()
	// Create the destination directory when missing.
	destDir := filepath.Dir(dest)
	if destDir != "" && !kf.IsDir(destDir) {
		if err = os.MkdirAll(destDir, 0766); err != nil {
			return 0, err
		}
	}
	destFile, err := os.Create(dest)
	if err != nil {
		return 0, err
	}
	defer destFile.Close()
	var nBytes int64
	sourceSize := sourceStat.Size()
	if sourceSize <= 1048576 { // small files (<= 1MiB): buffered manual copy
		var total int
		bufferSize := 102400
		if sourceSize < 524288 {
			bufferSize = 51200
		}
		buf := make([]byte, bufferSize)
		for {
			n, rerr := sourceFile.Read(buf)
			if rerr != nil && rerr != io.EOF {
				return int64(total), rerr
			} else if n == 0 {
				break
			}
			if _, werr := destFile.Write(buf[:n]); werr != nil {
				return int64(total), werr
			}
			total += n
		}
		nBytes = int64(total)
	} else {
		nBytes, err = io.Copy(destFile, sourceFile)
		if err == nil {
			// Preserve the source file's permission bits.
			err = os.Chmod(dest, sourceStat.Mode())
		}
	}
	return nBytes, err
}
// FastCopy copies source to dest through a fixed 32KiB buffer with minimal
// checking, returning the number of bytes copied. The destination directory
// is created when missing.
// Fixed: both file handles were previously never closed (fd leak).
func (kf *LkkFile) FastCopy(source string, dest string) (int64, error) {
	sourceFile, err := os.Open(source)
	if err != nil {
		return 0, err
	}
	defer sourceFile.Close()
	// Create the destination directory when missing.
	destDir := filepath.Dir(dest)
	if !kf.IsDir(destDir) {
		if err = os.MkdirAll(destDir, 0766); err != nil {
			return 0, err
		}
	}
	destFile, err := os.Create(dest)
	if err != nil {
		return 0, err
	}
	defer destFile.Close()
	var nBytes int
	buf := make([]byte, 32768)
	for {
		n, rerr := sourceFile.Read(buf)
		if rerr != nil && rerr != io.EOF {
			return int64(nBytes), rerr
		} else if n == 0 {
			break
		}
		if _, werr := destFile.Write(buf[:n]); werr != nil {
			return int64(nBytes), werr
		}
		nBytes += n
	}
	return int64(nBytes), nil
}
// CopyLink recreates the symlink at source as a new symlink at dest with
// the same target; an existing dest is removed first (best effort) and
// missing parent directories are created.
func (kf *LkkFile) CopyLink(source string, dest string) error {
	if source == dest {
		return nil
	}
	target, err := os.Readlink(source)
	if err != nil {
		return err
	}
	if _, lerr := os.Lstat(dest); lerr == nil {
		_ = os.Remove(dest)
	}
	destDir := filepath.Dir(dest)
	if !kf.IsDir(destDir) {
		if mkErr := os.MkdirAll(destDir, 0766); mkErr != nil {
			return mkErr
		}
	}
	return os.Symlink(target, dest)
}
// CopyDir recursively copies directory source to dest and returns the total
// number of bytes copied. cover is forwarded to CopyFile for every regular
// file; symlinks are recreated best-effort; existing destination files are
// skipped unless cover is FILE_COVER_ALLOW.
// Fixed: the CopyFile error was previously assigned to a variable that
// shadowed the function-level err, so file-copy failures were silently
// dropped from both the byte-count logic and the returned error.
func (kf *LkkFile) CopyDir(source string, dest string, cover LkkFileCover) (int64, error) {
	var total, nBytes int64
	var err error
	sourceInfo, err := os.Stat(source)
	if err != nil {
		return 0, err
	} else if !sourceInfo.IsDir() {
		return 0, fmt.Errorf("%s is not a directory", source)
	}
	// Create the destination root with the source's mode.
	err = os.MkdirAll(dest, sourceInfo.Mode())
	if err != nil {
		return 0, err
	}
	directory, _ := os.Open(source)
	defer directory.Close()
	objects, err := directory.Readdir(-1)
	if err != nil {
		return 0, err
	}
	for _, obj := range objects {
		srcFilePath := filepath.Join(source, obj.Name())
		destFilePath := filepath.Join(dest, obj.Name())
		if obj.IsDir() {
			// Recurse into sub-directories.
			nBytes, err = kf.CopyDir(srcFilePath, destFilePath, cover)
		} else {
			destFileInfo, statErr := os.Stat(destFilePath)
			if statErr == nil {
				// Skip existing destinations unless overwriting is allowed,
				// and never copy a file onto itself.
				if cover != FILE_COVER_ALLOW || os.SameFile(obj, destFileInfo) {
					continue
				}
			}
			if obj.Mode()&os.ModeSymlink != 0 {
				// Symlink: recreate it; errors are deliberately best-effort.
				_ = kf.CopyLink(srcFilePath, destFilePath)
			} else {
				nBytes, err = kf.CopyFile(srcFilePath, destFilePath, cover)
			}
		}
		if err == nil {
			total += nBytes
		}
	}
	return total, err
}
// Img2Base64 读取图片文件,并转换为base64字符串
// Img2Base64 reads the image file at path and returns its contents as a
// "data:image/<ext>;base64,..." data-URI string. Non-image paths (per
// kf.IsImg) produce an error.
func (kf *LkkFile) Img2Base64(path string) (string, error) {
	if !kf.IsImg(path) {
		return "", fmt.Errorf("%s is not a image", path)
	}
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}
	encoded := base64.StdEncoding.EncodeToString(raw)
	return fmt.Sprintf("data:image/%s;base64,%s", kf.GetExt(path), encoded), nil
}
// DelDir 删除目录;delRoot为true时连该目录一起删除;为false时只清空该目录
// DelDir empties directory dir; when delRoot is true the directory itself
// is removed as well. Removal is best-effort across all entries, and the
// FIRST error encountered is returned.
func (kf *LkkFile) DelDir(dir string, delRoot bool) error {
	realPath := kf.AbsPath(dir)
	if !kf.IsDir(realPath) {
		return fmt.Errorf("Dir %s not exists", dir)
	}
	entries, err := ioutil.ReadDir(realPath)
	if err != nil {
		return err
	}
	// Fix: the original overwrote err on every iteration, so an early
	// failure was masked by any later successful removal; remember the
	// first error instead.
	var firstErr error
	for _, entry := range entries {
		// filepath.Join builds an OS-correct path (the original used
		// path.Join, which is meant for slash-separated paths only).
		if rmErr := os.RemoveAll(filepath.Join(realPath, entry.Name())); rmErr != nil && firstErr == nil {
			firstErr = rmErr
		}
	}
	//删除根节点(指定的目录) remove the root directory itself when requested
	if delRoot {
		if rmErr := os.RemoveAll(realPath); rmErr != nil && firstErr == nil {
			firstErr = rmErr
		}
	}
	return firstErr
}
// FileTree 获取目录的文件树列表;
// ftype为枚举(FILE_TREE_ALL、FILE_TREE_DIR、FILE_TREE_FILE);
// recursive为是否递归;
// filters为一个或多个文件过滤器函数,FileFilter类型
// FileTree lists the file tree under path. ftype selects what is collected
// (FILE_TREE_ALL / FILE_TREE_DIR / FILE_TREE_FILE), recursive descends into
// sub-directories, and filters are predicates every path must pass to be
// visited.
func (kf *LkkFile) FileTree(path string, ftype LkkFileTree, recursive bool, filters ...FileFilter) []string {
	var trees []string
	// A plain file or link is returned as-is (unless only dirs are wanted).
	if kf.IsFile(path) || kf.IsLink(path) {
		if ftype != FILE_TREE_DIR {
			trees = append(trees, path)
		}
		return trees
	}
	path = strings.TrimRight(path, "/")
	files, err := filepath.Glob(filepath.Join(path, "*"))
	if err != nil || len(files) == 0 {
		return trees
	}
	for _, file := range files {
		//文件过滤 apply every filter; the first rejection skips this entry
		chk := true
		for _, filter := range filters {
			chk = filter(file)
			if !chk {
				break
			}
		}
		if !chk {
			continue
		}
		if kf.IsDir(file) {
			if ftype != FILE_TREE_FILE {
				trees = append(trees, file)
			}
			if recursive {
				// Fix: the original dropped filters on recursion, so the
				// filters only applied at the top level; pass them down.
				subs := kf.FileTree(file, ftype, recursive, filters...)
				trees = append(trees, subs...)
			}
		} else if ftype != FILE_TREE_DIR {
			trees = append(trees, file)
		}
	}
	return trees
}
// FormatDir 格式化路径,将"\","//"替换为"/",且以"/"结尾
// FormatDir normalizes a directory path: backslashes become "/", repeated
// slashes collapse to one (via RegFormatDir), and the result always ends
// with a single "/". An empty input yields an empty string.
func (kf *LkkFile) FormatDir(path string) string {
	if path == "" {
		return ""
	}
	// 将"\"替换为"/" then collapse duplicate separators
	normalized := RegFormatDir.ReplaceAllString(strings.ReplaceAll(path, "\\", "/"), "/")
	return strings.TrimRight(normalized, "/") + "/"
}
// Md5 获取文件md5值,length指定结果长度32/16
// Md5 returns the hex MD5 digest of the file at path. When 0 < length < 32
// the first length characters of the 32-character digest are returned; any
// other length yields the full digest.
func (kf *LkkFile) Md5(path string, length uint8) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	// Stream the file through the hash to avoid loading it into memory.
	hasher := md5.New()
	if _, err = io.Copy(hasher, f); err != nil {
		return "", err
	}
	full := hex.EncodeToString(hasher.Sum(nil))
	if length > 0 && length < 32 {
		return full[:length], nil
	}
	return full, nil
}
// ShaX 计算文件的 shaX 散列值,x为1/256/512
// ShaX returns the shaX checksum of the file at path; x selects the
// algorithm variant (1, 256 or 512), dispatched by the shaXStr helper.
func (kf *LkkFile) ShaX(path string, x uint16) (string, error) {
	contents, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}
	return string(shaXStr(contents, x)), nil
}
// Pathinfo 获取文件路径的信息,options的值为-1: all; 1: dirname; 2: basename; 4: extension; 8: filename
// Pathinfo mirrors PHP's pathinfo(): options is a bitmask selecting which
// keys appear in the result map; -1 selects all of dirname, basename,
// extension and filename.
func (kf *LkkFile) Pathinfo(path string, options int) map[string]string {
	const (
		optDirname   = 1
		optBasename  = 2
		optExtension = 4
		optFilename  = 8
	)
	if options == -1 {
		options = optDirname | optBasename | optExtension | optFilename
	}
	info := make(map[string]string)
	if options&optDirname != 0 {
		info["dirname"] = filepath.Dir(path)
	}
	if options&optBasename != 0 {
		info["basename"] = filepath.Base(path)
	}
	if options&(optExtension|optFilename) != 0 {
		base := info["basename"]
		if options&optBasename == 0 {
			base = filepath.Base(path)
		}
		// Split at the last dot. A leading dot (e.g. ".bashrc") yields an
		// empty filename and the remainder as extension, matching PHP.
		var filename, extension string
		switch dot := strings.LastIndex(base, "."); {
		case dot > 0:
			filename, extension = base[:dot], base[dot+1:]
		case dot == 0:
			extension = base[1:]
		default:
			filename = base
		}
		if options&optExtension != 0 {
			info["extension"] = extension
		}
		if options&optFilename != 0 {
			info["filename"] = filename
		}
	}
	return info
}
// Basename 返回路径中的文件名部分
// Basename returns the last element of path; thin wrapper over filepath.Base.
func (kf *LkkFile) Basename(path string) string {
	return filepath.Base(path)
}
// Dirname 返回路径中的目录部分,注意空路径或无目录的返回"."
// Dirname returns the directory part of path; an empty path or a path with
// no directory component yields ".". Thin wrapper over filepath.Dir.
func (kf *LkkFile) Dirname(path string) string {
	return filepath.Dir(path)
}
// Filemtime 取得文件修改时间
// Filemtime returns the file's modification time as a Unix timestamp in
// seconds, or 0 with an error if the file cannot be stat-ed.
func (kf *LkkFile) Filemtime(filename string) (int64, error) {
	fi, err := os.Stat(filename)
	if err != nil {
		return 0, err
	}
	return fi.ModTime().Unix(), nil
}
// Glob 寻找与模式匹配的文件路径
// Glob returns the names of all files matching the shell pattern; thin
// wrapper over filepath.Glob (error is non-nil only for a malformed pattern).
func (kf *LkkFile) Glob(pattern string) ([]string, error) {
	return filepath.Glob(pattern)
}
// TarGz 打包压缩tar.gz;src为源文件或目录,dstTar为打包的路径名,ignorePatterns为要忽略的文件正则
// TarGz archives src (a file or directory) into the tar.gz file dstTar.
// ignorePatterns are regular expressions; any path matching one of them is
// excluded from the archive. Returns true on success.
func (kf *LkkFile) TarGz(src string, dstTar string, ignorePatterns ...string) (bool, error) {
	//过滤器,检查要忽略的文件 filter that rejects paths matching an ignore pattern
	var filter = func(file string) bool {
		res := true
		for _, pattern := range ignorePatterns {
			re, err := regexp.Compile(pattern)
			if err != nil {
				// invalid patterns are skipped rather than fatal
				continue
			}
			chk := re.MatchString(file)
			if chk {
				res = false
				break
			}
		}
		return res
	}
	src = kf.AbsPath(src)
	dstTar = kf.AbsPath(dstTar)
	dstDir := kf.Dirname(dstTar)
	if !kf.IsExist(dstDir) {
		_ = kf.Mkdir(dstDir, os.ModePerm)
	}
	files := kf.FileTree(src, FILE_TREE_ALL, true, filter)
	if len(files) == 0 {
		return false, fmt.Errorf("src no files to tar.gz")
	}
	// dest file write
	fw, err := os.Create(dstTar)
	if err != nil {
		return false, err
	}
	defer fw.Close()
	// gzip write (the deferred Closes are safety nets for error paths; the
	// success path closes explicitly below so flush errors are reported)
	gw := gzip.NewWriter(fw)
	defer gw.Close()
	// tar write
	tw := tar.NewWriter(gw)
	defer tw.Close()
	parentDir := filepath.Dir(src)
	for _, file := range files {
		// never try to pack the archive into itself
		if file == dstTar {
			continue
		}
		fi, err := os.Stat(file)
		if err != nil {
			continue
		}
		// store paths relative to src's parent directory
		newName := strings.Replace(file, parentDir, "", -1)
		// Create tar header
		hdr := new(tar.Header)
		hdr.Format = tar.FormatGNU
		if fi.IsDir() {
			// a trailing '/' plus TypeDir marks a directory entry; without
			// Typeflag set, errors occur when un-tar-gz-ing
			hdr.Name = newName + "/"
			hdr.Typeflag = tar.TypeDir
			hdr.Size = 0
			hdr.Mode = int64(fi.Mode())
			hdr.ModTime = fi.ModTime()
			// Write header
			if err := tw.WriteHeader(hdr); err != nil {
				return false, fmt.Errorf("DirErr: %s file:%s\n", err.Error(), file)
			}
		} else {
			// File reader. Fix: the original both deferred fr.Close() inside
			// the loop AND closed explicitly — double-closing each file and
			// accumulating one defer per file until the function returned.
			// Close exactly once on every path instead.
			fr, err := os.Open(file)
			if err != nil {
				return false, fmt.Errorf("OpenErr: %s file:%s\n", err.Error(), file)
			}
			hdr.Name = newName
			hdr.Size = fi.Size()
			hdr.Mode = int64(fi.Mode())
			hdr.ModTime = fi.ModTime()
			// Write header
			if err = tw.WriteHeader(hdr); err != nil {
				_ = fr.Close()
				return false, fmt.Errorf("FileErr: %s file:%s\n", err.Error(), file)
			}
			// Write file data
			_, err = io.Copy(tw, fr)
			_ = fr.Close()
			if err != nil {
				return false, fmt.Errorf("CopyErr: %s file:%s\n", err.Error(), file)
			}
		}
	}
	// Fix: relying on the deferred Closes swallowed their errors, so a
	// failed flush could still be reported as success; close the tar writer
	// then the gzip writer explicitly and surface any error.
	if err = tw.Close(); err != nil {
		return false, err
	}
	if err = gw.Close(); err != nil {
		return false, err
	}
	return true, nil
}
// UnTarGz 将tar.gz文件解压缩;srcTar为压缩包,dstDir为解压目录
// UnTarGz extracts the tar.gz archive srcTar into directory dstDir,
// creating dstDir if necessary. Entries whose names would escape dstDir
// (path traversal, "Zip Slip") are rejected. Returns true on success.
func (kf *LkkFile) UnTarGz(srcTar, dstDir string) (bool, error) {
	fr, err := os.Open(srcTar)
	if err != nil {
		return false, err
	}
	defer fr.Close()
	dstDir = strings.TrimRight(kf.AbsPath(dstDir), "/\\")
	if !kf.IsExist(dstDir) {
		err := kf.Mkdir(dstDir, os.ModePerm)
		if err != nil {
			return false, err
		}
	}
	// Gzip reader. Fix: the error was ignored, so a non-gzip input caused a
	// nil-reader panic instead of a clean error; also close the reader.
	gr, err := gzip.NewReader(fr)
	if err != nil {
		return false, err
	}
	defer gr.Close()
	// Tar reader
	tr := tar.NewReader(gr)
	cleanDst := filepath.Clean(dstDir)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// End of tar archive
			break
		} else if err != nil {
			return false, err
		}
		newPath := dstDir + "/" + strings.TrimLeft(hdr.Name, "/\\")
		// Security fix: refuse entries whose cleaned path escapes dstDir
		// (e.g. "../../etc/passwd" names in a crafted archive).
		if cleaned := filepath.Clean(newPath); cleaned != cleanDst && !strings.HasPrefix(cleaned, cleanDst+string(os.PathSeparator)) {
			return false, fmt.Errorf("IllegalPathErr: entry %q escapes destination", hdr.Name)
		}
		// Create diretory before create file
		parentDir := path.Dir(newPath)
		if !kf.IsExist(parentDir) {
			_ = os.MkdirAll(parentDir, os.ModePerm)
		}
		if hdr.Typeflag != tar.TypeDir {
			// Write data to file
			fw, err := os.Create(newPath)
			if err != nil {
				return false, fmt.Errorf("CreateErr: %s file:%s\n", err.Error(), newPath)
			}
			_, err = io.Copy(fw, tr)
			// Fix: fw leaked when io.Copy failed; close on every path.
			_ = fw.Close()
			if err != nil {
				return false, fmt.Errorf("CopyErr: %s file:%s\n", err.Error(), newPath)
			}
		}
	}
	return true, nil
}
// Regexes used by SafeFileName, compiled once at package init. Fix: the
// originals were recompiled on every call even though the patterns are
// constant, making the per-call error checks pure overhead.
var (
	safeFileNameSeparators = regexp.MustCompile(`[ &_=+:]`)
	safeFileNameLegal      = regexp.MustCompile(`[^[:alnum:]-.]`)
)

// SafeFileName 将文件名转换为安全可用的字符串.
// SafeFileName converts str into a safe file name: lower-cased, reduced to
// its base name, separator characters replaced by '-', characters outside
// [alnum - .] stripped, and runs of '-' collapsed to one.
func (kf *LkkFile) SafeFileName(str string) string {
	name := strings.ToLower(str)
	name = path.Clean(path.Base(name))
	name = strings.Trim(name, " ")
	name = safeFileNameSeparators.ReplaceAllString(name, "-")
	name = safeFileNameLegal.ReplaceAllString(name, "")
	for strings.Contains(name, "--") {
		name = strings.Replace(name, "--", "-", -1)
	}
	return name
}
// ChmodBatch 批量改变路径权限模式(包括子目录和所属文件).filemode为文件权限模式,dirmode为目录权限模式.
// ChmodBatch walks path and applies dirmode to every directory and filemode
// to every other entry. Returns true only when the whole walk succeeds (it
// stops at the first chmod or walk error).
func (kf *LkkFile) ChmodBatch(path string, filemode, dirmode os.FileMode) (res bool) {
	walkErr := filepath.Walk(path, func(p string, info os.FileInfo, err error) error {
		if info == nil {
			// propagate the walk error (e.g. path does not exist)
			return err
		}
		mode := filemode
		if info.IsDir() {
			mode = dirmode
		}
		return os.Chmod(p, mode)
	})
	return walkErr == nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.