text
stringlengths 11
4.05M
|
|---|
package main
import "fmt"
// main drives the recursion demos; test2(4) prints 2.
func main() {
	// test(4) // uncomment to trace the variant that prints while unwinding
	test2(4)
}
// When main runs, a stack frame is created and test2 is called with 4. Since 4 > 2,
// num1 is decremented to 3 and test2 recurses; the else branch (and its Println) is skipped.
// In the next frame 3 > 2 holds, so num1 becomes 2 and test2 recurses again without printing.
// In the final frame 2 > 2 fails, so the else branch runs and prints 2.
// Nothing executes while the frames unwind, so the only output is 2.
// test2 keeps decrementing num1 while it stays above 2 and prints it exactly
// once, from the deepest call, when the guard finally fails.
func test2(num1 int) {
	if num1 <= 2 {
		fmt.Println(num1)
		return
	}
	num1--
	test2(num1)
}
// When main runs test(4): since 4 > 2, num1 becomes 3 and test recurses before printing.
// In the next frame 3 > 2 holds, so num1 becomes 2 and test recurses again.
// In the final frame 2 > 2 fails, so that frame prints its value, 2, immediately.
// As each frame returns, the Println after the if runs, so the output is 2, 2, 3.
// test recurses while num1 exceeds 2 and prints each frame-local value on the
// way back out, producing 2, 2, 3 for an input of 4.
func test(num1 int) {
	if num1 <= 2 {
		fmt.Println(num1)
		return
	}
	num1--
	test(num1)
	fmt.Println(num1)
}
|
// Copyright (c) 2020 Blockwatch Data Inc.
// Author: alex@blockwatch.cc
package puller
import (
"github.com/zyjblockchain/sandy_log/log"
"sync"
"tezos_index/puller/models"
"time"
logpkg "github.com/echa/log"
)
// var log logpkg.Logger = logpkg.Log
// init disables package logging by default; callers opt back in via UseLogger.
func init() {
	DisableLog()
}
// DisableLog turns off package logging output.
// NOTE(review): the assignment is commented out, so this is currently a
// no-op; the log calls below actually go through the sandy_log import.
func DisableLog() {
	// log = logpkg.Disabled
}
// UseLogger installs logger as the package logger.
// NOTE(review): the assignment is commented out, so this is currently a
// no-op; confirm whether the echa/log-based logger was meant to be wired in.
func UseLogger(logger logpkg.Logger) {
	// log = logger
}
// logClosure defers expensive log-message construction: the string is only
// built when the value is actually formatted (e.g. by a %v verb).
type logClosure func() string

// String invokes the closure to produce the message, satisfying fmt.Stringer.
func (c logClosure) String() string {
	return c()
}

// newLogClosure wraps c so it can be passed to a logger and evaluated lazily.
func newLogClosure(c func() string) logClosure {
	return logClosure(c)
}
// BlockProgressLogger accumulates processed block/operation counts and is
// used to emit a periodic progress summary (see LogBlockHeight).
type BlockProgressLogger struct {
	sync.Mutex // guards all fields below
	nBlocks int64 // blocks processed since the last report
	nTx int64 // operations processed since the last report
	lastTime time.Time // when progress was last reported
	action string // verb prefixed to each log line
}
// NewBlockProgressLogger returns a progress logger whose messages begin with msg.
func NewBlockProgressLogger(msg string) *BlockProgressLogger {
	bpl := new(BlockProgressLogger)
	bpl.action = msg
	bpl.lastTime = time.Now()
	return bpl
}
// LogBlockHeight records one processed block and, at most once every ten
// seconds, emits a summary line with the accumulated block/operation counts,
// chain height, block timestamp, queue length and timings, then resets the
// counters. Safe for concurrent use.
func (b *BlockProgressLogger) LogBlockHeight(block *models.Block, qlen int, state State, d time.Duration) {
	b.Lock()
	defer b.Unlock()

	b.nBlocks++
	b.nTx += int64(len(block.Ops))

	now := time.Now()
	elapsed := now.Sub(b.lastTime)
	if elapsed < 10*time.Second {
		return // throttle: report at most once every ten seconds
	}

	blockUnit, txUnit := "blocks", "transactions"
	if b.nBlocks == 1 {
		blockUnit = "block"
	}
	if b.nTx == 1 {
		txUnit = "transaction"
	}

	log.Infof("%s %d %s in %s (%d %s, height %d, %s, q=%d, t=%s, s=%s)",
		b.action, b.nBlocks, blockUnit, elapsed.Truncate(10*time.Millisecond),
		b.nTx, txUnit, block.Height,
		block.Timestamp,
		qlen, d, state)

	b.nBlocks = 0
	b.nTx = 0
	b.lastTime = now
}
|
// Copyright (c) Alex Ellis 2017. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
package handlers
import (
"fmt"
"io/ioutil"
"net/http"
"net/url"
"github.com/gorilla/mux"
"github.com/openfaas/faas/gateway/metrics"
"github.com/openfaas/faas/gateway/queue"
)
// MakeQueuedProxy accepts work onto a queue
// MakeQueuedProxy accepts work onto a queue. The request body, the function
// name taken from the route, the query string, headers, host and an optional
// X-Callback-Url header are packaged into a queue.Request. On success the
// client receives 202 Accepted; a malformed body or callback URL returns
// 400 and a queueing failure returns 500.
func MakeQueuedProxy(metrics metrics.MetricOptions, wildcard bool, canQueueRequests queue.CanQueueRequests, pathTransformer URLPathTransformer) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var body []byte
		if r.Body != nil {
			defer r.Body.Close()
			var err error
			// Fix: the original called ioutil.ReadAll(r.Body) unconditionally
			// even though it guarded the Close with a nil check; a nil Body
			// (possible when the handler is invoked outside a real
			// http.Server, e.g. from a test) made ReadAll panic.
			body, err = ioutil.ReadAll(r.Body)
			if err != nil {
				w.WriteHeader(http.StatusBadRequest)
				w.Write([]byte(err.Error()))
				return
			}
		}

		vars := mux.Vars(r)
		name := vars["name"]

		// Reject an unparsable callback URL before queueing anything.
		var callbackURL *url.URL
		if callbackURLHeader := r.Header.Get("X-Callback-Url"); len(callbackURLHeader) > 0 {
			urlVal, urlErr := url.Parse(callbackURLHeader)
			if urlErr != nil {
				w.WriteHeader(http.StatusBadRequest)
				w.Write([]byte(urlErr.Error()))
				return
			}
			callbackURL = urlVal
		}

		req := &queue.Request{
			Function:    name,
			Body:        body,
			Method:      r.Method,
			QueryString: r.URL.RawQuery,
			Path:        pathTransformer.Transform(r),
			Header:      r.Header,
			Host:        r.Host,
			CallbackURL: callbackURL,
		}

		if err := canQueueRequests.Queue(req); err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte(err.Error()))
			fmt.Println(err)
			return
		}

		w.WriteHeader(http.StatusAccepted)
	}
}
|
package scrape
import (
"encoding/json"
"errors"
"fmt"
"reflect"
"strings"
"time"
"github.com/slotix/dataflowkit/errs"
"github.com/slotix/dataflowkit/fetch"
"github.com/slotix/dataflowkit/splash"
"github.com/slotix/dataflowkit/utils"
"github.com/spf13/viper"
)
// UnmarshalJSON casts Request interface{} type to custom splash.Request{} type.
// If omitted from the Payload, optional payload parameters are initialized with default values.
// http://choly.ca/post/go-json-marshalling/
// UnmarshalJSON decodes a Payload, converting the generic "request" field to
// the concrete request type for the configured fetcher, then fills any
// omitted optional parameters with defaults from viper.
// http://choly.ca/post/go-json-marshalling/
func (p *Payload) UnmarshalJSON(data []byte) error {
	type Alias Payload
	aux := &struct {
		Request interface{} `json:"request"`
		*Alias
	}{
		Alias: (*Alias)(p),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	request, err := p.initRequest("")
	if err != nil {
		return err
	}
	// Fix: the original used an unchecked type assertion
	// aux.Request.(map[string]interface{}), which panics when "request" is
	// present but not a JSON object (e.g. a string or number). A failed
	// assertion — including a missing/null request — is now a BadRequest.
	reqMap, ok := aux.Request.(map[string]interface{})
	if !ok {
		return &errs.BadRequest{}
	}
	if err := fillStruct(reqMap, request); err != nil {
		return err
	}
	p.Request = request

	// Initialize the remaining optional fields from configuration.
	p.PayloadMD5 = utils.GenerateMD5(data)
	if p.Format == "" {
		p.Format = viper.GetString("FORMAT")
	}
	//if p.RetryTimes == 0 {
	//	p.RetryTimes = DefaultOptions.RetryTimes
	//}
	if p.FetchDelay == nil {
		delay := time.Duration(viper.GetInt("FETCH_DELAY")) * time.Millisecond
		p.FetchDelay = &delay
	}
	if p.RandomizeFetchDelay == nil {
		rand := viper.GetBool("RANDOMIZE_FETCH_DELAY")
		p.RandomizeFetchDelay = &rand
	}
	if p.Paginator != nil && p.Paginator.MaxPages == 0 {
		p.Paginator.MaxPages = viper.GetInt("MAX_PAGES")
	}
	if p.PaginateResults == nil {
		pag := viper.GetBool("PAGINATE_RESULTS")
		p.PaginateResults = &pag
	}
	return nil
}
// initRequest builds a fetch.FetchRequester for the payload. newURL, when
// non-empty, overrides the URL from the payload's existing request. The
// fetcher type in the payload takes precedence over the FETCHER_TYPE flag.
// An unknown fetcher type is logged and returned as an error.
func (p *Payload) initRequest(newURL string) (fetch.FetchRequester, error) {
	fetcherType := p.FetcherType
	if fetcherType == "" {
		fetcherType = viper.GetString("FETCHER_TYPE")
	}

	targetURL := newURL
	if targetURL == "" && p.Request != nil {
		targetURL = p.Request.GetURL()
	}

	switch strings.ToLower(fetcherType) {
	case "splash":
		if p.Request == nil {
			return &splash.Request{}, nil
		}
		infinite := p.Paginator != nil && p.Paginator.InfiniteScroll
		return &splash.Request{
			URL:            targetURL,
			FormData:       p.Request.GetFormData(),
			UserToken:      p.Request.GetUserToken(),
			InfiniteScroll: infinite,
		}, nil
	case "base":
		if p.Request == nil {
			return &fetch.BaseFetcherRequest{}, nil
		}
		return &fetch.BaseFetcherRequest{
			URL:       targetURL,
			FormData:  p.Request.GetFormData(),
			UserToken: p.Request.GetUserToken(),
		}, nil
	default:
		err := errors.New("invalid fetcher type specified")
		logger.Error(err.Error())
		return nil, err
	}
}
// fillStruct copies every key/value pair in m into the matching field of the
// struct pointed to by s. A failure on the "regexp" key is swallowed and
// stops processing (legacy workaround); any other failure is returned.
func fillStruct(m map[string]interface{}, s interface{}) error {
	for key, value := range m {
		if err := setField(s, key, value); err != nil {
			if key == "regexp" {
				// TODO(review): known-bad field skipped on purpose;
				// note this also stops processing the remaining keys.
				return nil
			}
			return err
		}
	}
	return nil
}
// setField assigns value to the field of obj (a struct pointer) named name.
// The lookup tolerates case differences: the name as given, its Title-cased
// form, and its upper-cased form are tried in order, so "url" matches URL.
// Returns an error when no settable field is found or the types differ.
func setField(obj interface{}, name string, value interface{}) error {
	target := reflect.ValueOf(obj).Elem()

	// Incoming keys may be lowercase while exported struct fields are in
	// Title case or all caps (e.g. URL); probe the likely spellings.
	field := target.FieldByName(name)
	if !field.IsValid() {
		field = target.FieldByName(strings.Title(name))
	}
	if !field.IsValid() {
		field = target.FieldByName(strings.ToUpper(name))
	}

	// An invalid (not found) field also fails CanSet, so both "missing"
	// and "unexported" fall into this error.
	if !field.CanSet() {
		return fmt.Errorf("Cannot set field value: %s", name)
	}

	newVal := reflect.ValueOf(value)
	if field.Type() != newVal.Type() {
		return errors.New("Provided value type didn't match obj field type")
	}
	field.Set(newVal)
	return nil
}
|
package rest
import (
"github.com/jinmukeji/jiujiantang-services/pkg/rest"
"github.com/kataras/iris/v12"
)
// GetVersion 获取服务版本信息
func (h *handler) GetVersion(ctx iris.Context) {
rest.WriteOkJSON(ctx, iris.Map{
"version": "2.0.0",
})
}
|
package aoc2015
import (
"bufio"
"fmt"
"strconv"
"strings"
"sync"
"github.com/pkg/errors"
)
// excludeTown returns the same slice of towns but without some specified value.
func excludeTown(from []town, except ...town) []town {
out := make([]town, 0, len(from)/2) // don't worry it'll realloc.
for _, vv := range from {
// check if vv equals any of except
isInExcept := false
for _, exception := range except {
if vv == exception {
isInExcept = true
}
}
if !isInExcept {
out = append(out, vv)
}
}
return out
}
// disconnected is a townDistance that is unfathomably large.
// (math.MaxUint32; used as a "no link / no result" sentinel.)
const disconnected townDistance = 4294967295

// town represents a town
type town string

// townPair represents a pair of two towns
// where a is lexicographically less than or equal to b.
type townPair struct {
	a, b town
}

// townDistance represents the distance between towns
type townDistance uint

// townGraph represents a graph of towns
type townGraph struct {
	links map[townPair]townDistance // pls no negative distance
	towns []town // every distinct town seen, in insertion order
}

// townPath is a path that connects multiple towns together.
// The path can only be added to, and all towns must be unique.
// The path depends on a graph.
type townPath struct {
	raw []town // towns visited so far, in order
	distance townDistance // total distance along raw
	graph *townGraph // graph the path is measured against
	remaining []town // the ones that could still be checked (from graph)
}

// townPathQueue is a queue of townPaths.
// The values in the queue are arranged by their distances,
// used to perform a breadth-first-search through the paths of a graph.
type townPathQueue []townPath
// uint64 converts the distance to uint64 (handy for strconv formatting).
func (d townDistance) uint64() uint64 {
	return uint64(d)
}
// newGraph creates a graph construct
func newTownGraph(scanner *bufio.Scanner) (townGraph, error) {
out := townGraph{links: make(map[townPair]townDistance), towns: make([]town, 0)}
// now fill it in for each line
for scanner.Scan() {
// assume format is
// `AlphaNumericStringA to AlphaNumericStringB = UnsignedInteger`
text := strings.Fields(scanner.Text())
if len(text) != 5 {
return townGraph{}, fmt.Errorf("could not parse %v", scanner.Text())
}
townOne, townTwo := town(text[0]), town(text[2])
dist, err := strconv.Atoi(text[4])
if err != nil {
return townGraph{}, errors.Wrapf(err, "could not convert %v in %v", text[4], scanner.Text())
}
if dist < 0 {
return townGraph{}, errors.Wrapf(err, "could not fathom distance %v in %v", dist, scanner.Text())
}
out.set(townOne, townTwo, townDistance(dist))
}
return out, nil
}
// set adds the edge that connects a and b with distance dist.
// Make sure that neither a nor b contain \x00.
// If a == b then it doesn't do anything.
// set records an undirected edge between a and b with distance dist,
// normalising the pair so the lexicographically smaller town comes first.
// Self-edges are ignored; both endpoints are added to the town list if new.
// Make sure that neither a nor b contain \x00.
func (graph *townGraph) set(a, b town, dist townDistance) {
	if a == b {
		return
	}
	if b < a {
		a, b = b, a
	}
	graph.links[townPair{a, b}] = dist

	seenA, seenB := false, false
	for _, existing := range graph.towns {
		seenA = seenA || existing == a
		seenB = seenB || existing == b
	}
	if !seenA {
		graph.towns = append(graph.towns, a)
	}
	if !seenB {
		graph.towns = append(graph.towns, b)
	}
}
// get returns the distance between two towns.
// If a == b then return 0.
// If link doesn't exist then return disconnected.
func (graph townGraph) get(a, b town) townDistance {
if a == b {
return 0
}
if a > b {
a, b = b, a
}
if dist, ok := graph.links[townPair{a, b}]; ok {
return dist
}
return disconnected
}
// in returns true if town is in the graph's list of towns.
func (graph townGraph) in(town town) bool {
for _, tt := range graph.towns {
if tt == town {
return true
}
}
return false
}
// distance determines the distance returned after traversing all towns in path.
// Will also return a boolean signifying if path is a valid path.
// If path is not valid it will return disconnected and false.
// If path is empty it will return 0 and true.
func (graph townGraph) distance(path []town) (townDistance, bool) {
if len(path) == 0 {
return 0, true
}
if len(path) == 1 {
return 0, graph.in(path[0])
}
firstTwo := graph.get(path[0], path[1])
if len(path) == 2 {
return firstTwo, firstTwo != disconnected
}
remaining, areValid := graph.distance(path[1:])
if !areValid {
return disconnected, false
}
return firstTwo + remaining, true
}
// distanceSimple traces the distance returned after traversing all towns in path.
// Does not check if the path is even valid.
// If it isn't then distanceSimple may return a number that is at least disconnectedLink.
func (graph townGraph) distanceSimple(path []town) townDistance {
if len(path) <= 1 {
return 0 // there is no distance to be travelled
}
if len(path) == 2 {
return graph.get(path[0], path[1])
}
return graph.get(path[0], path[1]) + graph.distanceSimple(path[1:])
}
// permutations creates a channel that contains all the possible paths in the graph
// and will close said channel.
func (graph townGraph) permutations() <-chan []town {
permutate := func(c chan []town, inputs []town) {
output := make([]town, len(inputs))
copy(output, inputs)
c <- output
size := len(inputs)
p := make([]int, size+1)
for i := 0; i < size+1; i++ {
p[i] = i
}
for i := 1; i < size; {
p[i]--
j := 0
if i%2 == 1 {
j = p[i]
}
tmp := inputs[j]
inputs[j] = inputs[i]
inputs[i] = tmp
output := make([]town, len(inputs))
copy(output, inputs)
c <- output
for i = 1; p[i] == 0; i++ {
p[i] = i
}
}
}
c := make(chan []town)
go func(c chan []town) {
defer close(c)
towns := make([]town, len(graph.towns))
copy(towns, graph.towns)
permutate(c, towns)
}(c)
return c
}
// shortestPathPermutative returns the shortest path distance
// by checking through all permutations of the towns in the graph.
// This is an expensive operation.
// If gr has no valid paths it will return disconnected.
// shortestPathPermutative finds the minimum total distance over every
// permutation of the graph's towns — exhaustive and therefore expensive.
// If no permutation improves on the sentinel, disconnected is returned.
func (graph townGraph) shortestPathPermutative() townDistance {
	best := disconnected
	for path := range graph.permutations() {
		if d := graph.distanceSimple(path); d < best {
			best = d
		}
	}
	return best
}
// longestPathPermutative returns the longest path distance
// by checking through all permutations of the towns in the graph.
// This is an expensive operation.
// If gr has no valid paths it will return disconnected.
// longestPathPermutative finds the maximum total distance over every
// permutation of the graph's towns — exhaustive and therefore expensive.
// Returns disconnected when no permutation produced a positive distance.
func (graph townGraph) longestPathPermutative() townDistance {
	best := townDistance(0)
	for path := range graph.permutations() {
		if d := graph.distanceSimple(path); d > best {
			best = d
		}
	}
	if best == 0 {
		return disconnected
	}
	return best
}
// shortestPathGreedyFrom returns the shortest path distance from some town.
// If town does not exist return MaxUint32.
// If the graph is not complete it may perform undefined behavior.
// This function has issues (see below).
//
// Rationale
//
// How about instead of permutating all values we do something a bit better...
// Suppose there are seven towns A to G that Santa has to go through.
// Suppose Santa would start at town A.
// From here, determine which town from the six left is the closest.
// Suppose town B is the closest. Then the distance travelled so far would be A+B.
// Continue with the other towns until all towns would have been used.
//
// The shortest travelled path from town A to all the other towns
// will be what shortestPathGreedyFrom will return
//
// Such a greedy algorithm may run into issues.
// Consider a graph that is defined by the following:
//
// A to B = 1
// A to C = 4
// A to D = 4
// B to C = 1
// B to D = 10
// C to D = 100
//
// Suppose we start at A.
// The shortest path from A would be A -> B -> C -> D, whose total distance is 102.
// This is not the best choice as it is possible to use A -> C -> B -> D,
// whose total distance is 15.
// If the path starts from C however, the path taken would have been C -> B -> A -> D,
// which would have a total distance of 6.
func (graph townGraph) shortestPathGreedyFrom(from town) townDistance {
	// Unknown start town: nothing to walk.
	if !graph.in(from) {
		return disconnected
	}
	var total townDistance
	current := from
	unvisited := excludeTown(graph.towns, from)
	for len(unvisited) > 0 {
		// Hop to the nearest unvisited neighbour of the current town.
		bestDist, bestTown := disconnected, town("")
		for _, candidate := range unvisited {
			if d := graph.get(current, candidate); d < bestDist {
				bestDist, bestTown = d, candidate
			}
		}
		unvisited = excludeTown(unvisited, bestTown)
		current = bestTown
		total += bestDist
	}
	return total
}
// shortestPathGreedy checks all towns in a graph's towns list to see which is the shortest to go to.
func (graph townGraph) shortestPathGreedy() townDistance {
record := disconnected
for _, town := range graph.towns {
if pathDistance := graph.shortestPathGreedyFrom(town); pathDistance < record {
record = pathDistance
}
}
return record
}
// longestPathGreedyFrom is like shortestPathGreedyFrom but returns
// the longest path it could trace from town using a greedy algorithm
func (graph townGraph) longestPathGreedyFrom(from town) townDistance {
// check first if town is in towns
if !graph.in(from) {
return disconnected
}
remaining := excludeTown(graph.towns, from)
current := from
var total townDistance
for len(remaining) > 0 {
// check distances from current to everything in remaining
recordDist, recordTown := townDistance(0), town("")
for _, eachDestination := range remaining {
if eachDistance := graph.get(current, eachDestination); eachDistance > recordDist {
recordDist, recordTown = eachDistance, eachDestination
}
}
// now recordTown sounds like the winner.
remaining = excludeTown(remaining, recordTown)
current = recordTown
total += recordDist
}
return total
}
// longestPathGreedy checks all towns in a graph's towns list to see which is the shortest to go to.
func (graph townGraph) longestPathGreedy() townDistance {
var record townDistance
for _, town := range graph.towns {
if pathDistance := graph.longestPathGreedyFrom(town); pathDistance > record {
record = pathDistance
}
}
return record
}
// shortestPathCleverFrom determines the shortest path from a town
// using an algorithm a bit smarter than the previous one,
// although this algorithm is still exhaustive.
// This uses the concept of townPaths and a townPathQueue to record all paths.
// If there is no valid path from town, return disconnected.
// shortestPathCleverFrom performs a uniform-cost search over partial paths
// starting at start: the queue always pops the cheapest partial path, so the
// first complete path (no towns remaining) is the shortest. Still exhaustive
// in the worst case, but smarter than raw permutation.
// Returns disconnected when no complete path from start exists.
func (graph townGraph) shortestPathCleverFrom(start town) townDistance {
	frontier := newTownPathQueue()
	// Seed the queue with every valid two-town path leaving start.
	for _, second := range graph.towns {
		if seed, err := newTownPath(start, second, &graph); err == nil {
			frontier.push(seed)
		}
	}
	for len(frontier) > 0 {
		current, _ := frontier.pop() // cannot fail: queue is non-empty
		if len(current.remaining) == 0 {
			return current.distance // cheapest complete path wins
		}
		// Extend the cheapest partial path by each still-unvisited town.
		for _, candidate := range current.remaining {
			if extended, err := current.add(candidate); err == nil {
				frontier.push(extended)
			}
		}
	}
	return disconnected
}
// longestPathNotSoCleverFrom computes the longest path from a town.
func (graph townGraph) longestPathNotSoCleverFrom(town town) townDistance {
allPaths := newTownPathQueue()
// let's use tg.towns...
for _, tt := range graph.towns {
path, err := newTownPath(town, tt, &graph)
if err != nil { // maybe same town? or no direct path?
continue
}
allPaths.push(path)
}
var record townDistance
for len(allPaths) > 0 {
path, _ := allPaths.pop() // guaranteed no error
if len(path.remaining) == 0 {
// this is part of what makes this algo take so long.
// we're exhausting all potential paths
// from shortest to longest.
if path.distance > record {
record = path.distance
}
}
// otherwise let's consider all remainings
for _, next := range path.remaining {
nextPath, err := path.add(next)
if err != nil {
continue // there should be no error...
}
allPaths.push(nextPath)
}
}
if record == 0 {
return disconnected // there are no paths...
}
return record
}
// shortestPathClever spins up shortestPathCleverFrom for all towns in a graph
func (graph townGraph) shortestPathClever() townDistance {
record := disconnected
for _, town := range graph.towns {
if pathDistance := graph.shortestPathCleverFrom(town); pathDistance < record {
record = pathDistance
}
}
return record
}
// longestPathNotSoClever spins up longestPathCleverFrom for all towns in a graph
func (graph townGraph) longestPathNotSoClever() townDistance {
var record townDistance
for _, town := range graph.towns {
if pathDistance := graph.longestPathNotSoCleverFrom(town); pathDistance > record {
record = pathDistance
}
}
return record
}
// newTownPath creates a new townPath construct that starts with two towns.
// Will return an error if a and b are not towns in townGraph.
// Will also return an error if a and b are equal.
// newTownPath starts a path with the two towns a and b. Fails when a == b or
// when the graph holds no valid route between them.
func newTownPath(a, b town, graph *townGraph) (townPath, error) {
	if a == b {
		return townPath{}, fmt.Errorf("could not create path to itself")
	}
	dist, ok := graph.distance([]town{a, b})
	if !ok {
		return townPath{}, fmt.Errorf("could not create path between %v and %v", a, b)
	}
	return townPath{
		raw:       []town{a, b},
		distance:  dist,
		graph:     graph,
		remaining: excludeTown(graph.towns, a, b),
	}, nil
}
// copy copies the townPath construct
func (path townPath) copy() townPath {
output := townPath{}
output.raw = make([]town, len(path.raw))
copy(output.raw, path.raw)
output.distance = path.distance
output.graph = path.graph
output.remaining = make([]town, len(path.remaining))
copy(output.remaining, path.remaining)
return output
}
// tail returns the most recently added town on the path.
func (path *townPath) tail() town {
	last := len(path.raw) - 1
	return path.raw[last]
}
// add adds a town to a town path and returns said path.
// Will return an error if a path between the tail and the next town could not be found.
// Will also return an error if next is already in the path.
// add extends the path with next and returns the extended copy.
// Errors when no candidate towns remain, when next was already visited, or
// when the graph has no link between the current tail and next.
func (path townPath) add(next town) (townPath, error) {
	if len(path.remaining) == 0 {
		return townPath{}, fmt.Errorf("but there are no potential towns left")
	}
	for _, visited := range path.raw {
		if visited == next {
			return townPath{}, fmt.Errorf("%v already in path", next)
		}
	}
	last := path.tail()
	hop := path.graph.get(last, next)
	if hop == disconnected {
		return townPath{}, fmt.Errorf("no link between %v and %v", last, next)
	}
	extended := path.copy()
	extended.raw = append(extended.raw, next)
	extended.distance += hop
	extended.remaining = excludeTown(extended.remaining, next)
	return extended, nil
}
// newTownPathQueue creates a townPathQueue construct.
func newTownPathQueue() townPathQueue {
return make(townPathQueue, 0) // it's that simple
}
// push pushes a townPath to the queue.
func (queue *townPathQueue) push(path townPath) {
if len(*queue) == 0 {
*queue = append(*queue, path.copy())
return
}
ind := 0
for ind < len(*queue) && (*queue)[ind].distance < path.distance {
ind++
}
// append a value to tpq. Just any value..
*queue = append(*queue, townPath{})
// move all elements...
for ii := len(*queue) - 1; ii > ind; ii-- {
(*queue)[ii] = (*queue)[ii-1]
}
(*queue)[ind] = path
}
// pop pops a value from the queue.
// Will return an error if the queue is empty.
func (queue *townPathQueue) pop() (townPath, error) {
if len(*queue) == 0 {
return townPath{}, errors.New("queue is empty")
}
out := (*queue)[0]
*queue = (*queue)[1:]
return out, nil
}
// Day09 solves the ninth day puzzle "All in a Single Night".
//
// Input
//
// A file describing the distances between any two towns
// of a graph containing seven towns. For example:
//
// Faerun to Norrath = 129
// Faerun to Tristram = 58
// Faerun to AlphaCentauri = 13
// Faerun to Arbre = 24
// Faerun to Snowdin = 60
// Faerun to Tambi = 71
// Faerun to Straylight = 67
// Norrath to Tristram = 142
// Norrath to AlphaCentauri = 15
// Norrath to Arbre = 135
// Norrath to Snowdin = 75
// Norrath to Tambi = 82
// Norrath to Straylight = 54
// Tristram to AlphaCentauri = 118
// Tristram to Arbre = 122
// Tristram to Snowdin = 103
// Tristram to Tambi = 49
// Tristram to Straylight = 97
// AlphaCentauri to Arbre = 116
// AlphaCentauri to Snowdin = 12
// AlphaCentauri to Tambi = 18
// AlphaCentauri to Straylight = 91
// Arbre to Snowdin = 129
// Arbre to Tambi = 53
// Arbre to Straylight = 40
// Snowdin to Tambi = 15
// Snowdin to Straylight = 99
// Tambi to Straylight = 70
//
// The above is my complete input.
// It is guaranteed that the distances betwen any two towns is given.
// It is also guaranteed that the distances between any two towns is no more than 200.
//
func Day09(input string) (answer1, answer2 string, err error) {
	santasGraph, err := newTownGraph(bufio.NewScanner(strings.NewReader(input)))
	if err != nil {
		return
	}
	// Parts one and two are independent, so compute them concurrently.
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		answer1 = strconv.FormatUint(santasGraph.shortestPathClever().uint64(), 10)
	}()
	go func() {
		defer wg.Done()
		// use permutative. clever's not really clever.
		answer2 = strconv.FormatUint(santasGraph.longestPathPermutative().uint64(), 10)
	}()
	wg.Wait()
	return
}
|
package main
import (
"fmt"
)
// main sums the integers 2 through 30 with the variadic helper and prints
// the total.
func main() {
	numbers := make([]int, 0, 29)
	for i := 2; i <= 30; i++ {
		numbers = append(numbers, i)
	}
	total := sum(numbers...)
	fmt.Println("The total amount is:", total)
}
// sum adds all of its arguments, logging the slice, each running total, and
// the final result, which it also returns.
func sum(s ...int) int {
	fmt.Println(s)
	total := 0
	for idx, value := range s {
		total += value
		fmt.Println("Index position number:", idx, "Now adding ", value, "Total became: ", total)
	}
	fmt.Println("The total amount is:", total)
	return total
}
|
/*Package openid implements web service middlewares for authenticating identities represented by
OpenID Connect (OIDC) ID Tokens.
For details on OIDC go to http://openid.net/specs/openid-connect-core-1_0.html
The middlewares will: extract the ID Token from the request; retrieve the OIDC provider (OP)
configuration and signing keys; validate the token and provide the user identity and claims to the
underlying web service.
The Basics
At the core of this package are the Authenticate and AuthenticateUser middlewares. To use either one
of them you will need an instance of the Configuration type, to create that you use NewConfiguration.
func Authenticate(conf *Configuration, h http.Handler) http.Handler
func AuthenticateUser(conf *Configuration, h UserHandler) http.Handler
NewConfiguration(options ...option) (*Configuration, error)
// options:
func ErrorHandler(eh ErrorHandlerFunc) func(*Configuration) error
func ProvidersGetter(pg GetProvidersFunc) func(*Configuration) error
func HTTPGetter(hg HTTPGetFunc) func(*Configuration) error
// extension points:
type ErrorHandlerFunc func(error, http.ResponseWriter, *http.Request) bool
type GetProvidersFunc func() ([]Provider, error)
type HTTPGetFunc func(r *http.Request, url string) (*http.Response, error)
The Example below demonstrates these elements working together.
Token Parsing
Both Authenticate and AuthenticateUser middlewares expect the incoming requests to have an HTTP
Authorization header with the content 'Bearer [idToken]' where [idToken] is a valid ID Token issued by
an OP. For instance:
Authorization: Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6...
By default, requests that do not contain an Authorization header with this content will not be forwarded
to the next HTTP handler in the pipeline, instead they will fail back to the client with HTTP status
400/Bad Request.
Token Validation
Once parsed the ID Token will be validated:
1) Is the token a valid jwt?
2) Is the token issued by a known OP?
3) Is the token issued for a known client?
4) Is the token valid at the time ('not use before' and 'expire at' claims)?
5) Is the token signed accordingly?
The signature validation is done with the public keys retrieved from the jwks_uri published by the OP in
its OIDC metadata (https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).
The token's issuer and audiences will be verified using a collection of the type Provider. This
collection is retrieved by calling the implementation of the function GetProvidersFunc registered with
the Configuration.
If the token issuer matches the Issuer of any of the providers and the token audience matches at least
one of the ClientIDs of the respective provider then the token is considered valid.
func myGetProviders() ([]openid.Provider, error) {
p, err := openid.NewProvider("https://accounts.google.com",
[]string{"407408718192.apps.googleusercontent.com"})
// ....
return []openid.Provider{p}, nil
}
c, _ := openid.NewConfiguration(openid.ProvidersGetter(myGetProviders))
In code above only tokens with Issuer claim ('iss') https://accounts.google.com and Audiences claim
('aud') containing "407408718192.apps.googleusercontent.com" can be valid.
By default, when the token validation fails for any reason the requests will not be forwarded to the next
handler in the pipeline, instead they will fail back to the client with HTTP status 401/Unauthorized.
Error Handling
The default behavior of the Authenticate and AuthenticateUser middlewares upon error conditions is:
the execution pipeline is stopped (the next handler will not be executed), the response will contain
status 400 when a token is not found and 401 when it is invalid, and the response will also contain the
error message.
This behavior can be changed by implementing a function of type ErrorHandlerFunc and registering it
using ErrorHandler with the Configuration.
type ErrorHandlerFunc func(error, http.ResponseWriter, *http.Request) bool
func ErrorHandler(eh ErrorHandlerFunc) func(*Configuration) error
For instance:
func myErrorHandler(e error, w http.ResponseWriter, r *http.Request) bool {
fmt.Fprintf(w, e.Error())
return false
}
c, _ := openid.NewConfiguration(openid.ProvidersGetter(myGetProviders),
openid.ErrorHandler(myErrorHandler))
In the code above myErrorHandler adds the error message to the response and lets the execution
continue to the next handler in the pipeline (by returning false) for all error types.
You can use this extension point to fine tune what happens when a specific error is returned by your
implementation of the GetProvidersFunc or even for the error types and codes exported by this
package:
type ValidationError struct
type ValidationErrorCode uint32
type SetupError struct
type SetupErrorCode uint32
Authenticate vs AuthenticateUser
Both middlewares Authenticate and AuthenticateUser behave exactly the same way when it comes to
parsing and validating the ID Token. The only difference is that AuthenticateUser will forward the
information about the user's identity from the ID Token to the next handler in the pipeline.
If your service does not need to know the identity of the authenticated user then Authenticate will
suffice, otherwise your choice is AuthenticateUser.
In order to receive the User information from the AuthenticateUser the next handler in the pipeline
must implement the interface UserHandler with the following function:
ServeHTTPWithUser(*User, http.ResponseWriter, *http.Request)
You can also make use of the function adapter UserHandlerFunc as shown in the example below:
func myHandlerWithUser(u *openid.User, w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Authenticated! The user is %+v.", u)
}
http.Handle("/user", openid.AuthenticateUser(c, openid.UserHandlerFunc(myHandlerWithUser)))
*/
package openid
|
// Copyright 2021 PingCAP, Inc. Licensed under Apache-2.0.
package build
import (
"strings"
"testing"
"github.com/stretchr/testify/require"
)
// TestInfo checks that Info() renders a five-line banner with the expected
// label on each line, in order.
func TestInfo(t *testing.T) {
	info := Info()
	lines := strings.Split(info, "\n")
	require.Regexp(t, "^Release Version", lines[0])
	require.Regexp(t, "^Git Commit Hash", lines[1])
	require.Regexp(t, "^Git Branch", lines[2])
	require.Regexp(t, "^Go Version", lines[3])
	require.Regexp(t, "^UTC Build Time", lines[4])
}
// TestLogInfo exercises LogInfo for both tools; it only verifies that the
// calls complete without panicking (no output assertions).
func TestLogInfo(*testing.T) {
	LogInfo(BR)
	LogInfo(Lightning)
}
|
package client
import (
"github.com/giantswarm/api-schema"
)
// SearchRequest is the JSON body posted to the user search endpoint.
// Any of the three filter lists may be empty.
type SearchRequest struct {
	Usernames []string `json:"usernames"`
	Emails []string `json:"emails"`
	UserIDs []string `json:"user_ids"`
}

// SearchResult is the decoded search response: the matched users and a count.
type SearchResult struct {
	Size int `json:"size"`
	Items []User `json:"items"`
}
// Search posts the given request to the user search endpoint and
// decodes the result payload. All errors are wrapped with Mask.
func (c *Client) Search(req SearchRequest) (SearchResult, error) {
	var result SearchResult

	resp, err := c.postSchemaJSON("/user/search", req)
	if err != nil {
		return SearchResult{}, Mask(err)
	}

	// The endpoint is expected to answer with a "data" status code.
	if err := resp.EnsureStatusCodes(apischema.STATUS_CODE_DATA); err != nil {
		return SearchResult{}, Mask(err)
	}

	if err := resp.UnmarshalData(&result); err != nil {
		return SearchResult{}, Mask(err)
	}

	return result, nil
}
// SearchByUserIDs looks up the users matching the given IDs.
func (c *Client) SearchByUserIDs(userIDs []string) ([]User, error) {
	res, err := c.Search(SearchRequest{UserIDs: userIDs})
	if err != nil {
		return nil, Mask(err)
	}
	return res.Items, nil
}
// SearchByUsername resolves a single user by username. It fails with
// ErrNotFound when nobody matches and with ErrUnexpectedResponse when
// more than one user does.
func (c *Client) SearchByUsername(username string) (User, error) {
	res, err := c.Search(SearchRequest{Usernames: []string{username}})
	if err != nil {
		return User{}, Mask(err)
	}
	switch len(res.Items) {
	case 0:
		return User{}, Mask(ErrNotFound)
	case 1:
		return res.Items[0], nil
	default:
		return User{}, Mask(ErrUnexpectedResponse)
	}
}
// SearchByEmail resolves a single user by email address. It fails with
// ErrNotFound when nobody matches and with ErrUnexpectedResponse when
// more than one user does.
func (c *Client) SearchByEmail(email string) (User, error) {
	res, err := c.Search(SearchRequest{Emails: []string{email}})
	if err != nil {
		return User{}, Mask(err)
	}
	switch len(res.Items) {
	case 0:
		return User{}, Mask(ErrNotFound)
	case 1:
		return res.Items[0], nil
	default:
		return User{}, Mask(ErrUnexpectedResponse)
	}
}
|
package main
import (
"encoding/json"
"net/http"
"fmt"
)
// Message is the JSON payload served by the about endpoint.
type Message struct {
	Text string
}
// Job describes a job posting by title and city.
// NOTE(review): not referenced in this chunk — presumably used by the
// RSS parsing helpers defined elsewhere in the package.
type Job struct {
	Title string
	City  string
}
// main wires up the HTTP routes and starts the server on :8080.
func main() {
	// Static assets from ./static at the site root.
	http.Handle("/", http.FileServer(http.Dir("./static")))
	http.HandleFunc("/about/", about)
	http.HandleFunc("/api/jobs/", jobs)
	// ListenAndServe only returns on failure; surface that failure
	// instead of silently dropping the error as before.
	checkError(http.ListenAndServe(":8080", nil))
}
// about answers with a static JSON welcome message.
func about(w http.ResponseWriter, r *http.Request) {
	m := Message{"Welcome to Rest Services implemented with GO"}
	b, err := json.Marshal(m)
	checkError(err)
	// Declare the payload type so clients parse the body as JSON
	// (previously no Content-Type was set).
	w.Header().Set("Content-Type", "application/json")
	w.Write(b)
}
// jobs serves the per-city job counts as JSON. keyword and company are
// read from the query string/form and fed to the RSS feed lookup.
// NOTE(review): getRssFeed, parseXml and calculateJobsPerCity are
// defined elsewhere in this package; no Content-Type header is set.
func jobs(w http.ResponseWriter, r *http.Request) {
	keyword := r.FormValue("keyword")
	company := r.FormValue("company")
	xml := getRssFeed(keyword, company)
	rss := parseXml(xml)
	jobDTOs := calculateJobsPerCity(rss.Channel)
	fmt.Println("found jobs ", len(jobDTOs))
	b, err := json.Marshal(jobDTOs)
	checkError(err)
	w.Write(b)
}
func checkError(err error) {
if err != nil {
panic(err)
}
}
|
package main
import (
"flag"
"fmt"
"math/rand"
"os"
"strconv"
)
// main parses the CLI flags and writes the requested amount of random
// numbers to the target file.
func main() {
	var (
		filePath = flag.String("file", "../../files/input.txt", "where you want to save your file")
		num      = flag.Int("n", 10000, "amount of numbers you want to generate to the file")
	)
	flag.Parse()
	generateFile(*filePath, *num)
}
// Generate random numbers which are separated by "\t"
func generateFile(filePath string, num int) {
f, _ := os.OpenFile(filePath, os.O_CREATE|os.O_RDWR, 0777)
defer f.Close()
var s string
for i := 0; i < num; i++ {
s = strconv.Itoa(rand.Intn(20000)) + "\t"
f.WriteString(s)
}
fmt.Printf("Exported file %s\n", filePath)
}
|
package storage
import (
"context"
"time"
"github.com/mongodb/mongo-go-driver/mongo"
)
// MongoStorage implements our storage interface, backed by a MongoDB
// client plus the database and collection names it operates on.
type MongoStorage struct {
	*mongo.Client
	DB         string
	Collection string
}
// NewMongoStorage initializes a MongoStorage connected to the MongoDB
// instance described by connection. The connection attempt is bounded
// by a 10-second timeout derived from ctx.
func NewMongoStorage(ctx context.Context, connection, db, collection string) (*MongoStorage, error) {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	// Use the caller-supplied connection string; the previous version
	// ignored it and always dialed "mongodb://localhost".
	client, err := mongo.Connect(ctx, connection)
	if err != nil {
		return nil, err
	}
	ms := MongoStorage{
		Client:     client,
		DB:         db,
		Collection: collection,
	}
	return &ms, nil
}
|
package main
import (
"bytes"
"context"
"fmt"
"image"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/BenLubar/nodejs-roundtripper"
"github.com/gopherjs/gopherjs/js"
"github.com/karlseguin/ccache"
"golang.org/x/net/html"
"golang.org/x/net/html/atom"
)
// debug enables verbose warn-level logging through winston.
const debug = true

// nconf and winston are borrowed from the parent NodeBB process via
// the GopherJS module bridge.
var nconf = js.Module.Get("parent").Call("require", "nconf")
var winston = js.Module.Get("parent").Call("require", "winston")

// lru caches image dimensions keyed by absolute source URL.
var lru = ccache.New(ccache.Configure())

// client issues HTTP requests through the Node.js round tripper.
var client = &http.Client{
	Transport: roundtripper.RoundTripper,
}
// parse rewrites an HTML fragment so that every <img> carries explicit
// width/height attributes (see parseNode/setSize). The top-level nodes
// are processed concurrently under a 15-second overall deadline; on
// any parse or render failure the input is returned unchanged.
func parse(src string) string {
	nodes, err := html.ParseFragment(strings.NewReader(src), &html.Node{
		Type:     html.ElementNode,
		Data:     "div",
		DataAtom: atom.Div,
	})
	if err != nil {
		return src
	}
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	var wg sync.WaitGroup
	wg.Add(len(nodes))
	for _, n := range nodes {
		go parseNode(ctx, &wg, n)
	}
	wg.Wait()
	// Re-render all fragments into a single string.
	var buf bytes.Buffer
	for _, n := range nodes {
		err = html.Render(&buf, n)
		if err != nil {
			return src
		}
	}
	return buf.String()
}
// parseNode walks the subtree rooted at n in depth-first document
// order. For every <img> element whose width or height attribute is
// missing or non-numeric it spawns setSize to look the dimensions up.
func parseNode(ctx context.Context, wg *sync.WaitGroup, n *html.Node) {
	defer wg.Done()
	for {
		if n.Type == html.ElementNode && n.DataAtom == atom.Img {
			var src, width, height string
			for _, a := range n.Attr {
				switch a.Key {
				case "src":
					src = a.Val
				case "width":
					width = a.Val
				case "height":
					height = a.Val
				}
			}
			// Only fetch when either dimension fails to parse as an int.
			_, err := strconv.Atoi(width)
			if err == nil {
				_, err = strconv.Atoi(height)
			}
			if err != nil {
				wg.Add(1)
				go setSize(ctx, wg, n, src)
			}
		}
		// Manual DFS: descend first; otherwise advance to the next
		// sibling, climbing back up until one exists. Terminate once the
		// climb runs off the top of the tree.
		if n.FirstChild != nil {
			n = n.FirstChild
		} else {
			p := n
			for p != nil && p.NextSibling == nil {
				p = p.Parent
			}
			if p == nil {
				break
			}
			n = p.NextSibling
		}
	}
}
// setSize resolves the intrinsic dimensions of the image at src and
// writes them into n's width/height attributes. Results are cached by
// absolute URL for ten minutes. Images under the forum's own upload
// path are decoded straight from disk; other paths on the forum's host
// are skipped; everything else is fetched over HTTP. On any failure an
// empty image.Config is cached and the node is left untouched.
func setSize(ctx context.Context, wg *sync.WaitGroup, n *html.Node, src string) {
	defer wg.Done()
	// Resolve src against the forum's configured base URL.
	u, err := url.Parse(nconf.Call("get", "url").String())
	if err != nil {
		return
	}
	originalHost := u.Host
	originalPath := u.Path
	u, err = u.Parse(src)
	if err != nil {
		return
	}
	cleanPath := path.Clean(u.Path)
	if u.Scheme != "http" && u.Scheme != "https" {
		return
	}
	// .php is dynamic and .svg has no fixed pixel size; skip both.
	if strings.HasSuffix(u.Path, ".php") || strings.HasSuffix(u.Path, ".svg") {
		return
	}
	src = u.String()
	item, err := lru.Fetch(src, time.Minute*10, func() (interface{}, error) {
		// The lookup runs in its own goroutine so the select below can
		// give up on ctx expiry; the buffered channel lets the worker
		// finish without blocking after a timeout.
		ch := make(chan image.Config, 1)
		go func() {
			if u.Host == originalHost {
				// Uploaded files can be decoded directly from disk.
				if uploadURL := nconf.Call("get", "upload_url").String(); strings.HasPrefix(cleanPath, uploadURL) {
					localPath := filepath.Join(nconf.Call("get", "base_dir").String(), nconf.Call("get", "upload_path").String(), strings.TrimPrefix(cleanPath, uploadURL))
					f, err := os.Open(localPath)
					if err != nil {
						if debug {
							winston.Call("warn", fmt.Sprintf("[nodebb-plugin-image-size] os.Open %q %v", localPath, err))
						}
						ch <- image.Config{}
						return
					}
					defer f.Close()
					config, _, err := image.DecodeConfig(f)
					if err != nil {
						if debug {
							winston.Call("warn", fmt.Sprintf("[nodebb-plugin-image-size] image.DecodeConfig %q %v", localPath, err))
						}
						ch <- image.Config{}
						return
					}
					ch <- config
					return
				}
				// Other same-host paths are forum pages, not images; do
				// not re-enter the forum over HTTP.
				if strings.HasPrefix(cleanPath, originalPath) {
					ch <- image.Config{}
					return
				}
			}
			req, err := http.NewRequest("GET", src, nil)
			if err != nil {
				if debug {
					winston.Call("warn", fmt.Sprintf("[nodebb-plugin-image-size] http.NewRequest %q %v", src, err))
				}
				ch <- image.Config{}
				return
			}
			req = req.WithContext(ctx)
			req.Header.Set("Accept", "image/*")
			req.Header.Set("User-Agent", "nodebb-plugin-image-size/0.0 (+https://github.com/BenLubar/nodebb-plugin-image-size)")
			resp, err := client.Do(req)
			if err != nil {
				if debug {
					winston.Call("warn", fmt.Sprintf("[nodebb-plugin-image-size] client.Do %q %v", src, err))
				}
				ch <- image.Config{}
				return
			}
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				if debug {
					winston.Call("warn", fmt.Sprintf("[nodebb-plugin-image-size] response status %q %s", src, resp.Status))
				}
				ch <- image.Config{}
				return
			}
			// DecodeConfig reads only the header, not the full image.
			config, _, err := image.DecodeConfig(resp.Body)
			if err != nil {
				if debug {
					winston.Call("warn", fmt.Sprintf("[nodebb-plugin-image-size] image.DecodeConfig %q %v", src, err))
				}
				ch <- image.Config{}
				return
			}
			ch <- config
		}()
		select {
		case config := <-ch:
			return config, nil
		case <-ctx.Done():
			if debug {
				winston.Call("warn", fmt.Sprintf("[nodebb-plugin-image-size] timed out: %q", src))
			}
			return image.Config{}, nil
		}
	})
	if err != nil {
		// nothing we can do
		return
	}
	config := item.Value().(image.Config)
	if config.Width == 0 || config.Height == 0 {
		return
	}
	// Overwrite existing width/height attributes in place; zero the
	// field once written so it is not appended again below.
	for i, a := range n.Attr {
		switch a.Key {
		case "width":
			n.Attr[i].Val = strconv.Itoa(config.Width)
			config.Width = 0
		case "height":
			n.Attr[i].Val = strconv.Itoa(config.Height)
			config.Height = 0
		}
	}
	if config.Width != 0 {
		n.Attr = append(n.Attr, html.Attribute{
			Key: "width",
			Val: strconv.Itoa(config.Width),
		})
	}
	if config.Height != 0 {
		n.Attr = append(n.Attr, html.Attribute{
			Key: "height",
			Val: strconv.Itoa(config.Height),
		})
	}
}
|
package handlers
import (
"encoding/json"
"net/http"
)
func readiness(w http.ResponseWriter, r *http.Request) {
status := struct {
Status string
}{
Status: "OK",
}
json.NewEncoder(w).Encode(status)
}
|
package main
import (
"fmt"
h "github.com/gb_home/hw2/helper"
prime "github.com/gb_home/hw2/primeNumber"
)
// main runs the homework exercises in order: even-number check,
// divisibility by 3, the first 100 Fibonacci numbers, and filling an
// array with prime numbers. The printed strings are the task
// descriptions (in Russian) and must stay as-is.
func main() {
	fmt.Println("Написать функцию, которая определяет, четное ли число.")
	fmt.Println("isMod(4):", h.IsMod(4))
	fmt.Println("isMod(7):", h.IsMod(7))
	fmt.Println("Написать функцию, которая определяет, делится ли число без остатка на 3.")
	fmt.Println("isDivide(9):", h.IsDivide(9))
	fmt.Println("isDivide(8):", h.IsDivide(8))
	fmt.Println("Написать функцию, которая последовательно выводит на экран 100 первых чисел Фибоначчи, начиная от 0.")
	h.CreateFib()
	fmt.Println("Заполнить массив из 100 элементов различными простыми числами.")
	prime.CreatePrimeNum()
}
|
package providers
import (
"database/sql"
"fmt"
_ "github.com/go-sql-driver/mysql"
"log"
"os"
// "time"
)
var (
	// db is a package-level handle. NOTE(review): NewConnection returns
	// its own *sql.DB and never assigns this variable — confirm whether
	// it is populated elsewhere.
	db *sql.DB
)

// Role codes stored in the user.roles column.
const (
	ADMIN = "a"
	MODER = "m"
	USER = "u"
	NOUSER = "n"
)
//Define modules behaviors:
type (
	// UserTemplate carries the fields needed to create a user.
	UserTemplate struct {
		Name, Phone, Email, Password_hash, Approve_token string
		Updated                                          int64
	}

	// User mirrors one row of the user table.
	User struct {
		User_id       int64
		Name          string
		Email         string
		Phone         string
		Messenger     string
		Auth_key      string
		Password_hash string
		Approve_token string
		Picture       string
		Birthday      string
		Updated       int64
		Lastlogin     int64
		Roles         string
	}

	// Storage describes the persistence operations used by the site's
	// handlers; the interleaved comments map methods to routes.
	// NOTE(review): the free functions in this package do not all match
	// these signatures exactly (e.g. GetUsersPaged takes int64 and
	// returns []*User) — confirm which side is authoritative.
	Storage interface {
		NewConnection(drv, dsn string) (*sql.DB, error)
		UpdateLastLogintime(db *sql.DB, user_id int64) (err error)
		UpdateProfileTime(db *sql.DB) (err error)
		// mwAdmin(): (user_id>0) => ->-> "/admin"
		GetUserById(db *sql.DB, user_id int64) User
		GetUsersPaged(db *sql.DB, limit int, offset int) ([]User, error) //SELECT * FROM users ORDER BY id DESC LIMIT 10 OFFSET 5
		// mwGuest(): (user_id>0) => ->-> "/user"
		// mwAccount(): (user_id==0) => ->-> "/guest/signup"
		// /#signup:GET - sign-up form for a PhotoSet site user,
		// /guest/signup:POST - register a PhotoSet site user,
		NewUserWithApproveToken(db *sql.DB, u User) (newApproveToken string, lastId int64)
		// /guest/approvement:GET - confirm the user's email or phone,
		ApproveNewUser(db *sql.DB, email, link, setRole string) int64
		// /guest/auth/@guestlink:GET - phone-entry form for a user that followed a guest link.
		// /guest/login:GET|POST - login: authentication and authorization,
		IsUserByPhonenumber(db *sql.DB, phone string) int64
		GetUserByEmailOnly(db *sql.DB, email string) User
		GetUserByEmailRoles(db *sql.DB, email, roles string) User
		// /guest/password/request:GET - request a password reset,
		// /guest/password/validation/@link:GET - confirm the password reset,
		GetRecoverPasswordLink(db *sql.DB, recoverChannel string) (recoverApproveToken string, err error)
		ApproveRecoverPassword(db *sql.DB, link string) (User, error)
		// /user/signout/request:GET - request account deletion.
		// /user/signout/validation/@link:GET - confirm account deletion.
		DeleteUser(db *sql.DB, user_id int64) (deleteApproveToken string, err error)
		ApproveDeleteUser(db *sql.DB, link string) (User, error)
		// /user/gdprinfo:GET - GDPR information about the account.
		// Fixed: "(User, err error)" mixed named and unnamed results,
		// which does not compile; both results are named now.
		GetGdprUserInfo(db *sql.DB, user_id int64) (u User, err error)
		// /user/logout:GET|POST - logout.
		// /user/profile:GET|POST - edit the user's profile.
		UpdateUserInfo(db *sql.DB, u User) (rowCnt int64)
	}
)
// NewConnection opens a database handle for the given driver/DSN and
// verifies it with a Ping. Failures are logged to stderr and returned
// to the caller; the previous version called os.Exit instead, which
// made the error return value unreachable (and the Ping failure was
// not even logged).
func NewConnection(drv, dsn string) (*sql.DB, error) {
	db, err := sql.Open(drv, dsn)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err)
		return nil, err
	}
	if err = db.Ping(); err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err)
		return nil, err
	}
	return db, nil
}
// IsUserByPhonenumber returns the user_id of the user with the given
// phone number, or 0 when no such user exists. Any other query error
// is printed to stderr and terminates the process.
func IsUserByPhonenumber(db *sql.DB, phone string) int64 {
	sqlStatement := `SELECT user_id FROM user WHERE phone = ?`
	var u User
	row := db.QueryRow(sqlStatement, phone)
	err := row.Scan(
		&u.User_id,
	)
	switch err {
	case sql.ErrNoRows:
		return 0
	case nil:
		return u.User_id
	default:
		fmt.Fprintf(os.Stderr, "%s\n", err)
		os.Exit(1)
	}
	// Unreachable: all switch branches return or exit.
	return 0
}
// GetUserById loads the full user row with the given user_id. A
// missing row is reported on stdout and the zero-value User returned;
// any other error terminates the process.
func GetUserById(db *sql.DB, id int64) User {
	sqlStatement := `SELECT user_id, name, email, phone, messenger, auth_key, password_hash, approve_token, picture, birthday, updated, lastlogin, roles
	FROM user WHERE user_id = ?`
	var u User
	row := db.QueryRow(sqlStatement, id)
	err := row.Scan(
		&u.User_id,
		&u.Name,
		&u.Email,
		&u.Phone,
		&u.Messenger,
		&u.Auth_key,
		&u.Password_hash,
		&u.Approve_token,
		&u.Picture,
		&u.Birthday,
		&u.Updated,
		&u.Lastlogin,
		&u.Roles,
	)
	switch err {
	case sql.ErrNoRows:
		fmt.Println("No rows were returned!")
		return u
	case nil:
		return u
	default:
		fmt.Fprintf(os.Stderr, "%s\n", err)
		os.Exit(1)
	}
	// Unreachable: all switch branches return or exit.
	return u
}
// GetUserByEmailOnly loads the full user row matching the given email,
// regardless of role. A missing row is reported on stdout and the
// zero-value User returned; any other error terminates the process.
func GetUserByEmailOnly(db *sql.DB, email string) User {
	sqlStatement := `SELECT user_id, name, email, phone, messenger, auth_key, password_hash, approve_token, picture, birthday, updated, lastlogin, roles
	FROM user WHERE email = ?`
	var u User
	row := db.QueryRow(sqlStatement, email)
	err := row.Scan(
		&u.User_id,
		&u.Name,
		&u.Email,
		&u.Phone,
		&u.Messenger,
		&u.Auth_key,
		&u.Password_hash,
		&u.Approve_token,
		&u.Picture,
		&u.Birthday,
		&u.Updated,
		&u.Lastlogin,
		&u.Roles,
	)
	switch err {
	case sql.ErrNoRows:
		fmt.Println("No rows were returned!")
		return u
	case nil:
		return u
	default:
		fmt.Fprintf(os.Stderr, "%s\n", err)
		os.Exit(1)
	}
	// Unreachable: all switch branches return or exit.
	return u
}
// GetUserByEmailRoles loads the full user row matching both the given
// email and the exact roles string. A missing row is reported on
// stdout and the zero-value User returned; any other error terminates
// the process.
func GetUserByEmailRoles(db *sql.DB, email, roles string) User {
	sqlStatement := `SELECT user_id, name, email, phone, messenger, auth_key, password_hash, approve_token, picture, birthday, updated, lastlogin, roles
	FROM user WHERE email = ? and roles = ?`
	var u User
	row := db.QueryRow(sqlStatement, email, roles)
	err := row.Scan(
		&u.User_id,
		&u.Name,
		&u.Email,
		&u.Phone,
		&u.Messenger,
		&u.Auth_key,
		&u.Password_hash,
		&u.Approve_token,
		&u.Picture,
		&u.Birthday,
		&u.Updated,
		&u.Lastlogin,
		&u.Roles,
	)
	switch err {
	case sql.ErrNoRows:
		fmt.Println("No rows were returned!")
		return u
	case nil:
		return u
	default:
		fmt.Fprintf(os.Stderr, "%s\n", err)
		os.Exit(1)
	}
	// Unreachable: all switch branches return or exit.
	return u
}
// GetAllUsers loads every row of the user table. Query/scan errors are
// printed to stderr and terminate the process, so the error return is
// in practice always nil.
func GetAllUsers(db *sql.DB) ([]*User, error) {
	rows, err := db.Query("SELECT user_id, name, email, phone, messenger, auth_key, password_hash, approve_token, picture, birthday, updated, lastlogin, roles FROM user")
	if err != nil {
		// return nil, err
		fmt.Fprintf(os.Stderr, "%s\n", err)
		os.Exit(1)
	}
	defer rows.Close()
	users := make([]*User, 0)
	for rows.Next() {
		u := new(User)
		err := rows.Scan(
			&u.User_id,
			&u.Name,
			&u.Email,
			&u.Phone,
			&u.Messenger,
			&u.Auth_key,
			&u.Password_hash,
			&u.Approve_token,
			&u.Picture,
			&u.Birthday,
			&u.Updated,
			&u.Lastlogin,
			&u.Roles,
		)
		if err != nil {
			fmt.Fprintf(os.Stderr, "%s\n", err)
			os.Exit(1)
		}
		users = append(users, u)
	}
	// Surface any iteration error that ended the loop early.
	if err = rows.Err(); err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err)
		os.Exit(1)
	}
	return users, nil
}
// Paged data using:
// storage as providers imported!!!
// limit, _ := strconv.ParseInt(c.DefaultQuery("limit", "100"), 10, 64)
// offset, _ := strconv.ParseInt(c.DefaultQuery("offset", "0"), 10, 64)
// users, err := storage.GetUsersPaged(db, limit, offset)
//
// GetUsersPaged returns one page of users ordered by user_id
// descending. Fixed: the query selected a "password" column while
// scanning into Password_hash; every other query in this package reads
// "password_hash". NOTE(review): confirm against the actual schema.
func GetUsersPaged(db *sql.DB, limit int64, offset int64) ([]*User, error) {
	rows, err := db.Query("SELECT user_id, name, email, password_hash, roles FROM user ORDER BY user_id DESC LIMIT ? OFFSET ?", limit, offset)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err)
		os.Exit(1)
	}
	defer rows.Close()
	users := make([]*User, 0)
	for rows.Next() {
		u := new(User)
		err := rows.Scan(
			&u.User_id,
			&u.Name,
			&u.Email,
			&u.Password_hash,
			&u.Roles,
		)
		if err != nil {
			fmt.Fprintf(os.Stderr, "%s\n", err)
			os.Exit(1)
		}
		users = append(users, u)
	}
	if err = rows.Err(); err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err)
		os.Exit(1)
	}
	return users, nil
}
// Return rows affected only!!!
//
// ApproveNewUser clears the user's approve token and assigns setRole,
// matching on email plus the presented token.
func ApproveNewUser(db *sql.DB, email, token, setRole string) int64 {
	stmt, err := db.Prepare(`update user set approve_token = "", roles = ? where email = ? and approve_token = ?`)
	if err != nil {
		log.Panicln(err)
	}
	// Close only after the error check: the previous order deferred
	// Close on a nil stmt when Prepare failed, causing a panic inside
	// the deferred call rather than the intended log.Panicln.
	defer stmt.Close()
	res, err := stmt.Exec(setRole, email, token)
	if err != nil {
		log.Panicln(err)
	}
	rowCnt, err := res.RowsAffected()
	if err != nil {
		log.Panicln(err)
	}
	return rowCnt
}
// NewUserWithApproveToken inserts a new user row and returns the
// approve token it was created with together with the new row's ID.
func NewUserWithApproveToken(db *sql.DB, u User) (newApproveToken string, lastId int64) {
	stmt, err := db.Prepare(`
	INSERT INTO user(user_id, name, email, phone, password_hash, approve_token, updated, roles)
	VALUES
	(null, ?, ?, ?, ?, ?, ?, ?);
	`)
	if err != nil {
		log.Panicln(err)
	}
	// Close only after the error check: deferring on a nil stmt (when
	// Prepare fails) panics inside the deferred call.
	defer stmt.Close()
	res, err := stmt.Exec(u.Name, u.Email, u.Phone, u.Password_hash, u.Approve_token, u.Updated, u.Roles)
	if err != nil {
		log.Panicln(err)
	}
	lastId, err = res.LastInsertId()
	if err != nil {
		log.Panicln(err)
	}
	// rowCnt, err := res.RowsAffected()
	// if err != nil {
	// 	log.Panicln(err)
	// }
	// log.Println("RowsAffected():",rowCnt)
	return u.Approve_token, lastId
}
// UpdateUserInfo rewrites the editable profile fields of the user
// identified by u.User_id and returns the number of rows affected.
func UpdateUserInfo(db *sql.DB, u User) (rowCnt int64) {
	stmt, err := db.Prepare(`UPDATE user SET
	name=?, email= ?, phone=?, password_hash=?, picture=?, birthday=?, updated=? where user_id=?;
	`)
	if err != nil {
		log.Panicln(err)
	}
	// Close only after the error check: deferring on a nil stmt (when
	// Prepare fails) panics inside the deferred call.
	defer stmt.Close()
	// Fixed: the statement has 8 placeholders but only 7 arguments were
	// passed (u.Birthday was missing), so every Exec failed and shifted
	// birthday/updated/user_id one position left.
	res, err := stmt.Exec(u.Name, u.Email, u.Phone, u.Password_hash, u.Picture, u.Birthday, u.Updated, u.User_id)
	if err != nil {
		log.Panicln(err)
	}
	// lastId, err = res.LastInsertId()
	// if err != nil {
	// 	log.Panicln(err)
	// }
	rowCnt, err = res.RowsAffected()
	if err != nil {
		log.Panicln(err)
	}
	return rowCnt
}
|
/**
*packet def
*[LEN_16|CMDID_16|ID_32|FROM_16|TO_16|VCODE_16|PV_8|CSRC_8] = 16
*
*packet define file description
* @type = msg,entity
*<type = msg,cmd =110>
*<field =nid,type=uint32 desc=""/>
**/
package packet
import (
"encoding/binary"
"fmt"
"github.com/colefan/gsgo/netio/iobuffer"
)
const (
	PACKET_PROXY_HEADER_LEN = 18 // proxy-protocol header length in bytes
	// Maximum size of one packet including the header. The original
	// note said to subtract 16 for the payload although the header
	// above is 18 bytes — NOTE(review): confirm which figure is current.
	PACKET_LIMIT_SIZE = 65535
)
// Encoder serializes a value into an output buffer.
type Encoder interface {
	Encode(writeBuf *iobuffer.OutBuffer) *iobuffer.OutBuffer
}

// Decoder consumes bytes and reports success plus the unread remainder.
type Decoder interface {
	Decode(data []byte) (bool, []byte)
}

// EntityDecoder decodes an entity from a packet's payload.
type EntityDecoder interface {
	DecodeEntity(p *Packet) bool
}

// EntityEncoder serializes an entity into an output buffer.
type EntityEncoder interface {
	EncodeEntity(writeBuf *iobuffer.OutBuffer) *iobuffer.OutBuffer
}
// Header is the fixed 18-byte packet header; see the layout sketch in
// the file comment.
type Header struct {
	PackLen   uint16 // length of the packet body
	CmdID     uint16 // protocol/command ID
	ID        uint32 // user ID
	FSID      uint16 // presumably the sending service ID (paired with TSID) — confirm
	TSID      uint16 // receiving service ID
	ValidCode uint16 // validation code
	Version   uint8  // protocol version, 0-255
	ClientSrc uint8  // client source
	ErrCode   uint16 // error code; 0 means OK
}
// Decode parses the 18-byte wire header from data in big-endian order
// and returns the remaining payload bytes. It reports false when data
// is shorter than PACKET_PROXY_HEADER_LEN; the previous version
// panicked there, which made its own failure return unreachable and is
// hostile for a library routine (callers such as Packing already check
// the boolean result).
func (h *Header) Decode(data []byte) (bool, []byte) {
	if len(data) < PACKET_PROXY_HEADER_LEN {
		return false, nil
	}
	h.PackLen = binary.BigEndian.Uint16(data)
	data = data[2:]
	h.CmdID = binary.BigEndian.Uint16(data)
	data = data[2:]
	h.ID = binary.BigEndian.Uint32(data)
	data = data[4:]
	h.FSID = binary.BigEndian.Uint16(data)
	data = data[2:]
	h.TSID = binary.BigEndian.Uint16(data)
	data = data[2:]
	h.ValidCode = binary.BigEndian.Uint16(data)
	data = data[2:]
	h.Version = uint8(data[0])
	h.ClientSrc = uint8(data[1])
	data = data[2:]
	h.ErrCode = binary.BigEndian.Uint16(data)
	data = data[2:]
	return true, data
}
// Encode writes the header fields to writeBuf in wire order,
// allocating a fresh 1024-byte buffer when writeBuf is nil, and
// returns the buffer used.
func (h *Header) Encode(writeBuf *iobuffer.OutBuffer) *iobuffer.OutBuffer {
	if writeBuf == nil {
		writeBuf = iobuffer.NewOutBuffer(1024)
	}
	writeBuf.PutUint16(h.PackLen)
	writeBuf.PutUint16(h.CmdID)
	writeBuf.PutUint32(h.ID)
	writeBuf.PutUint16(h.FSID)
	writeBuf.PutUint16(h.TSID)
	writeBuf.PutUint16(h.ValidCode)
	writeBuf.PutUint8(h.Version)
	writeBuf.PutUint8(h.ClientSrc)
	writeBuf.PutUint16(h.ErrCode)
	return writeBuf
}
// Packet couples a decoded Header with the raw bytes it came from.
type Packet struct {
	Header
	headeRawData []byte // the original 18 header bytes, kept for forwarding
	RawData      []byte // payload bytes remaining after the header
	PackDecoded  bool   // reports whether the payload has been decoded (see IsDecoded)
}
// Packing wraps raw bytes into a Packet, decoding the 18-byte header
// up front. It returns nil when data cannot hold a full header or the
// header fails to decode. RawData is left pointing at the payload; the
// raw header bytes are copied aside for later re-forwarding.
// (The leftover debug Println on the failure path was removed.)
func Packing(data []byte) *Packet {
	if len(data) < PACKET_PROXY_HEADER_LEN {
		return nil
	}
	pack := &Packet{RawData: data, PackDecoded: false}
	pack.headeRawData = make([]byte, 0, PACKET_PROXY_HEADER_LEN)
	pack.headeRawData = append(pack.headeRawData, data[0:PACKET_PROXY_HEADER_LEN]...)
	ok, rest := pack.Header.Decode(data)
	if !ok {
		return nil
	}
	pack.RawData = rest
	return pack
}
// NewEmptyPacket returns a Packet whose payload has not been decoded.
func NewEmptyPacket() *Packet {
	return &Packet{PackDecoded: false}
}
// IsDecoded reports whether this packet's payload has been decoded.
func (this *Packet) IsDecoded() bool {
	return this.PackDecoded
}
// DecodePacket is the packet decode hook; it is meant to be overridden
// by embedding types and always reports false here.
func (this *Packet) DecodePacket() bool {
	return false
}
// EncodePacket is the packet encode hook; it is meant to be overridden
// by embedding types and always returns nil here.
func (this *Packet) EncodePacket(nLen int) *iobuffer.OutBuffer {
	return nil
}
// GetClientFromRawData reassembles the full wire packet (original
// header bytes followed by the payload) for forwarding to a client.
func (this *Packet) GetClientFromRawData() []byte {
	data := make([]byte, 0, this.Header.PackLen+PACKET_PROXY_HEADER_LEN)
	data = append(data, this.headeRawData...)
	data = append(data, this.RawData...)
	return data
}
// DecoderReadValue reads one big-endian value of the type pointed to
// by v from the packet's remaining payload, advancing RawData past the
// consumed bytes. Strings and slices are prefixed with a uint16
// length. Unsupported pointer types panic, as does a string whose
// declared length exceeds the remaining payload.
func DecoderReadValue(this *Packet, v interface{}) bool {
	switch vtype := v.(type) {
	case *byte:
		*v.(*byte) = this.RawData[0]
		this.RawData = this.RawData[1:]
	case *uint16:
		*v.(*uint16) = binary.BigEndian.Uint16(this.RawData)
		this.RawData = this.RawData[2:]
	case *uint32:
		*v.(*uint32) = binary.BigEndian.Uint32(this.RawData)
		this.RawData = this.RawData[4:]
	case *uint64:
		*v.(*uint64) = binary.BigEndian.Uint64(this.RawData)
		this.RawData = this.RawData[8:]
	case *string:
		strLen := binary.BigEndian.Uint16(this.RawData)
		this.RawData = this.RawData[2:]
		// fmt.Println("strLen=>", int(strLen), "data len =>", len(this.RawData))
		if int(strLen) > 0 && len(this.RawData) >= int(strLen) {
			*v.(*string) = string(this.RawData[0:strLen])
			this.RawData = this.RawData[int(strLen):]
		} else {
			panic("not enough bytes to read for string")
			// Unreachable: the panic above never falls through.
			return false
		}
	case *[]byte:
		arrLen := binary.BigEndian.Uint16(this.RawData)
		this.RawData = this.RawData[2:]
		if arrLen > 0 {
			*v.(*[]byte) = append(*v.(*[]byte), this.RawData[0:arrLen]...)
		}
		this.RawData = this.RawData[arrLen:]
	case *[]uint16:
		arrLen := binary.BigEndian.Uint16(this.RawData)
		this.RawData = this.RawData[2:]
		if arrLen > 0 {
			for i := 0; i < int(arrLen); i++ {
				*v.(*[]uint16) = append(*v.(*[]uint16), binary.BigEndian.Uint16(this.RawData))
				this.RawData = this.RawData[2:]
			}
		}
	case *[]uint32:
		arrLen := binary.BigEndian.Uint16(this.RawData)
		this.RawData = this.RawData[2:]
		if arrLen > 0 {
			for i := 0; i < int(arrLen); i++ {
				*v.(*[]uint32) = append(*v.(*[]uint32), binary.BigEndian.Uint32(this.RawData))
				this.RawData = this.RawData[4:]
			}
		}
	case *[]uint64:
		arrLen := binary.BigEndian.Uint16(this.RawData)
		this.RawData = this.RawData[2:]
		if arrLen > 0 {
			for i := 0; i < int(arrLen); i++ {
				*v.(*[]uint64) = append(*v.(*[]uint64), binary.BigEndian.Uint64(this.RawData))
				this.RawData = this.RawData[8:]
			}
		}
	default:
		// Unsupported destination type: fail loudly with the type value.
		panic(vtype)
	}
	return true
}
// DecoderReadArrayLength consumes and returns the uint16 length prefix
// at the head of the packet payload.
func DecoderReadArrayLength(p *Packet) int {
	nLen := binary.BigEndian.Uint16(p.RawData)
	p.RawData = p.RawData[2:]
	return int(nLen)
}
// DecoderReadEntity delegates decoding of a structured entity to the
// entity itself. Pass a pointer when supplying an entity instance.
func DecoderReadEntity(p *Packet, entity EntityDecoder) bool {
	return entity.DecodeEntity(p)
}
|
package easyorm
// Table is a placeholder for an ORM-mapped database table.
// NOTE(review): every method below is an unimplemented stub returning
// zero values; confirm before relying on any of them.
type Table struct {
}

// Count reports the number of rows (stub: always 0, nil).
func (t *Table) Count() (int, error) {
	return 0, nil
}

// Add inserts a value (stub: always nil, nil).
func (t *Table) Add(value interface{}) (interface{}, error) {
	return nil, nil
}

// Set updates a value (stub: always nil, nil).
func (t *Table) Set(value interface{}) (interface{}, error) {
	return nil, nil
}

// Get fetches the value for key (stub: always nil, nil).
func (t *Table) Get(key interface{}) (interface{}, error) {
	return nil, nil
}

// Remove deletes the value for key (stub: always nil).
func (t *Table) Remove(key interface{}) error {
	return nil
}
|
/*
Because I forgot to celebrate Pi Day (14.3), let's celebrate with π, e (Euler's number) and music!
Challenge
No, we don't have time to eat a pi-pizza, let's make a program.
What you need is 500 digits of π, and 10 digits of e.
The input is an integer n between 0 and 499 inclusive.
Then you should loop through the first n digits of π:
If the digit is:
0 then the note is C
1 then the note is D
2 then the note is E
3 then the note is F
4 then the note is G
5 then the note is A
6 then the note is B
7 then the note is C'
8 then the note is D'
9 then the note is E'
Next, for each digit in π, take a digit from e based on this mapping:
If the digit from π is 0, take the 1st digit from e
If the digit from π is 1, take the 2nd digit from e
If the digit from π is 2, take the 3rd digit from e
etc.
You need only 10 digits of e, because the digits in π are between 0 and 9.
Finally, take the note and the digit from e. Return a tuple (or equivalent) containing:
the note
the e digit divided by 4 (representing the beat)
Test cases
In:10
Out:
('D', 0.25)
('G', 2.0)
('D', 0.25)
('A', 0.25)
("E'", 1.0)
('E', 2.0)
('B', 2.0)
('A', 0.25)
('F', 0.5)
('A', 0.25)
In:5
Out:
('D', 0.25)
('G', 2.0)
('D', 0.25)
('A', 0.25)
("E'", 1.0)
Help
Here are 500 digits of π:
3.14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651328230664709384460955058223172535940812848111745028410270193852110555964462294895493038196442881097566593344612847564823378678316527120190914564856692346034861045432664821339360726024914127372458700660631558817488152092096282925409171536436789259036001133053054882046652138414695194151160943305727036575959195309218611738193261179310511854807446237996274956735188575272489122793818301194912
And 10 digits of e:
2.7182818284
Note that '3.' and '2.' don't count in the digits of π and e, and that we are using 0 indexing (so the 0th digit of π is 1 etc.).
Rules
This is code-golf so the shortest answer wins.
Optional; After every tuple or list output, there can be a trailing newline.
As one week is over, here is an ungolfed code in Python 2:
Python 2, 526 bytes
def music_maker(n):
i=p=1;x=3*100**n
while x:x=x*i/-~i/4;i+=2;p+=x/i
pi_number=str(p)[:-1] #First 3 lines calculate Pi
euler='7182818284'
del x,i,p #You don't need those variables any more. They were meant for calculating
for i in range(n):
current_pi = pi_number[i] #Current Pi
current_e = euler[int(current_pi)] #Current e
number_to_note = {0:"C", 1:"D",2:"E",3:"F",4:"G",5:"A",6:"B",7:"C'",8:"D'",9:"E'"} #Dict number to note
print((number_to_note[int(current_pi)], int(current_e)/4)) #Prints result
*/
package main
import (
"fmt"
"reflect"
)
// main checks parse against the two test cases from the challenge
// statement (n=10 and n=5).
func main() {
	test(10, []tuple{
		{"D", 0.25},
		{"G", 2.0},
		{"D", 0.25},
		{"A", 0.25},
		{"E'", 1.0},
		{"E", 2.0},
		{"B", 2.0},
		{"A", 0.25},
		{"F", 0.5},
		{"A", 0.25},
	})
	test(5, []tuple{
		{"D", 0.25},
		{"G", 2.0},
		{"D", 0.25},
		{"A", 0.25},
		{"E'", 1.0},
	})
}
// assert panics when the given condition does not hold.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// test runs parse(n), prints the melody, and asserts that it matches
// the expected tuples exactly.
func test(n int, r []tuple) {
	p := parse(n)
	fmt.Println(p)
	assert(reflect.DeepEqual(p, r))
}
func parse(n int) []tuple {
const pi = "14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651328230664709384460955058223172535940812848111745028410270193852110555964462294895493038196442881097566593344612847564823378678316527120190914564856692346034861045432664821339360726024914127372458700660631558817488152092096282925409171536436789259036001133053054882046652138414695194151160943305727036575959195309218611738193261179310511854807446237996274956735188575272489122793818301194912"
const e = "7182818284"
var notes = []string{"C", "D", "E", "F", "G", "A", "B", "C'", "D'", "E'"}
r := []tuple{}
for i := 0; i < n; i++ {
c := pi[i%len(pi)] - '0'
b := e[c] - '0'
r = append(r, tuple{
note: notes[c],
beat: float64(b) / 4,
})
}
return r
}
type tuple struct {
note string
beat float64
}
|
package main
import (
"bufio"
"fmt"
"os"
)
// https://www.hackerrank.com/challenges/sherlock-and-valid-string
//
// A string is "valid" when every letter occurs the same number of
// times, or can be made so by deleting exactly one character. The
// letter frequencies are bucketed into at most two distinct counts;
// three or more distinct counts can never be fixed by one deletion.
func main() {
	reader := bufio.NewReaderSize(os.Stdin, 100001)
	l, _, _ := reader.ReadLine()
	// rf[i] = number of occurrences of letter 'a'+i.
	rf := make([]int, 26)
	for _, i := range l {
		rf[i-'a']++
	}
	// a, b: the (at most two) distinct frequencies seen; ac, bc: how
	// many letters carry each frequency. -1 means "unset".
	a := -1
	ac := 0
	b := -1
	bc := 0
	fail := false
	for _, f := range rf {
		if f == 0 {
			continue
		}
		if a == -1 {
			a = f
			ac++
		} else if a == f {
			ac++
		} else {
			if b == -1 {
				b = f
				bc++
			} else if f == b {
				bc++
			} else {
				// A third distinct frequency: unfixable by one deletion.
				fail = true
				break
			}
		}
	}
	if fail {
		fmt.Println("NO")
	} else {
		// Normalize so that (a, ac) is the class with more letters.
		if ac < bc {
			a, b = b, a
			ac, bc = bc, ac
		}
		if b == -1 {
			// Only one distinct frequency: already valid.
			fmt.Println("YES")
		} else if b == 1 && bc == 1 {
			// One letter occurs exactly once: delete it entirely.
			fmt.Println("YES")
		} else if (b-a) == 1 && bc == 1 {
			// One letter occurs once more than the rest: drop one of it.
			fmt.Println("YES")
		} else {
			fmt.Println("NO")
		}
	}
	fmt.Println()
}
|
package learnfunc
import "testing"
// TestFuncParams logs how arrays, slices and arrays-of-slices behave
// when passed to the modify* helpers, before and after each call.
// NOTE(review): modifyArray/modifySlice/modifyComplexArray are defined
// elsewhere in this package — the expected pass-by-value vs shared-
// backing-array behavior is inferred from the log labels; confirm
// against those helpers.
func TestFuncParams(t*testing.T){
	array1 := [3]string{"a","b","c"}
	t.Logf("The array:%v\n",array1)
	array2 := modifyArray(array1)
	t.Logf("The array:%v\n",array2)
	t.Logf("The array:%v\n",array1)
	slice1 := []string{"x","y","z"}
	t.Logf("The slice:%v\n",slice1)
	slice2 := modifySlice(slice1)
	t.Logf("The slice:%v\n",slice2)
	t.Logf("The slice:%v\n",slice1)
	complexArray1 := [3][]string{
		[]string{"d","e","f"},
		[]string{"g","h","i"},
		[]string{"j","k","l"},
	}
	t.Logf("The complex array:%v\n",complexArray1)
	complexArray2 := modifyComplexArray(complexArray1)
	t.Logf("The modify complex array:%v\n",complexArray2)
	t.Logf("The original complex array:%v\n",complexArray1)
}
|
package alert
import (
"crypto/tls"
"fmt"
"log"
"net"
"net/smtp"
"strconv"
"strings"
)
//return a smtp client
//
// Dial opens a TLS connection to addr ("host:port") and wraps it in an
// smtp.Client. Dial failures are logged and returned.
func Dial(addr string) (*smtp.Client, error) {
	conn, err := tls.Dial("tcp", addr, nil)
	if err != nil {
		log.Println("Dialing Error:", err)
		return nil, err
	}
	// Split the host:port string; NewClient wants the bare host name.
	host, _, _ := net.SplitHostPort(addr)
	return smtp.NewClient(conn, host)
}
//refer to http://www.oschina.net/code/snippet_166520_34694
// Mirrors net/smtp's func SendMail() but over an implicit-TLS
// connection: when connecting to a tls(ssl) port with plain net.Dial,
// smtp.NewClient() hangs without reporting an error.
// When len(to)>1, recipients from to[1] onward are effectively BCC'd.
func SendMailUsingTLS(addr string, auth smtp.Auth, from string,
	to []string, msg []byte) (err error) {
	//create smtp client
	c, err := Dial(addr)
	if err != nil {
		log.Println("Create smpt client error:", err)
		return err
	}
	defer c.Close()
	// Authenticate only when the server advertises AUTH support.
	if auth != nil {
		if ok, _ := c.Extension("AUTH"); ok {
			if err = c.Auth(auth); err != nil {
				log.Println("Error during AUTH", err)
				return err
			}
		}
	}
	if err = c.Mail(from); err != nil {
		return err
	}
	for _, addr := range to {
		if err = c.Rcpt(addr); err != nil {
			return err
		}
	}
	// Stream the message body and close the data writer before QUIT.
	w, err := c.Data()
	if err != nil {
		return err
	}
	_, err = w.Write(msg)
	if err != nil {
		return err
	}
	err = w.Close()
	if err != nil {
		return err
	}
	return c.Quit()
}
// SendMail delivers a message through plain smtp.SendMail. to may hold
// several addresses separated by ";"; mailtype selects between an HTML
// and a plain-text body.
func SendMail(user, password, host, to, subject, body, mailtype string) error {
	hp := strings.Split(host, ":")
	auth := smtp.PlainAuth("", user, password, hp[0])

	contentType := "Content-Type: text/plain" + "; charset=UTF-8"
	if mailtype == "html" {
		contentType = "Content-Type: text/" + mailtype + "; charset=UTF-8"
	}

	msg := []byte("To: " + to + "\r\nFrom: " + user + "<" + user + ">\r\nSubject: " + subject + "\r\n" + contentType + "\r\n\r\n" + body)
	recipients := strings.Split(to, ";")
	return smtp.SendMail(host, auth, user, recipients, msg)
}
// MemoryAlert mails a memory-usage warning for the given container to
// the hard-coded recipient list via smtp.163.com, stopping at the
// first send failure.
//
// SECURITY(review): the SMTP account and password are hard-coded in
// source; move them to configuration/secret storage and rotate them.
func MemoryAlert(nodeIP string, containerID string, memoryPercentage int) error {
	host := "smtp.163.com:25"
	user := "18767169274@163.com"
	password := "cformalert123456"
	//mailList := []string{"zhewang@daocloud.io", "zg.zhu@daocloud.io", "davidz@cform.io"}
	mailList := []string{"zhewang@daocloud.io"}
	alertInfo := "Cform production alert!!! " + "nodeIP:" + nodeIP + " ContainerID: " + containerID + " MemoryPercentage: " + strconv.Itoa(memoryPercentage) + "%"
	subject := "Alert from daocloud"
	body := `
		<html>
		<body>
		<h3>
		` + alertInfo + `
		</h3>
		</body>
		</html>
		`
	fmt.Println("alert")
	for _, value := range mailList {
		err := SendMail(user, password, host, value, subject, body, "html")
		if err != nil {
			return err
		} else {
			fmt.Printf("send mail to %s successfully!\n", value)
		}
	}
	return nil
}
|
package lambdatohttp
import (
"context"
"github.com/aws/aws-lambda-go/events"
"github.com/gorilla/mux"
"io"
"net/http"
"net/url"
"strings"
)
// ServeRequest translates an API Gateway proxy event into an
// http.Request, dispatches it through the mux router against an
// in-memory ResponseWriter, and converts the captured status, headers
// and body back into an APIGatewayProxyResponse.
func ServeRequest(router *mux.Router, ctx context.Context, req events.APIGatewayProxyRequest) events.APIGatewayProxyResponse {
	customHttpResponse := customHttpResponse{
		Status:  0,
		Headers: http.Header{},
		Body:    nil,
	}
	router.ServeHTTP(customHttpResponseWriter{
		response: &customHttpResponse,
	}, createHttpRequest(req))
	return events.APIGatewayProxyResponse{
		StatusCode:        determineResponseStatus(customHttpResponse),
		Headers:           flattenResponseHeaders(customHttpResponse),
		MultiValueHeaders: customHttpResponse.Headers,
		Body:              string(customHttpResponse.Body),
		IsBase64Encoded:   false,
	}
}
// determineResponseStatus maps an unset (non-positive) status to 200 OK.
func determineResponseStatus(res customHttpResponse) int {
	if res.Status <= 0 {
		return 200
	}
	return res.Status
}
// flattenResponseHeaders converts the captured multi-value header map into
// the single-value map API Gateway expects, joining multiple values with
// commas.
//
// BUG FIX: the original only copied headers with len(value) > 1, so every
// single-valued header was silently dropped from the flattened map.
func flattenResponseHeaders(res customHttpResponse) map[string]string {
	singleValueHeaders := make(map[string]string, len(res.Headers))
	for header, value := range res.Headers {
		singleValueHeaders[header] = strings.Join(value, ",")
	}
	return singleValueHeaders
}
// createHttpRequest converts the API Gateway proxy request into an
// *http.Request suitable for dispatching through the router. Explicit
// zero-valued fields from the original literal are omitted.
func createHttpRequest(req events.APIGatewayProxyRequest) *http.Request {
	return &http.Request{
		Method:     req.HTTPMethod,
		URL:        generateUrl(req),
		Proto:      req.RequestContext.Protocol,
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     req.MultiValueHeaders,
		// Fixed: requestBodyReader's value-receiver Read never advanced its
		// internal state, so any partial read would restart from the
		// beginning forever; a strings.Reader streams the body correctly.
		Body:          io.NopCloser(strings.NewReader(req.Body)),
		ContentLength: int64(len(req.Body)),
		Host:          req.RequestContext.DomainName,
	}
}
// generateQueryString rebuilds the raw query string from the request's
// multi-value query parameters.
//
// BUG FIX: the original concatenated raw "param=value" pairs with no
// percent-encoding, producing an invalid RawQuery whenever a value contained
// '&', '=', or other reserved characters. url.Values.Encode escapes properly
// (and emits keys in sorted order; the original order was map-random anyway).
func generateQueryString(req events.APIGatewayProxyRequest) string {
	return url.Values(req.MultiValueQueryStringParameters).Encode()
}
// generateUrl reconstructs a url.URL for the proxied request: scheme from the
// X-Forwarded-Proto header, host from the request context, and the query
// string rebuilt from the multi-value parameters. Fields the original set to
// their zero values are simply omitted.
func generateUrl(req events.APIGatewayProxyRequest) *url.URL {
	u := url.URL{
		Scheme:   req.Headers["X-Forwarded-Proto"],
		Host:     req.RequestContext.DomainName,
		Path:     req.Path,
		RawPath:  req.Path,
		RawQuery: generateQueryString(req),
	}
	return &u
}
// customHttpResponse accumulates everything the handler writes: status code,
// headers, and raw body bytes.
type customHttpResponse struct {
	Status  int
	Headers http.Header
	Body    []byte
}
// customHttpResponseWriter implements http.ResponseWriter on top of a
// customHttpResponse.
type customHttpResponseWriter struct {
	response *customHttpResponse
}
// Header exposes the in-memory header map for the handler to mutate.
func (w customHttpResponseWriter) Header() http.Header {
	return w.response.Headers
}
// Write appends content to the buffered body; it never fails.
func (w customHttpResponseWriter) Write(content []byte) (int, error) {
	w.response.Body = append(w.response.Body, content...)
	return len(content), nil
}
// WriteHeader records the status code for later conversion.
func (w customHttpResponseWriter) WriteHeader(statusCode int) {
	w.response.Status = statusCode
}
// requestBodyReader adapts the in-memory API Gateway request body to
// io.ReadCloser.
//
// NOTE(review): Read uses a value receiver, so the advance of r.body is lost
// between calls — every Read restarts at the start of the body. The receiver
// cannot be switched to a pointer here without also changing the literal in
// createHttpRequest, so only the internals are fixed.
type requestBodyReader struct {
	body []byte
}

// Read copies as much of the body as fits into p, returning io.EOF once the
// body is empty.
func (r requestBodyReader) Read(p []byte) (int, error) {
	if len(r.body) == 0 {
		return 0, io.EOF
	}
	// copy() replaces the original's manual byte loops.
	n := copy(p, r.body)
	// BUG FIX: the original advanced by len(p)-1, which would have re-read
	// one byte on every partial read (had the state persisted).
	r.body = r.body[n:]
	return n, nil
}

// Close is a no-op; the body is held entirely in memory.
func (r requestBodyReader) Close() error {
	return nil
}
|
package main
import (
"fmt"
"io/ioutil"
"net/http/httptest"
"strings"
"testing"
)
// TestNewServer verifies the constructor wires up the port, the redis client,
// and both routing regexes.
func TestNewServer(t *testing.T) {
	s := NewServer(337, "something", 123)
	if s.port != 337 {
		t.Fatal("Invalid port found!")
	}
	if s.redisDB == nil {
		t.Fatal("Invalid redis db client!")
	}
	if s.getMatch == nil {
		t.Fatal("Get match invalid!")
	}
	if s.setMatch == nil {
		t.Fatal("Set match invalid!")
	}
}
// TestRoutingMatches verifies the get/set URL regexes accept well-formed
// paths.
func TestRoutingMatches(t *testing.T) {
	s := NewServer(337, "localhost", 6379)
	// Idiom fix: test the boolean directly instead of comparing to true.
	if !s.getMatch.MatchString("/get/site.com/live/value") {
		t.Fatal("invalid get match regex!")
	}
	if !s.setMatch.MatchString("/set/site.com/live/value") {
		t.Fatal("Invalid set match regex!")
	}
}
// TestSetValue calls setValue directly and checks the recorded response.
// NOTE(review): the server is built against localhost:6379, so this is
// presumably an integration test that needs a local redis — confirm.
func TestSetValue(t *testing.T) {
	s := NewServer(123, "localhost", 6379)
	w := httptest.NewRecorder()
	s.setValue(w, "something", "live", "key")
	resp := w.Result()
	body, _ := ioutil.ReadAll(resp.Body)
	if resp.StatusCode != 200 {
		t.Fatal("Invalid status code received!")
	}
	bodyStr := string(body)
	fmt.Println(bodyStr)
	if strings.Contains(bodyStr, `"key":"key"`) == false {
		t.Fatal("Did not find key field!")
	}
}
// TestServeHTTP drives the full router via a recorded GET on a /set/ path.
// NOTE(review): also presumably needs redis on localhost:6379; "seomthing"
// looks like a typo for "something", though the path segments appear
// arbitrary — confirm.
func TestServeHTTP(t *testing.T) {
	s := NewServer(123, "localhost", 6379)
	w := httptest.NewRecorder()
	r := httptest.NewRequest("GET", "/set/seomthing/something/something", nil)
	s.ServeHTTP(w, r)
	resp := w.Result()
	body, _ := ioutil.ReadAll(resp.Body)
	if resp.StatusCode != 200 {
		t.Fatal("Invalid status code received!")
	}
	bodyStr := string(body)
	fmt.Println(bodyStr)
	if strings.Contains(bodyStr, `"key":"key"`) == false {
		t.Fatal("Did not find key field!")
	}
}
|
package main
import (
"bytes"
"compress/gzip"
"io/ioutil"
pb "learn_go/sockets/heartpackage/secondtest/protocol"
"log"
"net"
"os"
"time"
"google.golang.org/protobuf/proto"
)
var (
	// globalMainTable accumulates every MainInfo registration received so
	// far; updated/appended by HeartBeating. NOTE(review): accessed from
	// multiple connection goroutines without a lock — confirm
	// synchronization.
	globalMainTable = &pb.MainTable{}
)
// GravelChannel forwards the first byte of data (the message-type byte read
// by HeartBeating) onto message and then closes the channel. An empty or nil
// slice just closes the channel.
//
// The parameter was renamed from `bytes`, which shadowed the bytes package.
func GravelChannel(data []byte, message chan byte) {
	if len(data) > 0 {
		message <- data[0]
	}
	close(message)
}
func gzipUnCompress(content *[]byte) []byte {
var uncompressData bytes.Buffer
uncompressData.Write(*content)
r, _ := gzip.NewReader(&uncompressData)
defer r.Close()
undatas, _ := ioutil.ReadAll(r)
return undatas
}
// HeartBeating waits up to 5s for one type byte on message and dispatches:
// 'h' refreshes the peer's deadline (heartbeat), 'r' decodes a MainInfo
// registration from data[1:] into globalMainTable, anything else is logged
// and ignored. On timeout the connection is closed.
func HeartBeating(conn net.Conn, message chan byte, timeout int, data []byte) {
	select {
	case fk := <-message:
		if string(fk) == "h" {
			remoteAddress := conn.RemoteAddr().String()
			log.Printf("reciver %s of 心跳包 %s:\n", remoteAddress, string(data))
			conn.SetDeadline(time.Now().Add(time.Duration(timeout) * time.Second))
			break
		} else if string(fk) == "r" {
			log.Println("enter")
			maininfo := &pb.MainInfo{}
			proto.Unmarshal(data[1:], maininfo)
			// BUG FIX: the original appended maininfo even after updating an
			// existing entry with the same IP, leaving duplicates in the
			// table; now it only appends when no entry matched.
			updated := false
			for _, v := range globalMainTable.Maintable {
				if maininfo.Ip == v.Ip {
					log.Println("the some ip update some info")
					v.Status = maininfo.Status
					v.Weight = maininfo.Weight
					updated = true
					break
				}
			}
			if !updated {
				globalMainTable.Maintable = append(globalMainTable.Maintable, maininfo)
			}
			conn.Write([]byte("receive a register info"))
			for _, v := range globalMainTable.Maintable {
				log.Println(v.Weight, v.Status, v.Ip, v.Port)
			}
			log.Printf("receiver register %d info\n", len(globalMainTable.Maintable))
			// TODO
			// broadcast the collected miner info to all peers once complete
		} else {
			log.Println("defaule")
		}
	case <-time.After(5 * time.Second):
		conn.Close()
	}
}
// handleConnection reads frames from conn in a loop; for each frame it spawns
// HeartBeating (which consumes the type byte and applies heartbeat/register
// logic) and GravelChannel (which forwards the frame's first byte as that
// type byte).
//
// NOTE(review): buffer is reused across iterations while the spawned
// goroutines still hold slices into it (Data), so a fast sender can race a
// slow handler — verify framing/ownership before relying on this.
func handleConnection(conn net.Conn) {
	defer conn.Close()
	buffer := make([]byte, 1024)
	for {
		n, err := conn.Read(buffer)
		if err != nil {
			log.Println("server read err:", err)
			return
		}
		Data := buffer[:n]
		message := make(chan byte)
		// heartbeat timer
		go HeartBeating(conn, message, 15, Data)
		// per-read: check whether heartbeat data arrived
		go GravelChannel(Data[:1], message)
	}
}
// server listens on 127.0.0.1:7375 and spawns handleConnection per client,
// giving each connection an initial read deadline. (The original comment said
// 10 seconds; the code uses 20.)
func server() {
	server := "127.0.0.1:7375"
	netListen, err := net.Listen("tcp", server)
	if err != nil {
		log.Println("connect error:", err)
		os.Exit(1)
	}
	log.Println("waiting for client...")
	for {
		conn, err := netListen.Accept()
		if err != nil {
			// BUG FIX: the original logged conn.RemoteAddr() here, but conn
			// may be nil when Accept fails, panicking the accept loop.
			log.Println("accept fatal err:", err)
			continue
		}
		conn.SetReadDeadline(time.Now().Add(time.Duration(20) * time.Second))
		go handleConnection(conn)
	}
}
// Init builds the MainInfo registration record this client announces to the
// server (fixed local weight/status/address values).
func Init() *pb.MainInfo {
	sub1 := &pb.MainInfo{
		Weight: 90,
		Status: false,
		Ip:     "127.0.0.1",
		Port:   7375,
	}
	// maintable := &pb.MainTable{}
	// maintable.Maintable = append(maintable.Maintable, sub1)
	return sub1
}
// sendRegister marshals the local MainInfo, prefixes it with the 'r' type
// byte, writes it, and blocks until the server replies with at least one
// byte (treated as the acknowledgement); on any I/O error it returns.
func sendRegister(conn *net.TCPConn) {
	maininfo := Init()
	data, _ := proto.Marshal(maininfo)
	prefix := []byte{'r'}
	newData := []byte{}
	newData = append(newData, prefix...)
	newData = append(newData, data...)
	flag := false
	for {
		sendCount, err := conn.Write(newData)
		if err != nil {
			log.Println("client write err")
			return
		}
		buf := make([]byte, 1024)
		// wait for any reply bytes as the acknowledgement
		for {
			n, err := conn.Read(buf)
			if err != nil {
				log.Println("conn.Read err= ", err)
				return
			}
			if n > 0 {
				flag = true
				break
			}
		}
		if flag {
			log.Println("register conn write success", sendCount)
			break
		}
	}
}
// sendHeart writes "hello" every 2 seconds and logs the echoed reply; the
// leading 'h' byte is what the server's HeartBeating treats as a heartbeat.
// Returns on the first I/O error.
func sendHeart(conn *net.TCPConn) {
	for {
		_, err := conn.Write([]byte("hello"))
		if err != nil {
			log.Println("client write err")
			return
		}
		log.Println("conn write success")
		// receive buffer
		buf := make([]byte, 1024)
		n, err := conn.Read(buf)
		if err != nil {
			log.Println("conn.Read err= ", err)
			return
		}
		log.Println(string(buf[:n]))
		time.Sleep(time.Second * 2)
	}
}
// sender runs the registration handshake and then the heartbeat loop,
// closing the connection when either returns.
func sender(conn *net.TCPConn) {
	defer func() {
		log.Println("client close")
		conn.Close()
	}()
	sendRegister(conn)
	sendHeart(conn)
}
// client dials the registration server, performs registration, then sends
// periodic heartbeats until an error occurs.
//
// NOTE(review): this dials port 7373 while server() in this file listens on
// 7375 — confirm which port is intended.
func client() {
	server := "127.0.0.1:7373"
	tcpAddr, err := net.ResolveTCPAddr("tcp4", server)
	if err != nil {
		// BUG FIX: the original passed os.Stderr as a value to log.Println,
		// which printed the *os.File instead of writing to stderr.
		log.Println("fatal error:", err)
		os.Exit(1)
	}
	conn, err := net.DialTCP("tcp", nil, tcpAddr)
	if err != nil {
		log.Println("fatal error:", err)
		os.Exit(1)
	}
	log.Println(conn.RemoteAddr().String(), "connection success!")
	sender(conn)
	time.Sleep(time.Second * 4)
}
// main runs the client side only; server() exists in this file but is not
// started here.
func main() {
	client()
}
|
package middlewares
import (
"errors"
"fmt"
"github.com/Highway-Project/highway/logging"
"net/http"
)
// Middleware wraps an http.HandlerFunc, returning a handler with additional
// behavior layered around (or instead of) the original.
type Middleware interface {
	Process(handler http.HandlerFunc) http.HandlerFunc
}
// MiddlewareParams wraps the free-form configuration map handed to a
// middleware, with typed accessor helpers.
type MiddlewareParams struct {
	Params map[string]interface{}
}

// GetStringList fetches key from Params as a []string. exists reports
// whether the key was present; err is non-nil when the stored value (or any
// of its elements) is not a string.
func (mp *MiddlewareParams) GetStringList(key string) (res []string, exists bool, err error) {
	v, exists := mp.Params[key]
	if !exists {
		return nil, exists, nil
	}
	// Idiom fix: assert the slice type once instead of re-asserting v on
	// every use as the original did.
	items, ok := v.([]interface{})
	if !ok {
		msg := fmt.Sprintf("%s must be of type []string", key)
		return nil, exists, errors.New(msg)
	}
	res = make([]string, len(items))
	for i, value := range items {
		res[i], ok = value.(string)
		if !ok {
			msg := fmt.Sprintf("%s must be of type []string", key)
			return nil, exists, errors.New(msg)
		}
	}
	return res, exists, nil
}
// GetBool fetches key from Params as a bool. exists reports whether the key
// was present; err is non-nil when the stored value is not a bool.
func (mp *MiddlewareParams) GetBool(key string) (res bool, exists bool, err error) {
	value, present := mp.Params[key]
	if !present {
		return false, false, nil
	}
	b, ok := value.(bool)
	if !ok {
		return false, true, fmt.Errorf("%s must be of type bool", key)
	}
	return b, true, nil
}
// GetInt fetches key from Params as an int. exists reports whether the key
// was present; err is non-nil when the stored value is not an int.
func (mp *MiddlewareParams) GetInt(key string) (res int, exists bool, err error) {
	v, exists := mp.Params[key]
	if !exists {
		return 0, exists, nil
	}
	res, ok := v.(int)
	if !ok {
		msg := fmt.Sprintf("%s must be of type int", key)
		// BUG FIX: the original logged WithError(err) while the named err
		// was still nil; build the error first so the log carries it.
		err = errors.New(msg)
		logging.Logger.WithError(err).Error(msg)
		return 0, exists, err
	}
	return res, exists, nil
}
|
package main
import (
"bufio"
"flag"
"fmt"
"github.com/PacketFire/go-ircd/parser"
"log"
"math/rand"
"net"
"os"
"strings"
"sync"
"time"
)
// init seeds the global math/rand source once at startup.
func init() {
	rand.Seed(time.Now().UTC().UnixNano())
}
// main starts the IRC daemon: it listens on :6667, names the server after the
// local hostname, and serves until the listener fails. A top-level recover
// logs (rather than crashes on) panics escaping Serve.
func main() {
	flag.Parse()
	defer func() {
		if r := recover(); r != nil {
			log.Printf("main: recovered from %v", r)
		}
	}()
	l, err := net.Listen("tcp", ":6667")
	if err != nil {
		log.Fatalf("main: can't listen: %v", err)
	}
	host, err := os.Hostname()
	// BUG FIX: the error from os.Hostname was previously ignored.
	if err != nil {
		log.Fatalf("main: can't determine hostname: %v", err)
	}
	ircd := NewIrcd(host)
	if err := ircd.Serve(l); err != nil {
		log.Printf("Serve: %s", err)
	}
}
// Ircd is the server state: host identity, boot time, and the nick and
// channel tables with their guarding mutexes.
type Ircd struct {
	hostname string
	boottime time.Time
	// number of currently connected clients
	nclients int
	// nick->client, protected by RWMutex
	clients map[string]*Client
	cm      sync.RWMutex
	// channel name -> channel, protected by RWMutex
	channels map[string]*Channel
	chm      sync.RWMutex
}
// NewIrcd constructs a server rooted at the given hostname, recording boot
// time and initializing empty client and channel tables.
func NewIrcd(host string) Ircd {
	ircd := Ircd{
		hostname: host,
		boottime: time.Now(),
	}
	ircd.clients = make(map[string]*Client)
	ircd.channels = make(map[string]*Channel)
	return ircd
}
// NewChannel allocates a new Channel owned by this server, with empty user
// and mode tables; it does not register the channel in i.channels.
func (i *Ircd) NewChannel(name string) *Channel {
	ch := &Channel{
		serv:   i,
		name:   name,
		users:  make(map[*Client]struct{}),
		modes:  NewModeset(),
		umodes: make(map[*Client]Modeset),
	}
	return ch
}
// NewClient allocates a Client for the given connection, deriving the
// client's host from the connection's remote address.
func (i *Ircd) NewClient(c net.Conn) *Client {
	cl := Client{
		serv:    i,
		con:     c,
		inlines: bufio.NewScanner(c),
		modes:   NewModeset(),
	}
	// BUG FIX: the original asserted *net.TCPAddr unconditionally ("pretty
	// sure it's a TCPConn..."), panicking for any non-TCP connection; fall
	// back to the full remote address string instead.
	if tcpa, ok := c.RemoteAddr().(*net.TCPAddr); ok {
		cl.host = tcpa.IP.String()
	} else {
		cl.host = c.RemoteAddr().String()
	}
	return &cl
}
// AddClient registers c in the nick table. The nick must be non-empty and
// not already taken; the client count is bumped on success.
func (i *Ircd) AddClient(c *Client) error {
	if c.nick == "" {
		return fmt.Errorf("bad nick")
	}
	i.cm.Lock()
	defer i.cm.Unlock()
	_, taken := i.clients[c.nick]
	if taken {
		return fmt.Errorf("nick exists")
	}
	i.clients[c.nick] = c
	i.nclients++
	return nil
}
// RemoveClient deletes c from the nick table and decrements the client
// count; it is an error if the nick is empty or absent.
func (i *Ircd) RemoveClient(c *Client) error {
	if c.nick == "" {
		return fmt.Errorf("bad nick")
	}
	i.cm.Lock()
	defer i.cm.Unlock()
	if _, present := i.clients[c.nick]; !present {
		return fmt.Errorf("no such nick %s", c.nick)
	}
	delete(i.clients, c.nick)
	i.nclients--
	return nil
}
// FindByNick looks up a connected client by nick; nil when absent.
func (i *Ircd) FindByNick(nick string) *Client {
	i.cm.RLock()
	cl := i.clients[nick]
	i.cm.RUnlock()
	return cl
}
// FindChannel looks up a channel by name; nil when absent.
func (i *Ircd) FindChannel(name string) *Channel {
	i.chm.RLock()
	ch := i.channels[name]
	i.chm.RUnlock()
	return ch
}
// ChangeNick re-keys the nick table entry from old to nw; it is an error if
// old is not registered.
func (i *Ircd) ChangeNick(old, nw string) error {
	i.cm.Lock()
	defer i.cm.Unlock()
	c, ok := i.clients[old]
	if !ok {
		return fmt.Errorf("no such nick %s", old)
	}
	i.clients[nw] = c
	delete(i.clients, old)
	return nil
}
// Serve accepts connections on l forever, spawning serveClient for each; it
// returns only when Accept fails.
func (i *Ircd) Serve(l net.Listener) error {
	for {
		rw, err := l.Accept()
		if err != nil {
			return err
		}
		c := i.NewClient(rw)
		go i.serveClient(c)
	}
}
// MessageHandler is the signature shared by all IRC command handlers.
type MessageHandler func(i *Ircd, c *Client, m parser.Message) error
var (
	// msgtab maps IRC commands to their handlers (method values on *Ircd).
	msgtab = map[string]MessageHandler{
		"NICK":    (*Ircd).HandleNick,
		"USER":    (*Ircd).HandleUser,
		"QUIT":    (*Ircd).HandleQuit,
		"PING":    (*Ircd).HandlePing,
		"PRIVMSG": (*Ircd).HandlePrivmsg,
		"MODE":    (*Ircd).HandleMode,
		"WHO":     (*Ircd).HandleWho,
		"JOIN":    (*Ircd).HandleJoin,
		"PART":    (*Ircd).HandlePart,
	}
)
// serveClient is the per-connection loop: it scans lines, decodes each into a
// parser.Message, and dispatches through msgtab. Unknown or malformed input
// is reported to the client but does not end the session; the loop exits on
// scanner EOF/error, after which the client is deregistered.
func (i *Ircd) serveClient(c *Client) {
	defer c.con.Close()
	for c.inlines.Scan() {
		var m parser.Message
		if err := m.UnmarshalText(c.inlines.Bytes()); err != nil {
			// NOTE(review): dispatch still proceeds with the zero-valued
			// message after a decode failure — confirm this is intended.
			c.Error("malformed message")
		}
		log.Printf("Client.Serve: %s -> %s", c, m)
		if h, ok := msgtab[m.Command]; ok {
			if err := h(i, c, m); err != nil {
				c.Errorf("%s: %s", m.Command, err)
			}
		} else {
			c.Errorf("not implemented: %s", m.Command)
		}
	}
	i.RemoveClient(c)
	if err := c.inlines.Err(); err != nil {
		log.Printf("serveClient: %s", err)
	}
	log.Printf("serveClient: %s is done", c)
}
// HandleNick implements NICK: it validates the argument, rejects nicks that
// are already in use (433), installs or renames the client in the nick
// table, and — once both nick and user are known — acks the change and sends
// the MOTD exactly once (guarded by c.welcome).
func (i *Ircd) HandleNick(c *Client, m parser.Message) error {
	if len(m.Args) != 1 {
		c.EParams(m.Command)
		return nil
	} else if i.FindByNick(m.Args[0]) != nil {
		// check if nick is in use
		c.Send(i.hostname, "433", "*", m.Args[0], "Nickname already in use")
		return nil
	}
	// write lock
	c.Lock()
	oldnick := c.nick
	// check if we're actually updating an existing client's nick.
	if oldc := i.FindByNick(c.nick); oldc != nil {
		i.ChangeNick(c.nick, m.Args[0])
	}
	c.nick = m.Args[0]
	c.Unlock()
	c.RLock()
	defer c.RUnlock()
	if c.nick != "" && c.user != "" {
		// ack the nick change only if we have an established user/nick
		c.Send(fmt.Sprintf("%s!%s@%s", oldnick, c.user, c.host), "NICK", c.nick)
		// send motd when everything is ready, just once
		c.welcome.Do(func() {
			i.AddClient(c)
			i.DoMotd(c)
		})
	}
	return nil
}
// HandleUser implements USER: it records the username and realname, then —
// mirroring HandleNick — registers the client and sends the MOTD once both
// nick and user are established (guarded by c.welcome).
func (i *Ircd) HandleUser(c *Client, m parser.Message) error {
	if len(m.Args) != 4 {
		c.EParams(m.Command)
		return nil
	}
	// write lock
	c.Lock()
	c.user = m.Args[0]
	c.realname = m.Args[3]
	c.Unlock()
	c.RLock()
	defer c.RUnlock()
	if c.nick != "" && c.user != "" {
		// send motd when everything is ready, just once
		c.welcome.Do(func() {
			i.AddClient(c)
			i.DoMotd(c)
		})
	}
	return nil
}
// HandleQuit acknowledges QUIT with a notice and closes the connection;
// serveClient's scan loop then terminates and deregisters the client.
func (i *Ircd) HandleQuit(c *Client, m parser.Message) error {
	c.Error("goodbye")
	c.con.Close()
	return nil
}
// HandlePing answers PING with a PONG carrying the client's token.
func (i *Ircd) HandlePing(c *Client, m parser.Message) error {
	if len(m.Args) == 1 {
		c.Send(i.hostname, "PONG", i.hostname, m.Args[0])
		return nil
	}
	c.EParams(m.Command)
	return nil
}
// HandlePrivmsg forwards PRIVMSG <target> <text> to Privmsg for routing.
func (i *Ircd) HandlePrivmsg(c *Client, m parser.Message) error {
	if len(m.Args) == 2 {
		i.Privmsg(c, m.Args[0], m.Args[1])
		return nil
	}
	c.EParams(m.Command)
	return nil
}
// Mode change directions; currently referenced only by the commented-out
// dir logic in HandleMode.
const (
	ModeQuery = iota
	ModeAdd
	ModeDel
)
// HandleMode implements MODE queries/changes for channels ('#'/'&' targets)
// and for the sending user.
//
// TODO(mischief): check for user/chan modes when channels are implemented
//
// NOTE(review): in the channel branch the 1-arg (query) case returns without
// sending any reply, while the 2-arg (set) case replies with 324 — the
// mode-query reply. These branches look swapped; confirm against the RFC
// before relying on them.
func (i *Ircd) HandleMode(c *Client, m parser.Message) error {
	c.Lock()
	defer c.Unlock()
	//dir := ModeAdd
	switch n := len(m.Args); n {
	case 1, 2:
		// query
		if strings.Index(m.Args[0], "#") == 0 || strings.Index(m.Args[0], "&") == 0 {
			// channel
			if ch := i.FindChannel(m.Args[0]); ch != nil {
				if n == 1 {
					// get
					ch.um.RLock()
					defer ch.um.RUnlock()
					return nil
				} else {
					// set
					ch.um.Lock()
					defer ch.um.Unlock()
					modes, _ := ch.modes.GetString()
					c.Send("324", m.Args[0], modes)
					return nil
				}
			} else {
				// not found
				c.Numeric("401", m.Args[0], "No such nick/channel")
				return nil
			}
		} else {
			// user
			if m.Args[0] != c.nick {
				// do nothing if this query is not for the sending user
				return nil
			}
			if n == 1 {
				// get
				modes, _ := c.modes.GetString()
				c.Numeric("221", modes)
			} else {
				// set
				// TODO implement mode set for user
				c.EParams(m.Command)
				return nil
			}
		}
	default:
		c.EParams(m.Command)
		return nil
	}
	return nil
}
// WHO
//
// reply format:
// "<channel> <user> <host> <server> <nick> ( "H" / "G" > ["*"] [ ( "@" / "+" ) ] :<hopcount> <real name>" */
//
// HandleWho implements WHO for a channel target (one 352 reply per member)
// or a single nick, always terminated by a 315 end-of-WHO.
func (i *Ircd) HandleWho(c *Client, m parser.Message) error {
	//	var operOnly bool
	// TODO: zero args should show all non-invisible users
	if len(m.Args) < 1 {
		goto done
	}
	if strings.Index(m.Args[0], "#") == 0 || strings.Index(m.Args[0], "&") == 0 {
		// WHO for channel: snapshot the member set under the locks, then
		// reply outside the channel lock.
		u := make(map[string]*Client)
		i.chm.RLock()
		if ch, ok := i.channels[m.Args[0]]; ok {
			ch.um.RLock()
			for cl, _ := range ch.users {
				cl.RLock()
				u[cl.nick] = cl
				cl.RUnlock()
			}
			ch.um.RUnlock()
		}
		i.chm.RUnlock()
		for _, cl := range u {
			// read lock cl
			cl.RLock()
			c.Send(c.serv.hostname, "352", c.nick, m.Args[0], cl.user, cl.host, i.hostname, cl.nick, "H", fmt.Sprintf("0 %s", cl.realname))
			cl.RUnlock()
		}
	} else {
		// WHO for nick
		if cl := i.FindByNick(m.Args[0]); cl != nil {
			cl.RLock()
			c.Send(c.serv.hostname, "352", c.nick, "0", cl.user, cl.host, i.hostname, cl.nick, "H", fmt.Sprintf("0 %s", cl.realname))
			cl.RUnlock()
		}
	}
	/*
		if len(m.Args) == 2 && m.Args[1] == "o" {
			operOnly = true
		} else {
			operOnly = false
		}
	*/
done:
	c.Send(i.hostname, "315", "end of WHO")
	return nil
}
// HandleJoin implements JOIN: each comma-separated channel name is joined,
// creating the channel on first use after validating the name (must begin
// with '#' or '&' and be at least two characters).
func (i *Ircd) HandleJoin(c *Client, m parser.Message) error {
	var thech *Channel
	if len(m.Args) < 1 {
		c.Numeric("461", m.Command, "need more parameters")
		return nil
	}
	log.Printf("%s attempts to join %q", c, m.Args[0])
	i.chm.Lock()
	defer i.chm.Unlock()
	chans := strings.Split(m.Args[0], ",")
	for _, chname := range chans {
		if ch, ok := i.channels[chname]; ok {
			// channel exists - easy
			ch.AddClient(c)
			thech = ch
		} else {
			// channel doesn't exist
			// sanity checks on channel.
			if chname == "" {
				c.Numeric("403", "*", "No such channel")
				return nil
			}
			if strings.Index(chname, "#") != 0 && strings.Index(chname, "&") != 0 {
				c.Numeric("403", chname, "No such channel")
				return nil
			}
			if len(chname) < 2 {
				c.Numeric("403", chname, "No such channel")
				return nil
			}
			// checks passed. make a channel.
			newch := i.NewChannel(chname)
			i.channels[chname] = newch
			newch.AddClient(c)
			thech = newch
			log.Printf("JOIN: New Channel %s", newch.name)
		}
		// send messages about join
		thech.Join(c)
	}
	return nil
}
// HandlePart implements PART: the client is removed from each listed channel,
// with 442 sent for channels it is not actually on. Unknown channel names are
// silently ignored.
func (i *Ircd) HandlePart(c *Client, m parser.Message) error {
	if len(m.Args) < 1 {
		c.Numeric("461", m.Command, "need more parameters")
		return nil
	}
	i.chm.Lock()
	defer i.chm.Unlock()
	chans := strings.Split(m.Args[0], ",")
	for _, chname := range chans {
		if ch, ok := i.channels[chname]; ok {
			if err := ch.RemoveUser(c); err != nil {
				c.Numeric("442", chname, "not on channel")
			}
		}
	}
	return nil
}
// Privmsg routes msg from a client to a nick or a channel, replying 401 when
// the target exists as neither.
func (i *Ircd) Privmsg(from *Client, to, msg string) {
	from.RLock()
	defer from.RUnlock()
	if tocl := i.FindByNick(to); tocl != nil {
		tocl.RLock()
		defer tocl.RUnlock()
		tocl.Privmsg(from, msg)
	} else if toch := i.FindChannel(to); toch != nil {
		toch.Privmsg(from, msg)
	} else {
		from.Send(i.hostname, "401", from.nick, to, "No such user/nick")
	}
}
// DoMotd sends the post-registration numeric burst: welcome (001-004), the
// LUSERS-style counts (251-255), and the message of the day (375/372/376).
func (i *Ircd) DoMotd(c *Client) {
	c.Numeric("001", fmt.Sprintf("Welcome %s", c.Prefix()))
	c.Numeric("002", fmt.Sprintf("We are %s running go-ircd", i.hostname))
	c.Numeric("003", fmt.Sprintf("Booted %s", i.boottime))
	c.Numeric("004", i.hostname, "go-ircd", "v", "m")
	c.Numeric("251", fmt.Sprintf("There are %d users and %d services on %d servers", i.nclients, 0, 1))
	c.Numeric("252", "0", "operator(s) online")
	c.Numeric("253", "0", "unknown connection(s)")
	c.Numeric("254", "0", "channel(s) formed")
	c.Numeric("255", fmt.Sprintf("I have %d clients and %d servers", c.serv.nclients, 1))
	c.Numeric("375", "- Message of the Day -")
	c.Numeric("372", "- It works!")
	c.Numeric("376", "End of MOTD")
}
// Client is one connected user: its connection, line scanner, identity
// strings, and modes, guarded by the embedded RWMutex.
type Client struct {
	// server reference
	serv *Ircd
	// connection
	con net.Conn
	// scanner of incoming irc messages
	inlines *bufio.Scanner
	// used to prevent multiple clients appearing on NICK
	welcome sync.Once
	// RWMutex for the below data. we can't have multiple people reading/writing..
	sync.RWMutex
	// various names
	nick, user, realname string
	host                 string
	// user modes
	modes Modeset
	// Channels we are on
	// NOTE(review): not populated anywhere in this file — verify it is
	// maintained elsewhere before relying on it.
	channels map[string]*Channel
}
// Prefix renders the client's nick!user@host prefix for outgoing messages.
// NOTE(review): reads nick/user/host without holding the RWMutex — callers
// generally hold it already; confirm before using elsewhere.
func (c *Client) Prefix() string {
	return fmt.Sprintf("%s!%s@%s", c.nick, c.user, c.host)
}

// String implements fmt.Stringer.
// BUG FIX: the receiver was a value, which copied the embedded sync.RWMutex
// on every call (flagged by go vet copylocks); all visible callers hold a
// *Client, so a pointer receiver is safe.
func (c *Client) String() string {
	return c.Prefix()
}

// Send marshals one IRC message and writes it to the client's connection,
// returning any marshalling error (write errors are not reported).
func (c *Client) Send(prefix, command string, args ...string) error {
	m := parser.Message{
		Prefix:  prefix,
		Command: command,
		Args:    args,
	}
	b, err := m.MarshalText()
	if err != nil {
		return fmt.Errorf("marshalling %s failed: %s", m, err)
	}
	log.Printf("Send: %s <- %s", c, m)
	fmt.Fprintf(c.con, "%s\r\n", b)
	return err
}

// Privmsg delivers a PRIVMSG from another client to this one.
func (c *Client) Privmsg(from *Client, msg string) {
	c.RLock()
	defer c.RUnlock()
	c.Send(from.Prefix(), "PRIVMSG", c.nick, msg)
}

// Error sends a NOTICE to the client. (Simplified: arguments are passed
// directly instead of spreading a temporary []string.)
func (c *Client) Error(content string) error {
	return c.Send(c.serv.hostname, "NOTICE", "*", content)
}

// Errorf is Error with fmt.Sprintf formatting.
func (c *Client) Errorf(format string, args ...interface{}) error {
	return c.Error(fmt.Sprintf(format, args...))
}

// EParams reports ERR_NEEDMOREPARAMS (461) for cmd.
func (c *Client) EParams(cmd string) error {
	return c.Send(c.serv.hostname, "461", c.nick, cmd, "not enough parameters")
}

// Numeric sends a numeric reply with the client's nick prepended.
func (c *Client) Numeric(code string, msg ...string) error {
	out := append([]string{c.nick}, msg...)
	return c.Send(c.serv.hostname, code, out...)
}
|
package main
import (
"testing"
"github.com/stretchr/testify/require"
)
// Test_A checks findLastSpokenNumber after 2020 turns against the known
// answers for each starting sequence.
func Test_A(t *testing.T) {
	testCases := []struct {
		input          string
		expectedOutput int
	}{
		{"1,3,2", 1},
		{"2,1,3", 10},
		{"1,2,3", 27},
		{"2,3,1", 78},
		{"3,2,1", 438},
		{"3,1,2", 1836},
	}
	for _, testCase := range testCases {
		t.Run(testCase.input, func(t *testing.T) {
			input := loadInput(testCase.input)
			result := findLastSpokenNumber(input, 2020)
			require.Equal(t, testCase.expectedOutput, result)
		})
	}
}
|
package parser
// StationDetails is one station record as parsed from the source data set:
// identifiers, names, coordinates, and capability/reliability flags.
type StationDetails struct {
	// Internal unique identifier.
	Id int
	// Name of the station as it is locally known; see info_* for translations.
	Name string
	// Guaranteed to be unique across all the suggestable stations; see `is_suggestable`.
	Slug string
	// The UIC code of the station.
	UIC string
	// SNCF sometimes uses a UIC code with 8 digits instead of 7. The last digit is a checksum.
	UIC8_SNCF string
	// Coordinates as decimal value.
	Latitude float32
	// Coordinates as decimal value.
	Longitude float32
	// A station can belong to a meta station whose id is this value,
	// i.e. Paris Gare d’Austerlitz belongs to the metastation Paris.
	ParentStationID int
	// 2 letters, ISO 3166-1 alpha-2
	Country string
	// Continent/Country ISO codes.
	TimeZone string
	// Is this station a city? This field is unreliable
	IsCity bool
	// Is this station the Main Station? This field is unreliable
	IsMainStation bool
	// Specifies if the station is related to an airport
	IsAirport bool
	// Specifies if the user can input this station.
	IsSuggestable bool
	// Specifies if the country should be displayed to disambiguate the station's name.
	CountryHint bool
	// Presence of an SNCF self-service machine at the station.
	SNCFSelfServiceMachine bool
	// Some systems allow stations to be split in two, with two id values.
	// If provided, the station identified by the given value should be considered as the actual station.
	SameAs *int
	// An identifier, which can be used to identify if 2 locations across multiple synchronised sources represent the same location.
	NormalisedCode string
}
|
package k8sml
// K8sML is implemented by objects that expose an identifier and named
// variable values. (Semantics inferred from the method names only — confirm
// against the implementations.)
type K8sML interface {
	GetID() string
	GetVariableValue(variable string) interface{}
}
|
package main
import "testing"
// TestP48 pins selfPower for two known input pairs (Project Euler problem
// 48: trailing digits of the self-power series sum).
func TestP48(t *testing.T) {
	cases := []struct {
		in1, in2 int
		out      int
	}{
		{10, 10, 405071317},
		{1000, 10, 9110846700},
	}
	for _, c := range cases {
		v := selfPower(c.in1, c.in2)
		if v != c.out {
			t.Errorf("P48: %v\tExpected: %v", v, c.out)
		}
	}
}
|
package main
import (
"fmt"
"log"
"net/http"
)
// listenforchecks serves a trivial liveness endpoint on /healthz using the
// globally configured port; ListenAndServe blocks, and any serve error is
// fatal.
func listenforchecks() {
	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "OK")
	})
	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", globalFlags.Port), nil))
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
import (
"fmt"
"github.com/google/gapid/gapis/api"
)
// errAborted is the singleton error returned for ErrAborted, matching the
// api package's command-abort sentinel.
var errAborted error = api.ErrCmdAborted{}
// Err maps an ErrorCode to its Go error: nil for success, the abort sentinel
// for ErrAborted, and a generic error for any unrecognized code.
func (c ErrorCode) Err() error {
	switch c {
	case ErrSuccess:
		return nil
	case ErrAborted:
		return errAborted
	default:
		return fmt.Errorf("Unknown error code %v", c)
	}
}
|
package collectors
import (
"time"
"github.com/cloudfoundry-community/go-cfclient"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
// OrganizationsCollector gathers per-organization info/quota metrics from the
// Cloud Foundry API, plus bookkeeping metrics about its own scrapes.
type OrganizationsCollector struct {
	namespace                                      string
	environment                                    string
	deployment                                     string
	cfClient                                       *cfclient.Client
	organizationInfoMetric                         *prometheus.GaugeVec
	organizationNonBasicServicesAllowedMetric      *prometheus.GaugeVec
	organizationInstanceMemoryMbLimitMetric        *prometheus.GaugeVec
	organizationTotalAppInstancesQuotaMetric       *prometheus.GaugeVec
	organizationTotalAppTasksQuotaMetric           *prometheus.GaugeVec
	organizationTotalMemoryMbQuotaMetric           *prometheus.GaugeVec
	organizationTotalPrivateDomainsQuotaMetric     *prometheus.GaugeVec
	organizationTotalReservedRoutePortsQuotaMetric *prometheus.GaugeVec
	organizationTotalRoutesQuotaMetric             *prometheus.GaugeVec
	organizationTotalServiceKeysQuotaMetric        *prometheus.GaugeVec
	organizationTotalServicesQuotaMetric           *prometheus.GaugeVec
	organizationsScrapesTotalMetric                prometheus.Counter
	organizationsScrapeErrorsTotalMetric           prometheus.Counter
	lastOrganizationsScrapeErrorMetric             prometheus.Gauge
	lastOrganizationsScrapeTimestampMetric         prometheus.Gauge
	lastOrganizationsScrapeDurationSecondsMetric   prometheus.Gauge
}
// NewOrganizationsCollector builds every per-organization gauge and every
// scrape-bookkeeping counter/gauge with the given namespace and constant
// environment/deployment labels, and returns the assembled collector.
func NewOrganizationsCollector(
	namespace string,
	environment string,
	deployment string,
	cfClient *cfclient.Client,
) *OrganizationsCollector {
	// Per-organization info and quota gauges, labeled by organization.
	organizationInfoMetric := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "organization",
			Name:        "info",
			Help:        "Labeled Cloud Foundry Organization information with a constant '1' value.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
		[]string{"organization_id", "organization_name", "quota_name"},
	)
	organizationNonBasicServicesAllowedMetric := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "organization",
			Name:        "non_basic_services_allowed",
			Help:        "A Cloud Foundry Organization can provision instances of paid service plans? (1 for true, 0 for false).",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
		[]string{"organization_id", "organization_name"},
	)
	organizationInstanceMemoryMbLimitMetric := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "organization",
			Name:        "instance_memory_mb_limit",
			Help:        "Maximum amount of memory (Mb) an application instance can have in a Cloud Foundry Organization.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
		[]string{"organization_id", "organization_name"},
	)
	organizationTotalAppInstancesQuotaMetric := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "organization",
			Name:        "total_app_instances_quota",
			Help:        "Total number of application instances that may be created in a Cloud Foundry Organization.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
		[]string{"organization_id", "organization_name"},
	)
	organizationTotalAppTasksQuotaMetric := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "organization",
			Name:        "total_app_tasks_quota",
			Help:        "Total number of application tasks that may be created in a Cloud Foundry Organization.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
		[]string{"organization_id", "organization_name"},
	)
	organizationTotalMemoryMbQuotaMetric := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "organization",
			Name:        "total_memory_mb_quota",
			Help:        "Total amount of memory (Mb) a Cloud Foundry Organization can have.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
		[]string{"organization_id", "organization_name"},
	)
	organizationTotalPrivateDomainsQuotaMetric := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "organization",
			Name:        "total_private_domains_quota",
			Help:        "Total number of private domains that may be created in a Cloud Foundry Organization.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
		[]string{"organization_id", "organization_name"},
	)
	organizationTotalReservedRoutePortsQuotaMetric := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "organization",
			Name:        "total_reserved_route_ports_quota",
			Help:        "Total number of routes that may be created with reserved ports in a Cloud Foundry Organization.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
		[]string{"organization_id", "organization_name"},
	)
	organizationTotalRoutesQuotaMetric := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "organization",
			Name:        "total_routes_quota",
			Help:        "Total number of routes that may be created in a Cloud Foundry Organization.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
		[]string{"organization_id", "organization_name"},
	)
	organizationTotalServiceKeysQuotaMetric := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "organization",
			Name:        "total_service_keys_quota",
			Help:        "Total number of service keys that may be created in a Cloud Foundry Organization.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
		[]string{"organization_id", "organization_name"},
	)
	organizationTotalServicesQuotaMetric := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "organization",
			Name:        "total_services_quota",
			Help:        "Total number of service instances that may be created in a Cloud Foundry Organization.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
		[]string{"organization_id", "organization_name"},
	)
	// Scrape bookkeeping: totals, errors, and last-scrape status/timing.
	organizationsScrapesTotalMetric := prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace:   namespace,
			Subsystem:   "organizations_scrapes",
			Name:        "total",
			Help:        "Total number of scrapes for Cloud Foundry Organizations.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)
	organizationsScrapeErrorsTotalMetric := prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace:   namespace,
			Subsystem:   "organizations_scrape_errors",
			Name:        "total",
			Help:        "Total number of scrape errors of Cloud Foundry Organizations.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)
	lastOrganizationsScrapeErrorMetric := prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "",
			Name:        "last_organizations_scrape_error",
			Help:        "Whether the last scrape of Organizations metrics from Cloud Foundry resulted in an error (1 for error, 0 for success).",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)
	lastOrganizationsScrapeTimestampMetric := prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "",
			Name:        "last_organizations_scrape_timestamp",
			Help:        "Number of seconds since 1970 since last scrape of Organizations metrics from Cloud Foundry.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)
	lastOrganizationsScrapeDurationSecondsMetric := prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "",
			Name:        "last_organizations_scrape_duration_seconds",
			Help:        "Duration of the last scrape of Organizations metrics from Cloud Foundry.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)
	return &OrganizationsCollector{
		namespace:   namespace,
		environment: environment,
		deployment:  deployment,
		cfClient:    cfClient,
		organizationInfoMetric:                         organizationInfoMetric,
		organizationNonBasicServicesAllowedMetric:      organizationNonBasicServicesAllowedMetric,
		organizationInstanceMemoryMbLimitMetric:        organizationInstanceMemoryMbLimitMetric,
		organizationTotalAppInstancesQuotaMetric:       organizationTotalAppInstancesQuotaMetric,
		organizationTotalAppTasksQuotaMetric:           organizationTotalAppTasksQuotaMetric,
		organizationTotalMemoryMbQuotaMetric:           organizationTotalMemoryMbQuotaMetric,
		organizationTotalPrivateDomainsQuotaMetric:     organizationTotalPrivateDomainsQuotaMetric,
		organizationTotalReservedRoutePortsQuotaMetric: organizationTotalReservedRoutePortsQuotaMetric,
		organizationTotalRoutesQuotaMetric:             organizationTotalRoutesQuotaMetric,
		organizationTotalServiceKeysQuotaMetric:        organizationTotalServiceKeysQuotaMetric,
		organizationTotalServicesQuotaMetric:           organizationTotalServicesQuotaMetric,
		organizationsScrapesTotalMetric:                organizationsScrapesTotalMetric,
		organizationsScrapeErrorsTotalMetric:           organizationsScrapeErrorsTotalMetric,
		lastOrganizationsScrapeErrorMetric:             lastOrganizationsScrapeErrorMetric,
		lastOrganizationsScrapeTimestampMetric:         lastOrganizationsScrapeTimestampMetric,
		lastOrganizationsScrapeDurationSecondsMetric:   lastOrganizationsScrapeDurationSecondsMetric,
	}
}
// Collect performs one scrape pass and emits every organization metric,
// plus the scrape bookkeeping metrics (totals, error flag, timestamp,
// duration), onto ch.
func (c OrganizationsCollector) Collect(ch chan<- prometheus.Metric) {
	started := time.Now()

	var scrapeError float64
	if err := c.reportOrganizationsMetrics(ch); err != nil {
		scrapeError = 1
		c.organizationsScrapeErrorsTotalMetric.Inc()
	}
	c.organizationsScrapeErrorsTotalMetric.Collect(ch)

	c.organizationsScrapesTotalMetric.Inc()
	c.organizationsScrapesTotalMetric.Collect(ch)

	c.lastOrganizationsScrapeErrorMetric.Set(scrapeError)
	c.lastOrganizationsScrapeErrorMetric.Collect(ch)

	c.lastOrganizationsScrapeTimestampMetric.Set(float64(time.Now().Unix()))
	c.lastOrganizationsScrapeTimestampMetric.Collect(ch)

	c.lastOrganizationsScrapeDurationSecondsMetric.Set(time.Since(started).Seconds())
	c.lastOrganizationsScrapeDurationSecondsMetric.Collect(ch)
}
// Describe forwards the descriptors of every metric owned by this
// collector to ch.
func (c OrganizationsCollector) Describe(ch chan<- *prometheus.Desc) {
	for _, metric := range []interface {
		Describe(chan<- *prometheus.Desc)
	}{
		c.organizationInfoMetric,
		c.organizationNonBasicServicesAllowedMetric,
		c.organizationInstanceMemoryMbLimitMetric,
		c.organizationTotalAppInstancesQuotaMetric,
		c.organizationTotalAppTasksQuotaMetric,
		c.organizationTotalMemoryMbQuotaMetric,
		c.organizationTotalPrivateDomainsQuotaMetric,
		c.organizationTotalReservedRoutePortsQuotaMetric,
		c.organizationTotalRoutesQuotaMetric,
		c.organizationTotalServiceKeysQuotaMetric,
		c.organizationTotalServicesQuotaMetric,
		c.organizationsScrapesTotalMetric,
		c.organizationsScrapeErrorsTotalMetric,
		c.lastOrganizationsScrapeErrorMetric,
		c.lastOrganizationsScrapeTimestampMetric,
		c.lastOrganizationsScrapeDurationSecondsMetric,
	} {
		metric.Describe(ch)
	}
}
// reportOrganizationsMetrics resets all organization gauges, re-fetches
// quotas and organizations from Cloud Foundry, repopulates the gauges and
// pushes them onto ch. Returns the first listing error encountered.
func (c OrganizationsCollector) reportOrganizationsMetrics(ch chan<- prometheus.Metric) error {
	// Reset first so orgs deleted since the last scrape disappear.
	c.organizationInfoMetric.Reset()
	c.organizationNonBasicServicesAllowedMetric.Reset()
	c.organizationInstanceMemoryMbLimitMetric.Reset()
	c.organizationTotalAppInstancesQuotaMetric.Reset()
	c.organizationTotalAppTasksQuotaMetric.Reset()
	c.organizationTotalMemoryMbQuotaMetric.Reset()
	c.organizationTotalPrivateDomainsQuotaMetric.Reset()
	c.organizationTotalReservedRoutePortsQuotaMetric.Reset()
	c.organizationTotalRoutesQuotaMetric.Reset()
	c.organizationTotalServiceKeysQuotaMetric.Reset()
	c.organizationTotalServicesQuotaMetric.Reset()
	organizationQuotas, err := c.gatherOrganizationQuotas()
	if err != nil {
		log.Errorf("Error while listing organization quotas: %v", err)
		return err
	}
	organizations, err := c.cfClient.ListOrgs()
	if err != nil {
		log.Errorf("Error while listing organizations: %v", err)
		return err
	}
	for _, organization := range organizations {
		var organizationQuota cfclient.OrgQuota
		var ok bool
		// Quota metrics are only reported when the org references a quota
		// we successfully fetched; the info metric is reported regardless
		// (with an empty quota name when no quota matched).
		if organization.QuotaDefinitionGuid != "" {
			if organizationQuota, ok = organizationQuotas[organization.QuotaDefinitionGuid]; ok {
				c.reportOrganizationQuotasMetrics(organization.Guid, organization.Name, organizationQuota)
			}
		}
		c.organizationInfoMetric.WithLabelValues(
			organization.Guid,
			organization.Name,
			organizationQuota.Name,
		).Set(float64(1))
	}
	c.organizationInfoMetric.Collect(ch)
	c.organizationNonBasicServicesAllowedMetric.Collect(ch)
	c.organizationInstanceMemoryMbLimitMetric.Collect(ch)
	c.organizationTotalAppInstancesQuotaMetric.Collect(ch)
	c.organizationTotalAppTasksQuotaMetric.Collect(ch)
	c.organizationTotalMemoryMbQuotaMetric.Collect(ch)
	c.organizationTotalPrivateDomainsQuotaMetric.Collect(ch)
	c.organizationTotalReservedRoutePortsQuotaMetric.Collect(ch)
	c.organizationTotalRoutesQuotaMetric.Collect(ch)
	c.organizationTotalServiceKeysQuotaMetric.Collect(ch)
	c.organizationTotalServicesQuotaMetric.Collect(ch)
	return nil
}
// gatherOrganizationQuotas fetches all org quota definitions and indexes
// them by quota GUID for per-organization lookup.
func (c OrganizationsCollector) gatherOrganizationQuotas() (map[string]cfclient.OrgQuota, error) {
	allQuotas, err := c.cfClient.ListOrgQuotas()
	if err != nil {
		return nil, err
	}
	byGuid := make(map[string]cfclient.OrgQuota, len(allQuotas))
	for _, q := range allQuotas {
		byGuid[q.Guid] = q
	}
	return byGuid, nil
}
// reportOrganizationQuotasMetrics sets every per-org quota gauge
// (labelled by org GUID and name) from the given quota definition.
func (c OrganizationsCollector) reportOrganizationQuotasMetrics(orgGuid string, orgName string, orgQuota cfclient.OrgQuota) {
	// Encode the boolean quota flag as 0/1.
	var nonBasic float64
	if orgQuota.NonBasicServicesAllowed {
		nonBasic = 1
	}
	c.organizationNonBasicServicesAllowedMetric.WithLabelValues(orgGuid, orgName).Set(nonBasic)
	c.organizationInstanceMemoryMbLimitMetric.WithLabelValues(orgGuid, orgName).Set(float64(orgQuota.InstanceMemoryLimit))
	c.organizationTotalAppInstancesQuotaMetric.WithLabelValues(orgGuid, orgName).Set(float64(orgQuota.AppInstanceLimit))
	c.organizationTotalAppTasksQuotaMetric.WithLabelValues(orgGuid, orgName).Set(float64(orgQuota.AppTaskLimit))
	c.organizationTotalMemoryMbQuotaMetric.WithLabelValues(orgGuid, orgName).Set(float64(orgQuota.MemoryLimit))
	c.organizationTotalPrivateDomainsQuotaMetric.WithLabelValues(orgGuid, orgName).Set(float64(orgQuota.TotalPrivateDomains))
	c.organizationTotalReservedRoutePortsQuotaMetric.WithLabelValues(orgGuid, orgName).Set(float64(orgQuota.TotalReservedRoutePorts))
	c.organizationTotalRoutesQuotaMetric.WithLabelValues(orgGuid, orgName).Set(float64(orgQuota.TotalRoutes))
	c.organizationTotalServiceKeysQuotaMetric.WithLabelValues(orgGuid, orgName).Set(float64(orgQuota.TotalServiceKeys))
	c.organizationTotalServicesQuotaMetric.WithLabelValues(orgGuid, orgName).Set(float64(orgQuota.TotalServices))
}
|
package msgHandler
import (
"encoding/json"
cmn "github.com/HNB-ECO/HNB-Blockchain/HNB/consensus/algorand/common"
)
// HandleNewRoundStepMsg decodes a NewRoundStepMessage from the TDM
// message payload and logs it. Returns the unmarshalling error, if any.
func (h *TDMMsgHandler) HandleNewRoundStepMsg(tdmMsg *cmn.TDMMessage) error {
	nrsMsg := &cmn.NewRoundStepMessage{}
	if err := json.Unmarshal(tdmMsg.Payload, nrsMsg); err != nil {
		return err
	}
	// Log after unmarshalling so the populated message (not the zero
	// value) is recorded, and with an explicit %v verb.
	ConsLog.Infof(LOGTABLE_CONS, "HandleNewRoundStepMsg nrsMsg: %v", nrsMsg)
	return nil
}
|
package leetcode
import "testing"
// TestRotatedDigits checks the count of "good" numbers in [1, 10]:
// 2, 5, 6 and 9 rotate to a valid, different number.
func TestRotatedDigits(t *testing.T) {
	// Report the actual value on failure instead of a bare t.Fatal().
	if got := rotatedDigits(10); got != 4 {
		t.Fatalf("rotatedDigits(10) = %d, want 4", got)
	}
}
|
package main
import (
"context"
"fmt"
"github.com/micro/go-micro"
srvHello "lemon_service/proto/hello"
)
// main registers this process as a go-micro client and calls the Hello
// service's SayHi and Add RPCs once each, printing the results.
func main() {
	// Register with the service registry.
	service := micro.NewService(micro.Name("go.micro.srv.clent"))
	service.Init()

	// Typed client handle for the remote Hello service.
	helloClient := srvHello.NewHelloService("go.micro.srv.Hello", service.Client())

	// Call SayHi.
	sayHiRsp, err := helloClient.SayHi(context.Background(), &srvHello.Request{
		Name:    "柠檬酱",
		Address: "西安",
	})
	if err != nil {
		fmt.Printf("call SayHi error:%v\n", err)
	} else {
		fmt.Println("ret:", sayHiRsp.Ret)
	}

	// Call Add.
	addRsp, err := helloClient.Add(context.TODO(), &srvHello.Params{
		Num1: 100,
		Num2: 201,
	})
	if err != nil {
		fmt.Printf("call Add error:%v\n", err)
	} else {
		fmt.Println("ret:", addRsp.Res)
	}
}
|
package base
// Response headers emitted by a Warp10 instance, carrying diagnostics
// about the executed request.
const (
	// HeaderErrorMessage holds the error message of a failed request.
	HeaderErrorMessage = "X-Warp10-Error-Message"
	// HeaderElapsed holds the server-side execution time.
	HeaderElapsed = "X-Warp10-Elapsed"
	// HeaderErrorLine holds the script line on which the error occurred.
	HeaderErrorLine = "X-Warp10-Error-Line"
	// HeaderFetched holds the number of datapoints fetched.
	HeaderFetched = "X-Warp10-Fetched"
	// HeaderOperations holds the number of WarpScript operations executed.
	HeaderOperations = "X-Warp10-Ops"
)
|
package brave
import (
"fmt"
"net/http"
"github.com/jinzhu/gorm"
)
// MigrateDatabase creates/updates the schema for every model this
// package persists.
func MigrateDatabase(db *gorm.DB) {
	for _, model := range []interface{}{&MangaInfo{}, &ChapterInfo{}, &PageInfo{}} {
		db.AutoMigrate(model)
	}
}
// GetMangaList returns every MangaInfo row in the database.
func GetMangaList(db *gorm.DB) []MangaInfo {
	var rows []MangaInfo
	db.Find(&rows)
	return rows
}
// scraper is the package-wide scraping backend, currently the Marumaru
// implementation.
var scraper Scraper = NewMarumaru()
// GetAllMangaList returns every MangaInfo row in the database.
func GetAllMangaList(db *gorm.DB) (result []MangaInfo) {
	db.Find(&result)
	return result
}
// GetAllChapterList returns every ChapterInfo row in the database.
func GetAllChapterList(db *gorm.DB) (result []ChapterInfo) {
	db.Find(&result)
	return result
}
// GetUnscrapedChapterList returns the chapters whose status is still
// empty, i.e. chapters whose pages have not been scraped yet.
func GetUnscrapedChapterList(db *gorm.DB) (result []ChapterInfo) {
	db.Where("status = ''").Find(&result)
	return result
}
// ScrapMangaList pulls the manga list from the scraper and inserts any
// manga not yet present, matched by Link. Existing rows are deliberately
// left untouched. Returns a Korean status string ("scraped N manga").
func ScrapMangaList(db *gorm.DB) string {
	mangaList := scraper.GetMangaList()
	for _, mangaInfo := range mangaList {
		var f MangaInfo
		// Look up by link; a zero ID means the row does not exist yet.
		db.Where(&MangaInfo{Link: mangaInfo.Link}).First(&f)
		if f.ID == 0 {
			db.Create(&mangaInfo)
		} else {
			// Updating existing entries is intentionally disabled.
			//db.Model(&f).Update(&mangaInfo)
		}
	}
	return fmt.Sprintf("%d개의 만화를 스크랩", len(mangaList))
}
// ScrapMangas scrapes the chapter lists for all given manga concurrently
// (via GetAllChapters), updates each manga's metadata, inserts chapters
// not seen before (matched by Link) and refreshes chapter numbers that
// changed. Returns a Korean summary string.
func ScrapMangas(db *gorm.DB, mangaList []MangaInfo) string {
	ch := make(chan MangaScraped)
	go GetAllChapters(scraper, mangaList, ch)
	NumberOfChapter := 0
	NumberOfNewChapter := 0
	// Exactly one MangaScraped result per manga is expected on ch.
	for range mangaList {
		scraped := <-ch
		NumberOfChapter += len(scraped.ChapterList)
		db.Model(&scraped.Original).Update(&scraped.Additional)
		for _, chapterInfo := range scraped.ChapterList {
			var f ChapterInfo
			db.Where(&ChapterInfo{Link: chapterInfo.Link}).First(&f)
			if f.ID == 0 {
				// Previously unseen chapter.
				db.Create(&chapterInfo)
				NumberOfNewChapter++
			} else if f.Number != chapterInfo.Number {
				// Known chapter whose ordering number changed.
				db.Model(&f).Update(&ChapterInfo{Number: chapterInfo.Number})
			}
		}
	}
	return fmt.Sprintf("%d개의 만화에서 %d(+%d)개의 챕터", len(mangaList), NumberOfChapter, NumberOfNewChapter)
}
// ScrapChapters scrapes the pages of all given chapters concurrently
// (via GetAllPages), updates each chapter's metadata, and replaces the
// chapter's stored pages with the freshly scraped set. Chapters that
// yield no pages keep their existing pages. Returns a Korean summary.
func ScrapChapters(db *gorm.DB, chapterList []ChapterInfo) string {
	ch := make(chan ChapterScraped)
	go GetAllPages(scraper, chapterList, ch)
	NumberOfNewChapter := len(chapterList)
	NumberOfNewPage := 0
	// Exactly one ChapterScraped result per chapter is expected on ch.
	for range chapterList {
		scraped := <-ch
		db.Model(&scraped.Original).Update(&scraped.Additional)
		if len(scraped.PageList) == 0 {
			continue
		}
		// Replace the stored pages wholesale: delete, then re-create.
		db.Where(&PageInfo{ChapterID: scraped.Original.ID}).Delete(&PageInfo{})
		for _, pageInfo := range scraped.PageList {
			db.Create(&pageInfo)
			NumberOfNewPage++
		}
	}
	return fmt.Sprintf("%d개의 챕터에서 %d개의 페이지를 스크랩", NumberOfNewChapter, NumberOfNewPage)
}
// Proxy fetches the given URL through the package-level scraper's HTTP
// client and returns the raw response.
// FIXME: this function is not side-effect free (it performs a network
// request via the global scraper).
func Proxy(url string) (res *http.Response, err error) {
	return scraper.Proxy(url)
}
|
package auth
import portainer "github.com/portainer/portainer/api"
// getUserEndpointAuthorizations computes, per endpoint, the authorization
// set granted to user. For each endpoint the first policy source that
// yields any authorizations wins, in this precedence order:
//  1. a user policy directly on the endpoint,
//  2. a user policy on the endpoint's group,
//  3. team policies on the endpoint (via the user's memberships),
//  4. team policies on the endpoint's group (always recorded, even empty).
func getUserEndpointAuthorizations(user *portainer.User, endpoints []portainer.Endpoint, endpointGroups []portainer.EndpointGroup, roles []portainer.Role, userMemberships []portainer.TeamMembership) portainer.EndpointAuthorizations {
	endpointAuthorizations := make(portainer.EndpointAuthorizations)
	// Index group policies by group ID so the per-endpoint loop can do
	// O(1) lookups.
	groupUserAccessPolicies := map[portainer.EndpointGroupID]portainer.UserAccessPolicies{}
	groupTeamAccessPolicies := map[portainer.EndpointGroupID]portainer.TeamAccessPolicies{}
	for _, endpointGroup := range endpointGroups {
		groupUserAccessPolicies[endpointGroup.ID] = endpointGroup.UserAccessPolicies
		groupTeamAccessPolicies[endpointGroup.ID] = endpointGroup.TeamAccessPolicies
	}
	for _, endpoint := range endpoints {
		authorizations := getAuthorizationsFromUserEndpointPolicy(user, &endpoint, roles)
		if len(authorizations) > 0 {
			endpointAuthorizations[endpoint.ID] = authorizations
			continue
		}
		authorizations = getAuthorizationsFromUserEndpointGroupPolicy(user, &endpoint, roles, groupUserAccessPolicies)
		if len(authorizations) > 0 {
			endpointAuthorizations[endpoint.ID] = authorizations
			continue
		}
		authorizations = getAuthorizationsFromTeamEndpointPolicies(userMemberships, &endpoint, roles)
		if len(authorizations) > 0 {
			endpointAuthorizations[endpoint.ID] = authorizations
			continue
		}
		endpointAuthorizations[endpoint.ID] = getAuthorizationsFromTeamEndpointGroupPolicies(userMemberships, &endpoint, roles, groupTeamAccessPolicies)
	}
	return endpointAuthorizations
}
// getAuthorizationsFromUserEndpointPolicy resolves the authorizations
// granted by the user's direct access policy on the endpoint, if any.
func getAuthorizationsFromUserEndpointPolicy(user *portainer.User, endpoint *portainer.Endpoint, roles []portainer.Role) portainer.Authorizations {
	roleIDs := make([]portainer.RoleID, 0)
	if policy, found := endpoint.UserAccessPolicies[user.ID]; found {
		roleIDs = append(roleIDs, policy.RoleID)
	}
	return getAuthorizationsFromRoles(roleIDs, roles)
}
// getAuthorizationsFromUserEndpointGroupPolicy resolves the
// authorizations granted by the user's access policy on the endpoint's
// group, if any.
func getAuthorizationsFromUserEndpointGroupPolicy(user *portainer.User, endpoint *portainer.Endpoint, roles []portainer.Role, groupAccessPolicies map[portainer.EndpointGroupID]portainer.UserAccessPolicies) portainer.Authorizations {
	roleIDs := make([]portainer.RoleID, 0)
	if policy, found := groupAccessPolicies[endpoint.GroupID][user.ID]; found {
		roleIDs = append(roleIDs, policy.RoleID)
	}
	return getAuthorizationsFromRoles(roleIDs, roles)
}
// getAuthorizationsFromTeamEndpointPolicies resolves the authorizations
// granted to any of the user's teams by access policies directly on the
// endpoint.
func getAuthorizationsFromTeamEndpointPolicies(memberships []portainer.TeamMembership, endpoint *portainer.Endpoint, roles []portainer.Role) portainer.Authorizations {
	roleIDs := make([]portainer.RoleID, 0)
	for _, membership := range memberships {
		if policy, found := endpoint.TeamAccessPolicies[membership.TeamID]; found {
			roleIDs = append(roleIDs, policy.RoleID)
		}
	}
	return getAuthorizationsFromRoles(roleIDs, roles)
}
// getAuthorizationsFromTeamEndpointGroupPolicies resolves the
// authorizations granted to any of the user's teams by access policies
// on the endpoint's group.
func getAuthorizationsFromTeamEndpointGroupPolicies(memberships []portainer.TeamMembership, endpoint *portainer.Endpoint, roles []portainer.Role, groupAccessPolicies map[portainer.EndpointGroupID]portainer.TeamAccessPolicies) portainer.Authorizations {
	roleIDs := make([]portainer.RoleID, 0)
	for _, membership := range memberships {
		if policy, found := groupAccessPolicies[endpoint.GroupID][membership.TeamID]; found {
			roleIDs = append(roleIDs, policy.RoleID)
		}
	}
	return getAuthorizationsFromRoles(roleIDs, roles)
}
// getAuthorizationsFromRoles resolves each role ID against the role list
// and folds the matched roles' authorization sets together with
// mergeAuthorizations (set intersection). No matches yields an empty set.
func getAuthorizationsFromRoles(roleIdentifiers []portainer.RoleID, roles []portainer.Role) portainer.Authorizations {
	var matched []portainer.Authorizations
	for _, roleID := range roleIdentifiers {
		for _, role := range roles {
			if role.ID == roleID {
				matched = append(matched, role.Authorizations)
				break
			}
		}
	}
	if len(matched) == 0 {
		return make(portainer.Authorizations)
	}
	result := matched[0]
	for _, authorizations := range matched[1:] {
		result = mergeAuthorizations(result, authorizations)
	}
	return result
}
// mergeAuthorizations returns the intersection of the two authorization
// sets: a key survives only if it is present in both a and b, and is
// always recorded as enabled (true) in the result.
func mergeAuthorizations(a, b portainer.Authorizations) portainer.Authorizations {
	merged := make(map[portainer.Authorization]bool)
	for authorization := range a {
		if _, found := b[authorization]; found {
			merged[authorization] = true
		}
	}
	return merged
}
|
package domain
// Shared success code/message for API responses.
const (
	SUCCESS_CODE = "0"
	SUCCESS_MESSAGE = "success"
)
// BaseResponse is the envelope common to every API response.
type BaseResponse struct {
	Code string `json:"code"`
	Message string `json:"message"`
}
// AddInstanceRsp is the response for creating a single instance.
type AddInstanceRsp struct {
	BaseResponse
	Data InstanceInfo `json:"data"`
}
// ListInstanceResponse is the response for listing instance configs.
type ListInstanceResponse struct {
	BaseResponse
	Datas []InstanceConfig `json:"datas"`
}
|
/*****************************************************************
* Copyright©,2020-2022, email: 279197148@qq.com
* Version: 1.0.0
* @Author: yangtxiang
* @Date: 2020-08-03 16:03
* Description:
*****************************************************************/
package netstream
import (
"github.com/go-xe2/x/core/logger"
"github.com/go-xe2/x/os/xlog"
"sync/atomic"
)
// OnReady is invoked when the peer signals the stream is ready; the
// client currently has nothing to do at that point.
func (p *TStreamClient) OnReady(conn StreamConn) {
}
// OnRecv forwards a received data packet to the registered handler,
// logging (and swallowing) any panic the handler raises.
func (p *TStreamClient) OnRecv(conn StreamConn, data []byte) {
	defer func() {
		if e := recover(); e != nil {
			p.Log(logger.LEVEL_WARN, "OnRecv error:", e)
		}
	}()
	if p.handler == nil {
		return
	}
	p.handler.OnRecv(conn, data)
}
// OnCall services a synchronous call: the handler's reply is returned to
// the caller; without a handler the payload is echoed back. Panics from
// the handler are logged and swallowed (yielding zero return values).
func (p *TStreamClient) OnCall(conn StreamConn, data []byte) ([]byte, error) {
	defer func() {
		if e := recover(); e != nil {
			p.Log(logger.LEVEL_WARN, "OnCall error:", e)
		}
	}()
	if p.handler == nil {
		return data, nil
	}
	return p.handler.OnCall(conn, data)
}
// OnSendTo is invoked to relay data from conn to the peer named toConn.
func (p *TStreamClient) OnSendTo(conn StreamConn, toConn string, data []byte) {
	// Data forwarding is not implemented on the client side.
}
// OnCallTo handles a synchronous relayed call; the client simply echoes
// the payload back.
func (p *TStreamClient) OnCallTo(conn StreamConn, toConn string, data []byte) ([]byte, error) {
	return data, nil
}
// OnHeartbeat is invoked when a heartbeat packet arrives; it refreshes
// the connection's heartbeat state.
func (p *TStreamClient) OnHeartbeat(conn StreamConn) {
	// Debug trace: heartbeat packet received.
	xlog.Debug("收到心跳包======>>")
	conn.UpdateHeartbeat(false)
}
// OnDisconnect runs when the connection drops. Unless the client was
// closed deliberately it attempts to reconnect; the handler's
// OnDisconnect only fires when reconnecting ultimately fails, otherwise
// OnReconnect fires.
func (p *TStreamClient) OnDisconnect(conn StreamConn) {
	select {
	case <-p.closed:
		// Client was closed on purpose: do not reconnect.
		return
	default:
	}
	defer func() {
		if e := recover(); e != nil {
			p.Log(logger.LEVEL_WARN, "OnDisconnect error:", e)
		}
	}()
	xlog.Debug("断线准备重试连接中")
	// Claim the retry slot atomically. The previous load-then-store was
	// racy: two concurrent OnDisconnect calls could both observe 0 and
	// both start a retry loop.
	if !atomic.CompareAndSwapInt32(&p.retryStatus, 0, 1) {
		// Another retry goroutine is already running.
		xlog.Debug("重试连接协程已经运行.")
		return
	}
	defer atomic.StoreInt32(&p.retryStatus, 0)
	xlog.Debug("准备重试连接")
	if !p.RetryConnect() {
		// Still disconnected after all retries: surface the disconnect.
		if p.handler != nil {
			p.handler.OnDisconnect(conn)
		}
	} else {
		if p.handler != nil {
			p.handler.OnReconnect(conn)
		}
	}
}
// OnConnect runs when the connection is established: it re-arms the
// closed channel if necessary, tells the server the client is ready,
// notifies the handler, and starts the heartbeat loop.
func (p *TStreamClient) OnConnect(conn StreamConn) {
	select {
	case <-p.closed:
		// The channel was closed by a previous shutdown; recreate it so a
		// future close can signal again.
		p.closed = make(chan byte, 1)
	default:
	}
	// Connected: notify the server that this client is ready.
	xlog.Debug("客户端连接上...")
	_ = p.conn.SendReady()
	defer func() {
		if e := recover(); e != nil {
			p.Log(logger.LEVEL_WARN, "OnConnect error:", e)
		}
	}()
	if p.handler != nil {
		p.handler.OnConnect(conn)
	}
	// Start heartbeat tracking.
	p.conn.UpdateHeartbeat(false)
	p.heartbeatProcessLoop()
}
// OnRequest forwards an incoming request to the registered handler.
func (p *TStreamClient) OnRequest(reqConn StreamConn, reqId string, namespace string, body []byte) {
	if p.handler == nil {
		return
	}
	p.handler.OnRequest(reqId, namespace, body)
}
// OnResponse forwards a response for an earlier request to the
// registered handler.
func (p *TStreamClient) OnResponse(resConn StreamConn, reqId string, body []byte) {
	if p.handler == nil {
		return
	}
	p.handler.OnResponse(reqId, body)
}
|
package engine
import (
"fmt"
"net"
"sync"
"time"
log "github.com/golang/glog"
)
// TrackerEntry contains the Src and Dst IPs, as well as a map of Dst Ports
// and how many times that port was scanned.
type TrackerEntry struct {
	DstIP *net.IP
	SrcIP *net.IP
	// Ports maps a destination port to the number of times it was hit.
	Ports map[int]int
	// expiry is when this entry becomes eligible for eviction.
	expiry time.Time
}
// Tracker contains the methods for tracking new connections, and retrieving
// entries that constitute port scanning.
type Tracker struct {
	// portScanners delivers entries whose distinct-port count exceeds the
	// threshold; consumed via PortScanners().
	portScanners chan *TrackerEntry
	minimumPortScanned int
	maxAge time.Duration
	// protects everything below.
	l sync.Mutex
	// m maps a "[src]>[dst]" key to its tracked entry.
	m map[string]*TrackerEntry
}
// newTracker takes the maximum age each entry should be tracked for, and
// the minimum ports scanned before a src IP is considered a "port scanner"
// and returns an instance of Tracker.
func newTracker(maxAge, evaluationInterval time.Duration, minimumPortScanned int) (t *Tracker) {
	t = &Tracker{
		portScanners: make(chan *TrackerEntry),
		minimumPortScanned: minimumPortScanned,
		maxAge: maxAge,
		m: make(map[string]*TrackerEntry),
	}
	// Background sweeper: evict expired entries every evaluationInterval.
	// NOTE(review): time.Tick cannot be stopped, so this goroutine and
	// its ticker live for the life of the process even after Close —
	// confirm this is acceptable for the intended lifecycle.
	go func() {
		for now := range time.Tick(evaluationInterval) {
			t.l.Lock()
			for k, v := range t.m {
				if now.After(v.expiry) {
					log.Infof("removing %q because entry is expired", k)
					delete(t.m, k)
				}
			}
			t.l.Unlock()
		}
	}()
	return
}
// Add adds the connection v into the tracker. Connections are tracked in a
// Src IP + Dst IP tuple.
// NOTE(review): once the port threshold is exceeded this sends on the
// unbuffered portScanners channel while holding t.l — if nothing drains
// PortScanners(), every subsequent Add blocks. Confirm a consumer is
// always running.
func (t *Tracker) Add(v *Connection) {
	t.l.Lock()
	// TODO(michaelmcallister): clarify if port scanning is *any* dst IP on
	// the interface, or a specific one. With the current implementation a
	// port scanner could scan up to 2 ports * N IP addresses on the interface.
	// If it's any Dst IP address, change the key to simply be the Src IP.
	key := fmt.Sprintf("[%s]>[%s]", v.Src.IP, v.Dst.IP)
	log.V(2).Infof("Tracking entry %s -> %s", v.Src, v.Dst)
	_, ok := t.m[key]
	if !ok {
		// First sighting of this src/dst pair: start a fresh entry.
		t.m[key] = &TrackerEntry{
			DstIP: &v.Dst.IP,
			SrcIP: &v.Src.IP,
			Ports: make(map[int]int),
			expiry: time.Now().Add(t.maxAge),
		}
	}
	t.m[key].Ports[v.Dst.Port]++
	// Distinct-port count (not hit count) decides "port scanner" status.
	if len(t.m[key].Ports) > t.minimumPortScanned {
		log.V(2).Infof("%s scanned > %d", key, t.minimumPortScanned)
		t.portScanners <- t.m[key]
	}
	t.l.Unlock()
}
// PortScanners returns a channel that callers can retrieve Entries that
// scan multiple ports. It must be drained for Add to make progress once
// the threshold is reached.
func (t *Tracker) PortScanners() chan *TrackerEntry {
	return t.portScanners
}
// Connections returns the total number of currently tracked connection
// hits: every (src, dst, port) observation counts, including repeats of
// the same port — e.g. one IP hitting port 80 five times counts as 5.
func (t *Tracker) Connections() int {
	t.l.Lock()
	defer t.l.Unlock()
	total := 0
	for _, entry := range t.m {
		for _, hits := range entry.Ports {
			total += hits
		}
	}
	return total
}
// Close closes the portScanners channel, signalling consumers to stop.
// NOTE(review): a concurrent Add that crosses the threshold after Close
// would send on a closed channel and panic — confirm Close is only
// called after producers have stopped.
func (t *Tracker) Close() {
	close(t.portScanners)
}
|
package version
import (
"strings"
"github.com/Masterminds/semver/v3"
"github.com/pkg/errors"
)
// DataplaneCompatibility describes the dataplane-side requirements tied
// to a given Kuma version.
type DataplaneCompatibility struct {
	Envoy string `json:"envoy"`
}
// Compatibility maps Kuma DP version constraints (exact versions or
// semver ranges) to their dataplane requirements.
type Compatibility struct {
	KumaDP map[string]DataplaneCompatibility `json:"kumaDp"`
}
// CompatibilityMatrix lists, per Kuma DP version, the Envoy version (or
// range) it is expected to run with.
var CompatibilityMatrix = Compatibility{
	KumaDP: map[string]DataplaneCompatibility{
		"1.0.0": {
			Envoy: "1.16.0",
		},
		"1.0.1": {
			Envoy: "1.16.0",
		},
		"1.0.2": {
			Envoy: "1.16.1",
		},
		"1.0.3": {
			Envoy: "1.16.1",
		},
		"1.0.4": {
			Envoy: "1.16.1",
		},
		"1.0.5": {
			Envoy: "1.16.2",
		},
		"1.0.6": {
			Envoy: "1.16.2",
		},
		"1.0.7": {
			Envoy: "1.16.2",
		},
		"1.0.8": {
			Envoy: "1.16.2",
		},
		"~1.1.0": {
			Envoy: "~1.17.0",
		},
		"~1.2.0": {
			Envoy: "~1.18.0",
		},
		"~1.3.0": {
			Envoy: "~1.18.4",
		},
	},
}
// DataplaneConstraints returns which Envoy should be used with given version of Kuma.
// This information is later used in the GUI as a warning.
// Kuma ships with given Envoy version, but user can use their own Envoy version (especially on Universal)
// therefore we need to inform them that they are not using compatible version.
// It is an error for the version to match zero or more than one
// constraint in the matrix.
func (c Compatibility) DataplaneConstraints(version string) (*DataplaneCompatibility, error) {
	v, err := semver.NewVersion(version)
	if err != nil {
		// Fixed copy-paste: this branch fails to *parse the version*,
		// not to build a constraint.
		return nil, errors.Wrapf(err, "could not parse version %s", version)
	}
	// Collect every constraint in the matrix that the version satisfies.
	var matchedCompat []DataplaneCompatibility
	for constraintRaw, dpCompat := range c.KumaDP {
		constraint, err := semver.NewConstraint(constraintRaw)
		if err != nil {
			return nil, errors.Wrapf(err, "could not build a constraint %s", constraintRaw)
		}
		if constraint.Check(v) {
			matchedCompat = append(matchedCompat, dpCompat)
		}
	}
	if len(matchedCompat) == 0 {
		return nil, errors.Errorf("no constraints for version: %s found", version)
	}
	if len(matchedCompat) > 1 {
		// An ambiguous matrix is a configuration bug; report all matches.
		var matched []string
		for _, c := range matchedCompat {
			matched = append(matched, c.Envoy)
		}
		return nil, errors.Errorf(
			"more than one constraint for version %s: %s",
			version,
			strings.Join(matched, ", "),
		)
	}
	return &matchedCompat[0], nil
}
|
package helpers
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// TestSha1HexDigest verifies the digest for both string and []byte input.
func TestSha1HexDigest(t *testing.T) {
	assert := assert.New(t)
	s1, err := Sha1HexDigest("tonic")
	assert.NoError(err)
	// testify's Equal takes (expected, actual); the original passed the
	// actual value first, which makes failure messages misleading.
	assert.Equal("dfe953579d49b555adf16d1823a71a8e463351c2", s1)
	s2, err := Sha1HexDigest([]byte{'t', 'o', 'n', 'i', 'c'})
	assert.NoError(err)
	assert.Equal("dfe953579d49b555adf16d1823a71a8e463351c2", s2)
}
// TestFileOperations exercises OverWriteFile (always replaces content)
// and WriteIfNotExist (writes only when the file is absent) in a temp dir.
func TestFileOperations(t *testing.T) {
	assert := assert.New(t)
	root, err := ioutil.TempDir("", "phistage-*")
	assert.NoError(err)
	defer os.RemoveAll(root)
	// OverWriteFile: create, idempotent rewrite, then replace.
	f1 := filepath.Join(root, "逍遥派")
	assert.NoError(OverWriteFile(f1, "北冥神功"))
	c1, err := ioutil.ReadFile(f1)
	assert.NoError(err)
	assert.Equal(string(c1), "北冥神功")
	assert.NoError(OverWriteFile(f1, "北冥神功"))
	c2, err := ioutil.ReadFile(f1)
	assert.NoError(err)
	assert.Equal(string(c2), "北冥神功")
	assert.NoError(OverWriteFile(f1, "天山六阳掌"))
	c3, err := ioutil.ReadFile(f1)
	assert.NoError(err)
	assert.Equal(string(c3), "天山六阳掌")
	// WriteIfNotExist: first write lands, second is a no-op.
	f2 := filepath.Join(root, "明教")
	assert.NoError(WriteIfNotExist(f2, []byte("乾坤大挪移")))
	c4, err := ioutil.ReadFile(f2)
	assert.NoError(err)
	assert.Equal(string(c4), "乾坤大挪移")
	assert.NoError(WriteIfNotExist(f2, []byte("圣火令法")))
	c5, err := ioutil.ReadFile(f2)
	assert.NoError(err)
	assert.Equal(string(c5), "乾坤大挪移")
}
|
package game_map
import (
"github.com/faiface/pixel/pixelgl"
"github.com/steelx/go-rpg-cgm/animation"
"github.com/steelx/go-rpg-cgm/state_machine"
"reflect"
)
// CSStandBy is the combat state in which a character idles, looping its
// standby animation.
type CSStandBy struct {
	Name string
	Character *Character
	CombatState *CombatState
	Entity *Entity
	// Anim loops the character's current standby frames.
	Anim animation.Animation
	// AnimId names the combat animation selected on Enter.
	AnimId string
}
// CSStandByCreate builds the standby combat state.
// Expected args: args[0] *Character, args[1] *CombatState.
func CSStandByCreate(args ...interface{}) state_machine.State {
	// Plain type assertions suffice here; the previous
	// reflect.ValueOf(...).Interface() round trip was a no-op.
	char := args[0].(*Character)
	cs := args[1].(*CombatState)
	return &CSStandBy{
		Name:        csStandby,
		Character:   char,
		CombatState: cs,
		Entity:      char.Entity,
		Anim:        animation.Create([]int{char.Entity.StartFrame}, true, 0.16),
	}
}
// IsFinished reports that standby can always be left immediately.
func (s CSStandBy) IsFinished() bool {
	return true
}
// Enter expects data[0] to be the animation id (string) and loads that
// combat animation's frames into the standby animation.
func (s *CSStandBy) Enter(data ...interface{}) {
	s.AnimId = reflect.ValueOf(data[0]).Interface().(string)
	frames := s.Character.GetCombatAnim(s.AnimId)
	s.Anim.SetFrames(frames)
}
// Render is a no-op for this state.
func (s *CSStandBy) Render(win *pixelgl.Window) {
	//The *CombatState will do the render for us
}
// Exit has no cleanup to perform for standby.
func (s *CSStandBy) Exit() {
}
// Update advances the standby animation by dt and pushes the current
// frame to the entity.
func (s *CSStandBy) Update(dt float64) {
	s.Anim.Update(dt)
	s.Entity.SetFrame(s.Anim.Frame())
}
|
package main
import "fmt"
// Roman numerals use these symbols:
//   symbol I V  X  L  C   D   M
//   value  1 5 10 50 100 500 1000
// Rules:
//   - Equal symbols in a row add up (III = 3).
//   - A smaller symbol right of a larger one is added (VIII = 8).
//   - A smaller symbol (only I, X, C) left of a larger one is subtracted
//     (IV = 4).
//   - A symbol normally repeats at most three times in a row.
// (The overline rule multiplying by 1000 is irrelevant below 3999.)

// romanToInt converts a roman numeral to its integer value by scanning
// right to left: a symbol smaller than its right-hand neighbour is
// subtracted, otherwise added. The empty string yields 0.
func romanToInt(s string) int {
	values := map[byte]int{
		'I': 1,
		'V': 5,
		'X': 10,
		'L': 50,
		'C': 100,
		'D': 500,
		'M': 1000,
	}
	n := len(s)
	// Guard the empty string: the original indexed s[n-1] and panicked.
	if n == 0 {
		return 0
	}
	// The rightmost symbol is always added. (A leftover debug
	// fmt.Println of the lookup table was removed.)
	sum := values[s[n-1]]
	for i := n - 2; i >= 0; i-- {
		if values[s[i]] < values[s[i+1]] {
			// Smaller symbol left of a larger one: subtract.
			sum -= values[s[i]]
		} else {
			sum += values[s[i]]
		}
	}
	return sum
}
// main demonstrates the converter on a sample numeral.
func main() {
	value := romanToInt("VIII")
	fmt.Println(value)
}
|
package main
import (
"flag"
"fmt"
"os"
"runtime/pprof"
"sync"
)
// counter accumulates an integer count; its Add* methods exist to give
// the CPU profiler measurable work.
type counter struct {
	count int
}
// cpuprofile names the file the CPU profile is written to; empty disables profiling.
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
// main optionally starts CPU profiling (when -cpuprofile is given), runs
// the counter workload, and prints the final count.
func main() {
	flag.Parse()
	// Only profile when a file name was supplied; the original called
	// os.Create("") unconditionally and panicked when the flag was unset.
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(err)
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(err)
		}
		defer pprof.StopCPUProfile() // flush the profile when main returns
	}
	// NOTE(review): this WaitGroup is never waited on and nothing calls
	// Done; it appears vestigial but is kept to preserve the sync import
	// and any intended future use.
	var wg sync.WaitGroup
	wg.Add(3)
	counter := counter{}
	counter.AddOne()
	counter.AddBillion()
	counter.AddBillion2()
	fmt.Println(counter.count)
}
// AddOne increments the count by exactly one.
func (c *counter) AddOne() {
	c.count++
}
// AddBillion increments the count one billion times (10,000 x 100,000)
// as deliberate CPU-bound work for profiling.
func (c *counter) AddBillion() {
	const outerRounds, innerRounds = 10000, 100000
	for round := 0; round < outerRounds; round++ {
		for step := 0; step < innerRounds; step++ {
			c.count++
		}
	}
}
// AddBillion2 is a second copy of the billion-increment workload so the
// profile shows two distinct hot functions.
func (c *counter) AddBillion2() {
	const outerRounds, innerRounds = 10000, 100000
	for round := 0; round < outerRounds; round++ {
		for step := 0; step < innerRounds; step++ {
			c.count++
		}
	}
}
|
package plantuml
import "io"
// ThemeCerulean is the include URL of the public "cerulean" PlantUML theme.
const ThemeCerulean = "https://raw.githubusercontent.com/bschwarz/puml-themes/master/themes/cerulean/puml-theme-cerulean.puml"
// Diagram is an ordered collection of !include directives and renderable
// elements that can be written out as a @startuml/@enduml document.
type Diagram struct {
	includes []string
	renderables []Renderable
}
// NewDiagram returns an empty diagram.
func NewDiagram() *Diagram {
	return &Diagram{}
}
// Add appends renderable elements to the diagram and returns it for
// chaining.
func (d *Diagram) Add(r ...Renderable) *Diagram {
	d.renderables = append(d.renderables, r...)
	return d
}
// Include appends !include targets (URLs or paths) to the diagram and
// returns it for chaining.
func (d *Diagram) Include(inc ...string) *Diagram {
	d.includes = append(d.includes, inc...)
	return d
}
// Render writes the diagram to wr as a PlantUML document: @startuml, the
// !include lines, each renderable in order, then @enduml. Returns the
// first renderable error, otherwise the first write error recorded by
// the strWriter.
func (d *Diagram) Render(wr io.Writer) error {
	// strWriter accumulates the first write error in w.Err.
	w := strWriter{Writer: wr}
	w.Print("@startuml\n")
	for _, include := range d.includes {
		w.Print("!include ")
		w.Print(include)
		w.Print("\n")
	}
	for _, renderable := range d.renderables {
		// NOTE(review): renderables write to the raw wr, bypassing w's
		// error tracking; their errors are returned directly instead.
		if err := renderable.Render(wr); err != nil {
			return err
		}
	}
	w.Print("@enduml\n")
	return w.Err
}
|
// 25. Break "random access read/write" AES CTR
package main
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
)
const secret = "YELLOW SUBMARINE"
// main runs the challenge on stdin when no arguments are given,
// otherwise on each file named on the command line; errors go to stderr
// and processing continues with the next file.
func main() {
	files := os.Args[1:]
	if len(files) == 0 {
		if err := decryptCTR(os.Stdin); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
	for _, file := range files {
		f, err := os.Open(file)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			continue
		}
		if err := decryptCTR(f); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
		f.Close()
	}
}
// decryptCTR generates a CTR editor from base64-encoded,
// ECB-encrypted input, breaks it, and prints the plaintext.
func decryptCTR(in io.Reader) error {
	// Recover the original plaintext from the challenge's ECB layer.
	buf, err := decryptECB(in)
	if err != nil {
		return err
	}
	// Re-encrypt it under CTR with a random key, as the attack target.
	x, err := newCTREditor(buf)
	if err != nil {
		return err
	}
	// Use only the editor's public API to recover the plaintext.
	buf, err = breakCTR(x)
	if err != nil {
		return err
	}
	fmt.Print(string(buf))
	return nil
}
// ctrEditor permits random-access CTR editing.
type ctrEditor struct {
	// c and iv are the fixed cipher and IV reused for every edit — the
	// keystream reuse that makes the editor breakable.
	c cipher.Block
	iv []byte
	ciphertext []byte
}
// newCTREditor takes a plaintext buffer and creates a CTR editor holding
// that buffer encrypted under a freshly generated random key and IV.
func newCTREditor(buf []byte) (*ctrEditor, error) {
	c, err := aes.NewCipher(RandomBytes(aes.BlockSize))
	if err != nil {
		return nil, err
	}
	iv := RandomBytes(aes.BlockSize)
	stream := cipher.NewCTR(c, iv)
	// Encrypt in place; buf becomes the editor's ciphertext.
	stream.XORKeyStream(buf, buf)
	return &ctrEditor{c, iv, buf}, nil
}
// breakCTR recovers the plaintext through the editor's public API alone:
// "editing" the ciphertext with a copy of itself at offset 0 makes the
// editor XOR the keystream back out, leaving the plaintext in place.
func breakCTR(x *ctrEditor) ([]byte, error) {
	if err := x.edit(x.show(), 0); err != nil {
		return nil, err
	}
	return x.show(), nil
}
// show returns a defensive copy of the current ciphertext.
func (x *ctrEditor) show() []byte {
	return dup(x.ciphertext)
}
// edit takes new plaintext and an offset, splices it into the
// ciphertext, and re-encrypts under the same key/IV — the random-access
// API that breakCTR exploits.
// NOTE(review): after decrypting everything, only the edited span is
// re-encrypted, and with a keystream that restarts at counter 0. That is
// exactly what breakCTR needs (offset 0, full length), but looks wrong
// for general offsets — confirm if edit is meant for other callers.
func (x *ctrEditor) edit(plaintext []byte, offset int) error {
	if offset < 0 || offset > len(x.ciphertext) {
		return errors.New("edit: invalid offset")
	}
	// Decrypt before copying the new data.
	stream := cipher.NewCTR(x.c, x.iv)
	stream.XORKeyStream(x.ciphertext, x.ciphertext)
	if len(x.ciphertext) < offset+len(plaintext) {
		// Edit extends past the end: truncate at offset and append.
		x.ciphertext = append(x.ciphertext[:offset], plaintext...)
	} else {
		copy(x.ciphertext[offset:], plaintext)
	}
	target := x.ciphertext[offset : offset+len(plaintext)]
	// Regenerate the stream cipher.
	stream = cipher.NewCTR(x.c, x.iv)
	stream.XORKeyStream(target, target)
	return nil
}
// decryptECB takes base64-encoded, ECB-encrypted input and returns the
// plaintext, decrypted under the fixed challenge key and with PKCS#7
// padding stripped.
func decryptECB(in io.Reader) ([]byte, error) {
	in = base64.NewDecoder(base64.StdEncoding, in)
	buf, err := ioutil.ReadAll(in)
	if err != nil {
		return nil, err
	}
	c, err := aes.NewCipher([]byte(secret))
	if err != nil {
		return nil, err
	}
	// Decrypt in place, then remove padding.
	NewECBDecrypter(c).CryptBlocks(buf, buf)
	buf, err = PKCS7Unpad(buf, c.BlockSize())
	if err != nil {
		return nil, err
	}
	return buf, nil
}
// ecbDecrypter represents an ECB decryption block mode.
type ecbDecrypter struct{ cipher.Block }
// NewECBDecrypter returns a block mode for ECB decryption.
func NewECBDecrypter(c cipher.Block) cipher.BlockMode {
return ecbDecrypter{c}
}
// CryptBlocks decrypts a buffer in ECB mode.
func (x ecbDecrypter) CryptBlocks(dst, src []byte) {
// The src buffer length must be a multiple of the block size,
// and the dst buffer must be at least the length of src.
for n := x.BlockSize(); len(src) > 0; {
x.Decrypt(dst[:n], src[:n])
dst = dst[n:]
src = src[n:]
}
}
// PKCS7Unpad returns a copy of buf with PKCS#7 padding removed, or an
// error if the trailing bytes do not form valid padding.
func PKCS7Unpad(buf []byte, blockSize int) ([]byte, error) {
	errInvalidPadding := errors.New("PKCS7Unpad: invalid padding")
	if len(buf) < blockSize {
		return nil, errInvalidPadding
	}
	// The last byte encodes the padding length; every padding byte must
	// carry that same value.
	last := buf[len(buf)-1]
	pad := int(last)
	if pad == 0 || pad > blockSize {
		return nil, errInvalidPadding
	}
	if !bytes.Equal(bytes.Repeat([]byte{last}, pad), buf[len(buf)-pad:]) {
		return nil, errInvalidPadding
	}
	// Return an independent copy of the unpadded prefix.
	return append([]byte(nil), buf[:len(buf)-pad]...), nil
}
// RandomBytes returns n cryptographically random bytes, panicking if the
// system randomness source fails.
func RandomBytes(n int) []byte {
	out := make([]byte, n)
	_, err := rand.Read(out)
	if err != nil {
		panic(err)
	}
	return out
}
// dup returns an independent copy of buf.
func dup(buf []byte) []byte {
	out := make([]byte, len(buf))
	copy(out, buf)
	return out
}
|
package leetcode
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// inOrder streams the tree's nodes over c in in-order (left, node, right).
func inOrder(cur *TreeNode, c chan *TreeNode) {
	if cur == nil {
		return
	}
	inOrder(cur.Left, c)
	c <- cur
	inOrder(cur.Right, c)
}
// minDiffInBST returns the minimum difference between the values of any
// two nodes in a BST. In-order traversal yields sorted values, so the
// minimum gap lies between some pair of consecutive visited nodes. The
// traversal is streamed over a channel from a helper goroutine.
// Returns -1 for trees with fewer than two nodes.
func minDiffInBST(root *TreeNode) int {
	c := make(chan *TreeNode)
	go func() {
		inOrder(root, c)
		close(c) // traversal complete; ends the range loop below
	}()
	var prev *TreeNode = nil
	minDiff := -1
	for v := range c {
		// v follows prev in sorted order, so v.Val-prev.Val >= 0 is the
		// candidate gap; -1 doubles as "no gap seen yet".
		if prev != nil && (minDiff == -1 || v.Val-prev.Val < minDiff) {
			minDiff = v.Val - prev.Val
		}
		prev = v
	}
	return minDiff
}
|
package analysis
import (
"fmt"
"github.com/PuerkitoBio/goquery"
"strings"
)
// AnalysisHot parses the V2EX hot-topics HTML in body and returns one
// single-entry map per topic, keyed by the absolute topic URL with the
// title as the value.
func AnalysisHot(body string) ([]map[string]string, error) {
	doc, err := goquery.NewDocumentFromReader(strings.NewReader(body))
	if err != nil {
		return nil, err
	}
	// Preallocate capacity only. The original used make(..., 10), which
	// combined with append left ten nil maps at the front of the result.
	result := make([]map[string]string, 0, 10)
	doc.Find("span.item_title").Each(func(index int, selection *goquery.Selection) {
		localTitle := selection.Find("a").Text()
		localUrl, hasAttr := selection.Find("a").Attr("href")
		if !hasAttr {
			// The original also printed err here, which is nil at this
			// point and only added noise.
			fmt.Println("Can not fin href")
		} else {
			titleUrlMap := make(map[string]string)
			titleUrlMap["https://www.v2ex.com"+localUrl] = localTitle
			result = append(result, titleUrlMap)
		}
	})
	return result, nil
}
|
package ttt_test
import (
"github.com/abdulrahmank/solver/tic_tac_toe/ttt"
"testing"
)
// TestBoard_Init verifies that every cell records its own row and column.
func TestBoard_Init(t *testing.T) {
	board := ttt.Board{}
	board.Init(3, 3)
	for i := 0; i < 3; i++ {
		for j := 0; j < 3; j++ {
			// A cell is wrong if EITHER coordinate mismatches; the
			// original '&&' only flagged cells where both were wrong,
			// and its message printed (i, i) instead of (i, j).
			if board.Cells[i][j].Row != i || board.Cells[i][j].Column != j {
				t.Errorf("Expected %d, %d, but was %d, %d", i, j, board.Cells[i][j].Row, board.Cells[i][j].Column)
			}
		}
	}
}
// TestBoard_AddValToRight checks that the value lands one column to the
// right of the given cell.
func TestBoard_AddValToRight(t *testing.T) {
	board := ttt.Board{}
	board.Init(3, 3)
	if _, err := board.AddValToRight(0, 0, "X"); err != nil {
		t.Error("Expected nil")
	}
	if got := board.Cells[0][1].Val; got != "X" {
		t.Errorf("Expected X but was %v", got)
	}
}
// TestBoard_AddValToLeft checks that the value lands one column to the
// left of the given cell.
func TestBoard_AddValToLeft(t *testing.T) {
	board := ttt.Board{}
	board.Init(3, 3)
	_, err := board.AddValToLeft(0, 1, "X")
	if err != nil {
		t.Error("Expected nil")
	}
	if board.Cells[0][0].Val != "X" {
		// Report the cell actually under test; the original printed
		// Cells[0][1] (copy-paste from the Right test).
		t.Errorf("Expected X but was %v", board.Cells[0][0].Val)
	}
}
// TestBoard_AddValToTopOf checks that AddValToTopOf writes into the cell one
// row above the given coordinates.
func TestBoard_AddValToTopOf(t *testing.T) {
	board := ttt.Board{}
	board.Init(3, 3)
	_, err := board.AddValToTopOf(1, 0, "X")
	if err != nil {
		t.Error("Expected nil")
	}
	// Bug fix: the failure message printed Cells[0][1] although the cell
	// under test is Cells[0][0].
	if board.Cells[0][0].Val != "X" {
		t.Errorf("Expected X but was %v", board.Cells[0][0].Val)
	}
}
// TestBoard_AddValToBottomOf checks that AddValToBottomOf writes into the
// cell one row below the given coordinates.
func TestBoard_AddValToBottomOf(t *testing.T) {
	board := ttt.Board{}
	board.Init(3, 3)
	_, err := board.AddValToBottomOf(0, 0, "X")
	if err != nil {
		t.Error("Expected nil")
	}
	// Bug fix: the failure message printed Cells[0][1] although the cell
	// under test is Cells[1][0].
	if board.Cells[1][0].Val != "X" {
		t.Errorf("Expected X but was %v", board.Cells[1][0].Val)
	}
}
// TestBoard_HorizontalWin checks row-win detection for a winning row (0) and
// a non-winning row (1).
func TestBoard_HorizontalWin(t *testing.T) {
	board := ttt.Board{}
	board.Init(3, 3)
	board.Cells[0][0].Val = "X"
	board.Cells[0][1].Val = "X"
	board.Cells[0][2].Val = "X"
	board.Cells[1][0].Val = "X"
	board.Cells[1][1].Val = "Y"
	board.Cells[1][2].Val = "X"
	if !board.IsHorizontalWin(0, "X") {
		t.Error("Expected win")
	}
	// Typo fix in the failure message: "Din't" -> "Didn't".
	if board.IsHorizontalWin(1, "X") {
		t.Error("Didn't expect a win")
	}
}
// TestBoard_VerticalWin checks column-win detection for a winning column (0)
// and a non-winning column (1).
func TestBoard_VerticalWin(t *testing.T) {
	board := ttt.Board{}
	board.Init(3, 3)
	board.Cells[0][0].Val = "X"
	board.Cells[1][0].Val = "X"
	board.Cells[2][0].Val = "X"
	board.Cells[0][1].Val = "X"
	board.Cells[1][1].Val = "Y"
	board.Cells[2][1].Val = "X"
	if !board.IsVerticalWin(0, "X") {
		t.Error("Expected win")
	}
	// Typo fix in the failure message: "Din't" -> "Didn't".
	if board.IsVerticalWin(1, "X") {
		t.Error("Didn't expect a win")
	}
}
// TestBoard_LeadingDiagonalWin checks diagonal-win detection on the
// top-left-to-bottom-right diagonal.
func TestBoard_LeadingDiagonalWin(t *testing.T) {
	board := ttt.Board{}
	board.Init(3, 3)
	board.Cells[0][0].Val = "X"
	board.Cells[1][1].Val = "X"
	board.Cells[2][2].Val = "X"
	if !board.IsDiagonalWin("X") {
		t.Error("Expected win")
	}
	// Typo fix in the failure message: "Din't" -> "Didn't".
	if board.IsDiagonalWin("O") {
		t.Error("Didn't expect a win")
	}
}
// TestBoard_TrailingDiagonalWin checks diagonal-win detection on the
// top-right-to-bottom-left diagonal.
func TestBoard_TrailingDiagonalWin(t *testing.T) {
	board := ttt.Board{}
	board.Init(3, 3)
	board.Cells[0][2].Val = "X"
	board.Cells[1][1].Val = "X"
	board.Cells[2][0].Val = "X"
	if !board.IsDiagonalWin("X") {
		t.Error("Expected win")
	}
	// Typo fix in the failure message: "Din't" -> "Didn't".
	if board.IsDiagonalWin("O") {
		t.Error("Didn't expect a win")
	}
}
// TestBoard_GetEmptyCells verifies that GetEmptyCells returns exactly the
// cells that have no value set.
func TestBoard_GetEmptyCells(t *testing.T) {
	board := ttt.Board{}
	board.Init(3, 3)
	board.Cells[0][0].Val = "X"
	board.Cells[1][1].Val = "X"
	board.Cells[1][2].Val = "X"
	expected :=
		[]ttt.Cell{*board.Cells[0][1], *board.Cells[0][2], *board.Cells[1][0], *board.Cells[2][0],
			*board.Cells[2][1], *board.Cells[2][2]}
	actual := board.GetEmptyCells()
	if len(actual) != len(expected) {
		// Bug fix: the arguments were swapped relative to the format string
		// ("Expected ... but was ...").
		t.Errorf("Expected %d but was %d", len(expected), len(actual))
	}
	for _, cell := range expected {
		if !contains(actual, cell) {
			t.Errorf("Not all cells found")
			break
		}
	}
}
// contains reports whether cell is present in cells.
func contains(cells []ttt.Cell, cell ttt.Cell) bool {
	for i := range cells {
		if cells[i] == cell {
			return true
		}
	}
	return false
}
|
package config
// TradeInfoFile is the path of the file that stores stock trade information.
const (
	TradeInfoFile = "./stockInfo.txt"
)

// 1-based positions of the tags within a record.
const (
	FirstTagIndex = iota + 1
	SecondTagIndex
	ThirdTagIndex
)
|
package lecimg
import (
"image"
"log"
"github.com/disintegration/gift"
"github.com/mitchellh/mapstructure"
)
// ResizeOption configures the resize filter.
type ResizeOption struct {
	// WidthScale and HeightScale are multiplicative factors applied to the
	// source image's dimensions.
	WidthScale  float64
	HeightScale float64
	// ScaleCover, when true, also resizes the first (cover) image.
	ScaleCover bool
}
// NewResizeOption decodes the generic option map into a ResizeOption.
func NewResizeOption(m map[string]interface{}) (*ResizeOption, error) {
	var opt ResizeOption
	if err := mapstructure.Decode(m, &opt); err != nil {
		return nil, err
	}
	return &opt, nil
}
// ResizeResult carries the outcome of one resize-filter run.
type ResizeResult struct {
	image    image.Image // resulting (possibly resized) image
	filename string      // source file name, used for logging
	scaled   bool        // false when the resize step was skipped
}

// Img returns the resulting image.
func (r ResizeResult) Img() image.Image {
	return r.image
}
// Log reports when the resize step was skipped for this file.
func (r ResizeResult) Log() {
	if r.scaled {
		return
	}
	log.Printf("Resize skipped : %s\n", r.filename)
}
// ----------------------------------------------------------------------------
// ResizeFilter scales images by the configured width/height factors.
type ResizeFilter struct {
	option ResizeOption
}

// NewResizeFilter creates a ResizeFilter using the given option.
func NewResizeFilter(option ResizeOption) *ResizeFilter {
	return &ResizeFilter{option: option}
}
// Run scales the source image by the configured factors. The first image
// (index 0, typically the cover) is passed through untouched unless
// ScaleCover is set.
func (f ResizeFilter) Run(s *FilterSource) FilterResult {
	if s.index == 0 && !f.option.ScaleCover {
		return ResizeResult{image: s.image, filename: s.filename, scaled: false}
	}
	bounds := s.image.Bounds()
	w := int(f.option.WidthScale * float64(bounds.Dx()))
	h := int(f.option.HeightScale * float64(bounds.Dy()))
	scaled := ResizeImage(s.image, w, h, false)
	return ResizeResult{image: scaled, filename: s.filename, scaled: true}
}
// ----------------------------------------------------------------------------
// ResizeImage resizes src to width x height. When keepAspectRatio is true the
// image is fitted inside the box (aspect ratio preserved); otherwise it is
// stretched to the exact dimensions. Lanczos resampling is used either way.
func ResizeImage(src image.Image, width, height int, keepAspectRatio bool) image.Image {
	var g *gift.GIFT
	if keepAspectRatio {
		g = gift.New(gift.ResizeToFit(width, height, gift.LanczosResampling))
	} else {
		g = gift.New(gift.Resize(width, height, gift.LanczosResampling))
	}
	dest := image.NewRGBA(g.Bounds(src.Bounds()))
	g.Draw(dest, src)
	return dest
}
|
package full
import (
"github.com/filecoin-project/specs-actors/v4/actors/builtin"
"github.com/ipfs/go-cid"
)
// BuiltinName4 returns the human-readable name of the v4 builtin actor
// identified by the given code CID, or an empty string when unknown.
func BuiltinName4(code cid.Cid) string {
	return builtin.ActorNameByCode(code)
}
|
package main
// This is a simple file server. For security, it support non-hierarchy directory (flat directory structure, no sub
// directories).
// The files are stored in the "files" directory as gzip files and served with the "Content-Encoding: gzip" HTTP
// response header (if the "accept-encoding: gzip" HTTP request header was sent).
import (
"compress/gzip"
"embed"
"encoding/json"
"fmt"
"html/template"
"io"
"io/fs"
"log"
"math"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
)
// Route paths and server configuration defaults.
const (
	BASE_PATH            = "/"
	FILE_SERVER_API_PATH = BASE_PATH + "files/cli/"
	HEALTH_API_PATH      = BASE_PATH + "health"
	READY_API_PATH       = BASE_PATH + "ready"
	// FILE_SERVER_DIR is the flat directory the served files live in.
	FILE_SERVER_DIR = "files"
	// SERVER_PORT_ENV is the environment variable that overrides the
	// default listen port (see boot).
	SERVER_PORT_ENV = "SERVER_PORT"
)
// fileMetadata describes one downloadable file, as listed in
// metadata/files.json.
type fileMetadata struct {
	// file name
	Name string `json:"name"`
	// file MIME type
	Mime string `json:"mime"`
	// file size in bytes (before compression)
	Size int64 `json:"size"`
	// Operating system the file targets ("darwin" is normalized to "macOS")
	OS string `json:"os"`
}
var (
	// serverPort is the listen address (":<port>"); overridden via SERVER_PORT in boot().
	serverPort = ":8080"
	// mux is replaced by the fully-routed mux in boot().
	mux = &http.ServeMux{}
	// fileServerDir is the directory the served files live in.
	fileServerDir = FILE_SERVER_DIR
	// indexTemplate renders the landing page; parsed from embedded assets in boot().
	indexTemplate *template.Template
	// fileMetadataList maps file name -> metadata, loaded in boot().
	fileMetadataList map[string]fileMetadata
)
// getMetadata decodes a JSON array of fileMetadata from reader and returns it
// indexed by file name, normalizing the OS label "darwin" to "macOS".
func getMetadata(reader io.Reader) (map[string]fileMetadata, error) {
	var entries []fileMetadata
	if err := json.NewDecoder(reader).Decode(&entries); err != nil {
		return nil, err
	}
	byName := make(map[string]fileMetadata)
	for _, entry := range entries {
		if entry.OS == "darwin" {
			entry.OS = "macOS"
		}
		byName[entry.Name] = entry
	}
	return byName, nil
}
// embeddedFiles holds the static assets (index template) and the file
// metadata manifest, embedded at build time.
//
//go:embed static
//go:embed metadata
var embeddedFiles embed.FS
// Boot
// Don't use init() because init and go:embed won't work
//
// boot configures the server: reads the port override from the environment,
// gzip-compresses the served files, loads the embedded metadata manifest and
// index template, and builds the router. Any failure is fatal (panics), since
// the server cannot run without this setup.
func boot() {
	// change the default port if the "SERVER_PORT" environment variable is set
	if port, ok := os.LookupEnv(SERVER_PORT_ENV); ok {
		if err := validatePort(port); err != nil {
			panic(err)
		}
		serverPort = ":" + port
	}
	// compress all the files, if not already compressed
	err := compressFiles()
	if err != nil {
		panic(err)
	}
	// load the embedded file metadata manifest
	reader, err := embeddedFiles.Open("metadata/files.json")
	if err != nil {
		panic(err)
	}
	fileMetadataList, err = getMetadata(reader)
	if err != nil {
		panic(err)
	}
	indexTemplate, err = template.ParseFS(embeddedFiles, "static/index.gohtml")
	if err != nil {
		panic(err)
	}
	mux = setupMux()
}
// setupMux builds the routing table: the file-download API, the health and
// readiness probes, and the index page as the catch-all.
func setupMux() *http.ServeMux {
	router := http.NewServeMux()
	fileHandler := http.StripPrefix(FILE_SERVER_API_PATH, getGzipFile())
	router.Handle(FILE_SERVER_API_PATH, filterMethods(fileHandler))
	router.HandleFunc(HEALTH_API_PATH, ping)
	router.HandleFunc(READY_API_PATH, ping)
	router.HandleFunc("/", index)
	return router
}
// compressFiles walks fileServerDir (flat, no sub-directories) and replaces
// every file that is not already a .gz with its gzip-compressed version,
// removing the original afterwards.
func compressFiles() error {
	return filepath.Walk(fileServerDir, func(path string, info fs.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// the directory structure is flat by design: skip any sub-directory
		if info.IsDir() {
			if info.Name() != fileServerDir {
				return filepath.SkipDir
			}
			return nil
		}
		if strings.HasSuffix(path, ".gz") {
			return nil // already compressed
		}
		return compressFile(path)
	})
}

// compressFile gzips path into path+".gz" and removes the original.
// The original is only removed after the gzip stream has been fully flushed.
func compressFile(path string) error {
	compressedFileName := path + ".gz"
	log.Printf("Compressing file; file name: %s, compressed file name: %s\n", path, compressedFileName)
	in, err := os.Open(path)
	if err != nil {
		log.Printf("Error while compressing %s; can't open the file; %v\n", path, err)
		return err
	}
	defer in.Close()
	out, err := os.OpenFile(compressedFileName, os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.Printf("Error while compressing %s; can't create new file; %v\n", compressedFileName, err)
		return err
	}
	defer out.Close()
	zw := gzip.NewWriter(out)
	// Bug fix: the original tee'd through io.ReadAll, buffering the whole
	// file in memory just to discard it; io.Copy streams with constant memory.
	if _, err = io.Copy(zw, in); err != nil {
		zw.Close()
		return err
	}
	// Bug fix: Close was deferred, so it ran AFTER os.Remove and its error
	// (the final flush of compressed data) was silently dropped. Close
	// explicitly and only delete the original on success.
	if err = zw.Close(); err != nil {
		return err
	}
	log.Println("Removing the uncompressed file; file name:", path)
	if err = os.Remove(path); err != nil {
		log.Println("Failed to remove the uncompressed file; file name:", path)
	}
	return nil
}
// validatePort makes sure the port is numeric and within 1..65535.
func validatePort(port string) error {
	n, err := strconv.Atoi(port)
	if err != nil {
		return fmt.Errorf("wrong port format; %w", err)
	}
	if n < 1 || n > math.MaxUint16 {
		return fmt.Errorf("wrong port number; %d", n)
	}
	return nil
}
// index renders the embedded index template with the file metadata list.
func index(w http.ResponseWriter, _ *http.Request) {
	// Template-execution errors are ignored; by the time they occur the
	// response headers may already have been written.
	_ = indexTemplate.Execute(w, fileMetadataList)
}
// the file server implementation
//
// getGzipFile serves files from fileServerDir. Files are stored
// gzip-compressed on disk; if the client accepts gzip the compressed bytes
// are sent as-is with "Content-Encoding: gzip", otherwise they are
// decompressed on the fly.
func getGzipFile() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fileName := r.URL.Path
		rawName := strings.TrimSuffix(fileName, ".gz")
		metadata, found := fileMetadataList[rawName]
		if !found {
			http.NotFound(w, r)
			return
		}
		log.Println("File request. File name: ", fileName)
		// security: the directory is flat — reject any path with sub-directories
		if strings.Contains(fileName, "/") {
			log.Println("Wrong path: includes sub-directories; Requested path: ", fileName)
			http.NotFound(w, r)
			return
		}
		// if the request is for a compressed file, just serve it
		if strings.HasSuffix(fileName, ".gz") {
			filePath := fmt.Sprintf("%s/%s", fileServerDir, fileName)
			http.ServeFile(w, r, filePath)
			return
		}
		filePath := fmt.Sprintf("%s/%s.gz", fileServerDir, fileName)
		file, err := os.Open(filePath)
		if err != nil {
			log.Println("File not found. File name: ", filePath)
			http.NotFound(w, r)
			return
		}
		defer file.Close()
		w.Header().Add("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, fileName))
		w.Header().Set("Content-Type", metadata.Mime)
		var fileReader io.Reader = file
		if isStrInArr(r.Header["Accept-Encoding"], "gzip") {
			w.Header().Add("Content-Encoding", "gzip")
			log.Println("serving compressed file")
		} else {
			log.Println("serving non-compressed file")
			fileReader, err = gzip.NewReader(file)
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				log.Println("Can't get gzip reader.", err)
				fmt.Fprintln(w, "Something went wrong")
				return
			}
		}
		// Bug fix: the original tee'd into w and buffered the whole file with
		// io.ReadAll; io.Copy streams the file with constant memory.
		if _, err = io.Copy(w, fileReader); err != nil {
			log.Println("error while serving a file;", err)
		}
	}
}
// filterMethods only lets GET and HEAD requests through to next; OPTIONS gets
// a 204 with an Allow header, everything else a 405.
func filterMethods(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.Method {
		case http.MethodGet, http.MethodHead:
			next.ServeHTTP(w, r)
		case http.MethodOptions:
			w.Header().Set("Allow", "OPTIONS, GET, HEAD")
			w.WriteHeader(http.StatusNoContent)
		default:
			log.Println("unsupported method:", r.Method)
			w.WriteHeader(http.StatusMethodNotAllowed)
		}
	})
}
// ping replies 200 OK; used by the health and readiness probe endpoints.
func ping(w http.ResponseWriter, _ *http.Request) {
	w.WriteHeader(http.StatusOK)
}
// isStrInArr reports whether str appears as a comma-separated token (after
// trimming surrounding spaces) in any of the given header values.
func isStrInArr(headers []string, str string) bool {
	for i := range headers {
		tokens := strings.Split(headers[i], ",")
		for j := range tokens {
			if strings.TrimSpace(tokens[j]) == str {
				return true
			}
		}
	}
	return false
}
// main boots the server and serves HTTP until ListenAndServe fails.
func main() {
	boot()
	//addr := fmt.Sprintf(":%s", os.Getenv("SERVER_PORT"))
	//if err := http.ListenAndServeTLS(addr, "", "", nil); err != nil {
	//	panic(err)
	//}
	// serverPort is ":<port>"; strip the colon for the log line
	log.Println("Starting the CLI Download server on port", serverPort[1:])
	if err := http.ListenAndServe(serverPort, mux); err != nil {
		panic(err)
	}
}
|
package devops
import "testing"
// TestNewFortune checks that NewFortune succeeds and yields a non-empty value.
func TestNewFortune(t *testing.T) {
	fortune, err := NewFortune()
	if err != nil {
		t.Errorf("E! %v", err)
	}
	if len(fortune) == 0 {
		t.Error("fortune is empty")
	}
}
|
package parse
import (
"fmt"
"strconv"
"testing"
)
// TestParseIntToFloatUnits is a table-driven test covering the unit-boundary
// behavior of parseIntToFloatUnits: exact units, fractional values, and
// inputs just below a unit boundary (which must report the smaller unit).
func TestParseIntToFloatUnits(t *testing.T) {
	cases := []struct {
		desc      string
		input     uint64
		wantNum   float64
		wantUnits string
	}{
		{
			desc:      "no limit to TB",
			input:     2000 * Terabyte,
			wantNum:   2000,
			wantUnits: TB,
		},
		{
			desc:      "1 TB",
			input:     Terabyte,
			wantNum:   1.0,
			wantUnits: TB,
		},
		{
			desc:      "1.5 TB",
			input:     uint64(1.5 * float64(Terabyte)),
			wantNum:   1.5,
			wantUnits: TB,
		},
		{
			desc:      "1TB - 1GB",
			input:     Terabyte - Gigabyte,
			wantNum:   1023,
			wantUnits: GB,
		},
		{
			desc:      "1 GB",
			input:     Gigabyte,
			wantNum:   1.0,
			wantUnits: GB,
		},
		{
			desc:      "1.5 GB",
			input:     uint64(1.5 * float64(Gigabyte)),
			wantNum:   1.5,
			wantUnits: GB,
		},
		{
			desc:      "2.0 GB",
			input:     2 * Gigabyte,
			wantNum:   2.0,
			wantUnits: GB,
		},
		{
			desc:      "1 GB - 1 MB",
			input:     Gigabyte - Megabyte,
			wantNum:   1023.0,
			wantUnits: MB,
		},
		{
			desc:      "1 MB",
			input:     Megabyte,
			wantNum:   1.0,
			wantUnits: MB,
		},
		{
			desc:      "1.5 MB",
			input:     uint64(1.5 * float64(Megabyte)),
			wantNum:   1.5,
			wantUnits: MB,
		},
		{
			desc:      "1020 kB",
			input:     Megabyte - 4*Kilobyte,
			wantNum:   1020.0,
			wantUnits: KB,
		},
		{
			desc:      "1 kB",
			input:     Kilobyte,
			wantNum:   1.0,
			wantUnits: KB,
		},
		{
			desc:      "1.5 kB",
			input:     uint64(1.5 * float64(Kilobyte)),
			wantNum:   1.5,
			wantUnits: KB,
		},
		{
			desc:      "1000 bytes",
			input:     Kilobyte - 24,
			wantNum:   1000,
			wantUnits: B,
		},
	}
	for _, c := range cases {
		val, units := parseIntToFloatUnits(c.input)
		if got := val; got != c.wantNum {
			t.Errorf("%s: incorrect val: got %f want %f", c.desc, got, c.wantNum)
		}
		if got := units; got != c.wantUnits {
			t.Errorf("%s: incorrect units: got %s want %s", c.desc, got, c.wantUnits)
		}
	}
}
// TestParseIntToFloatUnitsPanic verifies that parseIntToFloatUnits panics on
// a zero input.
func TestParseIntToFloatUnitsPanic(t *testing.T) {
	defer func() {
		if recover() == nil {
			t.Errorf("did not panic when should")
		}
	}()
	parseIntToFloatUnits(0)
}
// TestBytesToDecimalFormat checks the human-readable two-decimal rendering of
// byte counts, including a fractional input that falls into the next-smaller
// unit (.50 TB -> 512.00 GB).
func TestBytesToDecimalFormat(t *testing.T) {
	cases := []struct {
		desc  string
		input uint64
		want  string
	}{
		{
			desc:  "no limit to TB",
			input: 2000 * Terabyte,
			want:  "2000.00 " + TB,
		},
		{
			desc:  "1 TB",
			input: Terabyte,
			want:  "1.00 " + TB,
		},
		{
			desc:  "1.5 TB",
			input: uint64(1.5 * float64(Terabyte)),
			want:  "1.50 " + TB,
		},
		{
			desc:  "1.25 TB",
			input: uint64(1.25 * float64(Terabyte)),
			want:  "1.25 " + TB,
		},
		{
			desc:  ".50 TB",
			input: uint64(.50 * float64(Terabyte)),
			want:  "512.00 " + GB,
		},
	}
	for _, c := range cases {
		if got := BytesToDecimalFormat(c.input); got != c.want {
			t.Errorf("%s: incorrect return: got %s want %s", c.desc, got, c.want)
		}
	}
}
// TestBytesToPGFormat checks conversion of byte counts into PostgreSQL's
// integer-with-unit format: non-integral values drop to the next-smaller
// unit, and sub-kB values round to whole kilobytes.
func TestBytesToPGFormat(t *testing.T) {
	cases := []struct {
		desc  string
		input uint64
		want  string
	}{
		{
			desc:  "no limit to TB",
			input: 2000 * Terabyte,
			want:  "2000" + TB,
		},
		{
			desc:  "1 TB",
			input: Terabyte,
			want:  "1" + TB,
		},
		{
			desc:  "1.5 TB",
			input: uint64(1.5 * float64(Terabyte)),
			want:  "1536" + GB,
		},
		{
			desc:  "1TB - 1GB",
			input: Terabyte - Gigabyte,
			want:  "1023" + GB,
		},
		{
			desc:  "1TB - 1MB",
			input: Terabyte - Megabyte,
			want:  "1048575" + MB,
		},
		{
			desc:  "1 GB",
			input: Gigabyte,
			want:  "1" + GB,
		},
		{
			desc:  "1.5 GB",
			input: uint64(1.5 * float64(Gigabyte)),
			want:  "1536" + MB,
		},
		{
			desc:  "2.0 GB",
			input: 2 * Gigabyte,
			want:  "2" + GB,
		},
		{
			desc:  "1 GB - 1MB",
			input: Gigabyte - Megabyte,
			want:  "1023" + MB,
		},
		{
			desc:  "1 MB",
			input: Megabyte,
			want:  "1" + MB,
		},
		{
			desc:  "1.5 MB",
			input: uint64(1.5 * float64(Megabyte)),
			want:  "1536" + KB,
		},
		{
			desc:  "1020 kB",
			input: Megabyte - 4*Kilobyte,
			want:  "1020" + KB,
		},
		{
			desc:  "1 kB",
			input: Kilobyte,
			want:  "1" + KB,
		},
		{
			desc:  "1.5 kB, round up",
			input: uint64(1.5 * float64(Kilobyte)),
			want:  "2" + KB,
		},
		{
			desc:  "1.4 kB, round down",
			input: 1400,
			want:  "1" + KB,
		},
		{
			desc:  "1000 bytes",
			input: Kilobyte - 24,
			want:  "1" + KB,
		},
	}
	for _, c := range cases {
		if got := BytesToPGFormat(c.input); got != c.want {
			t.Errorf("%s: incorrect return: got %s want %s", c.desc, got, c.want)
		}
	}
}
// TestPGFormatToBytes checks parsing of PostgreSQL memory settings back into
// byte counts, covering rejection of malformed inputs (spaces, bare bytes,
// negatives, decimals, int64 overflow) and acceptance of each unit,
// oversized values, and single-quoted values.
func TestPGFormatToBytes(t *testing.T) {
	// A value one past math.MaxInt64, used to provoke the strconv overflow error.
	tooBigInt := "9223372036854775808"
	_, tooBigErr := strconv.ParseInt(tooBigInt, 10, 64)
	cases := []struct {
		desc   string
		input  string
		want   uint64
		errMsg string
	}{
		{
			desc:   "incorrect format #1",
			input:  " 64MB", // no leading spaces
			errMsg: fmt.Sprintf(errIncorrectBytesFormatFmt, " 64MB"),
		},
		{
			desc:   "incorrect format #2",
			input:  "64b", // bytes not allowed
			errMsg: fmt.Sprintf(errIncorrectBytesFormatFmt, "64b"),
		},
		{
			desc:   "incorrect format #3",
			input:  "64 GB", // no space between num and units,
			errMsg: fmt.Sprintf(errIncorrectBytesFormatFmt, "64 GB"),
		},
		{
			desc:   "incorrect format #4",
			input:  "-64MB", // negative memory is a no-no
			errMsg: fmt.Sprintf(errIncorrectBytesFormatFmt, "-64MB"),
		},
		{
			desc:   "incorrect format #5",
			input:  tooBigInt + MB,
			errMsg: fmt.Sprintf(errCouldNotParseBytesFmt, tooBigErr),
		},
		{
			desc:   "incorrect format #6",
			input:  "5.5" + MB, // decimal memory is a no-no
			errMsg: fmt.Sprintf(errIncorrectBytesFormatFmt, "5.5"+MB),
		},
		{
			desc:  "valid bytes",
			input: "65536" + B,
			want:  64 * Kilobyte,
		},
		{
			desc:  "valid kilobytes",
			input: "64" + KB,
			want:  64 * Kilobyte,
		},
		{
			desc:  "valid kilobytes, oversized",
			input: "2048" + KB,
			want:  2048 * Kilobyte,
		},
		{
			desc:  "valid megabytes",
			input: "64" + MB,
			want:  64 * Megabyte,
		},
		{
			desc:  "valid megabytes, oversized",
			input: "2048" + MB,
			want:  2048 * Megabyte,
		},
		{
			desc:  "valid gigabytes",
			input: "64" + GB,
			want:  64 * Gigabyte,
		},
		{
			desc:  "valid gigabytes, oversized",
			input: "2048" + GB,
			want:  2048 * Gigabyte,
		},
		{
			desc:  "valid terabytes",
			input: "64" + TB,
			want:  64 * Terabyte,
		},
		{
			desc:  "valid terabytes, oversized",
			input: "2048" + TB,
			want:  2048 * Terabyte,
		},
		{
			desc:  "valid megabytes, wrapped in single-quotes",
			input: "'64MB'",
			want:  64 * Megabyte,
		},
	}
	for _, c := range cases {
		bytes, err := PGFormatToBytes(c.input)
		if len(c.errMsg) > 0 { // failure cases
			if err == nil {
				t.Errorf("%s: unexpectedly err is nil: want %s", c.desc, c.errMsg)
			} else if got := err.Error(); got != c.errMsg {
				t.Errorf("%s: unexpected err msg: got\n%s\nwant\n%s", c.desc, got, c.errMsg)
			}
		} else {
			if err != nil {
				t.Errorf("%s: unexpected err: got %v", c.desc, err)
			}
			if got := bytes; got != c.want {
				t.Errorf("%s: incorrect bytes: got %d want %d", c.desc, got, c.want)
			}
		}
	}
}
// TestPGFormatToTime checks parsing of PostgreSQL time settings: with and
// without explicit units, quoted values, fractional values (which for
// integer vars either round or shift to a smaller unit so the result stays
// integral), real-typed vars, and rejection of malformed inputs.
func TestPGFormatToTime(t *testing.T) {
	cases := []struct {
		desc      string
		input     string
		defUnits  TimeUnit
		varType   VarType
		wantNum   float64
		wantUnits TimeUnit
		errMsg    string
	}{
		{
			desc:      "set statement_timeout to '13ms';",
			input:     "13ms",
			defUnits:  Milliseconds,
			varType:   VarTypeInteger,
			wantNum:   13.0,
			wantUnits: Milliseconds,
		},
		{
			desc:      "set statement_timeout to '13ms'; #2",
			input:     "'13ms'",
			defUnits:  Milliseconds,
			varType:   VarTypeInteger,
			wantNum:   13.0,
			wantUnits: Milliseconds,
		},
		{
			desc:      "set statement_timeout to 7;",
			input:     "7",
			defUnits:  Milliseconds,
			varType:   VarTypeInteger,
			wantNum:   7.0,
			wantUnits: Milliseconds,
		},
		{
			desc:      "set statement_timeout to 13;",
			input:     "13",
			defUnits:  Milliseconds,
			varType:   VarTypeInteger,
			wantNum:   13.0,
			wantUnits: Milliseconds,
		},
		{
			desc:      "set statement_timeout to 13.5;",
			input:     "13.5",
			defUnits:  Milliseconds,
			varType:   VarTypeInteger,
			wantNum:   14.0,
			wantUnits: Milliseconds,
		},
		{
			desc:      "set statement_timeout to 13.4;",
			input:     "13.4",
			defUnits:  Milliseconds,
			varType:   VarTypeInteger,
			wantNum:   13.0,
			wantUnits: Milliseconds,
		},
		{
			desc:      "set statement_timeout to '13.4ms';",
			input:     "13.4ms",
			defUnits:  Milliseconds,
			varType:   VarTypeInteger,
			wantNum:   13.0,
			wantUnits: Milliseconds,
		},
		{
			desc:      "set statement_timeout to '13min';",
			input:     "13min",
			defUnits:  Milliseconds,
			varType:   VarTypeInteger,
			wantNum:   13.0,
			wantUnits: Minutes,
		},
		{
			desc:      "set statement_timeout to '13.0min';",
			input:     "13.0min",
			defUnits:  Milliseconds,
			varType:   VarTypeInteger,
			wantNum:   13.0,
			wantUnits: Minutes,
		},
		{
			desc:      "set statement_timeout to '1.5s';",
			input:     "1.5s",
			defUnits:  Milliseconds,
			varType:   VarTypeInteger,
			wantNum:   1500.0,
			wantUnits: Milliseconds,
		},
		{
			desc:      "set statement_timeout to '1.5min';",
			input:     "1.5min",
			defUnits:  Milliseconds,
			varType:   VarTypeInteger,
			wantNum:   90.0,
			wantUnits: Seconds,
		},
		{
			desc:      "set statement_timeout to '1.3h';",
			input:     "1.3h",
			defUnits:  Milliseconds,
			varType:   VarTypeInteger,
			wantNum:   78.0,
			wantUnits: Minutes,
		},
		{
			desc:      "set statement_timeout to '42.0 min';",
			input:     "42.0 min",
			defUnits:  Milliseconds,
			varType:   VarTypeInteger,
			wantNum:   42.0,
			wantUnits: Minutes,
		},
		{
			desc:      "set statement_timeout to '42.1 min';",
			input:     "42.1 min",
			defUnits:  Milliseconds,
			varType:   VarTypeInteger,
			wantNum:   2526.0,
			wantUnits: Seconds,
		},
		{
			desc:     "set statement_timeout to 'bob';",
			input:    "bob",
			defUnits: Milliseconds,
			varType:  VarTypeInteger,
			errMsg:   fmt.Sprintf(errIncorrectTimeFormatFmt, "bob"),
		},
		{
			desc:     "set statement_timeout to '42 bob';",
			input:    "42 bob",
			defUnits: Milliseconds,
			varType:  VarTypeInteger,
			errMsg:   fmt.Sprintf(errIncorrectTimeFormatFmt, "42 bob"),
		},
		{
			desc:      "set vacuum_cost_delay to 250;",
			input:     "250",
			defUnits:  Milliseconds,
			varType:   VarTypeReal,
			wantNum:   250.0,
			wantUnits: Milliseconds,
		},
		{
			desc:      "set vacuum_cost_delay to 250.0;",
			input:     "250.0",
			defUnits:  Milliseconds,
			varType:   VarTypeReal,
			wantNum:   250.0,
			wantUnits: Milliseconds,
		},
		{
			desc:      "set vacuum_cost_delay to 1.3;",
			input:     "1.3",
			defUnits:  Milliseconds,
			varType:   VarTypeReal,
			wantNum:   1300.0,
			wantUnits: Microseconds,
		},
		{
			desc:      "set vacuum_cost_delay to '1.3ms';",
			input:     "1.3ms",
			defUnits:  Milliseconds,
			varType:   VarTypeReal,
			wantNum:   1300.0,
			wantUnits: Microseconds,
		},
		{
			desc:      "set vacuum_cost_delay to '1300us';",
			input:     "1300us",
			defUnits:  Milliseconds,
			varType:   VarTypeReal,
			wantNum:   1300.0,
			wantUnits: Microseconds,
		},
		{
			desc:     "37.1 goats",
			input:    "37.1 goats",
			defUnits: Milliseconds,
			varType:  VarTypeReal,
			errMsg:   fmt.Sprintf(errIncorrectTimeFormatFmt, "37.1 goats"),
		},
		{
			desc:     "37.42.1min",
			input:    "37.42.1min",
			defUnits: Milliseconds,
			varType:  VarTypeReal,
			errMsg:   fmt.Sprintf(errIncorrectTimeFormatFmt, "37.42.1min"),
		},
	}
	for _, c := range cases {
		v, u, err := PGFormatToTime(c.input, c.defUnits, c.varType)
		if len(c.errMsg) > 0 { // failure cases
			if err == nil {
				t.Errorf("%s: unexpectedly err is nil: want %s", c.desc, c.errMsg)
			} else if got := err.Error(); got != c.errMsg {
				t.Errorf("%s: unexpected err msg: got\n%s\nwant\n%s", c.desc, got, c.errMsg)
			}
		} else {
			if err != nil {
				t.Errorf("%s: unexpected err: got %v", c.desc, err)
			}
			if got := v; got != c.wantNum {
				t.Errorf("%s: incorrect num: got %f want %f", c.desc, got, c.wantNum)
			}
			if got := u; got != c.wantUnits {
				t.Errorf("%s: incorrect units: got %s want %s", c.desc, got, c.wantUnits)
			}
		}
	}
}
// TestTimeConversion verifies TimeConversion for every ordered pair of time
// units. The expected factor for (from, to) is simply
// seconds-per-from / seconds-per-to, so the 36 cases that were previously a
// hand-written (SQL-generated) literal table are generated here from one
// unit->seconds table instead.
//
// Bug fix: the original error branch was inverted — it reported
// "unexpectedly err is nil" when err was non-nil, and its else branch called
// err.Error() on a nil error (a guaranteed panic had it ever run). None of
// the table rows carried an errMsg, so the dead error machinery is dropped
// and every pair is simply expected to succeed.
func TestTimeConversion(t *testing.T) {
	units := []struct {
		name string
		unit TimeUnit
		secs float64
	}{
		{"us", Microseconds, 0.000001},
		{"ms", Milliseconds, 0.001000},
		{"s", Seconds, 1.000000},
		{"min", Minutes, 60.000000},
		{"h", Hours, 3600.000000},
		{"d", Days, 86400.000000},
	}
	for _, from := range units {
		for _, to := range units {
			desc := fmt.Sprintf("%s -> %s", from.name, to.name)
			want := from.secs / to.secs
			conv, err := TimeConversion(from.unit, to.unit)
			if err != nil {
				t.Errorf("%s: unexpected err: got %v", desc, err)
			}
			if got := conv; got != want {
				t.Errorf("%s: incorrect conv: got %f want %f", desc, got, want)
			}
		}
	}
}
|
/* For license and copyright information please see LEGAL file in repository */
package approuter
// PingPeer : Endpoints can use PING to verify that their peers are still alive or to check reachability to the peer.
// NOTE(review): currently a stub — no frame is built or sent yet.
func PingPeer() {
	// If the payload is not empty, the recipient MUST generate a PONG frame containing the same Data.
}
|
package di
import "strings"
// parseTag splits a di struct tag into its dependency name and an optional
// flag. Accepted forms: "", "name", "optional", "name,optional". Any other
// shape (extra parts, unknown second part) panics.
func parseTag(tag string) (name string, optional bool) {
	parts := strings.Split(tag, ",")
	switch {
	case len(parts) == 0:
		return "", false
	case len(parts) == 1 && parts[0] == "optional":
		return "", true
	case len(parts) == 1:
		return parts[0], false
	case len(parts) == 2 && parts[1] == "optional":
		return parts[0], true
	}
	panic("incorrect di tag")
}
|
// Copyright 2017 Jeff Foley. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package sources
import (
"fmt"
"regexp"
"time"
"github.com/OWASP/Amass/amass/core"
"github.com/OWASP/Amass/amass/utils"
)
// IPv4Info is data source object type that implements the DataSource interface.
type IPv4Info struct {
	BaseDataSource
	// baseURL is the root of the ipv4info.com site all request URLs are built from.
	baseURL string
}
// NewIPv4Info returns an initialized IPv4Info as a DataSource.
func NewIPv4Info(srv core.AmassService) DataSource {
	src := &IPv4Info{baseURL: "http://ipv4info.com"}
	src.BaseDataSource = *NewBaseDataSource(srv, core.SCRAPE, "IPv4info")
	return src
}
// Query returns the subdomain names discovered when querying this data source.
// The scrape follows a chain of pages — search -> ip-address -> dns ->
// subdomains — and harvests subdomain names from the final page.
func (i *IPv4Info) Query(domain, sub string) []string {
	var unique []string
	if domain != sub {
		return []string{}
	}
	page, ok := i.fetchPage(i.getURL(domain))
	if !ok {
		return unique
	}
	// Each hop extracts the next URL from the previous page. The 1s sleep
	// throttles the scrape; SetActive keeps the service from idling out.
	// (The repeated fetch/log/sleep blocks are deduplicated into this loop.)
	hops := []func(content, domain string) string{
		i.ipSubmatch,
		i.domainSubmatch,
		i.subdomainSubmatch,
	}
	for _, next := range hops {
		time.Sleep(time.Second)
		i.Service.SetActive()
		page, ok = i.fetchPage(next(page, domain))
		if !ok {
			return unique
		}
	}
	i.Service.SetActive()
	re := utils.SubdomainRegex(domain)
	for _, sd := range re.FindAllString(page, -1) {
		if u := utils.NewUniqueElements(unique, sd); len(u) > 0 {
			unique = append(unique, u...)
		}
	}
	return unique
}

// fetchPage retrieves url, logging and reporting failure via ok=false.
func (i *IPv4Info) fetchPage(url string) (page string, ok bool) {
	page, err := utils.GetWebPage(url, nil)
	if err != nil {
		i.Service.Config().Log.Printf("%s: %v", url, err)
		return "", false
	}
	return page, true
}
// getURL builds the initial search URL for the given domain.
func (i *IPv4Info) getURL(domain string) string {
	return fmt.Sprintf("%s/search/%s", i.baseURL, domain)
}
// ipSubmatch extracts the first ip-address page link for domain from content
// and returns it as an absolute URL, or "" if none is found.
func (i *IPv4Info) ipSubmatch(content, domain string) string {
	// Consistency fix: use a non-greedy (.*?) like domainSubmatch and
	// subdomainSubmatch; a greedy (.*) can swallow everything up to the last
	// occurrence of the domain in the page, yielding a bogus path.
	re := regexp.MustCompile("/ip-address/(.*?)/" + domain)
	subs := re.FindAllString(content, -1)
	if len(subs) == 0 {
		return ""
	}
	return i.baseURL + subs[0]
}
// domainSubmatch extracts the first dns page link for domain from content and
// returns it as an absolute URL, or "" if none is found.
func (i *IPv4Info) domainSubmatch(content, domain string) string {
	re := regexp.MustCompile("/dns/(.*?)/" + domain)
	if subs := re.FindAllString(content, -1); len(subs) > 0 {
		return i.baseURL + subs[0]
	}
	return ""
}
// subdomainSubmatch extracts the first subdomains page link for domain from
// content and returns it as an absolute URL, or "" if none is found.
func (i *IPv4Info) subdomainSubmatch(content, domain string) string {
	re := regexp.MustCompile("/subdomains/(.*?)/" + domain)
	if subs := re.FindAllString(content, -1); len(subs) > 0 {
		return i.baseURL + subs[0]
	}
	return ""
}
|
// Package v1alpha1 contains API Schema definitions for the pulumi v1alpha1 API group
// +k8s:deepcopy-gen=package,register
// +groupName=pulumi.com
package v1alpha1
|
package piscine
import "github.com/01-edu/z01"
// IsNegative prints 'T' followed by a newline when nb is negative, and 'F'
// followed by a newline otherwise.
func IsNegative(nb int) {
	verdict := 'F'
	if nb < 0 {
		verdict = 'T'
	}
	z01.PrintRune(verdict)
	z01.PrintRune('\n')
}
|
package controlplane
import (
"context"
"net"
"net/http"
"net/http/pprof"
"net/url"
"time"
envoy_service_discovery_v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"github.com/gorilla/mux"
"github.com/rs/zerolog"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/reflection"
"github.com/pomerium/pomerium/config"
"github.com/pomerium/pomerium/config/envoyconfig"
"github.com/pomerium/pomerium/config/envoyconfig/filemgr"
"github.com/pomerium/pomerium/internal/atomicutil"
"github.com/pomerium/pomerium/internal/controlplane/xdsmgr"
"github.com/pomerium/pomerium/internal/events"
"github.com/pomerium/pomerium/internal/httputil/reproxy"
"github.com/pomerium/pomerium/internal/log"
"github.com/pomerium/pomerium/internal/telemetry"
"github.com/pomerium/pomerium/internal/telemetry/requestid"
"github.com/pomerium/pomerium/internal/urlutil"
"github.com/pomerium/pomerium/internal/version"
"github.com/pomerium/pomerium/pkg/envoy/files"
pom_grpc "github.com/pomerium/pomerium/pkg/grpc"
"github.com/pomerium/pomerium/pkg/grpcutil"
)
// versionedConfig pairs a Config with the version number it was stored under.
type versionedConfig struct {
	*config.Config
	// version increases with each stored configuration update.
	version int64
}

// A Service can be mounted on the control plane.
type Service interface {
	// Mount registers the service's routes on the given router.
	Mount(r *mux.Router)
}
// A Server is the control-plane gRPC and HTTP servers.
type Server struct {
	// gRPC endpoint and server
	GRPCListener net.Listener
	GRPCServer   *grpc.Server
	// main HTTP endpoint
	HTTPListener net.Listener
	// metrics HTTP endpoint and its router
	MetricsListener net.Listener
	MetricsRouter   *mux.Router
	// debug (pprof) HTTP endpoint and its router
	DebugListener net.Listener
	DebugRouter   *mux.Router
	// Builder produces envoy configuration
	Builder  *envoyconfig.Builder
	EventsMgr *events.Manager

	// currentConfig holds the most recently applied versioned configuration.
	currentConfig *atomicutil.Value[versionedConfig]
	name          string
	xdsmgr        *xdsmgr.Manager
	filemgr       *filemgr.Manager
	metricsMgr    *config.MetricsManager
	reproxy       *reproxy.Handler
	// httpRouter is swapped atomically on configuration updates.
	httpRouter      *atomicutil.Value[*mux.Router]
	authenticateSvc Service
	proxySvc        Service
	// haveSetCapacity tracks which clusters have had capacity set
	haveSetCapacity map[string]bool
}
// NewServer creates a new Server. Listener ports are chosen by the OS.
//
// On any error after a listener has been opened, all previously opened
// listeners are closed before returning.
func NewServer(cfg *config.Config, metricsMgr *config.MetricsManager, eventsMgr *events.Manager) (*Server, error) {
	srv := &Server{
		metricsMgr: metricsMgr,
		EventsMgr:  eventsMgr,
		reproxy:    reproxy.New(),
		haveSetCapacity: map[string]bool{},
		currentConfig: atomicutil.NewValue(versionedConfig{
			Config: cfg,
		}),
		httpRouter: atomicutil.NewValue(mux.NewRouter()),
	}

	var err error

	// setup gRPC
	srv.GRPCListener, err = net.Listen("tcp4", net.JoinHostPort("127.0.0.1", cfg.GRPCPort))
	if err != nil {
		return nil, err
	}
	ui, si := grpcutil.AttachMetadataInterceptors(
		metadata.Pairs(
			grpcutil.MetadataKeyEnvoyVersion, files.FullVersion(),
			grpcutil.MetadataKeyPomeriumVersion, version.FullVersion(),
		),
	)
	srv.GRPCServer = grpc.NewServer(
		grpc.StatsHandler(telemetry.NewGRPCServerStatsHandler(cfg.Options.Services)),
		grpc.ChainUnaryInterceptor(requestid.UnaryServerInterceptor(), ui),
		grpc.ChainStreamInterceptor(requestid.StreamServerInterceptor(), si),
	)
	reflection.Register(srv.GRPCServer)
	srv.registerAccessLogHandlers()
	grpc_health_v1.RegisterHealthServer(srv.GRPCServer, pom_grpc.NewHealthCheckServer())

	// setup HTTP
	srv.HTTPListener, err = net.Listen("tcp4", net.JoinHostPort("127.0.0.1", cfg.HTTPPort))
	if err != nil {
		_ = srv.GRPCListener.Close()
		return nil, err
	}
	srv.MetricsListener, err = net.Listen("tcp4", net.JoinHostPort("127.0.0.1", cfg.MetricsPort))
	if err != nil {
		_ = srv.GRPCListener.Close()
		_ = srv.HTTPListener.Close()
		return nil, err
	}
	srv.DebugListener, err = net.Listen("tcp4", net.JoinHostPort("127.0.0.1", cfg.DebugPort))
	if err != nil {
		_ = srv.GRPCListener.Close()
		_ = srv.HTTPListener.Close()
		// BUG FIX: previously this closed the (failed, nil) DebugListener,
		// leaking the MetricsListener.
		_ = srv.MetricsListener.Close()
		return nil, err
	}

	// closeListeners releases every listener opened above; used by the
	// remaining error paths, which previously leaked all four listeners.
	closeListeners := func() {
		_ = srv.GRPCListener.Close()
		_ = srv.HTTPListener.Close()
		_ = srv.MetricsListener.Close()
		_ = srv.DebugListener.Close()
	}

	if err := srv.updateRouter(cfg); err != nil {
		closeListeners()
		return nil, err
	}
	srv.DebugRouter = mux.NewRouter()
	srv.MetricsRouter = mux.NewRouter()

	// pprof
	srv.DebugRouter.Path("/debug/pprof/cmdline").HandlerFunc(pprof.Cmdline)
	srv.DebugRouter.Path("/debug/pprof/profile").HandlerFunc(pprof.Profile)
	srv.DebugRouter.Path("/debug/pprof/symbol").HandlerFunc(pprof.Symbol)
	srv.DebugRouter.Path("/debug/pprof/trace").HandlerFunc(pprof.Trace)
	srv.DebugRouter.PathPrefix("/debug/pprof/").HandlerFunc(pprof.Index)

	// metrics
	srv.MetricsRouter.Handle("/metrics", srv.metricsMgr)

	srv.filemgr = filemgr.NewManager()
	srv.filemgr.ClearCache()

	srv.Builder = envoyconfig.New(
		srv.GRPCListener.Addr().String(),
		srv.HTTPListener.Addr().String(),
		srv.MetricsListener.Addr().String(),
		srv.filemgr,
		srv.reproxy,
	)

	ctx := log.WithContext(context.Background(), func(c zerolog.Context) zerolog.Context {
		return c.Str("server_name", cfg.Options.Services)
	})

	res, err := srv.buildDiscoveryResources(ctx)
	if err != nil {
		closeListeners()
		return nil, err
	}

	srv.xdsmgr = xdsmgr.NewManager(res)
	envoy_service_discovery_v3.RegisterAggregatedDiscoveryServiceServer(srv.GRPCServer, srv.xdsmgr)

	return srv, nil
}
// Run runs the control-plane gRPC and HTTP servers.
//
// It blocks until ctx is cancelled and every server has stopped. Each
// server is given a 5 second window to shut down gracefully before being
// force-stopped.
func (srv *Server) Run(ctx context.Context) error {
	eg, ctx := errgroup.WithContext(ctx)

	// persist manager events to storage, retrying with backoff on gRPC errors
	handle := srv.EventsMgr.Register(func(evt events.Event) {
		withGRPCBackoff(ctx, func() error {
			return srv.storeEvent(ctx, evt)
		})
	})
	defer srv.EventsMgr.Unregister(handle)

	// start the gRPC server
	eg.Go(func() error {
		log.Info(ctx).Str("addr", srv.GRPCListener.Addr().String()).Msg("starting control-plane gRPC server")
		return srv.GRPCServer.Serve(srv.GRPCListener)
	})

	// gracefully stop the gRPC server on context cancellation
	eg.Go(func() error {
		<-ctx.Done()

		ctx, cancel := context.WithCancel(ctx)
		ctx, cleanup := context.WithTimeout(ctx, time.Second*5)
		defer cleanup()

		// try a graceful stop first; cancel() wakes the waiter below as
		// soon as it finishes
		go func() {
			srv.GRPCServer.GracefulStop()
			cancel()
		}()

		// force-stop if the graceful stop has not completed within the
		// 5s timeout (Stop after a completed GracefulStop is a no-op)
		go func() {
			<-ctx.Done()
			srv.GRPCServer.Stop()
			cancel()
		}()

		<-ctx.Done()
		return nil
	})

	// start the http, debug and metrics servers, each paired with a
	// graceful-shutdown goroutine
	for _, entry := range []struct {
		Name     string
		Listener net.Listener
		Handler  http.Handler
	}{
		// the main HTTP handler resolves the router through httpRouter on
		// every request, so router swaps from config changes apply atomically
		{"http", srv.HTTPListener, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			srv.httpRouter.Load().ServeHTTP(w, r)
		})},
		{"debug", srv.DebugListener, srv.DebugRouter},
		{"metrics", srv.MetricsListener, srv.MetricsRouter},
	} {
		entry := entry // capture the loop variable for the goroutines below
		hsrv := (&http.Server{
			BaseContext: func(li net.Listener) context.Context {
				return ctx
			},
			Handler: entry.Handler,
		})
		// start the HTTP server
		eg.Go(func() error {
			log.Info(ctx).
				Str("addr", entry.Listener.Addr().String()).
				Msgf("starting control-plane %s server", entry.Name)
			return hsrv.Serve(entry.Listener)
		})
		// gracefully stop the HTTP server on context cancellation
		eg.Go(func() error {
			<-ctx.Done()

			ctx, cleanup := context.WithTimeout(ctx, time.Second*5)
			defer cleanup()

			return hsrv.Shutdown(ctx)
		})
	}

	return eg.Wait()
}
// OnConfigChange updates the pomerium config options.
//
// The HTTP router is rebuilt first so that an invalid config is rejected
// before the stored config and discovery resources are touched.
func (srv *Server) OnConfigChange(ctx context.Context, cfg *config.Config) error {
	if err := srv.updateRouter(cfg); err != nil {
		return err
	}

	srv.reproxy.Update(ctx, cfg)

	// bump the config version so consumers can distinguish updates
	previous := srv.currentConfig.Load()
	srv.currentConfig.Store(versionedConfig{
		Config:  cfg,
		version: previous.version + 1,
	})

	resources, err := srv.buildDiscoveryResources(ctx)
	if err != nil {
		return err
	}
	srv.xdsmgr.Update(ctx, resources)

	return nil
}
// EnableAuthenticate enables the authenticate service and rebuilds the
// HTTP router so its endpoints are mounted.
func (srv *Server) EnableAuthenticate(svc Service) error {
	srv.authenticateSvc = svc
	cfg := srv.currentConfig.Load().Config
	return srv.updateRouter(cfg)
}

// EnableProxy enables the proxy service and rebuilds the HTTP router so
// its endpoints are mounted.
func (srv *Server) EnableProxy(svc Service) error {
	srv.proxySvc = svc
	cfg := srv.currentConfig.Load().Config
	return srv.updateRouter(cfg)
}
// updateRouter rebuilds the HTTP router from cfg and atomically swaps it in.
func (srv *Server) updateRouter(cfg *config.Config) error {
	router := mux.NewRouter()
	srv.addHTTPMiddleware(router, cfg)
	if err := srv.mountCommonEndpoints(router, cfg); err != nil {
		return err
	}

	if srv.authenticateSvc != nil {
		// mount the auth handler for both the external and internal
		// authenticate hosts, skipping duplicates
		mounted := map[string]struct{}{}
		urlFns := []func() (*url.URL, error){
			cfg.Options.GetAuthenticateURL,
			cfg.Options.GetInternalAuthenticateURL,
		}
		for _, urlFn := range urlFns {
			u, err := urlFn()
			if err != nil {
				return err
			}
			host := urlutil.StripPort(u.Host)
			if _, dup := mounted[host]; dup {
				continue
			}
			mounted[host] = struct{}{}
			srv.authenticateSvc.Mount(router.Host(host).Subrouter())
		}
	}

	if srv.proxySvc != nil {
		srv.proxySvc.Mount(router)
	}

	srv.httpRouter.Store(router)
	return nil
}
|
// Copyright (c) 2023 Target Brands, Inc. All rights reserved.
//
// Use of this source code is governed by the LICENSE file in this repository.
package vela
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"time"
"github.com/buildkite/yaml"
"github.com/go-vela/sdk-go/version"
"github.com/go-vela/types"
"github.com/google/go-querystring/query"
"github.com/sirupsen/logrus"
)
const (
	// userAgent is the base User-Agent prefix sent with every API request.
	userAgent = "vela-sdk-go"
)
type (
	// Client is a client that manages communication with the Vela API.
	Client struct {
		// HTTP client used to communicate with the Vela API.
		client *http.Client

		// Base URL for Vela API requests.
		baseURL *url.URL

		// User agent used when communicating with the Vela API.
		UserAgent string

		// Services used for communicating with the different
		// parts of the Vela API.
		Admin *AdminService
		Authentication *AuthenticationService
		Authorization *AuthorizationService
		Build *BuildService
		Deployment *DeploymentService
		Hook *HookService
		Log *LogService
		Pipeline *PipelineService
		Repo *RepoService
		SCM *SCMService
		Schedule *ScheduleService
		Secret *SecretService
		Step *StepService
		Svc *SvcService
		Worker *WorkerService
	}

	// service holds the shared client each API service embeds.
	service struct {
		client *Client
	}

	// ListOptions represents the optional parameters to various List methods that
	// support pagination.
	ListOptions struct {
		// For paginated result sets, page of results to retrieve.
		Page int `url:"page,omitempty"`

		// For paginated result sets, the number of results to include per page.
		PerPage int `url:"per_page,omitempty"`
	}

	// OAuthExchangeOptions represents the required
	// parameters to exchange for tokens.
	OAuthExchangeOptions struct {
		// OAuth authorization code returned by the provider redirect.
		Code string `url:"code,omitempty"`
		// Opaque state value echoed back to protect against CSRF.
		State string `url:"state,omitempty"`
	}

	// LoginOptions represents the optional parameters
	// to launch the login process.
	LoginOptions struct {
		Type string `url:"type,omitempty"`
		Port string `url:"port,omitempty"`
	}
)
// NewClient returns a new Vela API client.
// baseURL has to be the HTTP endpoint of the Vela API.
// If no httpClient is provided, a new client with a 15s timeout is used.
func NewClient(baseURL, id string, httpClient *http.Client) (*Client, error) {
	// create a dedicated client if none was provided
	//
	// BUG FIX: previously this assigned http.DefaultClient and then set its
	// Timeout, mutating the process-wide shared default client and silently
	// imposing a 15s timeout on unrelated code.
	if httpClient == nil {
		httpClient = &http.Client{Timeout: time.Second * 15}
	}

	// we must have a url provided to create the client
	if len(baseURL) == 0 {
		return nil, fmt.Errorf("no Vela baseURL provided")
	}

	// parse url provided for the client
	url, err := url.Parse(baseURL)
	if err != nil {
		return nil, err
	}

	// prepare the user agent string
	ua := fmt.Sprintf("%s/%s", userAgent, version.Version.String())

	// if an ID was given, use it in the user agent string
	if len(id) > 0 {
		ua = fmt.Sprintf("%s (%s)", ua, id)
	}

	// create initial client fields
	c := &Client{
		client:    httpClient,
		baseURL:   url,
		UserAgent: ua,
	}

	// instantiate all client services
	c.Authentication = &AuthenticationService{client: c}
	c.Authorization = &AuthorizationService{client: c}
	c.Admin = &AdminService{
		&AdminBuildService{client: c},
		&AdminCleanService{client: c},
		&AdminDeploymentService{client: c},
		&AdminHookService{client: c},
		&AdminRepoService{client: c},
		&AdminSecretService{client: c},
		&AdminSvcService{client: c},
		&AdminStepService{client: c},
		&AdminUserService{client: c},
		&AdminWorkerService{client: c},
	}
	c.Build = &BuildService{client: c}
	c.Deployment = &DeploymentService{client: c}
	c.Hook = &HookService{client: c}
	c.Log = &LogService{client: c}
	c.Pipeline = &PipelineService{client: c}
	c.Repo = &RepoService{client: c}
	c.SCM = &SCMService{client: c}
	c.Schedule = &ScheduleService{client: c}
	c.Secret = &SecretService{client: c}
	c.Step = &StepService{client: c}
	c.Svc = &SvcService{client: c}
	c.Worker = &WorkerService{client: c}

	return c, nil
}
// SetTimeout sets the timeout for the underlying http client used by
// all subsequent requests made through this Client.
func (c *Client) SetTimeout(d time.Duration) {
	c.client.Timeout = d
}
// buildURLForRequest builds the absolute URL (as a string) that will be
// called, joining the client's base URL and the given path with exactly
// one slash between them.
func (c *Client) buildURLForRequest(urlStr string) (string, error) {
	// ensure the base ends with a single trailing slash
	base := c.baseURL.String()
	if !strings.HasSuffix(base, "/") {
		base += "/"
	}

	// validate the relative part after dropping any leading slash
	rel, err := url.Parse(strings.TrimPrefix(urlStr, "/"))
	if err != nil {
		return "", err
	}

	return base + rel.String(), nil
}
// addAuthentication adds the necessary authentication to the request.
//
// Depending on how the Authentication service is configured, the bearer
// token is sourced from (in order): an access/refresh token pair
// (refreshing the access token if expired), a personal access token
// (exchanged for an access token), or a plain token.
func (c *Client) addAuthentication(req *http.Request) error {
	// token that will be sent with the request depending on auth type
	token := ""

	// handle access + refresh tokens
	// refresh access token if needed
	if c.Authentication.HasAccessAndRefreshAuth() {
		isExpired := IsTokenExpired(*c.Authentication.accessToken)
		if isExpired {
			logrus.Debug("access token has expired")
			// if the refresh token is also expired the user must log in again
			isRefreshExpired := IsTokenExpired(*c.Authentication.refreshToken)
			if isRefreshExpired {
				return fmt.Errorf("your tokens have expired - please log in again with 'vela login'")
			}

			logrus.Debug("fetching new access token with existing refresh token")

			// send API call to refresh the access token to Vela
			//
			// https://pkg.go.dev/github.com/go-vela/sdk-go/vela?tab=doc#AuthenticationService.RefreshAccessToken
			_, err := c.Authentication.RefreshAccessToken(*c.Authentication.refreshToken)
			if err != nil {
				return err
			}
		}

		// set (new?) access token as the token
		token = *c.Authentication.accessToken
	}

	// handle personal access token
	if c.Authentication.HasPersonalAccessTokenAuth() {
		// send API call to exchange token for access token to Vela
		//
		// https://pkg.go.dev/github.com/go-vela/sdk-go/vela?tab=doc#AuthenticationService.AuthenticateWithToken
		at, _, err := c.Authentication.AuthenticateWithToken(*c.Authentication.personalAccessToken)
		if err != nil {
			return err
		}

		token = at
	}

	// handle plain token
	if c.Authentication.HasTokenAuth() {
		token = *c.Authentication.token
	}

	// make sure token is not empty
	if len(token) == 0 {
		return fmt.Errorf("token has no value")
	}

	req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token))

	return nil
}
// addOptions adds the parameters in opt as url query parameters to s.
// opt must be a struct whose fields may contain "url" tags.
// On any failure the original string s is returned unchanged.
func addOptions(s string, opt interface{}) (string, error) {
	// a nil pointer has no options to add
	v := reflect.ValueOf(opt)
	if v.Kind() == reflect.Ptr && v.IsNil() {
		return s, nil
	}

	// parse url provided for the options
	u, err := url.Parse(s)
	if err != nil {
		return s, err
	}

	// derive query values from the struct's "url" tags
	values, err := query.Values(opt)
	if err != nil {
		return s, err
	}

	// safely encode url with query values
	u.RawQuery = values.Encode()

	return u.String(), nil
}
// NewRequest creates an API request.
// A relative URL can be provided in url,
// in which case it is resolved relative to the baseURL of the Client.
// Relative URLs should always be specified without a preceding slash.
// If specified, the value pointed to by body is JSON encoded and included as the request body.
func (c *Client) NewRequest(method, url string, body interface{}) (*http.Request, error) {
	// build url for request
	u, err := c.buildURLForRequest(url)
	if err != nil {
		return nil, err
	}

	// determine the request body based on its type
	var reqBody io.Reader
	switch b := body.(type) {
	// io.ReadCloser is used for streaming endpoints and is passed through
	case io.ReadCloser:
		reqBody = b
	// everything else is JSON-encoded (nil means no body)
	default:
		if body != nil {
			buf := new(bytes.Buffer)
			if err := json.NewEncoder(buf).Encode(body); err != nil {
				return nil, err
			}
			reqBody = buf
		}
	}

	req, err := http.NewRequest(method, u, reqBody)
	if err != nil {
		return nil, err
	}

	// apply authentication to request if client is set
	if c.Authentication.HasAuth() {
		if err := c.addAuthentication(req); err != nil {
			return nil, err
		}
	}

	// add the user agent for the request
	req.Header.Add("User-Agent", c.UserAgent)

	// apply default header for content-type
	req.Header.Add("Content-Type", "application/json")

	return req, nil
}
// Response represents an Vela API response.
// This wraps the standard http.Response returned from Vela.
type Response struct {
*http.Response
// Values hold basic information pertaining to how to paginate
// through response results
NextPage int
PrevPage int
FirstPage int
LastPage int
}
// newResponse creates a new Response for the provided http.Response.
// r must not be nil.
func newResponse(r *http.Response) *Response {
response := &Response{Response: r}
response.populatePageValues()
return response
}
// populatePageValues parses the HTTP Link response headers and populates the
// various pagination link values in the Response.
func (r *Response) populatePageValues() {
if links, ok := r.Response.Header["Link"]; ok && len(links) > 0 {
for _, link := range strings.Split(links[0], ",") {
segments := strings.Split(strings.TrimSpace(link), ";")
// link must at least have href and rel
if len(segments) < 2 {
continue
}
// ensure href is properly formatted
if !strings.HasPrefix(segments[0], "<") || !strings.HasSuffix(segments[0], ">") {
continue
}
// try to pull out page parameter
url, err := url.Parse(segments[0][1 : len(segments[0])-1])
if err != nil {
continue
}
page := url.Query().Get("page")
if page == "" {
continue
}
for _, segment := range segments[1:] {
switch strings.TrimSpace(segment) {
case `rel="next"`:
r.NextPage, _ = strconv.Atoi(page)
case `rel="prev"`:
r.PrevPage, _ = strconv.Atoi(page)
case `rel="first"`:
r.FirstPage, _ = strconv.Atoi(page)
case `rel="last"`:
r.LastPage, _ = strconv.Atoi(page)
}
}
}
}
}
// Call is a combined function for Client.NewRequest and Client.Do.
//
// Most API methods are quite the same.
// Get the URL, apply options, make a request, and get the response.
// Without adding special headers or something.
// To avoid a big amount of code duplication you can Client.Call.
//
// method is the HTTP method you want to call.
// url is the URL you want to call.
// body is the HTTP body.
// respType is the type that the HTTP response will resolve to.
//
// For more information read https://github.com/google/go-github/issues/234
func (c *Client) Call(method, url string, body, respType interface{}) (*Response, error) {
	// create new request from parameters
	req, err := c.NewRequest(method, url, body)
	if err != nil {
		return nil, err
	}

	// send request with client; the response is returned even on error so
	// callers may inspect it (previously an identical pair of returns)
	return c.Do(req, respType)
}
// CallWithHeaders is a combined function for Client.NewRequest and Client.Do.
//
// Most API methods are quite the same.
// Get the URL, apply options, make a request, and get the response.
// To avoid a big amount of code duplication you can Client.Call.
//
// method is the HTTP method you want to call.
// url is the URL you want to call.
// body is the HTTP body.
// respType is the type that the HTTP response will resolve to.
// headers is a map of HTTP headers that are set (overwriting defaults).
//
// For more information read https://github.com/google/go-github/issues/234
func (c *Client) CallWithHeaders(method, url string, body, respType interface{}, headers map[string]string) (*Response, error) {
	// create new request from parameters
	req, err := c.NewRequest(method, url, body)
	if err != nil {
		return nil, err
	}

	// add header key or overwrite key with new values
	for k, v := range headers {
		req.Header.Set(k, v)
	}

	// send request with client; the response is returned even on error so
	// callers may inspect it (previously an identical pair of returns)
	return c.Do(req, respType)
}
// Do sends an API request and returns the API response.
// The API response is JSON decoded and stored in the value pointed to by respType,
// or returned as an error if an API error has occurred.
// If respType implements the io.Writer interface, the raw response body will
// be written to respType, without attempting to first decode it.
func (c *Client) Do(req *http.Request, respType interface{}) (*Response, error) {
	// send request with client
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}
	// defer closing response body
	defer resp.Body.Close()

	// wrap response
	response := &Response{Response: resp}

	// check response for errors
	err = CheckResponse(resp)
	if err != nil {
		// if error is present, we still return the response so the caller
		// may inspect it further for debugging and troubleshooting
		return response, err
	}

	// if return object is provided
	if respType != nil {
		// copy response body if object implements io.Writer interface
		if w, ok := respType.(io.Writer); ok {
			_, err = io.Copy(w, resp.Body)
			if err != nil {
				return response, err
			}
		} else {
			// copy all bytes from response body
			body, err := io.ReadAll(resp.Body)

			// ensure response body is not empty so the user may inspect
			// it further for debugging and troubleshooting
			resp.Body = io.NopCloser(bytes.NewBuffer(body))

			if err != nil {
				// if error is present, we still return the response so the caller
				// may inspect it further for debugging and troubleshooting
				return response, err
			}

			// check if the content type is YAML
			if strings.Contains(resp.Header.Get("Content-Type"), "application/x-yaml") {
				// unmarshal the body as YAML to the return object;
				// decode errors are deliberately ignored — the raw body
				// remains available on the returned Response
				_ = yaml.Unmarshal(body, respType)
			} else {
				// unmarshal the body as JSON to the return object;
				// decode errors are deliberately ignored — the raw body
				// remains available on the returned Response
				_ = json.Unmarshal(body, respType)
			}
		}
	}

	return response, err
}
// CheckResponse checks the API response for errors, and returns them if present.
// A response is considered an error if it has a status code outside the 2xx range.
//
// The response body is consumed by this function; callers that need it
// must restore it (as Do does).
func CheckResponse(r *http.Response) error {
	// return no error if successful response code
	if c := r.StatusCode; http.StatusOK <= c && c <= 299 {
		return nil
	}

	// custom response type
	resp := types.Error{}

	// read all bytes from response body
	b, _ := io.ReadAll(r.Body)

	// unmarshal bytes into custom response type; preserve the historical
	// lenient behavior of returning nil when the body is not a valid error
	// payload, and also guard the *resp.Message dereference below against
	// a payload without a "message" field (previously a nil-pointer panic)
	err := json.Unmarshal(b, &resp)
	if err != nil || resp.Message == nil {
		//nolint:nilerr // ignore returning nil
		return nil
	}

	// BUG FIX: the server message was previously passed to fmt.Errorf as the
	// format string, corrupting any message containing '%' verbs
	return fmt.Errorf("%s", *resp.Message)
}
|
package wallet
import (
"errors"
"github.com/appditto/pippin_nano_wallet/libs/database"
"github.com/appditto/pippin_nano_wallet/libs/database/ent"
"github.com/appditto/pippin_nano_wallet/libs/database/ent/account"
"github.com/appditto/pippin_nano_wallet/libs/utils"
"github.com/go-redis/redis/v9"
)
// ErrWalletLocked is returned when an encrypted wallet has no decrypted keys cached.
var ErrWalletLocked = errors.New("wallet is locked")

// ErrBadPassword is returned when decryption with the supplied password fails.
var ErrBadPassword = errors.New("bad password")

// ErrWalletNotLocked is returned for lock/unlock operations on an unencrypted wallet.
var ErrWalletNotLocked = errors.New("wallet not locked")
// This is encrypted wallets and adhoc accounts
// We store decrypted seeds in redis, while encrypted ones are stored in the database
// Encryption is an optional behavior
// Encrypt entrypoint
// If password is a blank string, we disable encryption for the wallet
// If password is not a blank string, we enable encryption for the wallet
// EncryptWallet enables or disables encryption for a wallet.
//
// With a non-blank password, the seed and all adhoc private keys are
// (re-)encrypted with that password. With a blank password, encryption is
// removed and the plaintext values are persisted; ErrBadPassword is
// returned if the wallet is already unencrypted. An encrypted wallet must
// be unlocked before calling this (its decrypted seed must be cached).
func (w *NanoWallet) EncryptWallet(wallet *ent.Wallet, password string) (bool, error) {
	if wallet == nil {
		return false, ErrInvalidWallet
	} else if !wallet.Encrypted && password == "" {
		// nothing to do: wallet is not encrypted and no password was given
		return false, ErrBadPassword
	}

	var seed string
	var err error
	if wallet.Encrypted {
		// Retrieve decrypted seed from storage, wallet has to be unlocked
		seed, err = GetDecryptedKeyFromStorage(wallet, "seed")
		if err != nil {
			return false, err
		}
	} else {
		// Wallet is not encrypted, so we can just use the seed
		seed = wallet.Seed
	}

	if password == "" {
		// Remove encryption: persist the plaintext seed and adhoc keys
		tx, err := w.DB.Tx(w.Ctx)
		if err != nil {
			return false, err
		}
		_, err = tx.Wallet.UpdateOne(wallet).SetEncrypted(false).SetSeed(seed).Save(w.Ctx)
		if err != nil {
			tx.Rollback()
			return false, err
		}
		adhocAccts, err := tx.Account.Query().Where(account.WalletID(wallet.ID), account.PrivateKeyNotNil()).All(w.Ctx)
		if err != nil {
			tx.Rollback()
			return false, err
		}
		for _, acct := range adhocAccts {
			key, err := GetDecryptedKeyFromStorage(wallet, acct.Address)
			if err != nil {
				tx.Rollback()
				return false, err
			}
			_, err = tx.Account.UpdateOne(acct).SetPrivateKey(key).Save(w.Ctx)
			if err != nil {
				tx.Rollback()
				return false, err
			}
		}
		err = tx.Commit()
		if err != nil {
			return false, err
		}
		wallet.Encrypted = false
		wallet.Seed = seed
		// drop the cached decrypted values; they are now stored in plaintext
		database.GetRedisDB().Del(wallet.ID.String())
		return true, nil
	}

	// We are updating the password
	crypter := utils.NewAesCrypt(password)
	encryptedSeed, err := crypter.Encrypt(seed)
	if err != nil {
		return false, err
	}
	tx, err := w.DB.Tx(w.Ctx)
	if err != nil {
		return false, err
	}
	_, err = tx.Wallet.UpdateOne(wallet).SetEncrypted(true).SetSeed(encryptedSeed).Save(w.Ctx)
	if err != nil {
		tx.Rollback()
		return false, err
	}
	// Encrypt all adhoc private keys
	adhocAccts, err := tx.Account.Query().Where(account.WalletID(wallet.ID), account.PrivateKeyNotNil()).All(w.Ctx)
	if err != nil {
		tx.Rollback()
		return false, err
	}
	for _, acct := range adhocAccts {
		encryptedKey, err := crypter.Encrypt(*acct.PrivateKey)
		if err != nil {
			// BUG FIX: previously returned here without rolling back,
			// leaving the transaction open
			tx.Rollback()
			return false, err
		}
		_, err = tx.Account.UpdateOne(acct).SetPrivateKey(encryptedKey).Save(w.Ctx)
		if err != nil {
			tx.Rollback()
			return false, err
		}
	}
	err = tx.Commit()
	if err != nil {
		return false, err
	}
	wallet.Encrypted = true
	wallet.Seed = encryptedSeed
	return true, nil
}
// LockWallet locks an encrypted wallet by discarding its cached decrypted
// keys from redis. Returns ErrWalletNotLocked for unencrypted wallets.
func (w *NanoWallet) LockWallet(wallet *ent.Wallet) error {
	if wallet == nil {
		return ErrInvalidWallet
	} else if !wallet.Encrypted {
		return ErrWalletNotLocked
	}
	// NOTE(review): the result of Del is discarded — a failed delete would
	// leave the wallet effectively unlocked; confirm this is acceptable
	database.GetRedisDB().Del(wallet.ID.String())
	return nil
}
// UnlockWallet unlocks an encrypted wallet by decrypting the seed and all
// adhoc private keys with password and caching the plaintext values in
// redis. Returns ErrBadPassword if the seed cannot be decrypted and
// ErrWalletNotLocked for unencrypted wallets.
func (w *NanoWallet) UnlockWallet(wallet *ent.Wallet, password string) (bool, error) {
	if wallet == nil {
		return false, ErrInvalidWallet
	} else if !wallet.Encrypted {
		return false, ErrWalletNotLocked
	}
	crypter := utils.NewAesCrypt(password)
	// a decryption failure is treated as a wrong password
	seed, err := crypter.Decrypt(wallet.Seed)
	if err != nil {
		return false, ErrBadPassword
	}
	err = SetDecryptedKeyToStorage(wallet, "seed", seed)
	if err != nil {
		return false, err
	}
	// Every adhoc account gets decrypted too
	adhocAccts, err := w.DB.Account.Query().Where(account.WalletID(wallet.ID), account.PrivateKeyNotNil()).All(w.Ctx)
	if err != nil {
		return false, err
	}
	for _, acct := range adhocAccts {
		key, err := crypter.Decrypt(*acct.PrivateKey)
		if err != nil {
			return false, err
		}
		err = SetDecryptedKeyToStorage(wallet, acct.Address, key)
		if err != nil {
			return false, err
		}
	}
	return true, nil
}
// GetDecryptedKeyFromStorage retrieves the decrypted value stored under key
// for the given wallet. Unencrypted wallets return the plaintext seed
// directly. Returns ErrWalletLocked when no decrypted copy is cached
// (i.e. the wallet has not been unlocked).
func GetDecryptedKeyFromStorage(wallet *ent.Wallet, key string) (string, error) {
	if wallet == nil {
		return "", ErrInvalidWallet
	} else if !wallet.Encrypted {
		// unencrypted wallets keep the seed in plaintext
		return wallet.Seed, nil
	}
	// use a distinct result variable instead of shadowing the key parameter
	val, err := database.GetRedisDB().Hget(wallet.ID.String(), key)
	// errors.Is handles wrapped errors, unlike the previous == comparison
	if errors.Is(err, redis.Nil) {
		// no cached decrypted value means the wallet is locked
		return "", ErrWalletLocked
	} else if err != nil {
		// Unknown error
		return "", err
	}
	return val, nil
}
// SetDecryptedKeyToStorage caches the decrypted value for key in redis
// under the wallet's ID. Only meaningful for encrypted wallets; returns
// ErrWalletNotLocked otherwise.
func SetDecryptedKeyToStorage(wallet *ent.Wallet, key string, seed string) error {
	if wallet == nil {
		return ErrInvalidWallet
	} else if !wallet.Encrypted {
		return ErrWalletNotLocked
	}
	return database.GetRedisDB().Hset(wallet.ID.String(), key, seed)
}
|
package gofile
// GetServer models the gofile API response that names the server to
// upload to; Data.Server is the chosen upload server.
type GetServer struct {
	Status string `json:"status"`
	Data struct {
		Server string `json:"server"`
	} `json:"data"`
}

// Upload models the gofile API response returned after uploading a file,
// including the download page URL and file metadata.
type Upload struct {
	Status string `json:"status"`
	Data struct {
		DownloadPage string `json:"downloadPage"`
		Code string `json:"code"`
		ParentFolder string `json:"parentFolder"`
		FileID string `json:"fileId"`
		FileName string `json:"fileName"`
		Md5 string `json:"md5"`
		DirectLink string `json:"directLink"`
		Info string `json:"info"`
	} `json:"data"`
}
|
package lambdacalculus
import (
"testing"
)
// Church numerals 1 through 4, built by repeated application of Succ to Zero.
var one = Succ(Zero)
var two = Succ(one)
var three = Succ(two)
var four = Succ(three)
// A Church numeral n applied to f and x performs n applications of f;
// these tests decode numerals by counting applications. Failure messages
// now include the observed value, matching the convention used by the
// other tests in this file.
func TestZero(t *testing.T) {
	res := Zero(f)(x)
	if res != 0 {
		t.Errorf("Zero should return 0 instead is %v", res)
	}
}

func TestSucc(t *testing.T) {
	res := Succ(Zero)(f)(x)
	if res != 1 {
		t.Errorf("Successor of Zero should be 1 instead is %v", res)
	}
}

func TestSuccSucc(t *testing.T) {
	res := Succ(Succ(Zero))(f)(x)
	if res != 2 {
		t.Errorf("Successor of successor of Zero should be 2 instead is %v", res)
	}
}

func TestSuccSuccSucc(t *testing.T) {
	res := Succ(Succ(Succ(Zero)))(f)(x)
	if res != 3 {
		t.Errorf("Successor of successor of successor of Zero should be 3 instead is %v", res)
	}
}
// TestSum_0 exercises raw Church addition without the Sum helper:
// applying one to Succ and three computes 1 + 3 directly.
func TestSum_0(t *testing.T) {
	res := one(Succ)(three).(ChurchNumber)(f)(x)
	if res != 4 {
		t.Errorf("Sum of one + three should be 4 instead is %v", res)
	}
}

// identity: 0 + 0 = 0
func TestSum_0_0(t *testing.T) {
	res := Sum(Zero)(Zero)(f)(x)
	if res != 0 {
		t.Errorf("Sum of zero + zero should be 0 instead is %v", res)
	}
}

// left identity: 0 + 3 = 3
func TestSum_0_3(t *testing.T) {
	res := Sum(Zero)(three)(f)(x)
	if res != 3 {
		t.Errorf("Sum of zero + three should be 3 instead is %v", res)
	}
}

// right identity: 4 + 0 = 4
func TestSum_4_0(t *testing.T) {
	res := Sum(four)(Zero)(f)(x)
	if res != 4 {
		t.Errorf("Sum of four + zero should be 4 instead is %v", res)
	}
}

func TestSum_2_4(t *testing.T) {
	res := Sum(two)(four)(f)(x)
	if res != 6 {
		t.Errorf("Sum of two + four should be 6 instead is %v", res)
	}
}

// commuted operands of the previous test
func TestSum_4_2(t *testing.T) {
	res := Sum(four)(two)(f)(x)
	if res != 6 {
		t.Errorf("Sum of four + two should be 6 instead is %v", res)
	}
}

// nested sums: (3 + 4) + (1 + 2) = 10
func TestSum_Sum_3_4_Sum_1_2(t *testing.T) {
	res := Sum(Sum(three)(four))(Sum(one)(two))(f)(x)
	if res != 10 {
		t.Errorf("Sum of sum(3)(4) + sum(1)(2) should be 10 instead is %v", res)
	}
}
// Tests for Church multiplication.
func TestMult_4_2(t *testing.T) {
	res := Mult(four)(two)(f)(x)
	if res != 8 {
		t.Errorf("Mult of four * two should be 8 instead is %v", res)
	}
}

func TestMult_4_0(t *testing.T) {
	res := Mult(four)(Zero)(f)(x)
	if res != 0 {
		t.Errorf("Mult of four * zero should be 0 instead is %v", res)
	}
}

func TestMult_0_4(t *testing.T) {
	res := Mult(Zero)(four)(f)(x)
	if res != 0 {
		t.Errorf("Mult of zero * four should be 0 instead is %v", res)
	}
}

func TestMult_0_0(t *testing.T) {
	res := Mult(Zero)(Zero)(f)(x)
	if res != 0 {
		t.Errorf("Mult of zero * zero should be 0 instead is %v", res)
	}
}

// nested: (1 + 2) * (2 * 3) = 18
func TestMult_Sum_1_2_Mult_2_3(t *testing.T) {
	res := Mult(Sum(one)(two))(Mult(two)(three))(f)(x)
	if res != 18 {
		// BUG FIX: message previously said "sum(1)(2) + mult(2)(3)"
		t.Errorf("Mult of sum(1)(2) * mult(2)(3) should be 18 instead is %v", res)
	}
}
// Tests for Church exponentiation; Pow(e)(b) computes b^e, and test names
// list the arguments in call order.
func TestPow_0_4(t *testing.T) {
	res := Pow(Zero)(four)(f)(x)
	if res != 1 {
		t.Errorf("Power of four to zero should be 1 instead is %v", res)
	}
}

func TestPow_4_0(t *testing.T) {
	res := Pow(four)(Zero)(f)(x)
	if res != 0 {
		t.Errorf("Power of zero to four should be 0 instead is %v", res)
	}
}

func TestPow_2_3(t *testing.T) {
	res := Pow(two)(three)(f)(x)
	if res != 9 {
		t.Errorf("Power of three to two should be 9 instead is %v", res)
	}
}

// renamed from TestPow_3_4: the call is Pow(four)(three), and sibling
// tests name arguments in call order
func TestPow_4_3(t *testing.T) {
	res := Pow(four)(three)(f)(x)
	if res != 81 {
		t.Errorf("Power of three to four should be 81 instead is %v", res)
	}
}
// IsZero selects its first continuation for Zero and its second otherwise.
func TestIsZero_0(t *testing.T) {
	res := IsZero(Zero)(true)(false)
	if !res.(bool) {
		t.Errorf("IsZero of zero should be true instead is %v", res)
	}
}

func TestIsZero_1(t *testing.T) {
	res := IsZero(one)(true)(false)
	if res.(bool) {
		t.Errorf("IsZero of one should be false instead is %v", res)
	}
}
// nextPair maps (a, b) to (b, b+1); starting from (0, 0) one application
// yields (0, 1).
func TestNextPair(t *testing.T) {
	p00 := Tuple2Struct(Zero)(Zero)
	resFirst := nextPair(p00)(First).(ChurchNumber)(f)(x)
	if resFirst != 0 {
		t.Errorf("The first element of the pair should be 0 instead is %v", resFirst)
	}
	resSecond := nextPair(p00)(Second).(ChurchNumber)(f)(x)
	if resSecond != 1 {
		// BUG FIX: message previously said "first element"
		t.Errorf("The second element of the pair should be 1 instead is %v", resSecond)
	}
}

// four applications of nextPair to (0, 0) yield (3, 4)
func TestNextPairRepeated(t *testing.T) {
	p00 := Tuple2Struct(Zero)(Zero)
	nextP := nextPair(nextPair(nextPair(nextPair(p00))))
	resFirst := nextP(First).(ChurchNumber)(f)(x)
	if resFirst != 3 {
		t.Errorf("The first element of the pair should be 3 instead is %v", resFirst)
	}
	resSecond := nextP(Second).(ChurchNumber)(f)(x)
	if resSecond != 4 {
		// BUG FIX: message previously said "first element"
		t.Errorf("The second element of the pair should be 4 instead is %v", resSecond)
	}
}
// Prev is the Church predecessor.
func TestPrev(t *testing.T) {
	res := Prev(three)(f)(x)
	if res != 2 {
		t.Errorf("Predecessor of three should be 2")
	}
}

func TestPrevPrev(t *testing.T) {
	res := Prev(Prev(three))(f)(x)
	if res != 1 {
		t.Errorf("Predecessor of Predecessor of three should be 1")
	}
}

// the predecessor of Zero is Zero: four applications to three saturate at 0
func TestPrev0(t *testing.T) {
	res := Prev(Prev(Prev(Prev(three))))(f)(x)
	if res != 0 {
		t.Errorf("Predecessor can not go below zero")
	}
}
// Tests for Church subtraction. BUG FIX: failure messages previously said
// "Suf" instead of "Sub", and the first message described "four - four"
// for a four - three computation.
func TestSub_4_3(t *testing.T) {
	res := Sub(four)(three)(f)(x)
	if res != 1 {
		t.Errorf("Sub of four - three should be 1 instead is %v", res)
	}
}

func TestSub_2_2(t *testing.T) {
	res := Sub(two)(two)(f)(x)
	if res != 0 {
		t.Errorf("Sub of two - two should be 0 instead is %v", res)
	}
}

func TestSub_4_0(t *testing.T) {
	res := Sub(four)(Zero)(f)(x)
	if res != 4 {
		t.Errorf("Sub of four - zero should be 4 instead is %v", res)
	}
}
|
package words
import (
"strings"
"regexp"
"github.com/deepdeeppink/tgbot/cfg"
"github.com/deepdeeppink/tgbot/state"
api "gopkg.in/telegram-bot-api.v4"
)
// Phrase is the parsed form of an incoming telegram message: the detected
// spell name (if any), the parameter state extracted from the words, and
// the original message.
type Phrase struct {
	SpellName string
	State state.State
	Message *api.Message
}
// wordSplitter matches runs of whitespace; compiled once at package scope
// so NewPhrase does not recompile the pattern for every incoming message.
var wordSplitter = regexp.MustCompile(`\s+`)

// NewPhrase parses a telegram message into a Phrase: the last recognized
// spell word becomes SpellName, and every recognized parameter word is
// recorded in State.
//
// NOTE(review): spells are kept only when Spells[word].Level >= userLevel;
// if this is a permission check the comparison direction looks inverted —
// behavior kept as-is, confirm with the config semantics.
func NewPhrase(m *api.Message, userLevel int) *Phrase {
	var spellname string

	config := cfg.GetConfig()
	s := make(state.State)

	line := normalize(m.Text)
	for _, word := range wordSplitter.Split(line, -1) {
		if config.IsSpell(word) {
			if config.Spells[word].Level >= userLevel {
				spellname = word
			}
		} else if name, found := config.GetParamName(word); found {
			s[name] = word
		}
	}

	return &Phrase{
		SpellName: spellname,
		State:     s,
		Message:   m,
	}
}
// Empty reports whether the phrase carries neither a spell nor any parameters.
func (p *Phrase) Empty() bool {
	return len(p.SpellName) == 0 && p.State.Empty()
}

// NoSpell reports whether no spell word was recognized in the message.
func (p *Phrase) NoSpell() bool {
	return len(p.SpellName) == 0
}
// normalize lowercases the line, pads it with spaces, rewrites telegram
// "/command" syntax into plain words and expands configured aliases.
// Space-padding makes whole-word alias replacement work at the line edges.
func normalize(line string) string {
	config := cfg.GetConfig()
	line = strings.ToLower(line)
	line = wraps(line)
	line = strings.Replace(line, "/", " ", -1) // turn telegram commands into ordinary words
	for alias, word := range config.Aliases {
		line = strings.Replace(line, wraps(alias), wraps(word), -1)
	}
	return line
}

// wraps surrounds s with single spaces so it can be matched/replaced as a whole word.
func wraps(s string) string {
	return " " + s + " "
}
|
/*
* Copyright 2018-present Open Networking Foundation
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package model
import (
"context"
"errors"
"fmt"
"reflect"
"strings"
"github.com/gogo/protobuf/proto"
"github.com/opencord/voltha-lib-go/v4/pkg/db"
"github.com/opencord/voltha-lib-go/v4/pkg/log"
)
// RequestTimestamp attribute used to store a timestamp in the context object
const RequestTimestamp contextKey = "request-timestamp"
type contextKey string
// Path holds the information for a specific location within the data model.
type Path struct {
	kvStore *db.Backend // backing kv store shared by all sub-paths and proxies
	path    string      // key prefix; SubPath/Proxy append segments plus a trailing "/"
}
// NewDBPath returns a Path rooted at the default (top-level) db location.
func NewDBPath(kvStore *db.Backend) *Path {
	root := Path{kvStore: kvStore}
	return &root
}
// SubPath returns a path which points to a more specific db location,
// one level below p.
func (p *Path) SubPath(path string) *Path {
	// strings.Trim strips slashes from both ends at once.
	trimmed := strings.Trim(path, "/")
	return &Path{
		kvStore: p.kvStore,
		path:    p.path + trimmed + "/",
	}
}
// Proxy contains all the information needed to reference a specific resource
// within the kv store; it shares Path's layout (store + key prefix).
type Proxy Path
// Proxy returns a new proxy which references the specified resource under p.
func (p *Path) Proxy(resource string) *Proxy {
	name := strings.Trim(resource, "/")
	proxy := Proxy{
		kvStore: p.kvStore,
		path:    p.path + name + "/",
	}
	return &proxy
}
// List will retrieve information from the data model at the proxy's path location, and write it to the target slice
// target must be a type of the form *[]<proto.Message Type> For example: *[]*voltha.Device
func (p *Proxy) List(ctx context.Context, target interface{}) error {
	logger.Debugw(ctx, "proxy-list", log.Fields{
		"path": p.path,
	})
	// verify type of target is *[]*<type>
	pointerType := reflect.TypeOf(target) // *[]*<type>
	if pointerType.Kind() != reflect.Ptr {
		return errors.New("target is not of type *[]*<type>")
	}
	sliceType := pointerType.Elem() // []*type
	if sliceType.Kind() != reflect.Slice {
		return errors.New("target is not of type *[]*<type>")
	}
	elemType := sliceType.Elem() // *type
	// BUG FIX: the check must be on the element type (not the slice type),
	// and must fire when the element does NOT implement proto.Message. The
	// previous condition rejected valid targets and accepted invalid ones.
	if !elemType.Implements(reflect.TypeOf((*proto.Message)(nil)).Elem()) {
		return errors.New("target slice does not contain elements of type proto.Message")
	}
	dataType := elemType.Elem() // type
	blobs, err := p.kvStore.List(ctx, p.path)
	if err != nil {
		return fmt.Errorf("failed to retrieve %s from kvstore: %s", p.path, err)
	}
	logger.Debugw(ctx, "parsing-data-blobs", log.Fields{
		"path": p.path,
		"size": len(blobs),
	})
	// Unmarshal every blob into a freshly allocated element of the slice.
	ret := reflect.MakeSlice(sliceType, len(blobs), len(blobs))
	i := 0
	for _, blob := range blobs {
		data := reflect.New(dataType)
		if err := proto.Unmarshal(blob.Value.([]byte), data.Interface().(proto.Message)); err != nil {
			return fmt.Errorf("failed to unmarshal %s: %s", blob.Key, err)
		}
		ret.Index(i).Set(data)
		i++
	}
	reflect.ValueOf(target).Elem().Set(ret)
	return nil
}
// Get will retrieve information from the data model at the proxy's path
// location, unmarshal it into target, and report whether the entry existed.
func (p *Proxy) Get(ctx context.Context, id string, target proto.Message) (bool, error) {
	completePath := p.path + id
	logger.Debugw(ctx, "proxy-get", log.Fields{
		"path": completePath,
	})
	blob, err := p.kvStore.Get(ctx, completePath)
	if err != nil {
		return false, fmt.Errorf("failed to retrieve %s from kvstore: %s", completePath, err)
	}
	if blob == nil {
		return false, nil // this blob does not exist
	}
	logger.Debugw(ctx, "parsing-data-blobs", log.Fields{
		"path": completePath,
	})
	if unmarshalErr := proto.Unmarshal(blob.Value.([]byte), target); unmarshalErr != nil {
		return false, fmt.Errorf("failed to unmarshal %s: %s", blob.Key, unmarshalErr)
	}
	return true, nil
}
// Set will add new or update existing entry at the proxy's path location.
func (p *Proxy) Set(ctx context.Context, id string, data proto.Message) error {
	completePath := p.path + id
	logger.Debugw(ctx, "proxy-add", log.Fields{
		"path": completePath,
	})
	blob, marshalErr := proto.Marshal(data)
	if marshalErr != nil {
		return fmt.Errorf("unable to save to kvStore, error marshalling: %s", marshalErr)
	}
	if putErr := p.kvStore.Put(ctx, completePath, blob); putErr != nil {
		return fmt.Errorf("unable to write to kvStore: %s", putErr)
	}
	return nil
}
// Remove will delete an entry at the proxy's path location.
func (p *Proxy) Remove(ctx context.Context, id string) error {
	completePath := p.path + id
	logger.Debugw(ctx, "proxy-remove", log.Fields{
		"path": completePath,
	})
	err := p.kvStore.Delete(ctx, completePath)
	if err != nil {
		return fmt.Errorf("unable to delete %s in kvStore: %s", completePath, err)
	}
	return nil
}
|
package filter
// Labeled is used to access labels of an object
type Labeled interface {
	GetLabels() map[string]string
}

// staticlabeled is a trivial Labeled backed by a fixed map.
type staticlabeled struct {
	labels map[string]string
}

// GetLabels returns the stored label map (nil for the zero value).
func (s staticlabeled) GetLabels() map[string]string {
	return s.labels
}

// GetLabeled returns a Labeled object that serves a copy of the passed map,
// so later mutation of the caller's map cannot leak into the result.
func GetLabeled(m map[string]string) Labeled {
	if m == nil {
		return staticlabeled{}
	}
	// Renamed from "copy": the previous name shadowed the builtin copy.
	// Pre-sizing avoids map growth during the fill loop.
	labels := make(map[string]string, len(m))
	for k, v := range m {
		labels[k] = v
	}
	return staticlabeled{labels: labels}
}
|
package main
import (
"context"
"fmt"
"strings"
"sync"
pb "github.com/semi-technologies/contextionary/contextionary"
core "github.com/semi-technologies/contextionary/contextionary/core"
schema "github.com/semi-technologies/contextionary/contextionary/schema"
"github.com/semi-technologies/contextionary/extensions"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// AddExtension stores a user-supplied extension (lowercased definition plus
// weight) for the given concept.
func (s *server) AddExtension(ctx context.Context, params *pb.ExtensionInput) (*pb.AddExtensionResult, error) {
	input := extensions.ExtensionInput{
		Definition: strings.ToLower(params.Definition),
		Weight:     params.Weight,
	}
	if err := s.extensionStorer.Put(ctx, params.Concept, input); err != nil {
		return nil, GrpcErrFromTyped(err)
	}
	return &pb.AddExtensionResult{}, nil
}
// Meta reports the server version and the number of items in the combined
// contextionary.
func (s *server) Meta(ctx context.Context, params *pb.MetaParams) (*pb.MetaOverview, error) {
	count := int64(s.combinedContextionary.GetNumberOfItems())
	return &pb.MetaOverview{Version: Version, WordCount: count}, nil
}
// IsWordPresent reports whether a word is known, either as a user extension
// or as an entry of the combined contextionary.
func (s *server) IsWordPresent(ctx context.Context, word *pb.Word) (*pb.WordPresent, error) {
	ext, err := s.extensionLookerUpper.Lookup(word.Word)
	if err != nil {
		return nil, GrpcErrFromTyped(err)
	}
	if ext != nil {
		return &pb.WordPresent{Present: true}, nil // TODO: add note about extension
	}
	idx := s.combinedContextionary.WordToItemIndex(word.Word)
	return &pb.WordPresent{Present: idx.IsPresent()}, nil
}
// IsWordStopword reports whether the detector classifies the word as a stopword.
func (s *server) IsWordStopword(ctx context.Context, word *pb.Word) (*pb.WordStopword, error) {
	return &pb.WordStopword{Stopword: s.stopwordDetector.IsStopWord(word.Word)}, nil
}
// SchemaSearch runs a contextionary-backed schema search, logging both the
// incoming params and the outcome.
func (s *server) SchemaSearch(ctx context.Context, params *pb.SchemaSearchParams) (*pb.SchemaSearchResults, error) {
	s.logger.WithField("params", params).Info()
	res, err := schema.New(s.combinedContextionary).SchemaSearch(params)
	s.logger.WithField("res", res).WithField("err", err).Info()
	return res, GrpcErrFromTyped(err)
}
// SafeGetSimilarWordsWithCertainty returns words similar to the given word
// at or above the requested certainty, wrapped as protobuf words.
func (s *server) SafeGetSimilarWordsWithCertainty(ctx context.Context, params *pb.SimilarWordsParams) (*pb.SimilarWordsResults, error) {
	similar := s.combinedContextionary.SafeGetSimilarWordsWithCertainty(params.Word, params.Certainty)
	result := &pb.SimilarWordsResults{Words: pbWordsFromStrings(similar)}
	return result, nil
}
// pbWordsFromStrings wraps each plain string into a pb.Word message.
func pbWordsFromStrings(input []string) []*pb.Word {
	out := make([]*pb.Word, 0, len(input))
	for _, w := range input {
		out = append(out, &pb.Word{Word: w})
	}
	return out
}
// MultiVectorForWord vectorizes a list of words in bounded concurrent
// batches (at most s.config.MaximumBatchSize goroutines in flight). The
// result slice is index-aligned with the input; a word without a vector
// yields an empty pb.Vector. If any word fails, the collected errors are
// joined and returned instead of a result.
func (s *server) MultiVectorForWord(ctx context.Context, params *pb.WordList) (*pb.VectorList, error) {
	lock := &sync.Mutex{} // guards errors and serializes writes into out
	out := make([]*pb.Vector, len(params.Words))
	var errors []error
	concurrent := s.config.MaximumBatchSize
	for i := 0; i < len(params.Words); i += concurrent {
		end := i + concurrent
		if end > len(params.Words) {
			end = len(params.Words)
		}
		batch := params.Words[i:end]
		var wg = &sync.WaitGroup{}
		for j, elem := range batch {
			wg.Add(1)
			// i is the batch offset and j the position inside the batch, so
			// i+j is the word's index in the original request.
			go func(i, j int, word string) {
				defer wg.Done()
				word = strings.ToLower(word)
				vec, err := s.vectorizer.VectorForWord(word)
				if err != nil {
					lock.Lock()
					errors = append(errors, err)
					lock.Unlock()
					return
				}
				if vec == nil {
					// Word not present: report an empty vector rather than nil.
					lock.Lock()
					out[i+j] = &pb.Vector{}
					lock.Unlock()
					return
				}
				lock.Lock()
				out[i+j] = vectorToProto(vec.vector)
				lock.Unlock()
			}(i, j, elem.Word)
		}
		// Wait for the whole batch before launching the next one.
		wg.Wait()
	}
	if len(errors) > 0 {
		return nil, joinErrors(errors)
	}
	return &pb.VectorList{
		Vectors: out,
	}, nil
}
// joinErrors flattens a list of errors into one error whose message lists
// each error with its position, comma-separated.
func joinErrors(in []error) error {
	var b strings.Builder
	for i, err := range in {
		if i > 0 {
			b.WriteString(", ")
		}
		fmt.Fprintf(&b, "at pos %d: %v", i, err)
	}
	return fmt.Errorf("%s", b.String())
}
// VectorForWord looks up the vector of a single word; a missing word is
// reported as a gRPC NotFound error.
func (s *server) VectorForWord(ctx context.Context, params *pb.Word) (*pb.Vector, error) {
	wo, err := s.vectorizer.VectorForWord(params.Word)
	if err != nil {
		return nil, GrpcErrFromTyped(err)
	}
	if wo == nil {
		msg := fmt.Sprintf("word %s is not in the contextionary", params.Word)
		return nil, status.Error(codes.NotFound, msg)
	}
	return vectorToProto(wo.vector), nil
}
// VectorForCorpi computes a single vector for one or more text corpora,
// honoring any per-word expression overrides.
func (s *server) VectorForCorpi(ctx context.Context, params *pb.Corpi) (*pb.Vector, error) {
	overrides := assembleOverrideMap(params.Overrides)
	vector, err := s.vectorizer.Corpi(params.Corpi, overrides)
	switch {
	case err == ErrNoUsableWords:
		return nil, status.Error(codes.InvalidArgument, err.Error())
	case err != nil:
		return nil, status.Error(codes.Internal, err.Error())
	}
	return vectorToProto(vector), nil
}
// assembleOverrideMap converts the repeated Override messages into a
// word->expression map; it returns nil when there are no overrides so the
// result can be passed straight through to the vectorizer.
func assembleOverrideMap(in []*pb.Override) map[string]string {
	// len(nil) is 0, so the explicit `in == nil ||` check was redundant.
	if len(in) == 0 {
		return nil
	}
	// Pre-size the map to avoid rehashing while filling.
	out := make(map[string]string, len(in))
	for _, or := range in {
		out[or.Word] = or.Expression
	}
	return out
}
// vectorForWords resolves each word to a vector (skipping stopwords and
// unknown words) and returns the centroid, or nil when nothing resolved.
func (s *server) vectorForWords(words []string) (*core.Vector, error) {
	var vectors []core.Vector
	for _, word := range words {
		vec, err := s.vectorForWord(word)
		if err != nil {
			return nil, GrpcErrFromTyped(err)
		}
		if vec == nil {
			continue // stopword or not present — contributes nothing
		}
		vectors = append(vectors, *vec)
	}
	if len(vectors) == 0 {
		return nil, nil
	}
	return core.ComputeCentroid(vectors)
}
// vectorForWord returns the vector of a single word, or nil (with no error)
// when the word is a stopword or absent from the contextionary.
func (s *server) vectorForWord(word string) (*core.Vector, error) {
	wi := s.combinedContextionary.WordToItemIndex(word)
	switch {
	case s.stopwordDetector.IsStopWord(word):
		return nil, nil
	case !wi.IsPresent():
		return nil, nil
	}
	res, err := s.combinedContextionary.GetVectorForItemIndex(wi)
	return res, GrpcErrFromTyped(err)
}
// vectorToProto converts a core vector (entries plus source inputs) into
// its protobuf representation.
func vectorToProto(in *core.Vector) *pb.Vector {
	values := in.ToArray()
	entries := make([]*pb.VectorEntry, 0, len(values))
	for _, v := range values {
		entries = append(entries, &pb.VectorEntry{Entry: v})
	}
	source := make([]*pb.InputElement, 0, len(in.Source))
	for _, src := range in.Source {
		source = append(source, &pb.InputElement{
			Concept:    src.Concept,
			Occurrence: src.Occurrence,
			Weight:     float32(src.Weight),
		})
	}
	return &pb.Vector{Entries: entries, Source: source}
}
// vectorFromProto rebuilds a core vector from its protobuf entries.
func vectorFromProto(in *pb.Vector) core.Vector {
	values := make([]float32, 0, len(in.Entries))
	for _, entry := range in.Entries {
		values = append(values, entry.Entry)
	}
	return core.NewVector(values)
}
// NearestWordsByVector returns the nearest words to the given vector
// together with their distances.
func (s *server) NearestWordsByVector(ctx context.Context, params *pb.VectorNNParams) (*pb.NearestWords, error) {
	indexes, distances, err := s.combinedContextionary.GetNnsByVector(vectorFromProto(params.Vector), int(params.N), int(params.K))
	if err != nil {
		return nil, GrpcErrFromTyped(err)
	}
	words, _, err := s.itemIndexesToWordsAndOccs(indexes)
	if err != nil {
		return nil, GrpcErrFromTyped(err)
	}
	return &pb.NearestWords{
		Distances: distances,
		Words:     words,
	}, nil
}
// MultiNearestWordsByVector resolves nearest-neighbor words for many input
// vectors at once, in bounded concurrent batches of
// s.config.MaximumBatchSize. Neighbors whose occurrence count falls below
// the configured percentile are filtered out; 5x the requested n is fetched
// per vector to compensate for that filtering.
func (s *server) MultiNearestWordsByVector(ctx context.Context, params *pb.VectorNNParamsList) (*pb.NearestWordsList, error) {
	lock := &sync.Mutex{} // guards the shared errors slice
	out := make([]*pb.NearestWords, len(params.Params))
	var errors []error
	concurrent := s.config.MaximumBatchSize
	requiredMinOcc := s.combinedContextionary.OccurrencePercentile(s.config.NeighborOccurrenceIgnorePercentile)
	for i := 0; i < len(params.Params); i += concurrent {
		end := i + concurrent
		if end > len(params.Params) {
			end = len(params.Params)
		}
		batch := params.Params[i:end]
		var wg = &sync.WaitGroup{}
		for j, elem := range batch {
			wg.Add(1)
			// i+j is this request's index in params.Params; each goroutine
			// writes only its own out[i+j] slot, so that write needs no lock.
			go func(i, j int, elem *pb.VectorNNParams) {
				defer wg.Done()
				ii, dist, err := s.combinedContextionary.GetNnsByVector(vectorFromProto(elem.Vector), int(elem.N)*5, int(elem.K)) // multiply by 5 to account for filtering
				if err != nil {
					lock.Lock()
					errors = append(errors, GrpcErrFromTyped(err))
					lock.Unlock()
					return
				}
				words, occs, err := s.itemIndexesToWordsAndOccs(ii)
				if err != nil {
					lock.Lock()
					errors = append(errors, GrpcErrFromTyped(err))
					lock.Unlock()
					return
				}
				filteredWords := make([]string, elem.N) // can never be longer than what the user asked for
				filteredI := 0
				// Keep only words occurring often enough; stop once the
				// requested count has been collected.
				for i := range words {
					if filteredI >= len(filteredWords) {
						break
					}
					if occs[i] >= requiredMinOcc {
						filteredWords[filteredI] = words[i]
						filteredI++
					}
				}
				vectors, err := s.itemIndexesToVectors(ii)
				if err != nil {
					lock.Lock()
					errors = append(errors, GrpcErrFromTyped(err))
					lock.Unlock()
					return
				}
				out[i+j] = &pb.NearestWords{
					Distances: dist,
					Words:     filteredWords[:filteredI],
					Vectors:   vectors,
				}
			}(i, j, elem)
		}
		wg.Wait()
	}
	if len(errors) > 0 {
		return nil, joinErrors(errors)
	}
	return &pb.NearestWordsList{
		Words: out,
	}, nil
}
// itemIndexesToWordsAndOccs resolves each item index to its word and its
// occurrence count; both result slices are index-aligned with the input.
func (s *server) itemIndexesToWordsAndOccs(in []core.ItemIndex) ([]string, []uint64, error) {
	words := make([]string, len(in))
	occs := make([]uint64, len(in))
	for i, itemIndex := range in {
		word, err := s.combinedContextionary.ItemIndexToWord(itemIndex)
		if err != nil {
			return nil, nil, GrpcErrFromTyped(err)
		}
		occ, err := s.combinedContextionary.ItemIndexToOccurrence(itemIndex)
		if err != nil {
			return nil, nil, GrpcErrFromTyped(err)
		}
		words[i], occs[i] = word, occ
	}
	return words, occs, nil
}
// itemIndexesToVectors resolves each item index to its vector, converted to
// the protobuf representation.
func (s *server) itemIndexesToVectors(in []core.ItemIndex) (*pb.VectorList, error) {
	vectors := make([]*pb.Vector, len(in))
	for i, itemIndex := range in {
		vec, err := s.combinedContextionary.GetVectorForItemIndex(itemIndex)
		if err != nil {
			return nil, GrpcErrFromTyped(err)
		}
		vectors[i] = vectorToProto(vec)
	}
	return &pb.VectorList{Vectors: vectors}, nil
}
|
package logging
import (
"encoding/json"
"net"
"os"
"sync"
"testing"
"time"
. "github.com/anthonybishopric/gotcha"
"github.com/sirupsen/logrus"
)
// TestLoggingCanMergeFields verifies that WithFields merges field maps,
// with the most recently supplied map winning on key conflicts.
func TestLoggingCanMergeFields(t *testing.T) {
	fields1 := logrus.Fields{
		"foo": "a",
		"bar": "b",
	}
	fields2 := logrus.Fields{
		"foo": "z",
		"baz": "q",
	}
	res := NewLogger(nil).WithFields(fields1).WithFields(fields2).Data
	Assert(t).AreEqual("z", res["foo"], "Should have taken new value's foo")
	Assert(t).AreEqual("b", res["bar"], "Should have taken old value's bar")
	// Fixed message: baz only exists in the newer fields2 map.
	Assert(t).AreEqual("q", res["baz"], "Should have taken new value's baz")
}
// TestSubLoggerMergesFields verifies that SubLogger layers its fields over
// the parent's without mutating the parent logger.
func TestSubLoggerMergesFields(t *testing.T) {
	logger := NewLogger(logrus.Fields{
		"foo": "a",
		"bar": "b",
	})
	sub := logger.SubLogger(logrus.Fields{
		"foo": "z",
		"baz": "q",
	})
	res := sub.NoFields().Data
	Assert(t).AreEqual("z", res["foo"], "Should have taken new value's foo")
	Assert(t).AreEqual("b", res["bar"], "Should have taken old value's bar")
	// Fixed message: baz is introduced by the sub-logger's (new) fields.
	Assert(t).AreEqual("q", res["baz"], "Should have taken new value's baz")
	Assert(t).AreEqual("a", logger.NoFields().Data["foo"], "Should not have overwritten original")
}
// TestLoggingMergeDoesNotModifyOriginalMap ensures that merging leaves the
// caller-supplied field maps untouched.
func TestLoggingMergeDoesNotModifyOriginalMap(t *testing.T) {
	original := logrus.Fields{
		"foo": "a",
	}
	overriding := logrus.Fields{
		"foo": "b",
	}
	logger := NewLogger(nil)
	logger.WithFields(original).WithFields(overriding) // merges fields
	logger.WithFields(original)
	Assert(t).AreEqual("a", original["foo"], "Should not have modified the original fields")
}
func TestWithFieldsCombinesBaseFieldsAndGiven(t *testing.T) {
logger := NewLogger(logrus.Fields{
"foo": "a",
})
entry := logger.WithFields(logrus.Fields{
"baz": "c",
})
Assert(t).AreEqual("a", entry.Data["foo"], "should have kept foo")
Assert(t).AreEqual("c", entry.Data["baz"], "should have merged baz")
}
// fakeStackError is a stub error reporting a fixed source location, used to
// test WithError's extraction of line/file/function fields.
type fakeStackError struct {
	lineNumber int    // stubbed source line
	filename   string // stubbed source file
	function   string // stubbed function name
}

// LineNumber returns the stubbed source line.
func (f *fakeStackError) LineNumber() int {
	return f.lineNumber
}

// Filename returns the stubbed source file name.
func (f *fakeStackError) Filename() string {
	return f.filename
}

// Function returns the stubbed function name.
func (f *fakeStackError) Function() string {
	return f.function
}

// Error implements the error interface with a fixed message.
func (f *fakeStackError) Error() string {
	return "error message"
}
// TestWithError checks that WithError copies the error's source location
// (line, file, function) into the entry fields, in every chaining order.
func TestWithError(t *testing.T) {
	err := &fakeStackError{
		lineNumber: 45,
		filename:   "foo.go",
		function:   "foo.New",
	}
	logger := NewLogger(logrus.Fields{})
	entry := logger.WithError(err)
	Assert(t).AreEqual(entry.Data["line_number"], 45, "bad line number")
	Assert(t).AreEqual(entry.Data["filename"], "foo.go", "bad filename")
	Assert(t).AreEqual(entry.Data["function"], "foo.New", "bad function")
	// The location fields must survive chaining in either order and the
	// combined WithErrorAndFields call.
	Assert(t).AreEqual(45, logger.WithField("a", 1).WithError(err).Data["line_number"], "no error when chained second")
	Assert(t).AreEqual(45, logger.WithError(err).WithField("a", 1).Data["line_number"], "no error when chained first")
	Assert(t).AreEqual(45, logger.WithErrorAndFields(err, logrus.Fields{"a": 1}).Data["line_number"], "no error with combined call")
}
// TestAddSocketHook verifies that a unix-socket logging hook delivers log
// messages: a listener goroutine forwards whatever arrives on the socket,
// and the test asserts the JSON "msg" field matches what was logged.
func TestAddSocketHook(t *testing.T) {
	logger := NewLogger(logrus.Fields{})
	socket_location := "test_socket.sock"
	// Remove any stale socket from a previous run before listening.
	os.Remove(socket_location)
	l, err := net.Listen("unix", socket_location)
	Assert(t).IsNil(err, "Got an unexpected error when trying to listen to socket")
	defer l.Close()
	// make goroutine to listen to socket and write what it gets to channel
	out := make(chan []byte)
	go func() {
		fd, err := l.Accept()
		Assert(t).IsNil(err, "Got an unexpected error when trying to call accept() on socket")
		buf := make([]byte, 1024)
		n, err := fd.Read(buf[:])
		Assert(t).IsNil(err, "Got an unexpected error trying to read from socket connection")
		out <- buf[:n]
	}()
	// wait for socket to be set up
	time.Sleep(1 * time.Millisecond)
	// Add socket hook and log something
	err = logger.AddHook(OutSocket, socket_location)
	Assert(t).IsNil(err, "Got an unexpected error when adding a socket logging hook")
	logger.WithFields(logrus.Fields{}).Error("some message")
	// Just to make extracting json fields easy
	type SocketHookLogMessage struct {
		Message string `json:"msg"`
	}
	// Either the listener delivers the message or we fail after a 5s timeout.
	select {
	case logMessage := <-out:
		messageStruct := SocketHookLogMessage{}
		err := json.Unmarshal(logMessage, &messageStruct)
		Assert(t).IsNil(err, "Got an unexpected error when unmarshaling the JSON log message")
		Assert(t).AreEqual(messageStruct.Message, "some message", "Did not get the expected log message on the socket")
		break
	case <-time.After(5 * time.Second):
		Assert(t).Fail("Didn't get a message through the socket during the timeout period")
	}
}
// TestAddUnrecognizedHook ensures AddHook rejects unknown output types.
func TestAddUnrecognizedHook(t *testing.T) {
	err := NewLogger(logrus.Fields{}).AddHook("unrecognized_type", "some_destination")
	Assert(t).IsNotNil(err, "Expected an error for adding an unrecognized hook output type")
}
// TestConcurrent checks that one logger can be used concurrently without panics (e.g.,
// from concurrent map accesses) or data races (if the race detector is enabled), which
// will fail the test instead of explicit t.Error() calls.
func TestConcurrent(t *testing.T) {
	logger := NewLogger(nil)
	const n = 100
	var wg sync.WaitGroup
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(v int) {
			defer wg.Done()
			logger.Infoln(v)
		}(i)
	}
	wg.Wait()
}
|
package cmd
import (
"github.com/go-openapi/loads"
)
// load parses the JSON OpenAPI spec at filename into a loads.Document,
// normalizing the document to nil on error.
func load(filename string) (*loads.Document, error) {
	doc, err := loads.JSONSpec(filename)
	if err != nil {
		return nil, err
	}
	return doc, nil
}
|
package tasks_test
import (
"testing"
"github.com/stretchr/testify/assert"
"go.ua-ecm.com/chaki/tasks"
)
// TestSanitize verifies that Sanitize drops db connection details and the
// DB portion of tasks while keeping the task entries themselves.
func TestSanitize(t *testing.T) {
	a := assert.New(t)
	dirty := &tasks.Config{
		DBConnections: map[string]tasks.DBConnection{},
		Tasks: map[string]tasks.Task{
			"foo": {
				Title: "Foo",
				DB:    &tasks.DBTask{Connection: "bar"},
			},
		},
	}
	clean := dirty.Sanitize()
	a.Nil(clean.DBConnections)
	a.NotNil(clean.Tasks)
	task, ok := clean.Tasks["foo"]
	a.True(ok)
	a.Nil(task.DB)
}
|
package collectors
import (
"bufio"
"os"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
// "log"
"encoding/json"
"errors"
"strconv"
"strings"
"time"
)
// IOSTATFILE is the procfs file providing per-device I/O statistics.
const IOSTATFILE = `/proc/diskstats`
// IOSTAT_SYSFSPATH is the sysfs block-device tree root.
const IOSTAT_SYSFSPATH = `/sys/block`
// IOstatCollectorConfig is the user-facing JSON configuration of the collector.
type IOstatCollectorConfig struct {
	// ExcludeMetrics lists metric names (e.g. "io_reads") to skip.
	ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
}
// IOstatCollectorEntry holds per-device state between reads.
type IOstatCollectorEntry struct {
	lastValues map[string]int64  // last raw counter value per metric, for delta computation
	tags       map[string]string // metric tags ("device", "type") emitted with each value
}
// IOstatCollector reports per-interval disk I/O deltas from /proc/diskstats.
type IOstatCollector struct {
	metricCollector
	matches map[string]int                  // metric name -> diskstats column index
	config  IOstatCollectorConfig           // parsed user configuration
	devices map[string]IOstatCollectorEntry // per-device state keyed by device name
}
// Init parses the collector configuration, selects which diskstats columns
// to report (minus any excluded metrics), and primes per-device state from
// a first read of /proc/diskstats.
func (m *IOstatCollector) Init(config json.RawMessage) error {
	var err error
	m.name = "IOstatCollector"
	m.parallel = true
	m.meta = map[string]string{"source": m.name, "group": "Disk"}
	m.setup()
	if len(config) > 0 {
		err = json.Unmarshal(config, &m.config)
		if err != nil {
			return err
		}
	}
	// Column indexes per
	// https://www.kernel.org/doc/html/latest/admin-guide/iostats.html
	matches := map[string]int{
		"io_reads":             3,
		"io_reads_merged":      4,
		"io_read_sectors":      5,
		"io_read_ms":           6,
		"io_writes":            7,
		"io_writes_merged":     8,
		"io_writes_sectors":    9,
		"io_writes_ms":         10,
		"io_ioops":             11,
		"io_ioops_ms":          12,
		"io_ioops_weighted_ms": 13,
		"io_discards":          14,
		"io_discards_merged":   15,
		"io_discards_sectors":  16,
		"io_discards_ms":       17,
		"io_flushes":           18,
		"io_flushes_ms":        19,
	}
	m.devices = make(map[string]IOstatCollectorEntry)
	m.matches = make(map[string]int)
	// Honor the exclude list from the configuration.
	for k, v := range matches {
		if _, skip := stringArrayContains(m.config.ExcludeMetrics, k); !skip {
			m.matches[k] = v
		}
	}
	if len(m.matches) == 0 {
		return errors.New("no metrics to collect")
	}
	// IOSTATFILE is an untyped string constant; the string() conversion was redundant.
	file, err := os.Open(IOSTATFILE)
	if err != nil {
		cclog.ComponentError(m.name, err.Error())
		return err
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := scanner.Text()
		linefields := strings.Fields(line)
		// BUG FIX: guard against blank or malformed lines; indexing
		// linefields[2] on such a line would panic (Read guards similarly).
		if len(linefields) < 3 {
			continue
		}
		device := linefields[2]
		if strings.Contains(device, "loop") {
			continue
		}
		// Start all counters at zero; Read emits per-interval deltas.
		values := make(map[string]int64)
		for m := range m.matches {
			values[m] = 0
		}
		m.devices[device] = IOstatCollectorEntry{
			tags: map[string]string{
				"device": linefields[2],
				"type":   "node",
			},
			lastValues: values,
		}
	}
	m.init = true
	return err
}
// Read re-reads /proc/diskstats and emits, for every known device and
// selected metric, the delta since the previous read.
func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
	if !m.init {
		return
	}
	file, err := os.Open(IOSTATFILE)
	if err != nil {
		cclog.ComponentError(m.name, err.Error())
		return
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := scanner.Text()
		linefields := strings.Fields(line)
		// BUG FIX: require at least the device column; the previous
		// len(line) == 0 check did not protect against short lines, and
		// linefields[2] would panic on them.
		if len(linefields) < 3 {
			continue
		}
		device := linefields[2]
		if strings.Contains(device, "loop") {
			continue
		}
		// Only devices discovered during Init are reported.
		if _, ok := m.devices[device]; !ok {
			continue
		}
		entry := m.devices[device]
		for name, idx := range m.matches {
			if idx < len(linefields) {
				x, err := strconv.ParseInt(linefields[idx], 0, 64)
				if err == nil {
					diff := x - entry.lastValues[name]
					y, err := lp.New(name, entry.tags, m.meta, map[string]interface{}{"value": int(diff)}, time.Now())
					if err == nil {
						output <- y
					}
				}
				entry.lastValues[name] = x
			}
		}
		m.devices[device] = entry
	}
}
// Close marks the collector as uninitialized so subsequent Read calls
// become no-ops.
func (m *IOstatCollector) Close() {
	m.init = false
}
|
package main
import (
"fmt"
merge_trees "github.com/NGunthor/go_test/pkg/leetcode/merge-trees"
)
// main builds two small binary trees, merges them via MergeTrees, and
// prints the result. (Reformatted to be gofmt-clean: spaces after commas.)
func main() {
	tree1 := merge_trees.NewBinaryTree(1, 2, 3, 4)
	tree2 := merge_trees.NewBinaryTree(1, 2, 3, 4)
	result := merge_trees.NewTrees(tree1.GetHead(), tree2.GetHead()).MergeTrees()
	fmt.Println(result)
}
|
package models
// Preference is a database row describing a client's notification
// preference, joined with kind and source descriptions.
type Preference struct {
	ClientID string `db:"client_id"`
	Count    int    `db:"count"`
	KindID   string `db:"kind_id"`
	// NOTE(review): Email carries no db tag unlike its siblings — whether
	// the mapper falls back to a column named "email" or skips the field
	// depends on the db library in use; confirm this is intentional.
	Email             bool
	KindDescription   string `db:"kind_description"`
	SourceDescription string `db:"source_description"`
}
|
package main
import (
"context"
"flag"
"fmt"
"google.golang.org/grpc"
"time"
pb "ziyun/opstring-service/pb"
r "ziyun/opstring-service/svc/client/grpc"
)
// main dials the opstring gRPC service on localhost:5040, performs a
// health check, and prints the result.
func main() {
	flag.Parse()
	ctx := context.Background()
	conn, err := grpc.Dial("localhost:5040", grpc.WithInsecure(), grpc.WithTimeout(1*time.Second))
	if err != nil {
		// BUG FIX: previously execution continued with a nil connection,
		// which would nil-deref in conn.Close() and the Health call below.
		fmt.Println("gRPC dial err:", err)
		return
	}
	defer conn.Close()
	strCli, _ := r.New(conn)
	result, err := strCli.Health(ctx, &pb.HealthRequest{})
	if err != nil {
		fmt.Println("Check error", err.Error())
		return
	}
	fmt.Println("result=", result)
}
|
package primitives
// Rectf is an axis-aligned rectangle with float32 coordinates, stored as
// its minimum and maximum corner points.
type Rectf struct {
	Min, Max [2]float32
}

// MakeRectf builds a Rectf from two arbitrary corner points, normalizing
// so that Min holds the smaller coordinate on each axis.
func MakeRectf(x1, y1, x2, y2 float32) Rectf {
	minX, maxX := x1, x2
	if minX > maxX {
		minX, maxX = maxX, minX
	}
	minY, maxY := y1, y2
	if minY > maxY {
		minY, maxY = maxY, minY
	}
	return Rectf{
		Min: [2]float32{minX, minY},
		Max: [2]float32{maxX, maxY},
	}
}
|
package happening
import (
"fmt"
"io/ioutil"
"log"
"os"
"os/signal"
"strconv"
"syscall"
)
// createPidFile writes the current process id to pidfile. If the file
// already exists and names a live process (checked via /proc, so this is
// Linux-specific), it refuses to overwrite and returns an error.
func createPidFile(pidfile string) error {
	// If an old pid file exists and its pid still has a /proc entry, bail out.
	if pidString, err := ioutil.ReadFile(pidfile); err == nil {
		pid, err := strconv.Atoi(string(pidString))
		if err == nil {
			if _, err := os.Stat(fmt.Sprintf("/proc/%d/", pid)); err == nil {
				return fmt.Errorf("pid file found, ensure happening is not running or delete %s", pidfile)
			}
		}
	}
	// NOTE(review): there is a window between the liveness check above and
	// the create below (TOCTOU); likely acceptable for a single-instance
	// daemon, but confirm.
	file, err := os.Create(pidfile)
	if err != nil {
		log.Println(err)
		return err
	}
	defer file.Close()
	_, err = fmt.Fprintf(file, "%d", os.Getpid())
	return err
}
// removePidFile deletes pidfile, logging (but otherwise ignoring) failures.
func removePidFile(pidfile string) {
	err := os.Remove(pidfile)
	if err != nil {
		log.Printf("Error removing %s: %s", pidfile, err)
	}
}
// Daemon writes the pid file, installs a signal handler that removes it on
// interrupt/termination, and then blocks serving requests.
func Daemon(config *Config) error {
	if err := createPidFile(config.Pidfile); err != nil {
		log.Fatal(err)
	}
	defer removePidFile(config.Pidfile)
	c := make(chan os.Signal, 1)
	// BUG FIX: os.Kill (SIGKILL) was removed from the list — per the
	// os/signal docs SIGKILL can never be caught, so registering it was a
	// no-op; behavior is unchanged.
	signal.Notify(c, os.Interrupt, os.Signal(syscall.SIGTERM))
	go func() {
		sig := <-c
		log.Printf("Received signal '%v', exiting\n", sig)
		// os.Exit skips deferred calls, so clean up the pid file explicitly.
		removePidFile(config.Pidfile)
		os.Exit(0)
	}()
	return ListenAndAcknowledge()
}
|
package main
import (
"fmt"
"sync"
)
//long lived struct that should always be its own goroutine, it is initialized as the entry point for new connections and, when
//pairing is successful launches a game controller as a goroutine and sets the player connection to send packets there instead.
type matchMakingModel struct {
	playerChan      chan *waitingPlayer // queue of players waiting to be placed in a lobby (buffered, cap 50)
	openSpaces      int                 // remaining seats in the lobby currently being filled
	openSpacesMut   sync.Mutex          // guards openSpaces
	disconnected    map[int]bool        // connection ids that quit while still in matchmaking
	disconnectedMut sync.Mutex          // guards disconnected
}
// waitingPlayer wraps a player connection while it waits in the
// matchmaking queue.
type waitingPlayer struct {
	connection *playerConnection
}
// startMatchmakingModel constructs a matchmaking model with an empty
// disconnect set and a buffered queue for up to 50 waiting players.
func startMatchmakingModel() matchMakingModel {
	fmt.Println("starting match making model")
	return matchMakingModel{
		playerChan:   make(chan *waitingPlayer, 50),
		disconnected: make(map[int]bool),
	}
}
// acceptPlayer enqueues a newly paired connection into the matchmaking
// pool and, if no lobby currently has open spaces, launches a new lobby
// and resets the counter to a full lobby's worth of seats.
func (mmm *matchMakingModel) acceptPlayer(connection *playerConnection) {
	waitingPlayer := connectionToWaitingPlayer(connection)
	fmt.Println("added player to matchmaking pool with connection number", connection.client.clientNum)
	// May block if the buffered queue (cap 50) is full.
	mmm.playerChan <- &waitingPlayer
	mmm.openSpacesMut.Lock()
	if mmm.openSpaces == 0 {
		go mmm.runNewLobby()
		mmm.openSpaces = NUMPLAYERS
	}
	mmm.openSpacesMut.Unlock()
}
// runNewLobby launches a lobby goroutine fed from this matchmaker.
// Note: callers already invoke this with `go`, so the inner `go` here adds
// an extra (harmless) level of goroutine indirection.
func (mmm *matchMakingModel) runNewLobby() {
	go startLobby(mmm)
}
//method may be called twice due to concurrency setup, should have no functional difference
//between one call and 2
// disconnectPlayer records that the connection with the given id quit
// while still in matchmaking; setting the flag twice is idempotent.
func (mmm *matchMakingModel) disconnectPlayer(id int) {
	if debug {
		fmt.Println("connection with id", id, "quit from matchmaking")
	}
	mmm.disconnectedMut.Lock()
	mmm.disconnected[id] = true
	mmm.disconnectedMut.Unlock()
}
// connectionIdHasDisconnected reports whether the given connection id quit
// while it was still in matchmaking.
func (mmm *matchMakingModel) connectionIdHasDisconnected(id int) bool {
	mmm.disconnectedMut.Lock()
	defer mmm.disconnectedMut.Unlock()
	_, found := mmm.disconnected[id]
	return found
}
// respondTo125 handles a type-125 packet (quit-from-matchmaking) by
// marking the sending connection as disconnected.
func (mmm *matchMakingModel) respondTo125(in *PacketIn) {
	// Fixed "recieved" spelling in the log output.
	fmt.Println("received 125 packet...")
	mmm.disconnectPlayer(in.connectionId)
}
// connectionToWaitingPlayer wraps a connection for the matchmaking queue.
func connectionToWaitingPlayer(connection *playerConnection) waitingPlayer {
	return waitingPlayer{connection: connection}
}
// decrementOpenSpaces consumes a single open lobby seat.
func (mmm *matchMakingModel) decrementOpenSpaces() {
	mmm.decrementOpenSpacesBy(1)
}
// decrementOpenSpacesBy reduces the number of open lobby seats by the
// given amount, under the mutex.
func (mmm *matchMakingModel) decrementOpenSpacesBy(by int) {
	mmm.openSpacesMut.Lock()
	defer mmm.openSpacesMut.Unlock()
	mmm.openSpaces -= by
}
|
package buntdb
import (
"github.com/b2wdigital/goignite/pkg/config"
"log"
)
const (
	// Path is the config key for the database location (":memory:" or a file
	// path). NOTE(review): the key literally ends in "parh" — almost
	// certainly a typo for "path" — but the literal is a runtime config key,
	// so renaming it would break existing configuration files; flagged only.
	Path = "transport.client.buntdb.parh"
	// SyncPolicy selects how often data is synced to disk (0/1/2).
	SyncPolicy = "transport.client.buntdb.syncpolicy"
	// AutoShrinkPercentage controls when the background shrink triggers.
	AutoShrinkPercentage = "transport.client.buntdb.autoshrink.percentage"
	// AutoShrinkMinSize is the minimum aof size before auto-shrink may run.
	AutoShrinkMinSize = "transport.client.buntdb.autoshrink.minsize"
	// AutoShrinkDisabled turns off background shrinking entirely.
	AutoShrinkDisabled = "transport.client.buntdb.autoshrink.disabled"
)
// init registers the buntdb configuration keys and their default values.
func init() {
	log.Println("getting configurations for buntdb")
	config.Add(Path, ":memory:", "open opens a database at the provided path")
	config.Add(SyncPolicy, 1, "adjusts how often the data is synced to disk (Never: 0, EverySecond: 1, Always: 2)")
	config.Add(AutoShrinkPercentage, 100, "is used by the background process to trigger a shrink of the aof file when the size of the file is larger than the percentage of the result of the previous shrunk file")
	// BUG FIX: 32*1024*102 (~3.3MB) was a typo for 32*1024*1024 (32MB),
	// which is buntdb's documented default for AutoShrinkMinSize.
	config.Add(AutoShrinkMinSize, 32*1024*1024, "defines the minimum size of the aof file before an automatic shrink can occur")
	config.Add(AutoShrinkDisabled, false, "turns off automatic background shrinking")
}
|
package tick
import (
"tokensky_bg_admin/conf"
"tokensky_bg_admin/models"
)
// tickTokenskyUserAddressUpSign guards TickTokenskyUserAddressUp against
// re-entry while a previous run is in progress. (Original comment,
// translated: "maintains user address maintenance".)
// NOTE(review): a plain bool check-and-set is not safe if ticks can fire
// concurrently — confirm the tick scheduler is single-threaded.
var tickTokenskyUserAddressUpSign bool = true
// TickTokenskyUserAddressUp tops up the pool of unused deposit addresses:
// for every configured coin type, if fewer than TBI_SERVER_ADDRESS_MAX
// unused addresses remain, it generates the difference. The package-level
// flag makes the run a no-op while a previous tick is still in progress.
func TickTokenskyUserAddressUp() error {
	if tickTokenskyUserAddressUpSign {
		tickTokenskyUserAddressUpSign = false
		// Restore the flag when this run finishes.
		defer func() { tickTokenskyUserAddressUpSign = true }()
		for _, coinType := range conf.TOKENSKY_ADDRESS_COIN_TYPES {
			total := models.TokenskyUserAddressGetNotUsedCont(coinType)
			if total < conf.TBI_SERVER_ADDRESS_MAX {
				models.TokenskyUserAddressAddNum(coinType, conf.TBI_SERVER_ADDRESS_MAX-total)
			}
		}
	}
	return nil
}
|
package system
import (
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
)
// Boot starts the system HTTP endpoints on the given address in a
// background goroutine; a failed Start terminates the process via
// e.Logger.Fatal.
func Boot(address string) {
	go func() {
		e := echo.New()
		e.Use(middleware.CORS())
		SetRoutes(e)
		//e.HidePort=true
		e.HideBanner = true
		e.Logger.Fatal(e.Start(address))
	}()
}
// SetRoutes registers the system introspection endpoints (ping, version,
// hash, health, metric, and mysql table info) on the echo instance.
func SetRoutes(e *echo.Echo) {
	e.GET("/system/ping", Ping)
	e.GET("/system/version", Version)
	e.GET("/system/hash", Hash)
	e.GET("/system/health", Health)
	e.GET("/system/metric", Metric)
	e.GET("/system/mysql/table", Table)
}
|
package main
import (
"fmt"
)
// pointer_test demonstrates basic pointer usage: taking an address,
// dereferencing, and printing a pointer's own address.
func pointer_test() {
	// to specify a pointer simply use & equivalent to a var
	x := 5
	a := &x
	// print the value of x itself
	fmt.Println(x)
	// print the value a points at (dereference)
	fmt.Println(*a)
	// NOTE: this prints the address of the pointer variable a itself
	// (a **int), not the address of x; use fmt.Println(a) for x's address.
	fmt.Println(&a)
}
// main runs the pointer demonstration.
func main() {
	pointer_test()
}
|
package proxy
import "github.com/sirupsen/logrus"
// DefaultLogger builds a logrus logger configured for JSON output.
func DefaultLogger() *logrus.Logger {
	logger := logrus.New()
	logger.Formatter = &logrus.JSONFormatter{}
	return logger
}
|
package main
import (
"flag"
"fmt"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/gholt/brimtime"
"github.com/gholt/store"
"github.com/pandemicsyn/ftls"
"github.com/pandemicsyn/oort/api"
"github.com/peterh/liner"
"github.com/spaolacci/murmur3"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// Command-line flags selecting which store to talk to and how TLS is set up.
var vdirect = flag.String("vdirect", "", "Use specific direct value store ip:port instead of default SRV replicated value store")
var gdirect = flag.String("gdirect", "", "Use specific direct group store ip:port instead of default SRV replicated group store")
var groupmode = flag.Bool("g", false, "whether we're talking to a groupstore instance")
var insecureSkipVerify = flag.Bool("insecure", false, "whether or not we should verify the cert")
var mutualtls = flag.Bool("mutualtls", false, "whether or not the server expects mutual tls auth")
var certfile = flag.String("cert", "client.crt", "cert file to use")
var keyfile = flag.String("key", "client.key", "key file to use")
var cafile = flag.String("ca", "ca.pem", "ca file to use")
var (
	// prompt is shown after a successful command; errprompt after a failure.
	prompt    = "> "
	errprompt = "┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻> "
	// historyf stores readline history between sessions.
	historyf = filepath.Join(os.TempDir(), ".oort-cli-history")
	// cmdnames feeds tab completion (see lineCompleter).
	cmdnames = []string{"write", "write-hash", "read", "read-hash", "read-group", "delete", "lookup", "lookup-group", "mode", "exit", "help"}
)
// lineCompleter returns the known command names that have the (case
// insensitively matched) current line as a prefix, for tab completion.
func lineCompleter(line string) (c []string) {
	lower := strings.ToLower(line)
	for _, name := range cmdnames {
		if strings.HasPrefix(name, lower) {
			c = append(c, name)
		}
	}
	return
}
// printHelp returns the usage text appropriate to the current mode
// (group store vs value store).
func (c *Client) printHelp() string {
	// fmt.Sprintf was a no-op here (the strings contain no format verbs);
	// returning the raw literals is equivalent (staticcheck S1039).
	if c.gmode {
		return `
Valid cmd's are:
write <groupkey> <subkey> <some string value>
write-hash <groupkey> <subkeyhasha> <subkeyhashb> <value>
read <groupkey> <subkey>
read-hash <groupkey> <subkeyhasha> <subkeyhashb>
read-group <groupkey> <subkey>
delete <groupkey> <subkey>
lookup <groupkey> <subkey>
lookup-group <key>
mode group|value
exit
help
`
	}
	return `
Valid cmd's are:
write <key> <some string value>
read <key>
delete <key>
lookup <key>
mode group|value
exit
help
`
}
// parseValueCmd parses one REPL line in value-store mode, executes it, and
// returns the text to display. REPL termination is signaled via a non-nil
// error ("Exiting..").
//
// Fix: the lazy-init guard used to check c.vconn, which is never assigned
// anywhere, so getValueClient ran on every single command. It now checks
// c.vstore — the field getValueClient actually sets — mirroring
// parseGroupCmd's c.gstore check. No-arg fmt.Sprintf calls on constant
// strings are also removed.
func (c *Client) parseValueCmd(line string) (string, error) {
	if c.vstore == nil {
		if err := c.getValueClient(); err != nil {
			return "", err
		}
	}
	split := strings.SplitN(line, " ", 2)
	cmd := split[0]
	if len(split) != 2 {
		// Bare commands: only "exit" is actionable; anything else
		// (including "help") prints usage.
		if cmd == "exit" {
			return "", fmt.Errorf("Exiting..")
		}
		return c.printHelp(), nil
	}
	args := split[1]
	switch cmd {
	case "write":
		sarg := strings.SplitN(args, " ", 2)
		if len(sarg) < 2 {
			return "write needs key and value: `write somekey some value thing here`", nil
		}
		keyA, keyB := murmur3.Sum128([]byte(sarg[0]))
		value := []byte(sarg[1])
		timestampMicro := brimtime.TimeToUnixMicro(time.Now())
		oldTimestampMicro, err := c.vstore.Write(context.Background(), keyA, keyB, timestampMicro, value)
		if err != nil {
			return "", err
		}
		return fmt.Sprintf("WRITE TIMESTAMPMICRO: %d\nPREVIOUS TIMESTAMPMICRO: %d", timestampMicro, oldTimestampMicro), nil
	case "read":
		keyA, keyB := murmur3.Sum128([]byte(args))
		timestampMicro, value, err := c.vstore.Read(context.Background(), keyA, keyB, nil)
		if store.IsNotFound(err) {
			return "not found", nil
		} else if err != nil {
			return "", err
		}
		return fmt.Sprintf("TIMESTAMPMICRO: %d\nVALUE: %s", timestampMicro, value), nil
	case "delete":
		keyA, keyB := murmur3.Sum128([]byte(args))
		timestampMicro := brimtime.TimeToUnixMicro(time.Now())
		oldTimestampMicro, err := c.vstore.Delete(context.Background(), keyA, keyB, timestampMicro)
		if err != nil {
			return "", err
		}
		return fmt.Sprintf("TIMESTAMPMICRO: %d\nOLD TIMESTAMPMICRO: %d", timestampMicro, oldTimestampMicro), nil
	case "lookup":
		keyA, keyB := murmur3.Sum128([]byte(args))
		timestampMicro, length, err := c.vstore.Lookup(context.Background(), keyA, keyB)
		if store.IsNotFound(err) {
			return "not found", nil
		} else if err != nil {
			return "", err
		}
		return fmt.Sprintf("TIMESTAMPMICRO: %d\nLENGTH: %d", timestampMicro, length), nil
	case "mode":
		if args == "value" {
			return "Already in value store mode", nil
		}
		if args == "group" {
			c.gmode = true
			return "Switched to group mode", nil
		}
		return "Valid modes are: value | group", nil
	case "exit":
		log.Println("exit")
		return "", fmt.Errorf("Exiting..")
	}
	return c.printHelp(), nil
}
// parseGroupCmd parses one REPL line in group-store mode, executes it, and
// returns the text to display. REPL termination is signaled via a non-nil
// error ("Exiting..").
//
// Fix: read-group and lookup-group used fmt.Sprintf(strings.Join(...)) —
// passing dynamic store data as a format string, so any '%' in a stored
// value would be mangled (e.g. "%!x(MISSING)"); they now return the joined
// string directly. No-arg fmt.Sprintf calls on constants are removed.
func (c *Client) parseGroupCmd(line string) (string, error) {
	if c.gstore == nil {
		if err := c.getGroupClient(); err != nil {
			return "", err
		}
	}
	split := strings.SplitN(line, " ", 2)
	cmd := split[0]
	if len(split) != 2 {
		// Bare commands: only "exit" is actionable; anything else
		// (including "help") prints usage.
		if cmd == "exit" {
			return "", fmt.Errorf("Exiting..")
		}
		return c.printHelp(), nil
	}
	args := split[1]
	switch cmd {
	case "write":
		sarg := strings.SplitN(args, " ", 3)
		if len(sarg) < 3 {
			return "write needs groupkey, key, value: `write groupkey somekey some value thing here`", nil
		}
		keyA, keyB := murmur3.Sum128([]byte(sarg[0]))
		childKeyA, childKeyB := murmur3.Sum128([]byte(sarg[1]))
		timestampMicro := brimtime.TimeToUnixMicro(time.Now())
		oldTimestampMicro, err := c.gstore.Write(context.Background(), keyA, keyB, childKeyA, childKeyB, timestampMicro, []byte(sarg[2]))
		if err != nil {
			return "", err
		}
		return fmt.Sprintf("WRITE TIMESTAMPMICRO: %d\nPREVIOUS TIMESTAMPMICRO: %d", timestampMicro, oldTimestampMicro), nil
	case "write-hash":
		sarg := strings.SplitN(args, " ", 4)
		if len(sarg) < 4 {
			return "write-hash needs groupkey, keyahash keybhash, value: `write-hash groupkey 19191919 19191919 some value thing here`", nil
		}
		keyA, keyB := murmur3.Sum128([]byte(sarg[0]))
		childKeyA, err := strconv.ParseUint(sarg[1], 10, 64)
		if err != nil {
			return "", err
		}
		childKeyB, err := strconv.ParseUint(sarg[2], 10, 64)
		if err != nil {
			return "", err
		}
		timestampMicro := brimtime.TimeToUnixMicro(time.Now())
		oldTimestampMicro, err := c.gstore.Write(context.Background(), keyA, keyB, childKeyA, childKeyB, timestampMicro, []byte(sarg[3]))
		if err != nil {
			return "", err
		}
		return fmt.Sprintf("WRITE TIMESTAMPMICRO: %d\n PREVIOUS TIMESTAMPMICRO: %d", timestampMicro, oldTimestampMicro), nil
	case "read":
		sarg := strings.SplitN(args, " ", 2)
		if len(sarg) < 2 {
			return "read needs groupkey, subkey", nil
		}
		keyA, keyB := murmur3.Sum128([]byte(sarg[0]))
		childKeyA, childKeyB := murmur3.Sum128([]byte(sarg[1]))
		timestampMicro, value, err := c.gstore.Read(context.Background(), keyA, keyB, childKeyA, childKeyB, nil)
		if store.IsNotFound(err) {
			return "not found", nil
		} else if err != nil {
			return "", err
		}
		return fmt.Sprintf("TIMESTAMPMICRO: %d\nVALUE: %s", timestampMicro, value), nil
	case "read-hash":
		sarg := strings.SplitN(args, " ", 3)
		if len(sarg) < 3 {
			return "read needs groupkey, subkeyA, subkeyB", nil
		}
		keyA, keyB := murmur3.Sum128([]byte(sarg[0]))
		childKeyA, err := strconv.ParseUint(sarg[1], 10, 64)
		if err != nil {
			return "", err
		}
		childKeyB, err := strconv.ParseUint(sarg[2], 10, 64)
		if err != nil {
			return "", err
		}
		timestampMicro, value, err := c.gstore.Read(context.Background(), keyA, keyB, childKeyA, childKeyB, nil)
		if store.IsNotFound(err) {
			return "not found", nil
		} else if err != nil {
			return "", err
		}
		return fmt.Sprintf("TIMESTAMPMICRO: %d\nVALUE: %s", timestampMicro, value), nil
	case "read-group":
		KeyA, KeyB := murmur3.Sum128([]byte(args))
		items, err := c.gstore.ReadGroup(context.Background(), KeyA, KeyB)
		if store.IsNotFound(err) {
			return "not found", nil
		} else if err != nil {
			return "", err
		}
		keys := make([]string, len(items))
		for k, v := range items {
			keys[k] = fmt.Sprintf("TIMESTAMPMICRO: %d [ %d | %d] VALUE: %s", v.TimestampMicro, v.ChildKeyA, v.ChildKeyB, v.Value)
		}
		// Join directly; stored values may contain '%' and must not be
		// interpreted as a format string.
		return strings.Join(keys, "\n"), nil
	case "delete":
		sarg := strings.SplitN(args, " ", 2)
		if len(sarg) < 2 {
			return "delete needs groupkey, subkey", nil
		}
		keyA, keyB := murmur3.Sum128([]byte(sarg[0]))
		childKeyA, childKeyB := murmur3.Sum128([]byte(sarg[1]))
		timestampMicro := brimtime.TimeToUnixMicro(time.Now())
		oldTimestampMicro, err := c.gstore.Delete(context.Background(), keyA, keyB, childKeyA, childKeyB, timestampMicro)
		if err != nil {
			return "", err
		}
		return fmt.Sprintf("TIMESTAMPMICRO: %d\nOLD TIMESTAMPMICRO: %d", timestampMicro, oldTimestampMicro), nil
	case "lookup":
		sarg := strings.SplitN(args, " ", 2)
		if len(sarg) < 2 {
			return "lookup needs groupkey, subkey", nil
		}
		keyA, keyB := murmur3.Sum128([]byte(sarg[0]))
		childKeyA, childKeyB := murmur3.Sum128([]byte(sarg[1]))
		timestampMicro, length, err := c.gstore.Lookup(context.Background(), keyA, keyB, childKeyA, childKeyB)
		if store.IsNotFound(err) {
			return "not found", nil
		} else if err != nil {
			return "", err
		}
		return fmt.Sprintf("TIMESTAMPMICRO: %d\nLENGTH: %d", timestampMicro, length), nil
	case "lookup-group":
		keyA, keyB := murmur3.Sum128([]byte(args))
		items, err := c.gstore.LookupGroup(context.Background(), keyA, keyB)
		if store.IsNotFound(err) {
			return "not found", nil
		} else if err != nil {
			return "", err
		}
		keys := make([]string, len(items))
		for k, v := range items {
			keys[k] = fmt.Sprintf("TIMESTAMPMICRO: %d [ %d | %d ]", v.TimestampMicro, v.ChildKeyA, v.ChildKeyB)
		}
		return strings.Join(keys, "\n"), nil
	case "mode":
		if args == "value" {
			c.gmode = false
			return "Switched to value mode", nil
		}
		if args == "group" {
			return "Already in group store mode", nil
		}
		return "Valid modes are: value | group", nil
	case "exit":
		log.Println("exit")
		return "", fmt.Errorf("Exiting..")
	}
	return c.printHelp(), nil
}
// getValueClient initializes c.vstore: a direct single-node client when
// -vdirect was given, otherwise an SRV-discovered replicated client.
func (c *Client) getValueClient() error {
	var err error
	var opts []grpc.DialOption
	// TLS options used for connections to the store nodes themselves.
	tlsConfig := &ftls.Config{
		MutualTLS: *mutualtls,
		InsecureSkipVerify: *insecureSkipVerify,
		CertFile: *certfile,
		KeyFile: *keyfile,
		CAFile: *cafile,
	}
	// Separate, non-mutual TLS options for talking to the ring server.
	rOpts, err := ftls.NewGRPCClientDialOpt(&ftls.Config{
		MutualTLS: false,
		CAFile: *cafile,
	})
	if err != nil {
		return err
	}
	if c.vdirect != "" {
		// NOTE(review): unlike the replicated path below, the direct path
		// never calls Startup — confirm NewValueStore connects eagerly.
		c.vstore, err = api.NewValueStore(c.vdirect, 10, tlsConfig, opts...)
	} else {
		c.vstore = api.NewReplValueStore(&api.ReplValueStoreConfig{
			AddressIndex: 2,
			StoreFTLSConfig: tlsConfig,
			GRPCOpts: opts,
			RingServerGRPCOpts: []grpc.DialOption{rOpts},
		})
		if err := c.vstore.Startup(context.Background()); err != nil {
			return fmt.Errorf("Unable to start value store client: %s", err)
		}
	}
	// err can only be non-nil here from the direct NewValueStore call;
	// the replicated branch reports its failures above.
	if err != nil {
		return fmt.Errorf("Unable to setup value store: %s", err.Error())
	}
	return nil
}
// getGroupClient initializes c.gstore: a direct single-node client when
// -gdirect was given, otherwise an SRV-discovered replicated client.
// Mirrors getValueClient.
func (c *Client) getGroupClient() error {
	var err error
	var opts []grpc.DialOption
	// TLS options used for connections to the store nodes themselves.
	tlsConfig := &ftls.Config{
		MutualTLS: *mutualtls,
		InsecureSkipVerify: *insecureSkipVerify,
		CertFile: *certfile,
		KeyFile: *keyfile,
		CAFile: *cafile,
	}
	// Separate, non-mutual TLS options for talking to the ring server.
	rOpts, err := ftls.NewGRPCClientDialOpt(&ftls.Config{
		MutualTLS: false,
		CAFile: *cafile,
	})
	if err != nil {
		return err
	}
	if c.gdirect != "" {
		// NOTE(review): the direct path never calls Startup, unlike the
		// replicated path below — confirm NewGroupStore connects eagerly.
		c.gstore, err = api.NewGroupStore(c.gdirect, 10, tlsConfig, opts...)
	} else {
		c.gstore = api.NewReplGroupStore(&api.ReplGroupStoreConfig{
			AddressIndex: 2,
			StoreFTLSConfig: tlsConfig,
			GRPCOpts: opts,
			RingServerGRPCOpts: []grpc.DialOption{rOpts},
		})
		if err := c.gstore.Startup(context.Background()); err != nil {
			return fmt.Errorf("Unable to start group store client: %s", err)
		}
	}
	// err can only be non-nil here from the direct NewGroupStore call.
	if err != nil {
		return fmt.Errorf("Unable to setup group store: %s", err.Error())
	}
	return nil
}
// Client holds the REPL's connection state: the flag-derived direct
// addresses, the current mode, and the lazily created store clients.
type Client struct {
	vdirect string // optional ip:port for a direct value store connection
	gdirect string // optional ip:port for a direct group store connection
	gmode bool // true = group-store mode, false = value-store mode
	vconn *grpc.ClientConn // NOTE(review): never assigned in this file — parseValueCmd's nil check on it always passes; confirm intent
	vstore store.ValueStore // set by getValueClient on first value command
	gstore store.GroupStore // set by getGroupClient on first group command
}
// main runs the oort-cli REPL: it reads lines with liner, dispatches them to
// the group- or value-store parser depending on the current mode, and
// persists command history after every accepted line.
func main() {
	flag.Parse()
	line := liner.NewLiner()
	defer line.Close()
	line.SetCtrlCAborts(true)
	line.SetCompleter(lineCompleter)
	// Best effort: preload history from any previous session.
	if f, err := os.Open(historyf); err == nil {
		line.ReadHistory(f)
		f.Close()
	}
	client := Client{
		vdirect: *vdirect,
		gdirect: *gdirect,
		gmode: *groupmode,
	}
	sm := "value"
	if client.gmode {
		sm = "group"
	}
	fmt.Printf("\u2728 oort-cli - in %s mode \u2728\n\n", sm)
	for {
		if cmd, err := line.Prompt(prompt); err == nil {
			// gmode can be flipped by the "mode" command, so it is
			// re-checked on every iteration. A non-nil parse error
			// (including the "Exiting.." sentinel) terminates the REPL.
			if client.gmode {
				res, err := client.parseGroupCmd(cmd)
				if err != nil {
					fmt.Println(err.Error())
					return
				}
				fmt.Println(res)
				line.AppendHistory(cmd)
			} else {
				res, err := client.parseValueCmd(cmd)
				if err != nil {
					fmt.Println(err.Error())
					return
				}
				fmt.Println(res)
				line.AppendHistory(cmd)
			}
		} else if err == liner.ErrPromptAborted {
			log.Print("Aborted")
			return
		} else {
			log.Print("Error reading line: ", err)
			return
		}
		// Rewrite the whole history file after each command so history
		// survives an abrupt exit.
		if f, err := os.Create(historyf); err != nil {
			log.Print("Error writing history file: ", err)
		} else {
			line.WriteHistory(f)
			f.Close()
		}
	}
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//84. Largest Rectangle in Histogram
//Given n non-negative integers representing the histogram's bar height where the width of each bar is 1, find the area of largest rectangle in the histogram.
//Above is a histogram where width of each bar is 1, given height = [2,1,5,6,2,3].
//
//The largest rectangle is shown in the shaded area, which has area = 10 unit.
// Example:
//Input: [2,1,5,6,2,3]
//Output: 10
//func largestRectangleArea(heights []int) int {
//}
// Time Is Money
|
package http
import (
"context"
"io/ioutil"
"log"
"net/http"
"strconv"
"strings"
"time"
"code-cadets-2021/homework_2/task_01/internal/domain/models"
)
// axilisFeedURL2 is the HTTP endpoint polled for the second Axilis odds feed.
const axilisFeedURL2 = "http://18.193.121.232/axilis-feed-2"
// AxilisOfferFeedSecond periodically polls the second Axilis feed endpoint
// and publishes parsed odds on an unbuffered updates channel.
type AxilisOfferFeedSecond struct {
	httpClient http.Client // client used for the periodic GET requests
	updates chan models.Odd // closed when Start returns
}
// NewAxilisOfferFeedSecond constructs a feed around the given HTTP client
// with a fresh, unbuffered updates channel.
func NewAxilisOfferFeedSecond(
	httpClient http.Client,
) *AxilisOfferFeedSecond {
	feed := AxilisOfferFeedSecond{
		httpClient: httpClient,
		updates:    make(chan models.Odd),
	}
	return &feed
}
// Start polls the feed endpoint every ~3 seconds until ctx is canceled,
// forwarding each response to processResponseSecond. The updates channel is
// closed on return, signaling consumers that the feed has shut down.
// Start always returns nil.
func (a *AxilisOfferFeedSecond) Start(ctx context.Context) error {
	defer close(a.updates)
	defer log.Printf("shutting down %s", a)
	for {
		select {
		case <-ctx.Done():
			return nil
		// NOTE(review): time.After allocates a new timer each iteration and
		// the 3s delay restarts after processing completes (not a fixed
		// rate) — a time.Ticker would differ slightly; confirm intent.
		case <-time.After(time.Second * 3):
			response, err := a.httpClient.Get(axilisFeedURL2)
			if err != nil {
				log.Println("axilis offer feed 2, http get", err)
				continue
			}
			a.processResponseSecond(ctx, response)
		}
	}
}
// processResponseSecond parses one feed response — one "id,name,match,
// coefficient" record per line — and forwards each parsed odd on a.updates,
// honoring ctx cancellation. The response body is always closed.
//
// Fix: rows with fewer than 4 fields (notably the empty final element that
// strings.Split produces when the payload ends in a newline) previously
// caused an index-out-of-range panic on offerFields[3]; they are now skipped.
func (a *AxilisOfferFeedSecond) processResponseSecond(ctx context.Context, response *http.Response) {
	defer response.Body.Close()
	content, err := ioutil.ReadAll(response.Body)
	if err != nil {
		log.Println("axilis offer feed 2, read all", err)
		return
	}
	offerData := string(content)
	rows := strings.Split(offerData, "\n")
	for _, row := range rows {
		offerFields := strings.Split(row, ",")
		// Skip short/empty rows instead of panicking on the index below.
		if len(offerFields) < 4 {
			continue
		}
		coefficient, err := strconv.ParseFloat(offerFields[3], 64)
		if err != nil {
			log.Println("coefficient parsing", err)
			return
		}
		odd := models.Odd{
			Id: offerFields[0],
			Name: offerFields[1],
			Match: offerFields[2],
			Coefficient: coefficient,
			Timestamp: time.Now(),
		}
		select {
		case <-ctx.Done():
			return
		case a.updates <- odd:
		}
	}
}
// String implements fmt.Stringer; used by Start's shutdown log line.
func (a *AxilisOfferFeedSecond) String() string {
	return "axillis offer feed TWO"
}
// GetUpdates returns the channel on which parsed odds are published; it is
// closed when Start returns.
func (a *AxilisOfferFeedSecond) GetUpdates() chan models.Odd {
	return a.updates
}
|
package main
import (
"fmt"
"net/http"
"os"
"path"
"path/filepath"
)
// AppHandlerFunc defines a function which acts as a context-aware HTTP handler.
// In case of error, it returns the error which is handled separately
// (see internalErrorHandlerFunc).
type AppHandlerFunc func(http.ResponseWriter, *http.Request, Context) error
// AppHandler is the application's http.Handler.
type AppHandler struct {
	// Request paths are considered relative to RootPath.
	RootPath string
}
// ServeHTTP builds a per-request Context from the query parameters
// ("r" toggles recursion, "fmt" selects the response format) and delegates
// to the base handler, routing any error to the internal-error handler.
func (h *AppHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	c := Context{
		App: h,
		Recursive: r.URL.Query().Get("r") != "",
		Format: parseFmt(r.URL.Query().Get("fmt")),
	}
	err := fissBaseHandlerFunc(rw, r, c)
	if err != nil {
		internalErrorHandlerFunc(rw, r, c, err)
	}
}
// fissBaseHandlerFunc resolves the request URL to an absolute filesystem
// path under RootPath, stats it, records both on the context, and hands off
// to the filesystem router.
func fissBaseHandlerFunc(
	rw http.ResponseWriter, r *http.Request, c Context) error {
	// Fill in filesystem details. path.Clean on the URL path collapses
	// any ".." segments before joining under RootPath — presumably this is
	// the traversal guard; verify Clean runs before Join in any refactor.
	p := filepath.Join(
		c.App.RootPath,
		path.Clean(r.URL.Path))
	p, err := filepath.Abs(p)
	if err != nil {
		return err
	}
	// Stat errors (e.g. not found) propagate to the error handler.
	fileInfo, err := os.Stat(p)
	if err != nil {
		return err
	}
	c.FSPath = p
	c.FSInfo = fileInfo
	// Choose next handler to invoke.
	return routeFSHandlerFunc(rw, r, c)
}
// parseFmt maps the "fmt" query-parameter value to a ResponseFormat.
// Unrecognized (or empty) values fall back to FmtAuto.
func parseFmt(f string) ResponseFormat {
	known := map[string]ResponseFormat{
		"json": FmtJSON,
		"csv":  FmtCSV,
		"html": FmtHTML,
		"dl":   FmtForceDownload,
	}
	if rf, ok := known[f]; ok {
		return rf
	}
	return FmtAuto
}
// routeFSHandlerFunc logs the request and dispatches to the directory or
// file handler based on the stat result already stored on the context.
func routeFSHandlerFunc(
	rw http.ResponseWriter, r *http.Request, c Context) error {
	// Simple request log: client address and resolved filesystem path.
	fmt.Printf("req: %v %v\n", r.RemoteAddr, c.FSPath)
	if c.FSInfo.IsDir() {
		return directoryHandlerFunc(rw, r, c)
	}
	return fileHandlerFunc(rw, r, c)
}
// directoryHandlerFunc serves a directory: as an archive for forced
// downloads, as CSV via the recursive handler, and otherwise as a listing.
func directoryHandlerFunc(
	rw http.ResponseWriter, r *http.Request, c Context) error {
	switch c.Format {
	case FmtForceDownload:
		return archiveHandlerFunc(rw, r, c)
	case FmtCSV:
		return recursiveDirectoryHandlerFunc(rw, r, c)
	}
	// FIXME: Implement JSON
	// FIXME: Separate CSV from Recursive
	// -- there should be non-recursive CSV and archives
	// -- there should be recursive HTML and JSON
	return directoryListHandlerFunc(rw, r, c)
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//65. Valid Number
//Validate if a given string is numeric.
//Some examples:
//"0" => true
//" 0.1 " => true
//"abc" => false
//"1 a" => false
//"2e10" => true
//Note: It is intended for the problem statement to be ambiguous. You should gather all requirements up front before implementing one.
//Update (2015-02-10):
//The signature of the C++ function had been updated. If you still see your function signature accepts a const char * argument, please click the reload button to reset your code definition.
//func isNumber(s string) bool {
//}
// Time Is Money
|
package models
import (
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/sqlite"
)
// Init opens the SQLite database, runs schema auto-migration for every
// model type, and releases the connection.
//
// Fix: the connection is now closed via defer so it is released even if
// AutoMigrate panics (previously Close only ran on the happy path).
func Init() {
	db, err := gorm.Open("sqlite3", "test.db")
	if err != nil {
		panic("Failed to connect database")
	}
	defer db.Close()
	db.AutoMigrate(&Board{}, &Thread{}, &Issue{}, &Error{}, &HistoryPoint{})
}
// DB opens and returns a fresh SQLite connection to test.db.
// A new connection is created on every call; the caller is responsible
// for closing it.
func DB() *gorm.DB {
	db, err := gorm.Open("sqlite3", "test.db")
	if err != nil {
		panic("Failed to connect database")
	}
	return db
}
// Board describes an imageboard board being tracked.
type Board struct {
	gorm.Model
	Addr string // board address/slug
	Name string // human-readable board name
	Bumplimit int // post count after which threads stop bumping
}
// Thread describes a tracked thread and its numbering/rendering settings.
type Thread struct {
	gorm.Model
	Numbering bool // whether threads carry sequence numbers
	Roman bool // render the sequence number as a Roman numeral
	NumberingSymbol string
	CurrentNum int // current sequence number
	CurrentThread int // id of the currently active thread
	Title string
	HeaderLink bool // whether the header is rendered as a link
	Header string
	Image string
	LastPerekot int
	LastPosts int
	Board Board // owning board (gorm association via BoardID)
	BoardID uint
	Active bool
}
// Issue is a reported issue shown while it remains active.
type Issue struct {
	gorm.Model
	Title string
	Text string
	Link string
	Active bool
}
// Error is a recorded error message shown while it remains active.
type Error struct {
	gorm.Model
	Text string
	Active bool
}
// HistoryPoint is a single free-form history log entry.
type HistoryPoint struct {
	gorm.Model
	Text string
}
|
package health
import (
"github.com/go-openapi/runtime/middleware"
"github.com/movieManagement/gen/restapi/operations"
"github.com/movieManagement/gen/restapi/operations/health"
"github.com/movieManagement/swagger"
)
// Configure registers the health-check endpoint on api, delegating to
// service.HealthCheck and translating its result into a 200 (payload) or
// 400 (wrapped error) response.
func Configure(api *operations.MovieServiceAPI, service Service) {
	api.HealthGetHealthHandler = health.GetHealthHandlerFunc(func(params health.GetHealthParams) middleware.Responder {
		result, err := service.HealthCheck(params.HTTPRequest.Context(), &params)
		if err != nil {
			return health.NewGetHealthBadRequest().WithPayload(swagger.ErrorResponse(err))
		}
		return health.NewGetHealthOK().WithPayload(result)
	})
}
|
package main
import "fmt"
// foo returns the fixed triple (1, 2, 3), demonstrating multiple return
// values.
func foo() (int, int, int) {
	return 1, 2, 3
}
// main discards the first two return values of foo and prints the third.
func main() {
	_, _, third := foo()
	fmt.Println(third)
}
|
package addressbus
import (
"testing"
"github.com/KaiWalter/go6502/pkg/memory"
)
// TestOnlyRam registers a single RAM component on the bus and verifies that
// every in-range cell reads back zero and an out-of-range read fails.
func TestOnlyRam(t *testing.T) {
	const memSize = 0x200
	// arrange
	bus := &MultiBus{}
	// NOTE(review): InitBus gets 0x100 while the RAM spans 0x200 addresses
	// (TestWithRom passes 0x200) — confirm what InitBus's argument means.
	bus.InitBus(0x100)
	ram := memory.Memory{AddressOffset: 0, AddressSpace: make([]byte, memSize)}
	bus.RegisterComponent(0, len(ram.AddressSpace)-1, &ram)
	// act & assert: freshly made RAM must read back all zeroes.
	for addr := uint16(0); addr < memSize; addr++ {
		data, err := bus.Read(addr)
		if err != nil {
			t.Errorf("reading memory failed %v", err)
			break
		}
		if data != 0 {
			t.Errorf("failed - value actual %x / 0 expected", data)
		}
	}
	// Reading beyond the registered range must produce an error.
	_, err := bus.Read(memSize + 1)
	if err == nil {
		t.Errorf("expected AddressingError")
	}
}
// TestWithRom registers RAM at 0x000-0x1FF and ROM (loaded from a fixture
// file) from 0x200 onward, then checks that RAM reads zero, ROM reads the
// fixture value (1), and out-of-range reads fail.
func TestWithRom(t *testing.T) {
	// arrange
	bus := &MultiBus{}
	bus.InitBus(0x200)
	ram := memory.Memory{AddressOffset: 0, AddressSpace: make([]byte, 0x200)}
	bus.RegisterComponent(0, len(ram.AddressSpace)-1, &ram)
	// dummy01.rom is expected to contain all 0x01 bytes (asserted below).
	romContent, err := retrieveROM("dummy01.rom")
	if err != nil {
		t.Errorf("could not retrieve ROM: %v", err)
	}
	rom := memory.Memory{AddressOffset: 0x200, AddressSpace: romContent[:]}
	bus.RegisterComponent(0x200, 0x200+len(romContent)-1, &rom)
	// act & assert: walk the combined RAM+ROM range.
	for addr := uint16(0); addr < 0x200+uint16(len(romContent)); addr++ {
		data, err := bus.Read(addr)
		if err != nil {
			t.Errorf("reading memory failed %v", err)
			break
		}
		if addr < 0x200 && data != 0 {
			t.Errorf("failed at address %x - value actual %x / 0 expected", addr, data)
		}
		if addr >= 0x200 && data != 1 {
			t.Errorf("failed at address %x - value actual %x / 1 expected", addr, data)
		}
	}
	// Reading beyond the last registered component must produce an error.
	_, err = bus.Read(0x200 + uint16(len(romContent)) + 1)
	if err == nil {
		t.Errorf("expected AddressingError")
	}
}
|
package smtp
import (
"crypto/tls"
"fmt"
"mime"
"net"
"net/smtp"
"strings"
)
// Stubbed out for tests: these indirections let unit tests replace the
// network, TLS, and SMTP constructors without touching the real wire.
var (
	netDialTimeout = net.DialTimeout
	tlsClient = tls.Client
	smtpNewClient = func(conn net.Conn, host string) (smtpClient, error) {
		return smtp.NewClient(conn, host)
	}
	// MIME header encoders: "B" (base64) and "Q" (quoted-printable).
	bEncoding = mimeEncoder{mime.BEncoding}
	qEncoding = mimeEncoder{mime.QEncoding}
	lastIndexByte = strings.LastIndexByte
)
// addr joins a host and port into the "host:port" form used for dialing.
func addr(host string, port int) string {
	return fmt.Sprint(host, ":", port)
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !codes
// +build !codes
package test_driver
import (
"bytes"
"encoding/hex"
"fmt"
"math"
"strconv"
"strings"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/parser/charset"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/types"
)
// Kind constants: the tag values stored in Datum.k identifying which field
// (i, b, or x) holds the payload and how to interpret it.
const (
	KindNull byte = 0
	KindInt64 byte = 1
	KindUint64 byte = 2
	KindFloat32 byte = 3
	KindFloat64 byte = 4
	KindString byte = 5
	KindBytes byte = 6
	KindBinaryLiteral byte = 7 // Used for BIT / HEX literals.
	KindMysqlDecimal byte = 8
	KindMysqlDuration byte = 9
	KindMysqlEnum byte = 10
	KindMysqlBit byte = 11 // Used for BIT table column values.
	KindMysqlSet byte = 12
	KindMysqlTime byte = 13
	KindInterface byte = 14
	KindMinNotNull byte = 15
	KindMaxValue byte = 16
	KindRaw byte = 17
	KindMysqlJSON byte = 18
)
// Datum is a data box holds different kind of data.
// It has better performance and is easier to use than `interface{}`.
// The active field is selected by k; numeric kinds (including floats, via
// their IEEE-754 bit patterns) share i, string-like kinds share b, and
// everything else is boxed into x.
type Datum struct {
	k byte // datum kind.
	i int64 // i can hold int64 uint64 float64 values.
	b []byte // b can hold string or []byte values.
	x interface{} // x hold all other types.
}
// Kind gets the kind of the datum (one of the Kind* constants).
func (d *Datum) Kind() byte {
	return d.k
}
// GetInt64 gets int64 value.
func (d *Datum) GetInt64() int64 {
	return d.i
}
// SetInt64 sets int64 value.
func (d *Datum) SetInt64(i int64) {
	d.k = KindInt64
	d.i = i
}
// GetUint64 gets uint64 value (the stored int64 bits reinterpreted).
func (d *Datum) GetUint64() uint64 {
	return uint64(d.i)
}
// SetUint64 sets uint64 value.
func (d *Datum) SetUint64(i uint64) {
	d.k = KindUint64
	d.i = int64(i)
}
// GetFloat64 gets float64 value (decoded from the IEEE-754 bits kept in i).
func (d *Datum) GetFloat64() float64 {
	return math.Float64frombits(uint64(d.i))
}
// SetFloat64 sets float64 value, storing its IEEE-754 bit pattern in i.
func (d *Datum) SetFloat64(f float64) {
	d.k = KindFloat64
	d.i = int64(math.Float64bits(f))
}
// GetFloat32 gets float32 value. Note: float32 datums are stored as
// float64 bits and narrowed on read.
func (d *Datum) GetFloat32() float32 {
	return float32(math.Float64frombits(uint64(d.i)))
}
// SetFloat32 sets float32 value (widened to float64 bits for storage).
func (d *Datum) SetFloat32(f float32) {
	d.k = KindFloat32
	d.i = int64(math.Float64bits(float64(f)))
}
// GetString gets string value (copies the underlying bytes).
func (d *Datum) GetString() string {
	return string(d.b)
}
// SetString sets string value (copies s into the byte buffer).
func (d *Datum) SetString(s string) {
	d.k = KindString
	d.b = []byte(s)
}
// GetBytes gets bytes value. The returned slice aliases the datum's buffer.
func (d *Datum) GetBytes() []byte {
	return d.b
}
// SetBytes sets bytes value to datum. b is stored without copying, so
// later mutation of b is visible through the datum.
func (d *Datum) SetBytes(b []byte) {
	d.k = KindBytes
	d.b = b
}
// SetBytesAsString sets bytes value to datum as string type (no copy).
func (d *Datum) SetBytesAsString(b []byte) {
	d.k = KindString
	d.b = b
}
// GetInterface gets interface value.
func (d *Datum) GetInterface() interface{} {
	return d.x
}
// SetInterface sets interface to datum.
func (d *Datum) SetInterface(x interface{}) {
	d.k = KindInterface
	d.x = x
}
// SetNull sets datum to nil. Only k and x are reset; i and b are left as-is.
func (d *Datum) SetNull() {
	d.k = KindNull
	d.x = nil
}
// GetBinaryLiteral gets Bit value (aliases the datum's byte buffer).
func (d *Datum) GetBinaryLiteral() BinaryLiteral {
	return d.b
}
// SetBinaryLiteral sets Bit value (no copy).
func (d *Datum) SetBinaryLiteral(b BinaryLiteral) {
	d.k = KindBinaryLiteral
	d.b = b
}
// GetMysqlDecimal gets decimal value; panics if the boxed value is not a
// *MyDecimal.
func (d *Datum) GetMysqlDecimal() *MyDecimal {
	return d.x.(*MyDecimal)
}
// SetMysqlDecimal sets decimal value.
func (d *Datum) SetMysqlDecimal(b *MyDecimal) {
	d.k = KindMysqlDecimal
	d.x = b
}
// GetValue gets the value of the datum of any kind, dispatching on the kind
// tag to the matching typed getter. Kinds without a dedicated getter
// (time, duration, enum, set, JSON, ...) fall back to the boxed interface.
func (d *Datum) GetValue() interface{} {
	switch d.k {
	case KindInt64:
		return d.GetInt64()
	case KindUint64:
		return d.GetUint64()
	case KindFloat32:
		return d.GetFloat32()
	case KindFloat64:
		return d.GetFloat64()
	case KindString:
		return d.GetString()
	case KindBytes:
		return d.GetBytes()
	case KindMysqlDecimal:
		return d.GetMysqlDecimal()
	// Both literal kinds share the byte-buffer representation.
	case KindBinaryLiteral, KindMysqlBit:
		return d.GetBinaryLiteral()
	default:
		return d.GetInterface()
	}
}
// SetValue sets any kind of value, choosing the datum kind from the dynamic
// type. Booleans are stored as int64 1/0; Bit and Hex literals are
// normalized to KindBinaryLiteral; unrecognized types are boxed as
// KindInterface.
func (d *Datum) SetValue(val interface{}) {
	switch x := val.(type) {
	case nil:
		d.SetNull()
	case bool:
		// bool maps onto int64 1/0 (MySQL boolean semantics).
		if x {
			d.SetInt64(1)
		} else {
			d.SetInt64(0)
		}
	case int:
		d.SetInt64(int64(x))
	case int64:
		d.SetInt64(x)
	case uint64:
		d.SetUint64(x)
	case float32:
		d.SetFloat32(x)
	case float64:
		d.SetFloat64(x)
	case string:
		d.SetString(x)
	case []byte:
		d.SetBytes(x)
	case *MyDecimal:
		d.SetMysqlDecimal(x)
	case BinaryLiteral:
		d.SetBinaryLiteral(x)
	case BitLiteral: // Store as BinaryLiteral for Bit and Hex literals
		d.SetBinaryLiteral(BinaryLiteral(x))
	case HexLiteral:
		d.SetBinaryLiteral(BinaryLiteral(x))
	default:
		d.SetInterface(x)
	}
}
// NewDatum creates a new Datum from an interface{} value. A []interface{}
// argument is first converted element-wise into a []Datum.
func NewDatum(in interface{}) Datum {
	var d Datum
	if elems, ok := in.([]interface{}); ok {
		d.SetValue(MakeDatums(elems...))
	} else {
		d.SetValue(in)
	}
	return d
}

// NewBytesDatum creates a bytes-kind Datum from a byte slice.
func NewBytesDatum(b []byte) Datum {
	var d Datum
	d.SetBytes(b)
	return d
}

// NewStringDatum creates a string-kind Datum from a string.
func NewStringDatum(s string) Datum {
	var d Datum
	d.SetString(s)
	return d
}

// MakeDatums converts each argument into a Datum.
func MakeDatums(args ...interface{}) []Datum {
	datums := make([]Datum, 0, len(args))
	for _, v := range args {
		datums = append(datums, NewDatum(v))
	}
	return datums
}
// BinaryLiteral is the internal type for storing bit / hex literal type.
type BinaryLiteral []byte

// BitLiteral is the bit literal type.
type BitLiteral BinaryLiteral

// HexLiteral is the hex literal type.
type HexLiteral BinaryLiteral

// ZeroBinaryLiteral is a BinaryLiteral literal with zero value.
var ZeroBinaryLiteral = BinaryLiteral{}

// String implements fmt.Stringer, rendering the literal as 0x-prefixed
// lowercase hex. An empty literal renders as the empty string.
func (b BinaryLiteral) String() string {
	if len(b) > 0 {
		return "0x" + hex.EncodeToString(b)
	}
	return ""
}

// ToString returns the raw bytes reinterpreted as a string.
func (b BinaryLiteral) ToString() string {
	return string(b)
}

// ToBitLiteralString renders the literal in b'01010...' form (8 bits per
// byte). When trimLeadingZero is true, leading zero bits are stripped but
// at least one digit is always kept.
func (b BinaryLiteral) ToBitLiteralString(trimLeadingZero bool) string {
	if len(b) == 0 {
		return "b''"
	}
	var sb strings.Builder
	for _, octet := range b {
		fmt.Fprintf(&sb, "%08b", octet)
	}
	bits := sb.String()
	if trimLeadingZero {
		bits = strings.TrimLeft(bits, "0")
		if bits == "" {
			bits = "0"
		}
	}
	return "b'" + bits + "'"
}
// ParseBitStr parses bit string.
// The string format can be b'val', B'val' or 0bval, val must be 0 or 1.
// See https://dev.mysql.com/doc/refman/5.7/en/bit-value-literals.html
func ParseBitStr(s string) (BinaryLiteral, error) {
	if len(s) == 0 {
		return nil, errors.Errorf("invalid empty string for parsing bit type")
	}
	if s[0] == 'b' || s[0] == 'B' {
		// format is b'val' or B'val'
		s = strings.Trim(s[1:], "'")
	} else if strings.HasPrefix(s, "0b") {
		s = s[2:]
	} else {
		// here means format is not b'val', B'val' or 0bval.
		return nil, errors.Errorf("invalid bit type format %s", s)
	}
	if len(s) == 0 {
		return ZeroBinaryLiteral, nil
	}
	// Round the digit count up to a multiple of 8, then left-pad with up to
	// seven '0' digits so each byte below gets exactly 8 binary digits.
	alignedLength := (len(s) + 7) &^ 7
	s = ("00000000" + s)[len(s)+8-alignedLength:] // Pad with zero (slice from `-alignedLength`)
	byteLength := len(s) >> 3
	buf := make([]byte, byteLength)
	// Decode 8 binary digits at a time, most significant byte first.
	for i := 0; i < byteLength; i++ {
		strPosition := i << 3
		val, err := strconv.ParseUint(s[strPosition:strPosition+8], 2, 8)
		if err != nil {
			return nil, errors.Trace(err)
		}
		buf[i] = byte(val)
	}
	return buf, nil
}
// NewBitLiteral parses bit string as BitLiteral type, delegating the format
// rules to ParseBitStr.
func NewBitLiteral(s string) (BitLiteral, error) {
	b, err := ParseBitStr(s)
	if err != nil {
		return BitLiteral{}, err
	}
	return BitLiteral(b), nil
}
// ToString implement ast.BinaryLiteral interface: the raw bytes as a string.
func (b BitLiteral) ToString() string {
	return BinaryLiteral(b).ToString()
}
// ParseHexStr parses hexadecimal string literal.
// See https://dev.mysql.com/doc/refman/5.7/en/hexadecimal-literals.html
// Note the asymmetry: quoted x'val' forms must have an even digit count
// (error otherwise), while 0x forms are zero-padded to even length below.
func ParseHexStr(s string) (BinaryLiteral, error) {
	if len(s) == 0 {
		return nil, errors.Errorf("invalid empty string for parsing hexadecimal literal")
	}
	if s[0] == 'x' || s[0] == 'X' {
		// format is x'val' or X'val'
		s = strings.Trim(s[1:], "'")
		if len(s)%2 != 0 {
			return nil, errors.Errorf("invalid hexadecimal format, must even numbers, but %d", len(s))
		}
	} else if strings.HasPrefix(s, "0x") {
		s = s[2:]
	} else {
		// here means format is not x'val', X'val' or 0xval.
		return nil, errors.Errorf("invalid hexadecimal format %s", s)
	}
	if len(s) == 0 {
		return ZeroBinaryLiteral, nil
	}
	// Only reachable for the 0x form: pad odd digit counts with a leading 0.
	if len(s)%2 != 0 {
		s = "0" + s
	}
	buf, err := hex.DecodeString(s)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return buf, nil
}
// NewHexLiteral parses hexadecimal string as HexLiteral type, delegating
// the format rules to ParseHexStr.
func NewHexLiteral(s string) (HexLiteral, error) {
	h, err := ParseHexStr(s)
	if err != nil {
		return HexLiteral{}, err
	}
	return HexLiteral(h), nil
}
// ToString implement ast.BinaryLiteral interface: the raw bytes as a string.
func (b HexLiteral) ToString() string {
	return BinaryLiteral(b).ToString()
}
// SetBinChsClnFlag sets charset, collation as 'binary' and adds binaryFlag to FieldType.
func SetBinChsClnFlag(ft *types.FieldType) {
	ft.SetCharset(charset.CharsetBin)
	ft.SetCollate(charset.CollationBin)
	ft.AddFlag(mysql.BinaryFlag)
}
// DefaultFsp is the default digit of fractional seconds part.
// MySQL use 0 as the default Fsp.
const DefaultFsp = int8(0)
// DefaultTypeForValue returns the default FieldType for the value, filling
// tp's type, flen, decimal, charset/collation, and flags in place. The
// charset/collate arguments apply only to string values; every other kind
// is forced to the binary charset via SetBinChsClnFlag.
// NOTE(review): the `charset` parameter shadows the imported charset
// package inside this function; only SetBinChsClnFlag (a separate function)
// still reaches the package.
func DefaultTypeForValue(value interface{}, tp *types.FieldType, charset string, collate string) {
	switch x := value.(type) {
	case nil:
		tp.SetType(mysql.TypeNull)
		tp.SetFlen(0)
		tp.SetDecimal(0)
		SetBinChsClnFlag(tp)
	case bool:
		// Booleans are 1-digit longlongs tagged with IsBooleanFlag.
		tp.SetType(mysql.TypeLonglong)
		tp.SetFlen(1)
		tp.SetDecimal(0)
		tp.AddFlag(mysql.IsBooleanFlag)
		SetBinChsClnFlag(tp)
	case int:
		// flen is the decimal-string width of the value.
		tp.SetType(mysql.TypeLonglong)
		tp.SetFlen(StrLenOfInt64Fast(int64(x)))
		tp.SetDecimal(0)
		SetBinChsClnFlag(tp)
	case int64:
		tp.SetType(mysql.TypeLonglong)
		tp.SetFlen(StrLenOfInt64Fast(x))
		tp.SetDecimal(0)
		SetBinChsClnFlag(tp)
	case uint64:
		tp.SetType(mysql.TypeLonglong)
		tp.AddFlag(mysql.UnsignedFlag)
		tp.SetFlen(StrLenOfUint64Fast(x))
		tp.SetDecimal(0)
		SetBinChsClnFlag(tp)
	case string:
		// The only case that uses the caller-supplied charset/collation.
		tp.SetType(mysql.TypeVarString)
		// TODO: tp.flen should be len(x) * 3 (max bytes length of CharsetUTF8)
		tp.SetFlen(len(x))
		tp.SetDecimal(types.UnspecifiedLength)
		tp.SetCharset(charset)
		tp.SetCollate(collate)
	case float32:
		// flen is the width of the shortest round-trip decimal rendering.
		tp.SetType(mysql.TypeFloat)
		s := strconv.FormatFloat(float64(x), 'f', -1, 32)
		tp.SetFlen(len(s))
		tp.SetDecimal(types.UnspecifiedLength)
		SetBinChsClnFlag(tp)
	case float64:
		tp.SetType(mysql.TypeDouble)
		s := strconv.FormatFloat(x, 'f', -1, 64)
		tp.SetFlen(len(s))
		tp.SetDecimal(types.UnspecifiedLength)
		SetBinChsClnFlag(tp)
	case []byte:
		tp.SetType(mysql.TypeBlob)
		tp.SetFlen(len(x))
		tp.SetDecimal(types.UnspecifiedLength)
		SetBinChsClnFlag(tp)
	case BitLiteral:
		tp.SetType(mysql.TypeVarString)
		tp.SetFlen(len(x))
		tp.SetDecimal(0)
		SetBinChsClnFlag(tp)
	case HexLiteral:
		tp.SetType(mysql.TypeVarString)
		tp.SetFlen(len(x) * 3)
		tp.SetDecimal(0)
		tp.AddFlag(mysql.UnsignedFlag)
		SetBinChsClnFlag(tp)
	case BinaryLiteral:
		// flen is in bits for BIT columns; the binary flag set by
		// SetBinChsClnFlag is deliberately removed again here.
		tp.SetType(mysql.TypeBit)
		tp.SetFlen(len(x) * 8)
		tp.SetDecimal(0)
		SetBinChsClnFlag(tp)
		tp.DelFlag(mysql.BinaryFlag)
		tp.AddFlag(mysql.UnsignedFlag)
	case *MyDecimal:
		tp.SetType(mysql.TypeNewDecimal)
		tp.SetFlen(len(x.ToString()))
		tp.SetDecimal(int(x.digitsFrac))
		SetBinChsClnFlag(tp)
	default:
		tp.SetType(mysql.TypeUnspecified)
		tp.SetFlen(types.UnspecifiedLength)
		tp.SetDecimal(types.UnspecifiedLength)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.