text stringlengths 11 4.05M |
|---|
package leetcode
import "testing"
// TestArrayPairSum checks the LeetCode 561 sample: pairing (1,2) and (3,4)
// maximizes the sum of the pair minimums, 1+3 = 4.
func TestArrayPairSum(t *testing.T) {
	if got := arrayPairSum([]int{1, 4, 3, 2}); got != 4 {
		t.Fatal()
	}
}
|
package bigdigits
import (
"fmt"
"log"
)
// BigDigits prints stringOfDigits as large ASCII-art digits, one output row
// at a time, using the package-level bigDigits glyph table.
// It terminates the program via log.Fatal on any non-digit character.
func BigDigits(stringOfDigits string) {
	for row := range bigDigits[0] {
		line := ""
		for column := range stringOfDigits {
			c := stringOfDigits[column]
			// Compare against the character range directly: the old form
			// subtracted '0' first, and `0 <= digit` was always true for
			// the unsigned byte result (go vet flags the comparison).
			if c >= '0' && c <= '9' {
				line += bigDigits[c-'0'][row] + " "
			} else {
				log.Fatal("invalid whole number")
			}
		}
		fmt.Println(line)
	}
}
|
package main
// ------------------------------- 基于快速排序的矩阵对角线排序 (原地排序) -------------------------------
// diagonalSort sorts every top-left→bottom-right diagonal of mat in
// ascending order, in place, using quicksort, and returns mat.
func diagonalSort(mat [][]int) [][]int {
	if len(mat) == 0 {
		return [][]int{}
	}
	rows, cols := getRowsAndCols(mat)
	// sortFrom quicksorts the whole diagonal that starts at (x, y).
	sortFrom := func(x, y int) {
		endX, endY := getRightXAndRightY(mat, x, y)
		quickSortForMatrixDiagonal(mat, x, y, endX, endY)
	}
	// Diagonals starting in the first column ...
	for r := 0; r < rows; r++ {
		sortFrom(r, 0)
	}
	// ... and diagonals starting in the first row (re-sorting the main
	// diagonal once more is harmless).
	for c := 0; c < cols; c++ {
		sortFrom(0, c)
	}
	return mat
}
// getRightXAndRightY returns the last cell of the diagonal that begins at
// (leftX, leftY): move down-right until the bottom or right edge is hit,
// whichever comes first.
func getRightXAndRightY(mat [][]int, leftX, leftY int) (int, int) {
	rows, cols := getRowsAndCols(mat)
	steps := cols - leftY - 1
	if down := rows - leftX - 1; down < steps {
		steps = down
	}
	return leftX + steps, leftY + steps
}
// quickSortForMatrixDiagonal quicksorts the diagonal segment from
// (leftX, leftY) to (rightX, rightY), inclusive.
func quickSortForMatrixDiagonal(mat [][]int, leftX, leftY, rightX, rightY int) {
	// Stop on out-of-bounds endpoints or an empty/inverted range.
	switch {
	case !isValidCoordinate(mat, leftX, leftY), !isValidCoordinate(mat, rightX, rightY):
		return
	case leftX > rightX || leftY > rightY:
		return
	}
	pX, pY := partitionForMatrixDiagonal(mat, leftX, leftY, rightX, rightY)
	quickSortForMatrixDiagonal(mat, leftX, leftY, pX-1, pY-1)
	quickSortForMatrixDiagonal(mat, pX+1, pY+1, rightX, rightY)
}
// partitionForMatrixDiagonal performs a quicksort partition on the diagonal
// segment from (leftX, leftY) to (rightX, rightY), inclusive, using the
// first element as the pivot ("guard"). X and Y always move in lock-step
// because the segment lies on a down-right diagonal.
// It returns the pivot's final position.
func partitionForMatrixDiagonal(mat [][]int, leftX, leftY, rightX, rightY int) (int, int) {
	guardX, guardY := leftX, leftY
	guardNum := mat[guardX][guardY]
	for leftX <= rightX && leftY <= rightY {
		// Advance the left cursor past elements <= pivot.
		for leftX <= rightX && leftY <= rightY && mat[leftX][leftY] <= guardNum {
			leftX++
			leftY++
		}
		// Retreat the right cursor past elements >= pivot.
		for leftX <= rightX && leftY <= rightY && mat[rightX][rightY] >= guardNum {
			rightX--
			rightY--
		}
		// Cursors still ordered: swap the out-of-place pair.
		if leftX <= rightX && leftY <= rightY {
			mat[rightX][rightY], mat[leftX][leftY] = mat[leftX][leftY], mat[rightX][rightY]
		}
	}
	// Drop the pivot into its final slot (where the right cursor stopped).
	mat[rightX][rightY], mat[guardX][guardY] = mat[guardX][guardY], mat[rightX][rightY]
	return rightX, rightY
}
// isValidCoordinate reports whether (x, y) lies inside mat.
func isValidCoordinate(mat [][]int, x, y int) bool {
	rows, cols := getRowsAndCols(mat)
	return 0 <= x && x < rows && 0 <= y && y < cols
}
// getRowsAndCols returns the dimensions of mat.
// An empty matrix yields (0, 0) instead of panicking on mat[0].
func getRowsAndCols(mat [][]int) (int, int) {
	if len(mat) == 0 {
		return 0, 0
	}
	return len(mat), len(mat[0])
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if a <= b {
		return a
	}
	return b
}
/*
题目链接: https://leetcode-cn.com/problems/sort-the-matrix-diagonally/
总结:
1. 这题我基于快速排序,对矩阵对角线进行排序。
*/
|
package mdware
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"runtime"
"strings"
"sync/atomic"
"time"
"internal/ctxutil"
"internal/gzippool"
"internal/logger"
)
/*
Head(uuid) > Auth(auth) > Gzip() > Body() > Exec(API) > Resp() > Fail() > Tail(log)
Head(Auth(Gzip(Body(Exec(Resp(Fail(Tail)))))))
--------------------------------------------->
start:
1 -----> Head()
| 2 -----> Auth() err >-----------------+
| | 3 -----> Gzip() err >-------------|
| | | 4 -----> Body() err >---------|
| | | | 5 -----> Exec() err >-----|
| | | | | [specific API function] + ----> Helper(ctx, dbx, log), funcX(w, r) (interface{}, error) {}
| | | | | 6 -----> Resp() err >-| getParams(ctx)
| | | | | | 7 -----> Fail() <-+ getResult(dbx)
| | | | | | | 8 -----> Tail() return result, err
| | | | | | | |
| | | | | | | 8 ----- :end
| | | | | | 7 -----
| | | | | 6 -----
| | | | 5 -----
| | | 4 -----
| | 3 -----
| 2 -----
1 -----
*/
// Pipe does
// Pipe composes three middleware layers: handlers registered via BeforeJoin
// wrap outermost, then the handlers passed to Join, then AfterJoin's.
type Pipe struct {
	before []func(http.Handler) http.Handler // applied outermost
	// Join(...) here
	after []func(http.Handler) http.Handler // applied innermost
}
// BeforeJoin joins middleware handlers in order executing before Join method.
// BeforeJoin joins middleware handlers in order executing before those
// passed to the Join method.
func (p *Pipe) BeforeJoin(pipes ...func(http.Handler) http.Handler) {
	// Single variadic append replaces the element-by-element loop.
	p.before = append(p.before, pipes...)
}
// AfterJoin joins middleware handlers in order executing before Join method.
// AfterJoin joins middleware handlers in order executing after those passed
// to the Join method (the previous doc comment said "before" — a
// copy-paste of BeforeJoin's).
func (p *Pipe) AfterJoin(pipes ...func(http.Handler) http.Handler) {
	p.after = append(p.after, pipes...)
}
// Join joins several middlewares in one pipeline.
// Join composes p.before, pipes, and p.after into a single http.Handler.
// Wrapping proceeds inside-out from a no-op terminal handler, so
// p.before[0] ends up outermost and runs first.
func (p *Pipe) Join(pipes ...func(http.Handler) http.Handler) http.Handler {
	var h http.Handler = http.HandlerFunc(func(http.ResponseWriter, *http.Request) {})
	wrap := func(mws []func(http.Handler) http.Handler) {
		for i := len(mws) - 1; i >= 0; i-- {
			h = mws[i](h)
		}
	}
	wrap(p.after)
	wrap(pipes)
	wrap(p.before)
	return h
}
// NewPipe returns *Pipe with cap c.
// NewPipe returns a *Pipe whose before/after slices are pre-allocated with
// capacity c.
func NewPipe(c int) *Pipe {
	p := &Pipe{}
	p.before = make([]func(http.Handler) http.Handler, 0, c)
	p.after = make([]func(http.Handler) http.Handler, 0, c)
	return p
}
// Join joins several middlewares in one pipeline.
// Join composes the given middlewares into one pipeline via an empty Pipe.
func Join(pipes ...func(http.Handler) http.Handler) http.Handler {
	return NewPipe(0).Join(pipes...)
}
// coder is implemented by response writers that expose the HTTP status code
// they recorded.
type coder interface {
	Code() int
}

// sizer is implemented by response writers that expose the number of body
// bytes written.
type sizer interface {
	Size() int
}
// responseWriter wraps an http.ResponseWriter and records the status code
// and number of body bytes written; both counters are accessed atomically.
type responseWriter struct {
	c uint64 // status
	n uint64 // size
	w http.ResponseWriter
}
// Write forwards to the wrapped writer and accumulates the byte count.
func (w *responseWriter) Write(b []byte) (int, error) {
	n, err := w.w.Write(b)
	atomic.AddUint64(&w.n, uint64(n))
	return n, err
}
// Header returns the wrapped writer's header map.
func (w *responseWriter) Header() http.Header {
	return w.w.Header()
}
// WriteHeader records the status code and forwards it to the wrapped
// writer. Store (not Add) keeps the recorded value correct even when
// WriteHeader is called more than once; previously repeated calls summed
// the codes into a bogus value.
func (w *responseWriter) WriteHeader(statusCode int) {
	atomic.StoreUint64(&w.c, uint64(statusCode))
	w.w.WriteHeader(statusCode)
}
// Hijack implements http.Hijacker by delegating to the wrapped writer.
// It returns an error instead of panicking with a type assertion failure
// when the underlying writer does not support hijacking (e.g. HTTP/2).
func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	hj, ok := w.w.(http.Hijacker)
	if !ok {
		return nil, nil, fmt.Errorf("mdware: underlying ResponseWriter does not implement http.Hijacker")
	}
	return hj.Hijack()
}
// Code returns the recorded HTTP status code (0 if WriteHeader was never
// called).
func (w *responseWriter) Code() int {
	return int(atomic.LoadUint64(&w.c))
}

// Size returns the number of body bytes written so far.
func (w *responseWriter) Size() int {
	return int(atomic.LoadUint64(&w.n))
}
// Head does some actions the first in handlers pipeline. Must be first in pipeline.
// Head does some actions the first in handlers pipeline; must be first in
// the pipeline. It stamps the context with the start time, client host and
// user agent, optionally attaches a request UUID (also exposed as the
// X-Request-ID header), and advertises the Go runtime version.
func Head(uuidFn func() string) func(http.Handler) http.Handler {
	return func(h http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			ctx := r.Context()
			ctx = ctxutil.WithTime(ctx, time.Now()) // request start; Tail uses it for latency
			ctx = ctxutil.WithHost(ctx, mineHost(r))
			ctx = ctxutil.WithUser(ctx, r.UserAgent())
			if uuidFn != nil {
				uuid := uuidFn()
				ctx = ctxutil.WithUUID(ctx, uuid)
				w.Header().Set("X-Request-ID", uuid)
			}
			w.Header().Set("X-Powered-By", runtime.Version())
			r = r.WithContext(ctx)
			h.ServeHTTP(w, r)
		})
	}
}
// Auth checks user's access to service.
// Auth checks user's access to service. A failing authFn does not stop the
// pipeline; the error and status code are stored in the context for later
// stages (Resp/Fail) to act on.
func Auth(authFn func(*http.Request) (string, int, error)) func(http.Handler) http.Handler {
	return func(h http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if authFn != nil {
				ctx := r.Context()
				name, code, err := authFn(r)
				if err != nil {
					ctx = ctxutil.WithError(ctx, err, code)
				}
				// The name is recorded even on error (may be empty).
				ctx = ctxutil.WithAuth(ctx, name)
				r = r.WithContext(ctx)
			}
			h.ServeHTTP(w, r)
		})
	}
}
func mineHost(r *http.Request) string {
var v string
if v = r.Header.Get("X-Forwarded-For"); v == "" {
if v = r.Header.Get("X-Real-IP"); v == "" {
v = r.RemoteAddr
}
}
v, _, _ = net.SplitHostPort(v)
return v
}
// Gzip wraps reader and writer for decompress and ompress data.
// Gzip wraps the request reader and the response writer for transparent
// decompression and compression, using pooled gzip readers/writers.
func Gzip(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Decompress the request body when the client sent gzip.
		if strings.Contains(r.Header.Get("Content-Encoding"), "gzip") {
			ctx := r.Context()
			err := ctxutil.ErrorFrom(ctx)
			if err != nil {
				// An upstream stage already failed; skip body handling.
				h.ServeHTTP(w, r)
				return
			}
			z := gzippool.GetReader()
			defer gzippool.PutReader(z)
			// Reset error deliberately ignored; bad data surfaces on read.
			_ = z.Reset(r.Body)
			r.Body = z
		}
		// Compress the response when the client accepts gzip.
		if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
			z := gzippool.GetWriter()
			defer gzippool.PutWriter(z)
			z.Reset(w)
			w = gzippool.NewResponseWriter(z, w)
			w.Header().Set("Content-Encoding", "gzip")
			w.Header().Add("Vary", "Accept-Encoding")
		}
		h.ServeHTTP(w, r)
	})
}
// Body reads data from Request.Body into context.Conext.
// Body reads the whole request body into the context so later stages can
// reuse it, for methods starting with "P" (POST/PUT/PATCH) only.
func Body(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		err := ctxutil.ErrorFrom(ctx)
		if err != nil {
			// Upstream error: pass through untouched.
			h.ServeHTTP(w, r)
			return
		}
		// Only "P" methods are expected to carry a body here.
		if !strings.HasPrefix(r.Method, "P") {
			h.ServeHTTP(w, r)
			return
		}
		b, err := ioutil.ReadAll(r.Body)
		if err != nil {
			ctx = ctxutil.WithError(ctx, err, http.StatusBadRequest)
		}
		// Record length and raw bytes even on a partial read.
		ctx = ctxutil.WithCLen(ctx, int64(len(b)))
		ctx = ctxutil.WithBody(ctx, b)
		err = r.Body.Close()
		if err != nil {
			ctx = ctxutil.WithError(ctx, err, http.StatusBadRequest)
		}
		r = r.WithContext(ctx)
		h.ServeHTTP(w, r)
	})
}
// Exec execites main user handler for registared URL.
// Exec executes the main user handler for the registered URL, recording the
// response code and size into the context. v may be a plain handler func or
// an http.Handler; anything else panics at request time.
func Exec(v interface{}) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			ctx := r.Context()
			err := ctxutil.ErrorFrom(ctx)
			if err != nil {
				// Upstream failure: skip the user handler entirely.
				next.ServeHTTP(w, r)
				return
			}
			// Wrap the writer to capture status code and bytes written.
			w = &responseWriter{w: w}
			switch h := v.(type) {
			case func(http.ResponseWriter, *http.Request):
				h(w, r)
			case http.Handler:
				h.ServeHTTP(w, r)
			default:
				panic("unknown handler")
			}
			// Re-read the context (unchanged unless the handler mutated *r).
			ctx = r.Context()
			if v, ok := w.(coder); ok && v.Code() != 0 {
				ctx = ctxutil.WithCode(ctx, v.Code())
			} else if ctxutil.CodeFrom(ctx) == 0 { // if wasn't error
				ctx = ctxutil.WithCode(ctx, http.StatusOK)
			}
			if v, ok := w.(sizer); ok && v.Size() != 0 {
				ctx = ctxutil.WithSize(ctx, int64(v.Size()))
			}
			r = r.WithContext(ctx)
			next.ServeHTTP(w, r)
		})
	}
}
// Resp writes result data to response.
// Resp writes result data to the response: marshals the context result to
// JSON (unless it already is []byte), sets headers, writes the body plus a
// trailing newline, and records code/size for logging. Skipped when an
// error is pending or a response was already written.
func Resp(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		err := ctxutil.ErrorFrom(ctx)
		if err != nil || ctxutil.SizeFrom(ctx) != 0 { // skip if response exists
			h.ServeHTTP(w, r)
			return
		}
		// data: if the result is not []byte, then try to marshal it.
		res := ctxutil.ResultFrom(ctx)
		var data []byte
		if v, ok := res.([]byte); !ok {
			data, err = json.Marshal(res)
			if err != nil {
				ctx = ctxutil.WithError(ctx, err)
			} else {
				w.Header().Set("Content-Type", "application/json; charset=utf-8")
			}
		} else {
			data = v
		}
		if err != nil {
			// Marshalling failed: let Fail render the error.
			r = r.WithContext(ctx)
			h.ServeHTTP(w, r)
			return
		}
		w.Header().Set("Connection", "close")
		if w.Header().Get("Content-Type") == "" {
			// Raw []byte result: sniff the content type.
			w.Header().Set("Content-Type", http.DetectContentType(data))
		}
		// head
		code := ctxutil.CodeFrom(ctx)
		if code == 0 {
			code = http.StatusOK
			ctx = ctxutil.WithCode(ctx, code) // for logging in tail
		}
		w.WriteHeader(code)
		// body
		n, err := w.Write(data)
		if err != nil {
			ctx = ctxutil.WithError(ctx, err)
		} else {
			// prettify ?
			_, err = w.Write([]byte("\n"))
			if err != nil {
				ctx = ctxutil.WithError(ctx, err)
			} else {
				n++
			}
		}
		ctx = ctxutil.WithSize(ctx, int64(n))
		r = r.WithContext(ctx)
		h.ServeHTTP(w, r)
	})
}
// Fail writes error message to response. Must be after resp.
// Fail writes the pending context error as a plain-text response with the
// recorded status code. Must run after Resp.
func Fail(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		err := ctxutil.ErrorFrom(ctx)
		if err != nil {
			code := ctxutil.CodeFrom(ctx)
			w.Header().Set("Connection", "close")
			w.Header().Set("Content-Type", "text/plain; charset=utf-8")
			w.WriteHeader(code)
			msg := fmt.Sprintf("%s\n", err.Error())
			// The short declaration deliberately shadows the outer err:
			// only a write failure is recorded below.
			n, err := w.Write([]byte(msg))
			if err != nil {
				ctx = ctxutil.WithError(ctx, err)
			}
			ctx = ctxutil.WithSize(ctx, int64(n))
			r = r.WithContext(ctx)
		}
		h.ServeHTTP(w, r)
	})
}
// Tail does some last actions (logging, send metrics). Must be in the end of pipe.
// Tail does some last actions (logging, send metrics). Must be at the end
// of the pipe. It logs latency, host, user agent, UUID, auth name, request
// length, response size, status code and any error text.
func Tail(log logger.Logger) func(http.Handler) http.Handler {
	return func(h http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if log == nil {
				panic(fmt.Sprintf("%v log", log))
			}
			ctx := r.Context()
			err := ctxutil.ErrorFrom(ctx)
			var errText string
			if err != nil {
				// NOTE(review): the [3:] slice appears to strip a fixed
				// 3-character prefix — confirm against ctxutil's error
				// format. Guarded so messages shorter than 3 characters no
				// longer panic with an out-of-range slice.
				errText = err.Error()
				if len(errText) >= 3 {
					errText = errText[3:]
				}
			}
			log.Printf(
				"%s %s %s %s %s %d %d %d%s\n",
				time.Since(ctxutil.TimeFrom(ctx)),
				ctxutil.HostFrom(ctx),
				ctxutil.UserFrom(ctx),
				ctxutil.UUIDFrom(ctx),
				ctxutil.AuthFrom(ctx),
				ctxutil.CLenFrom(ctx),
				ctxutil.SizeFrom(ctx),
				ctxutil.CodeFrom(ctx),
				errText,
			)
			h.ServeHTTP(w, r)
		})
	}
}
// Errc is wrapper for NotFound and MethodNotAllowed error handlers.
// Errc is a wrapper for NotFound and MethodNotAllowed error handlers: it
// stores a synthetic "router error" with the given status code in the
// request context so the pipeline's error stages render it.
func Errc(code int) func(http.Handler) http.Handler {
	return func(h http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			routerErr := fmt.Errorf("router error")
			r = r.WithContext(ctxutil.WithError(r.Context(), routerErr, code))
			h.ServeHTTP(w, r)
		})
	}
}
// Stdh executes standard handlers regestered in http.DefaultServeMux.
// Stdh executes standard handlers registered in http.DefaultServeMux.
// When no pattern matches (p == ""), the request is deliberately left
// unanswered — NOTE(review): confirm this is intended rather than serving
// the mux's default NotFound handler.
func Stdh(w http.ResponseWriter, r *http.Request) {
	if h, p := http.DefaultServeMux.Handler(r); p != "" {
		h.ServeHTTP(w, r)
	}
}
|
//go:build windows && amd64
// +build windows,amd64

package es

import _ "embed"

// everythingDll holds the bundled Everything64.dll, embedded at build time.
//
//go:embed Everything64.dll
var everythingDll []byte

// everythingMd5 is the expected MD5 digest of the embedded DLL (raw bytes,
// not hex) — presumably used to verify an on-disk copy; confirm at call
// site.
var everythingMd5 = []byte("\xa1\xdd\xb6\x98\x1a\xc5\xe0\x80\x55\x4b\xd3\x84\xc8\x69\xf5\xf3")
|
package commands
// Project represents a project handled by the commands package.
type Project struct {
	Name string // Name identifies the project.
}
|
package rest
import (
mlog "github.com/jinmukeji/go-pkg/v2/log"
)
var (
	// log is the package-global logger, backed by mlog's standard logger.
	log = mlog.StandardLogger()
)
|
package main
import (
"fmt"
)
// 96. 不同的二叉搜索树
// 给定一个整数 n,求以 1 ... n 为节点组成的二叉搜索树有多少种?
// https://leetcode-cn.com/problems/unique-binary-search-trees/
// main demonstrates both counting approaches on n = 3; each prints 5.
func main() {
	fmt.Println(numTrees(3))  // 5
	fmt.Println(numTrees2(3)) // 5
}
// TreeNode is a standard LeetCode binary tree node (not used by the
// counting solutions in this file; kept for reference).
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// 类似题目注意是否包含空树
// 法一:动态规划
// n个整数组成的BST个数=以i为根节点的BST个数相加(1<=i<=n)
// 以i为根节点的BST,左子树是i-1个节点,右子树是n-i个节点
// 则根为i的BST集合是左子树集合和右子树集合的笛卡尔积
// O(n^2)
// numTrees returns the number of structurally unique BSTs storing values
// 1..n (the n-th Catalan number), via O(n^2) dynamic programming:
// dp[i] = sum over root j in [1,i] of dp[j-1]*dp[i-j]
// (left subtree has j-1 nodes, right subtree i-j; count is the product).
func numTrees(n int) int {
	if n < 1 {
		// One arrangement: the empty tree. The previous code indexed dp[1]
		// unconditionally and panicked for n == 0.
		return 1
	}
	dp := make([]int, n+1)
	dp[0], dp[1] = 1, 1
	for i := 2; i <= n; i++ { // i = number of nodes
		for j := 1; j <= i; j++ { // j = value at the root
			dp[i] += dp[j-1] * dp[i-j]
		}
	}
	return dp[n]
}
// 法二:卡特兰通项公式
// 根据法一,举例如下:
// Taking 1~n as root respectively:
// 1 as root: # of trees = F(0) * F(n-1) // F(0) == 1
// 2 as root: # of trees = F(1) * F(n-2)
// 3 as root: # of trees = F(2) * F(n-3)
// ...
// n-1 as root: # of trees = F(n-2) * F(1)
// n as root: # of trees = F(n-1) * F(0)
// so,F(n) = F(0) * F(n-1) + F(1) * F(n-2) + F(2) * F(n-3) + ... + F(n-2) * F(1) + F(n-1) * F(0)
// 符合卡特兰公式:
// 令h(0)=1,h(1)=1。
// 卡塔兰数的递推式:h(n) = h(0)*h(n-1) + h(1)*h(n-2) + ... + h(n-1)h(0)(n>=2)
// h(n) = h(n-1)*(4*n-2) / (n+1)
// numTrees2 computes the same count via the closed-form Catalan recurrence
// h(n) = h(n-1) * (4n-2) / (n+1), starting from h(1) = 1. O(n).
func numTrees2(n int) int {
	result := 1
	for i := 2; i <= n; i++ {
		result = result * (4*i - 2) / (i + 1)
	}
	return result
}
|
/*
Package ratecounter provides a thread-safe rate-counter, for tracking counts
in an interval
Useful for implementing counters and stats of 'requests-per-second' (for example).
// We're recording events-per-1second
counter := ratecounter.NewRateCounter(1 * time.Second)
// Record an event happening
counter.Incr(1)
// get the current requests-per-second
counter.Rate()
To record an average over a longer period, you can:
// Record requests-per-minute
counter := ratecounter.NewRateCounter(60 * time.Second)
// Calculate the average requests-per-second for the last minute
counter.Rate() / 60
*/
package ratecounter
|
package main
import (
"fmt"
"strconv"
"github.com/dylandreimerink/gobpfld/ebpf"
)
// IPv4Field descibes the properties of a IPv4 field
// IPv4Field describes an IPv4 header field by its position and width.
type IPv4Field struct {
	offset int // byte offset from the start of the IPv4 header
	size   int // field width in bytes
}
// TODO convert IPv4Field to an interface and implement each field as a seperate struct. Reason for this is that
// some fields like Version, IHL, and flags don't align to 8 bits. These fields need to be masked and shifted
// to get a usable value for comparason.
// Field table for the fixed part of the IPv4 header. Each offset is chained
// from the previous field, so the block documents the wire layout directly.
// NOTE(review): "IPv4FragmetOffset" misspells "Fragment"; renaming would
// break callers, so the identifier is kept as-is.
var (
	IPv4VersionIHL = IPv4Field{
		offset: 0,
		size:   1,
	}
	IPv4TOS = IPv4Field{
		offset: IPv4VersionIHL.offset + IPv4VersionIHL.size,
		size:   1,
	}
	IPv4TotalLen = IPv4Field{
		offset: IPv4TOS.offset + IPv4TOS.size,
		size:   2,
	}
	IPv4ID = IPv4Field{
		offset: IPv4TotalLen.offset + IPv4TotalLen.size,
		size:   2,
	}
	IPv4FragmetOffset = IPv4Field{
		offset: IPv4ID.offset + IPv4ID.size,
		size:   2,
	}
	IPv4TTL = IPv4Field{
		offset: IPv4FragmetOffset.offset + IPv4FragmetOffset.size,
		size:   1,
	}
	IPv4Protocol = IPv4Field{
		offset: IPv4TTL.offset + IPv4TTL.size,
		size:   1,
	}
	IPv4Checksum = IPv4Field{
		offset: IPv4Protocol.offset + IPv4Protocol.size,
		size:   2,
	}
	IPv4SourceAddress = IPv4Field{
		offset: IPv4Checksum.offset + IPv4Checksum.size,
		size:   4,
	}
	IPv4DestinationAddress = IPv4Field{
		offset: IPv4SourceAddress.offset + IPv4SourceAddress.size,
		size:   4,
	}
)
// bytesToBPFSize maps a field width in bytes to the matching eBPF load
// size specifier.
var bytesToBPFSize = map[int]ebpf.Size{
	1: ebpf.BPF_B,
	2: ebpf.BPF_H,
	4: ebpf.BPF_W,
	8: ebpf.BPF_DW,
}

// Compile-time check that *IPv4FieldMatch satisfies the Match interface.
var _ Match = (*IPv4FieldMatch)(nil)
// IPv4FieldMatch can match a field in a IPv4 header.
// Like 'ipv4.src == "127.0.0.1"' or 'ipv4.len >= 50'
// IPv4FieldMatch can match a field in a IPv4 header.
// Like 'ipv4.src == "127.0.0.1"' or 'ipv4.len >= 50'
type IPv4FieldMatch struct {
	Field IPv4Field // the header field to inspect
	Op    LogicOp   // comparison operator
	Value int       // static value to compare against
}
// Invert returns a copy of the match with the comparison operator negated;
// the field and value are unchanged.
func (ifm *IPv4FieldMatch) Invert() Match {
	inverted := *ifm
	inverted.Op = ifm.Op.Invert()
	return &inverted
}
// AssembleMatch emits eBPF assembly that evaluates this match and jumps to
// nextRuleLabel when the packet does NOT satisfy it; the comparison is
// inverted so the action code placed directly after the match runs on
// success. R6 is assumed to always hold *xdp_md, and r10 (the stack frame)
// caches per-packet header offsets.
func (ifm *IPv4FieldMatch) AssembleMatch(counter *IDCounter, nextRuleLabel, actionLabel string) ([]string, error) {
	asm := []string{
		"# IPv4 field match",
		// Copy R6 to R1 in case R1 has been reused (R6 is always *xdp_md)
		" r1 = r6",
		// Load the 'cached' header location of the IPv4 header
		fmt.Sprintf(" r0 = *(u64 *)(r10%+d)", headerLocationVariables[FWLibGetIPv4Header]),
		// If the cached value is not -2, use the cached value and skip the call
		" if r0 != -2 goto +2",
		// Call FWLibGetIPv4Header
		" call " + FWLibGetIPv4Header.String(),
		// Cache the result from FWLibGetIPv4Header
		fmt.Sprintf(" *(u64 *)(r10%+d) = r0", headerLocationVariables[FWLibGetIPv4Header]),
		// Jump to next rule/after action if return < 0
		// if return == -1, there is no IPv4 header, no other negative number is expected
		" if r0 s< 0 goto " + nextRuleLabel,
		// r2 = xdp_md.data
		" r2 = *(u32 *)(r6 + 0)",
		// R0 is just the offset of the IPv4 header, to get a pointer we need to
		// add the xdp_md.data to the offset.
		" r0 += r2",
		// Load xdp_md->data_end into R1
		" r1 = *(u32 *)(r6 + 4)",
		// Copy R0 to R2 so we can use R2 for bounds checking
		" r2 = r0",
		// NOTE(review): an end-exclusive bounds check would normally use
		// offset+size; the extra +1 makes the check one byte stricter —
		// confirm the off-by-one is intentional.
		fmt.Sprintf(" r2 += %d", int32(ifm.Field.offset)+int32(ifm.Field.size)+1),
		// if xdp_md.data + offsetof(iphdr->{field}) + sizeof(iphdr->{field}) > xdp_md.data_end
		" if r2 > r1 goto " + nextRuleLabel,
	}
	// Invert the op. The 'action' code comes after the match, so we want to jump over the
	// the action to the next rule to get the same result.
	opInst := ifm.Op.Invert().Assembly(strconv.Itoa(ifm.Value), nextRuleLabel)
	asm = append(asm, []string{
		// Load the IPv4 field into R1
		fmt.Sprintf(" r1 = *(%s *)(r0 + %d)", bytesToBPFSize[ifm.Field.size], int16(ifm.Field.offset)),
		// Compare against the static value
		" " + opInst,
		"# End IPv4 field match",
	}...)
	return asm, nil
}
// getIPv4Header returns the offset from xdp_md.data to the start of the IPv4 header, or -1 if there is no IPv4 header.
func getIPv4Header() []string {
// Arguments
// r1 = xdp_md
return []string{
// TODO move L2 parsing to seperate lib function. Let the main program pass the first frame pointer
// via r2 so lib functions can lookup offsets. Add L2 offset caching.
FWLibGetIPv4Header.String() + ":",
" r0 = -1 # Set default return value to -1",
" r2 = *(u32 *) (r1 + 4) # r2 = xdp_md.data_end",
" r1 = *(u32 *) (r1 + 0) # r1 = xdp_md.data",
" r5 = 14 # r5 = sizeof(ethhdr)",
" r3 = r1 # r3 = packet bounds checking",
" r3 += r5 # r3 = xdp_md.data + sizeof(ethhdr)",
" if r3 > r2 goto get_ipv4_hdr_exit # if xdp_md.data + sizeof(ethhdr) > xdp_md.data_end",
" r4 = *(u16 *) (r1 + 12) # r4 = ethhdr.h_proto",
// TODO add 802.1ad and QinQ double tagging support
fmt.Sprintf(" if r4 != %d goto get_ipv4_hdr_iph # if ethhdr.h_proto != 0x8100 (802.1Q Virtual LAN)", ebpf.HtonU16(0x8100)),
" r5 += 4 # r5 = sizeof(ethhdr) + sizeof(vlan_hdr)",
" r3 += 4 # r3 = xdp_md.data + sizeof(ethhdr) + sizeof(vlan_hdr)",
" if r3 > r2 goto get_ipv4_hdr_exit # if xdp_md.data + sizeof(ethhdr) + sizeof(vlan_hdr) > xdp_md.data_end",
" r4 = *(u16 *) (r1 + 16) # r4 = vlan_hdr.h_vlan_encapsulated_proto",
"get_ipv4_hdr_iph:",
fmt.Sprintf(" if r4 != %d goto get_ipv4_hdr_exit # if r4 != 0x0800 (IPv4)", ebpf.HtonU16(0x0800)),
" r0 = r5",
"get_ipv4_hdr_exit:",
" exit",
}
}
|
package commands
import (
"context"
"os"
"os/signal"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/yunify/qscamel/constants"
"github.com/yunify/qscamel/migrate"
"github.com/yunify/qscamel/model"
"github.com/yunify/qscamel/utils"
)
var (
	// taskPath is an optional path to the task file, forwarded to
	// model.LoadTask — presumably bound to a CLI flag elsewhere (binding
	// not visible in this file).
	taskPath string
)
// RunCmd will provide run command for qscamel.
// RunCmd will provide run command for qscamel: it loads and validates the
// named task, installs a signal handler that cleans up on interrupt, and
// then executes the migration.
var RunCmd = &cobra.Command{
	Use:   "run [task name or task path]",
	Short: "Create or resume a task",
	Args:  cobra.ExactArgs(1),
	PreRunE: func(cmd *cobra.Command, _ []string) error {
		return initContext(cmd.Flag("config").Value.String())
	},
	Run: func(cmd *cobra.Command, args []string) {
		ctx := context.Background()

		// os.Kill (SIGKILL) cannot be caught or handled, so registering it
		// with signal.Notify was a no-op; listen for Interrupt only.
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, os.Interrupt)
		go func() {
			sig := <-sigs
			logrus.Infof("Signal %v received, exit for now.", sig)
			cleanUp()
			os.Exit(0)
		}()

		// Load and check task.
		t, err := model.LoadTask(args[0], taskPath)
		if err != nil {
			logrus.Errorf("Task load failed for %v.", err)
			return
		}
		err = t.Check()
		if err != nil {
			logrus.Errorf("Task check failed for %v.", err)
			return
		}
		ctx = utils.NewTaskContext(ctx, t.Name)

		// Start migrate.
		logrus.Infof("Current version: %s.", constants.Version)
		logrus.Infof("Task %s migrate started.", t.Name)
		err = migrate.Execute(ctx)
		if err != nil {
			logrus.Errorf("Migrate failed for %v.", err)
		}
	},
	PostRunE: func(cmd *cobra.Command, args []string) error {
		return cleanUp()
	},
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package benchdaily
import (
"encoding/json"
"flag"
"log"
"os"
"reflect"
"runtime"
"strings"
"testing"
)
// BenchOutput is the json format for the final output file.
// BenchOutput is the json format for the final output file.
type BenchOutput struct {
	Date   string        // run date
	Commit string        // commit the benchmarks ran against
	Result []BenchResult // one entry per benchmark
}
// BenchResult is one benchmark result.
// BenchResult is one benchmark result.
type BenchResult struct {
	Name        string // benchmark function name (see callerName)
	NsPerOp     int64
	AllocsPerOp int64
	BytesPerOp  int64
}
// benchmarkResultToJSON converts a testing.BenchmarkResult into the
// serializable BenchResult form under the given name.
func benchmarkResultToJSON(name string, r testing.BenchmarkResult) BenchResult {
	out := BenchResult{Name: name}
	out.NsPerOp = r.NsPerOp()
	out.AllocsPerOp = r.AllocsPerOp()
	out.BytesPerOp = r.AllocedBytesPerOp()
	return out
}
func callerName(f func(b *testing.B)) string {
fullName := runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
idx := strings.LastIndexByte(fullName, '.')
if idx > 0 && idx+1 < len(fullName) {
return fullName[idx+1:]
}
return fullName
}
// outfile names the JSON file Run writes results to; empty (the default)
// turns Run into a no-op.
var outfile = flag.String("outfile", "", "specify the output file")
// Run runs some benchmark tests, write the result to a JSON file.
func Run(tests ...func(b *testing.B)) {
if !flag.Parsed() {
flag.Parse()
}
// Avoiding slow down the CI.
if *outfile == "" {
return
}
res := make([]BenchResult, 0, len(tests))
for _, t := range tests {
name := callerName(t)
r1 := testing.Benchmark(t)
r2 := benchmarkResultToJSON(name, r1)
res = append(res, r2)
}
writeBenchResultToFile(res, *outfile)
}
// readBenchResultFromFile is used by the daily bench test.
// nolint: unused, deadcode
// readBenchResultFromFile is used by the daily bench test: it decodes a
// JSON []BenchResult from file, panicking on open/decode failures.
// nolint: unused, deadcode
func readBenchResultFromFile(file string) []BenchResult {
	//nolint: gosec
	f, err := os.Open(file)
	if err != nil {
		log.Panic(err)
	}
	defer func() {
		if cerr := f.Close(); cerr != nil {
			log.Fatal(cerr)
		}
	}()
	res := make([]BenchResult, 0, 100)
	if err := json.NewDecoder(f).Decode(&res); err != nil {
		log.Panic(err)
	}
	return res
}
// writeBenchResultToFile encodes res as JSON into file, creating or
// truncating it; any failure terminates the process via log.Fatal.
func writeBenchResultToFile(res []BenchResult, file string) {
	out, err := os.Create(file)
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		if cerr := out.Close(); cerr != nil {
			log.Fatal(cerr)
		}
	}()
	if err := json.NewEncoder(out).Encode(res); err != nil {
		log.Fatal(err)
	}
}
|
package cmd
import (
"errors"
"strings"
)
// GitStatusParse parses `git status --porcelain`-style output, one
// "XY path" entry per line, and returns the path portion of every
// non-empty line. It errors on blank input or lines shorter than 4 chars.
func GitStatusParse(status string) ([]string, error) {
	if status == "" {
		return nil, errors.New("status should not be blank")
	}
	var result []string
	for _, line := range strings.Split(status, "\n") {
		if line == "" {
			continue
		}
		if len(line) < 4 {
			return nil, errors.New("status format is invalid")
		}
		// Columns 0-2 are the two status letters plus a separator.
		result = append(result, line[3:])
	}
	return result, nil
}
// GitBranchParse parses `git branch` output and returns every branch name
// except the current one (the line whose 2-column marker contains '*').
// It errors on blank input or lines shorter than 3 characters.
func GitBranchParse(branches string) ([]string, error) {
	if branches == "" {
		return nil, errors.New("branches should not be blank")
	}
	var result []string
	for _, line := range strings.Split(branches, "\n") {
		if line == "" {
			continue
		}
		if len(line) < 3 {
			return nil, errors.New("executedCmdString format is invalid")
		}
		marker, name := line[:2], line[2:]
		if strings.Contains(marker, "*") {
			continue // skip the currently checked-out branch
		}
		result = append(result, name)
	}
	return result, nil
}
// CheckGitBranchDeleteResult inspects the textual output of a git branch
// delete command. It returns nil on success and an error otherwise.
func CheckGitBranchDeleteResult(result string) error {
	if result == "" {
		return errors.New("result text should not be blank")
	}
	// At least 5 characters are needed to test for the "error" prefix; the
	// previous guard (< 4) still let result[:5] panic on 4-char input.
	if len(result) < 5 {
		return errors.New("executedCmdString format is invalid")
	}
	if result[:5] == "error" {
		// Typically "error: <message>"; strip that prefix when present,
		// guarding the slice so short strings cannot panic.
		if len(result) > 7 {
			return errors.New(result[7:])
		}
		return errors.New(result)
	}
	return nil
}
|
package cli
import (
"strconv"
"github.com/spf13/cobra"
"github.com/spf13/cast"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/client/tx"
"github.com/octalmage/gitgood/x/gitgood/types"
)
// CmdCreateAchievement builds the `create-achievement [achievementID]
// [owner]` CLI command, which broadcasts a MsgCreateAchievement.
func CmdCreateAchievement() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "create-achievement [achievementID] [owner]",
		Short: "Create a new achievement",
		// Two positional args are documented and consumed below; the
		// previous ExactArgs(3) rejected the documented usage.
		Args: cobra.ExactArgs(2),
		RunE: func(cmd *cobra.Command, args []string) error {
			argsAchievementID, err := cast.ToStringE(args[0])
			if err != nil {
				return err
			}
			argsOwner, err := cast.ToStringE(args[1])
			if err != nil {
				return err
			}
			clientCtx, err := client.GetClientTxContext(cmd)
			if err != nil {
				return err
			}
			msg := types.NewMsgCreateAchievement(clientCtx.GetFromAddress().String(), argsAchievementID, argsOwner)
			if err := msg.ValidateBasic(); err != nil {
				return err
			}
			return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
		},
	}
	flags.AddTxFlagsToCmd(cmd)
	return cmd
}
// CmdUpdateAchievement builds the `update-achievement [id] [achievementID]
// [owner] [createdAt]` CLI command, which broadcasts a
// MsgUpdateAchievement assembled from the four positional arguments.
func CmdUpdateAchievement() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "update-achievement [id] [achievementID] [owner] [createdAt]",
		Short: "Update a achievement",
		Args:  cobra.ExactArgs(4),
		RunE: func(cmd *cobra.Command, args []string) error {
			// id is the numeric record key.
			id, err := strconv.ParseUint(args[0], 10, 64)
			if err != nil {
				return err
			}
			argsAchievementID, err := cast.ToStringE(args[1])
			if err != nil {
				return err
			}
			argsOwner, err := cast.ToStringE(args[2])
			if err != nil {
				return err
			}
			argsCreatedAt, err := cast.ToInt32E(args[3])
			if err != nil {
				return err
			}
			clientCtx, err := client.GetClientTxContext(cmd)
			if err != nil {
				return err
			}
			msg := types.NewMsgUpdateAchievement(clientCtx.GetFromAddress().String(), id, argsAchievementID, argsOwner, argsCreatedAt)
			if err := msg.ValidateBasic(); err != nil {
				return err
			}
			return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
		},
	}
	flags.AddTxFlagsToCmd(cmd)
	return cmd
}
// CmdDeleteAchievement builds the `delete-achievement [id]` CLI command,
// which broadcasts a MsgDeleteAchievement for the given numeric id.
func CmdDeleteAchievement() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "delete-achievement [id]",
		Short: "Delete a achievement by id",
		Args:  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			recordID, parseErr := strconv.ParseUint(args[0], 10, 64)
			if parseErr != nil {
				return parseErr
			}
			clientCtx, ctxErr := client.GetClientTxContext(cmd)
			if ctxErr != nil {
				return ctxErr
			}
			msg := types.NewMsgDeleteAchievement(clientCtx.GetFromAddress().String(), recordID)
			if validateErr := msg.ValidateBasic(); validateErr != nil {
				return validateErr
			}
			return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
		},
	}
	flags.AddTxFlagsToCmd(cmd)
	return cmd
}
|
package main
import (
"fmt"
"strconv"
)
// sayCh carries the "catN" messages produced by catSay.
var sayCh = make(chan string, 300)

// endCh signals completion (used by catSay and fishSay).
var endCh = make(chan struct{}, 1)

// intCh is the work channel between WriteData and ReadData.
var intCh = make(chan int, 100)

// exitCh lets ReadData tell main it has finished.
var exitCh = make(chan bool, 1)
// main starts a writer and a reader goroutine over intCh and blocks until
// the reader signals completion on exitCh.
func main() {
	go WriteData(intCh)
	go ReadData(intCh)
	// Block until ReadData signals it has drained intCh; a direct receive
	// is equivalent to the single-case select it replaces.
	<-exitCh
}
// WriteData sends 0..99 into intCh and then closes it so readers observe
// end-of-stream.
func WriteData(intCh chan<- int) {
	defer close(intCh)
	for i := 0; i < 100; i++ {
		intCh <- i
	}
}
// ReadData drains intCh until the sender closes it, printing each value,
// then signals completion on the package-level exitCh.
func ReadData(intCh <-chan int) {
	// range stops automatically once the channel is closed — equivalent to
	// the manual `v, ok := <-intCh` loop with a break on !ok.
	for v := range intCh {
		fmt.Println("ReadData", v)
	}
	exitCh <- true
	// exitCh is deliberately left open; main receives from it only once.
}
// catSay pushes "cat0".."cat99" into sayCh, closes it, then signals on
// endCh and closes that too.
func catSay() {
	for n := 0; n < 100; n++ {
		sayCh <- fmt.Sprintf("cat%d", n)
	}
	close(sayCh)
	endCh <- struct{}{}
	close(endCh)
}
// dogSay prints "dog" once (part of the cat/dog/fish ordering exercise).
func dogSay() {
	fmt.Println("dog")
}
// fishSay prints "fish" and then signals completion on endCh.
func fishSay() {
	fmt.Println("fish")
	endCh <- struct{}{}
}
//有三个函数,分别打印"cat","dog","fish"
//要求每个函数都起一个goroutine,请按照"cat","dog","fish"的顺序打印,100次
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//55. Jump Game
//Given an array of non-negative integers, you are initially positioned at the first index of the array.
//Each element in the array represents your maximum jump length at that position.
//Determine if you are able to reach the last index.
//Example 1:
//Input: [2,3,1,1,4]
//Output: true
//Explanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.
//Example 2:
//Input: [3,2,1,0,4]
//Output: false
//Explanation: You will always arrive at index 3 no matter what. Its maximum
// jump length is 0, which makes it impossible to reach the last index.
//func canJump(nums []int) bool {
//}
// Time Is Money |
package main
import (
"fmt"
"golang.org/x/tour/tree"
"reflect"
"sync"
)
// Walk walks the tree t
// Sending all values from the tree to the channel ch.
func Walk(t *tree.Tree, ch chan int) {
walkTree(t, ch)
defer close(ch)
}
// walkTree recursively performs an in-order traversal of t (left subtree,
// node value, right subtree), emitting each value on ch. t must be non-nil;
// nil children are guarded before each recursive call.
func walkTree(t *tree.Tree, ch chan int) {
	// Walk the left side of the tree
	if t.Left != nil {
		walkTree(t.Left, ch)
	}
	ch <- t.Value
	// Walk the right side of the tree
	if t.Right != nil {
		walkTree(t.Right, ch)
	}
}
// Same determines whether the trees
// t1 and t2 contain the same values.
func Same(t1, t2 *tree.Tree) bool {
arr1, arr2 := []int{}, []int{}
// Create a channel for integers
ch1 := make(chan int)
ch2 := make(chan int)
var wg sync.WaitGroup
wg.Add(2)
go func(){
Walk(t1, ch1)
defer wg.Done()
}()
for {
i, ok := <- ch1
if ok == false {
break
}
arr1 = append(arr1, i)
}
go func(){
Walk(t2, ch2)
defer wg.Done()
}()
for {
j, ok := <- ch2
if ok == false {
break
}
arr2 = append(arr2, j)
}
wg.Wait()
fmt.Printf("\narr1 %+v", arr1)
fmt.Printf("\narr2 %+v", arr2)
res := reflect.DeepEqual(arr1, arr2)
return res
}
// main checks that two trees built from the same sequence compare equal.
func main() {
	eq := Same(tree.New(1), tree.New(1))
	// %t is the boolean verb; the previous %b is for integers and printed
	// "%!b(bool=true)".
	fmt.Printf("\nAre equal %t", eq)
}
|
package main
import (
"fmt"
"io"
"log"
"os"
"path/filepath"
"runtime"
"strconv"
)
// dirCurrent is the process working directory at startup (os.Getwd) —
// NOTE(review): the old comment claimed "directory that the binary is in",
// which only holds when the program is launched from its own directory.
var dirCurrent, _ = os.Getwd()

// dirResources is the FTL "resources" directory under dirCurrent.
var dirResources = filepath.Join(dirCurrent, "resources")
// main drives the patcher: sanity-check the install location, loop on the
// menu until a valid choice arrives, then dispatch it.
func main() {
	Clr()
	fmt.Println("Welcome to Grognak's Mod Patcher!")
	if !CheckSafety() {
		Pause(true)
	}
	var choice int
	for {
		choice = MainMenu()
		if choice >= 1 && choice <= 4 {
			break
		}
		Clr() // invalid input: clear and show the menu again
	}
	switch choice {
	case 1:
		StartPatch() // Patch all mods
	case 2:
		RestoreBackups() // Restore unmodded game
	case 3:
		UpdateBackups() // Create updated backups
	case 4:
		os.Exit(0) // Exit
	}
	log.Println("Operation completed successfully!")
	Pause(true) // End the program
}
// CheckSafety verifies the binary sits next to the FTL installation by
// probing for a per-OS marker file. It returns false (after printing
// instructions) when the marker is missing.
func CheckSafety() bool {
	var fileToCheck string
	switch runtime.GOOS {
	case "darwin":
		fmt.Println("It appears you are running OS X. Congratulations!\n")
		fileToCheck = "FTL_README.html"
	case "windows":
		fmt.Println("Looks like you're running Windows. Great!\n")
		fileToCheck = "FTLGame.exe"
	case "linux":
		fmt.Println("It seems like you're running Linux. Fantastic!\n")
		fileToCheck = "FTL"
	}
	// NOTE(review): on any other GOOS fileToCheck stays "", os.Stat("")
	// always errors, and the check fails — confirm that is intended.
	if _, err := os.Stat(fileToCheck); os.IsNotExist(err) {
		fmt.Println("Sorry, but there was an error (#1):\n You need to install this binary in it's correct location.")
		fmt.Println("Try consulting the included readme for more information.")
		return false
	}
	return true
}
func MainMenu() int {
var result string
var resultint int
fmt.Println("What would you like to do?\n")
fmt.Println("1) Patch all mods")
fmt.Println("2) Restore unmodded game")
fmt.Println("3) Create updated backups")
fmt.Println("4) Exit\n")
fmt.Print(">> ")
_, err := fmt.Scanln(&result)
if err != nil {
log.Fatal(err)
}
resultint, _ = strconv.Atoi(result)
return resultint // The result converted into an int
}
// Pause waits for the user to press Enter. When exiting is true the
// process terminates afterwards; otherwise control returns to the caller.
func Pause(exiting bool) {
	var discard string
	if exiting {
		fmt.Println("\nPress Enter to exit...")
		fmt.Scanln(&discard)
		os.Exit(0)
	}
	fmt.Println("\nPress Enter to continue...")
	fmt.Scanln(&discard)
}
// StartPatch applies all mods to the game's data files.
// Not implemented yet.
func StartPatch() {
	// TODO
}
// RestoreBackups replaces the game's data files with the .bak copies made
// before patching. It aborts with a hint when the backups are missing.
func RestoreBackups() {
	log.Println("Restoring data file backups...")
	os.Remove(filepath.Join(dirResources, "data.dat"))
	// The file written below is "resource.dat"; the old code removed
	// "resources.dat" (wrong name), leaving the stale file in place.
	os.Remove(filepath.Join(dirResources, "resource.dat"))
	_, err := CopyFile(filepath.Join(dirResources, "data.dat.bak"), filepath.Join(dirResources, "data.dat"))
	if err == nil {
		// Previously the first copy's error was overwritten by the second.
		_, err = CopyFile(filepath.Join(dirResources, "resource.dat.bak"), filepath.Join(dirResources, "resource.dat"))
	}
	if err != nil {
		log.Println("There was an error restoring backups. Are you sure that")
		log.Println("you have patched at least once?")
		log.Fatal(err)
	}
}
// UpdateBackups refreshes the .bak copies of the game's data files.
// Not implemented yet.
func UpdateBackups() {
	// TODO
}
// Clr "clears" the terminal by pushing earlier output off-screen with
// 32 blank lines.
func Clr() {
	// TODO: Refine (a real terminal-clear escape would be nicer)
	for n := 32; n > 0; n-- {
		fmt.Println("")
	}
}
// CopyFile copies src to dest, creating/truncating dest, and returns the
// number of bytes written.
func CopyFile(src, dest string) (written int64, err error) {
	sf, err := os.Open(src)
	if err != nil {
		return 0, err
	}
	defer sf.Close()
	df, err := os.Create(dest)
	if err != nil {
		return 0, err
	}
	written, err = io.Copy(df, sf)
	// Surface the destination's Close error: a deferred, unchecked Close
	// could silently drop a flush/write failure.
	if cerr := df.Close(); err == nil {
		err = cerr
	}
	return written, err
}
|
package service
import (
"net/http"
"github.com/gin-gonic/gin"
"otoboni.com.br/customer-webservice/factory"
"otoboni.com.br/customer-webservice/model"
)
// GetCustomers responds with every customer known to the factory layer:
// 200 plus the list on success, 500 plus the error otherwise.
func GetCustomers(c *gin.Context) {
	customers, err := factory.GetCustomer()
	if err != nil {
		c.JSON(http.StatusInternalServerError,
			gin.H{"status": http.StatusInternalServerError, "error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"status": http.StatusOK, "customers": customers})
}
// GetCustomerById looks up one customer by the :id path parameter.
// 200 plus the customer on success, 500 plus the error otherwise.
// (Previously the error status was derived from the empty customer's Code,
// so failed lookups could be answered with HTTP 200.)
func GetCustomerById(c *gin.Context) {
	id := c.Param("id")
	customer, err := factory.GetCustomerById(id)
	if err != nil {
		c.JSON(http.StatusInternalServerError,
			gin.H{"status": http.StatusInternalServerError, "error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"status": http.StatusOK, "customer": customer})
}
// AddCustomer decodes a customer from the request body and persists it.
// 400 on malformed JSON, 500 on a factory error, 201 on success.
func AddCustomer(c *gin.Context) {
	var customer model.Customer
	// Previously the bind error was ignored and a zero-value customer
	// could be stored.
	if err := c.BindJSON(&customer); err != nil {
		c.JSON(http.StatusBadRequest,
			gin.H{"status": http.StatusBadRequest, "error": err.Error()})
		return
	}
	if err := factory.AddCustomer(customer); err != nil {
		c.JSON(http.StatusInternalServerError,
			gin.H{"status": http.StatusInternalServerError, "error": err.Error()})
		return
	}
	c.JSON(http.StatusCreated,
		gin.H{"status": http.StatusCreated, "Customer": customer.CustomerName})
}
// UpdateCustomer decodes a customer from the request body and updates it.
// 400 on malformed JSON, 500 on a factory error, 200 on success.
func UpdateCustomer(c *gin.Context) {
	var customer model.Customer
	// Previously the bind error was ignored and a zero-value customer
	// could be written over the stored record.
	if err := c.BindJSON(&customer); err != nil {
		c.JSON(http.StatusBadRequest,
			gin.H{"status": http.StatusBadRequest, "error": err.Error()})
		return
	}
	if err := factory.UpdateCustomer(customer); err != nil {
		c.JSON(http.StatusInternalServerError,
			gin.H{"status": http.StatusInternalServerError, "error": err.Error()})
		return
	}
	c.JSON(http.StatusOK,
		gin.H{"status": http.StatusOK, "Customer updated": customer.CustomerName})
}
// DeleteCustomer decodes a customer from the request body and deletes the
// record identified by its Code.
// 400 on malformed JSON, 500 on a factory error, 200 on success.
func DeleteCustomer(c *gin.Context) {
	var customer model.Customer
	// Previously the bind error was ignored, so a malformed request would
	// attempt deletion with an empty Code.
	if err := c.BindJSON(&customer); err != nil {
		c.JSON(http.StatusBadRequest,
			gin.H{"status": http.StatusBadRequest, "error": err.Error()})
		return
	}
	if err := factory.DeleteCustomer(customer.Code); err != nil {
		c.JSON(http.StatusInternalServerError,
			gin.H{"status": http.StatusInternalServerError, "error": err.Error()})
		return
	}
	c.JSON(http.StatusOK,
		gin.H{"status": http.StatusOK, "Customer deleted": "OK"})
}
|
package flagV
import (
"flag"
"fmt"
)
// PrintFlags prints every registered command-line flag together with its
// current value, default value and usage text.
func PrintFlags() {
	// Parse on the caller's behalf so the printed values are up to date.
	if !flag.Parsed() {
		flag.Parse()
	}
	const rule = "------------------------------------------------------------------------------"
	fmt.Println(rule)
	fmt.Println(" flags:")
	fmt.Println(rule)
	flag.VisitAll(func(f *flag.Flag) {
		fmt.Println("flag =", f.Name, "\t", " value =", f.Value, "\t", "default =", f.DefValue, "\t", f.Usage)
	})
}
|
package main
import (
"fmt"
"log"
"os"
"strconv"
"github.com/uhuaha/game-of-life/grid"
)
// main reads the grid dimensions from the command line, draws the initial
// grid, then advances and redraws it three times.
func main() {
	x, y, err := parseArguments()
	if err != nil {
		log.Fatalf("error: Cannot parse arguments: %v", err)
	}
	grid := grid.NewGrid(x, y)
	grid.Draw()
	// Calculate the next three generations of the grid
	// (the loop runs 3 iterations, not five as an earlier comment claimed).
	for i := 0; i < 3; i++ {
		err := grid.CalculateNexGeneration()
		if err != nil {
			log.Fatalf("error: Cannot calculate next grid generation: %v", err)
		}
		grid.Draw()
	}
}
// parseArguments reads the grid dimensions x and y from os.Args[1] and
// os.Args[2]. Both must be integers in the inclusive range [3, 20].
func parseArguments() (int, int, error) {
	args := os.Args
	// Guard against missing arguments: indexing args[1]/args[2] blindly
	// panicked when the program was started without both dimensions.
	if len(args) < 3 {
		return 0, 0, fmt.Errorf("error: Two grid dimensions x and y are required")
	}
	x, err := strconv.Atoi(args[1])
	if err != nil {
		return 0, 0, fmt.Errorf("error: Grid dimension x cannot be converted to an integer: %v", err)
	}
	y, err := strconv.Atoi(args[2])
	if err != nil {
		return 0, 0, fmt.Errorf("error: Grid dimension y cannot be converted to an integer: %v", err)
	}
	if x < 3 || y < 3 {
		return 0, 0, fmt.Errorf("error: Grid dimension x and/or y is too small. x and y must be at least >= 3.")
	}
	if x > 20 || y > 20 {
		return 0, 0, fmt.Errorf("error: Grid dimension x and/or y is too big for display. x and y must be <= 20.")
	}
	return x, y, nil
}
|
package main
import (
"fmt"
"math/rand"
"time"
)
// main launches five sleepy gophers and waits until every one has
// reported back on the channel before exiting.
func main() {
	c := make(chan int) // channel the gophers report on
	const callNum = 5
	for i := 0; i < callNum; i++ {
		go sleepyGopher(i, c)
	}
	// Receive exactly one completion per gopher.
	for i := 0; i < callNum; i++ {
		fmt.Println("gopher", <-c, "はスリープを終えました。")
	}
}
// sleepyGopher sleeps for a random 0-3 seconds, announces how long it
// slept, then reports its id on c.
func sleepyGopher(id int, c chan int) {
	// Use a per-gopher generator: reseeding the shared global source with
	// UnixNano from several goroutines spawned in the same instant hands
	// them all the same seed, so every gopher slept the same duration.
	r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(id)))
	randomNum := r.Intn(4)
	time.Sleep(time.Duration(randomNum) * time.Second)
	fmt.Println("gopher", id, "は、", randomNum, "秒寝ます。 ... snore ...")
	c <- id // report completion
}
|
package format
import (
"github.com/plandem/xlsx/internal/ml/primitives"
)
// List of all possible values for VAlignType
const (
	// The zero value is skipped so that an unset VAlignType is distinguishable
	// from a real alignment.
	_ primitives.VAlignType = iota
	VAlignTop
	VAlignCenter
	VAlignBottom
	VAlignJustify
	VAlignDistributed
)
// init registers the string forms of the vertical-alignment constants and
// derives the inverse (string -> constant) lookup from the forward map.
func init() {
	primitives.FromVAlignType = map[primitives.VAlignType]string{
		VAlignTop:         "top",
		VAlignCenter:      "center",
		VAlignBottom:      "bottom",
		VAlignJustify:     "justify",
		VAlignDistributed: "distributed",
	}
	primitives.ToVAlignType = make(map[string]primitives.VAlignType, len(primitives.FromVAlignType))
	for value, name := range primitives.FromVAlignType {
		primitives.ToVAlignType[name] = value
	}
}
|
package rtrserver
import (
"bytes"
"encoding/binary"
"errors"
"github.com/cpusoft/goutil/belogs"
"github.com/cpusoft/goutil/jsonutil"
)
// ParseToAsa decodes an ASA (ASPA) PDU body from buf (the protocol version
// and pduType have already been consumed by the caller) and returns the
// parsed model, or an RtrError wrapping PDU_TYPE_ERROR_CODE_CORRUPT_DATA.
func ParseToAsa(buf *bytes.Reader, protocolVersion uint8) (rtrPduModel RtrPduModel, err error) {
	/* wire layout:
	ProtocolVersion uint8    `json:"protocolVersion"`
	PduType         uint8    `json:"pduType"`
	Zero0           uint16   `json:"zero0"`
	Length          uint32   `json:"length"`
	Flags           uint8    `json:"flags"`
	Zero1           uint8    `json:"zero1"`
	ProviderAsCount uint16   `json:"providerAsCount"`
	CustomerAsn     uint32   `json:"customerAsn"`
	ProviderAsns    []uint32 `json:"providerAsns"`
	*/
	var zero0 uint16
	var length uint32
	var flags uint8
	var afiFlags uint8
	var providerAsCount uint16
	var customerAsn uint32

	// get zero0
	err = binary.Read(buf, binary.BigEndian, &zero0)
	if err != nil {
		belogs.Error("ParseToAsa(): PDU_TYPE_ASA get zero0 fail, buf:", buf, err)
		rtrError := NewRtrError(
			err,
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get zero0")
		return rtrPduModel, rtrError
	}

	// get length
	err = binary.Read(buf, binary.BigEndian, &length)
	if err != nil {
		belogs.Error("ParseToAsa(): PDU_TYPE_ASA get length fail, buf:", buf, err)
		rtrError := NewRtrError(
			err,
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get length")
		return rtrPduModel, rtrError
	}
	// A fixed header of 16 bytes is the minimum legal PDU length.
	if length < 16 {
		belogs.Error("ParseToAsa():PDU_TYPE_ASA, length must be more than 16, buf:", buf, length)
		rtrError := NewRtrError(
			errors.New("pduType is ASA, length must be more than 16"),
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get length")
		return rtrPduModel, rtrError
	}

	// get flags
	err = binary.Read(buf, binary.BigEndian, &flags)
	if err != nil {
		belogs.Error("ParseToAsa(): PDU_TYPE_ASA get flags fail, buf:", buf, err)
		rtrError := NewRtrError(
			err,
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get flags")
		return rtrPduModel, rtrError
	}
	/*
		Bit  Bit Name
		---- -------------------
		0    AFI (IPv4 == 0, IPv6 == 1)
		1    Announce == 1, Delete == 0
		2-7  Reserved, must be zero
	*/
	// Only bits 0 and 1 may be set, so any value above 3 uses reserved bits.
	if flags > 3 {
		belogs.Error("ParseToAsa():PDU_TYPE_ASA, flags is only use bits, buf:", buf, " flags:", flags)
		rtrError := NewRtrError(
			// fixed copy-paste: this is the ASA parser, not IPv4 PREFIX
			errors.New("pduType is ASA, flags is only use bits"),
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get flags")
		return rtrPduModel, rtrError
	}

	// get afiFlags
	err = binary.Read(buf, binary.BigEndian, &afiFlags)
	if err != nil {
		belogs.Error("ParseToAsa(): PDU_TYPE_ASA get afiFlags fail: buf:", buf, err)
		rtrError := NewRtrError(
			err,
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get zero1")
		return rtrPduModel, rtrError
	}

	// get providerAsCount
	err = binary.Read(buf, binary.BigEndian, &providerAsCount)
	if err != nil {
		belogs.Error("ParseToAsa(): PDU_TYPE_ASA get providerAsCount fail, buf:", buf, err)
		rtrError := NewRtrError(
			err,
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get providerAsCount")
		return rtrPduModel, rtrError
	}

	// get customerAsn
	err = binary.Read(buf, binary.BigEndian, &customerAsn)
	if err != nil {
		belogs.Error("ParseToAsa(): PDU_TYPE_ASA get customerAsn fail, buf:", buf, err)
		rtrError := NewRtrError(
			err,
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get customerAsn")
		return rtrPduModel, rtrError
	}

	// Read the variable-length provider ASN list; allocated once, pre-sized
	// (the old code built the slice twice).
	providerAsns := make([]uint32, 0, providerAsCount)
	for i := uint16(0); i < providerAsCount; i++ {
		var providerAsn uint32
		err = binary.Read(buf, binary.BigEndian, &providerAsn)
		if err != nil {
			belogs.Error("ParseToAsa(): PDU_TYPE_ASA get providerAsn fail, buf:", buf, err)
			rtrError := NewRtrError(
				err,
				true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
				buf, "Fail to get providerAsn")
			return rtrPduModel, rtrError
		}
		providerAsns = append(providerAsns, providerAsn)
	}

	sq := NewRtrAsaModelFromParse(protocolVersion, flags, afiFlags,
		customerAsn, providerAsns)
	belogs.Debug("ParseToAsa():get PDU_TYPE_ASA, buf:", buf, jsonutil.MarshalJson(sq))
	return sq, nil
}
|
package main
import "fmt"
// main demonstrates the goto statement by building a counting loop
// (prints 0 through 4) out of a label and a backwards jump.
func main() {
	// **GOTO STATEMENT **//
	// The following piece of code creates a loop just like a for statement does.
	i := 0
loop: // label
	if i < 5 {
		fmt.Println(i)
		i++
		goto loop
	}
	// goto todo //ERROR it's not permitted to jump over the declaration of x
	// x := 5
	// todo:
	// fmt.Println("something here")
}
|
package models
// User2text is the ORM join model linking a user to a text.
// NOTE(review): Textid references *Activity — presumably texts are stored
// as activities; confirm against the Activity model.
type User2text struct {
	Userid *User     `orm:"column(userid);rel(fk)"` // foreign key to the owning user
	Textid *Activity `orm:"column(textid);rel(fk)"` // foreign key to the referenced activity/text
}
|
package main
import (
"ethos/syscall"
"ethos/altEthos"
"ethos/kernelTypes"
"log"
)
// queueStruct is one parked read or write request: a query that could not
// obtain the lock it needed waits here until processQueue retries it.
type queueStruct struct {
	_type         string     // "read", "write", or "finished" once answered
	FDValue       syscall.Fd // connection to reply on when the request unblocks
	transactionID int64
	variableName  string
	variableValue string // only meaningful for queued writes
}

// Server-wide state for the single event loop. NOTE(review): none of these
// maps are lock-protected — this assumes one dispatching goroutine.
var path = "/user/" + altEthos.GetUser() + "/server/"
var pathTypeServer kernelTypes.String
var logType Databaselog
var storeType kernelTypes.String
var numTrasactions int64 = 1 // next transaction id to hand out (name typo preserved: package-wide)
var currentEventID syscall.EventId
var readLocks = make(map[string][]int64)  // variable name -> transaction ids holding a read lock
var writeLocks = make(map[string][]int64) // variable name -> transaction id(s) holding the write lock
var event_fd = make(map[syscall.EventId]syscall.Fd) // event -> connection it arrived on
var queue = make([]queueStruct, 0) // requests waiting on a lock
var transactionCommits = map[int64]map[string]string{} // per-transaction buffered (uncommitted) writes
// init registers the RPC handlers this database server exposes.
func init() {
	SetupMyRpcTransactionStartI(transactionStartI)
	SetupMyRpcTransactionEndI(transactionEndI)
	SetupMyRpcReadI(readI)
	SetupMyRpcWriteI(writeI)
	SetupMyRpcAbortI(abortI)
}

// transactionStartI begins a new transaction: allocates the next id plus an
// empty pending-write buffer and replies with the id and status "1" (ok).
func transactionStartI() (MyRpcProcedure) {
	status := "1"
	transactionID := numTrasactions
	numTrasactions++
	transactionCommits[transactionID] = map[string]string{}
	return &MyRpcTransactionStartIReply{transactionID, status}
}
// transactionEndI commits transaction id: releases its locks, flushes its
// buffered writes to the store, then retries queued requests.
// Reply statuses: "-1" unknown transaction, "2" nothing to commit,
// "3" committed.
func transactionEndI(id int64) (MyRpcProcedure) {
	status := "1"
	removeTransactionFromReadLocks(id)
	removeTransactionFromWriteLocks(id)
	tmap := transactionCommits[id]
	if tmap == nil {
		status = "-1"
		log.Printf("Transaction doesnt exist\n")
		processQueue()
		return &MyRpcTransactionEndIReply{status}
	}
	if len(tmap) == 0 {
		// Pre-Go1 Ethos dialect: `m[k] = v, false` deletes key k from map m.
		transactionCommits[id] = tmap, false
		status = "2"
		log.Printf("Nothing to commit, returning\n")
		processQueue()
		return &MyRpcTransactionEndIReply{status}
	}
	// Flush every buffered write durably (log first, then store).
	for k, v := range tmap {
		writeToFile(k,string(v))
		log.Printf("Writing to file\n")
	}
	transactionCommits[id] = tmap, false
	processQueue()
	status = "3"
	return &MyRpcTransactionEndIReply{status}
}
// writeToFile durably records one variable: write-ahead log first, then the
// store; the log entry is removed only when the store write succeeded.
func writeToFile (variableName string, value string) {
	writeToLog(variableName, value)
	log.Println("Writing to store")
	writeToStore := true
	fd, status := altEthos.DirectoryOpen(path + "store/")
	if status != syscall.StatusOk {
		// NOTE(review): log.Fatalf terminates the process, so the
		// writeToStore = false below is unreachable — a soft failure was
		// presumably intended.
		log.Fatalf ("Error opening %v: %v\n", path, status)
		writeToStore = false
	}
	var datatypeString kernelTypes.String
	datatypeString = kernelTypes.String(value)
	status = altEthos.WriteVar(fd, variableName, &datatypeString)
	if status != syscall.StatusOk {
		log.Printf ("Error Writing to %v %v\n", path + "/" + variableName , status)
		writeToStore = false
	}
	if writeToStore == true {
		deleteFromLog()
	}
}

// writeToLog appends a {name, value} record to the write-ahead log stream.
func writeToLog(variableName string, variableValue string) {
	log.Println("Writing to log")
	logStruct := Databaselog{variableName, variableValue}
	fd, status := altEthos.DirectoryOpen(path + "log/")
	if status != syscall.StatusOk {
		log.Fatalf ("Error opening %v: %v\n", path, status)
	}
	status = altEthos.WriteStream(fd, &logStruct)
	if status != syscall.StatusOk {
		log.Printf ("Error Writing to %v %v\n", path + "/" + variableName , status)
	}
}

// deleteFromLog removes the newest write-ahead log entry (the one that
// writeToLog just appended).
func deleteFromLog() {
	log.Println("Deleting from log")
	FileNames, status := altEthos.SubFiles(path + "log/")
	if status != syscall.StatusOk {
		log.Fatalf("Error fetching files in %v\n", path)
	}
	i := len(FileNames) - 1
	// NOTE(review): FileRemove's status is discarded; the check below
	// re-tests the stale status from SubFiles.
	altEthos.FileRemove(path + "log/" + FileNames[i])
	if status != syscall.StatusOk {
		log.Fatalf("Error deleting files in %v\n", path)
	}
}
// readI serves a read of variableName for transaction id under two-phase
// locking:
//   - holder of the write lock reads its own uncommitted buffer;
//   - holder of a read lock reads from the store;
//   - otherwise a read lock is granted unless someone else holds the write
//     lock, in which case the request is queued and no reply is sent yet
//     (nil return; processQueue answers it later).
func readI(id int64, variableName string) (MyRpcProcedure) {
	checkRead := checkIfTransactionContainsReadLockForVar(id, variableName)
	checkWrite := checkIfTransactionContainsWriteLockForVar(id, variableName)
	//if the transaction contains either a read lock or a write lock
	//Then it should be able to read the value
	if(checkWrite == true){
		value := transactionCommits[id][variableName]
		log.Printf("i obtain the write lock so getting the value from there\n")
		return &MyRpcReadIReply{ value, "1"}
	}
	if(checkRead == true){
		value := readFromFile(path, variableName)
		if(value == ""){
			return &MyRpcReadIReply{ "-1", "Variable Doesn't Exist"}
		}
		log.Printf("I obtain the read lock, so getting the value from file\n")
		return &MyRpcReadIReply{ value, "1"}
	}
	//Make sure no one else has a write lock
	i := writeLocks[variableName]
	currentFD := event_fd[currentEventID]
	if len(i) == 0 {
		//grab the read lock
		readLocks[variableName] = append(readLocks[variableName] , id)
		value := readFromFile(path, variableName)
		if(value == ""){
			return &MyRpcReadIReply{ "", "Variable Doesn't Exist"}
		}
		log.Printf("Obtained a new read lock\n")
		return &MyRpcReadIReply{ value, "1"}
	}
	//add the query to the queue and return nil
	newReadQ := queueStruct{"read", currentFD, id, variableName, ""}
	queue = append(queue, newReadQ)
	log.Printf("Some one else has a write lock for this variable\n")
	return nil
}

// readFromFile reads filename's value from the store directory, returning
// "" on failure ("" also doubles as "variable doesn't exist" upstream).
func readFromFile(path string, filename string) (string){
	_, status1 := altEthos.DirectoryOpen(path + "store/")
	if status1 != syscall.StatusOk {
		log.Println("Directory Create Failed ", path, status1)
		return ""
	}
	var value kernelTypes.String
	status1 = altEthos.Read(path + "store/" + filename, &value)
	if status1 != syscall.StatusOk {
		// NOTE(review): log.Fatalf terminates, so the return "" after it
		// is unreachable — log.Printf was presumably intended.
		log.Fatalf("Error reading box file at %v/%v\n", path + "store/" + filename, filename)
		return ""
	}
	return string(value)
}
// writeI serves a write of val=value for transaction id. The value is only
// buffered in transactionCommits; it reaches disk at commit time.
// A reply of "1" means the write lock is held; nil means the request was
// queued until the lock frees up.
func writeI(id int64, val string, value string) (MyRpcProcedure) {
	log.Printf("Called write transaction\n")
	checkWrite := checkIfTransactionContainsWriteLockForVar(id, val)
	if(checkWrite == true){
		//if the transaction already contains a write lock
		transactionCommits[id][val] = value
		return &MyRpcWriteIReply{"1"}
	}
	currentFD := event_fd[currentEventID]
	i := writeLocks[val]
	if len(i) > 0 {
		//Someone else holds the write lock
		newWriteQ := queueStruct{"write", currentFD, id, val, value}
		queue = append(queue, newWriteQ)
		log.Printf("There are write locks for this variable\n")
		return nil
	} else if len(i) == 0 {
		i = readLocks[val]
		checkRead := checkIfTransactionContainsReadLockForVar(id, val)
		if len(i) == 0 {
			//Obtain a write lock
			writeLocks[val] = append(writeLocks[val] , id)
			transactionCommits[id][val] = value
			log.Printf("No read locks, obtaining a write lock\n")
			return &MyRpcWriteIReply{"1"}
		}
		if (checkRead == true && (len(i) == 1)){
			//If I am the only one who contains the read lock
			upgradeReadLocktoWriteLock(id, val)
			transactionCommits[id][val] = value
			log.Printf("The user contains the only read lock\n")
			return &MyRpcWriteIReply{"1"}
		}
		// if (checkRead == false || (len(i) > 1)){
		// //Cant obtain a write lock
		// newWriteQ := queueStruct{"write", currentFD, id, val, value}
		// queue = append(queue, newWriteQ)
		// return &MyRpcWriteIReply{nil}
		// }
	}
	//Cant obtain a write lock
	newWriteQ := queueStruct{"write", currentFD, id, val, value}
	queue = append(queue, newWriteQ)
	log.Printf("Cannot obtain a lock so waiting\n")
	return nil
}

// upgradeReadLocktoWriteLock drops transaction id's read lock on val and
// grants it the write lock instead. Caller guarantees id holds the only
// read lock.
func upgradeReadLocktoWriteLock(id int64, val string) {
	v := readLocks[val]
	for i, a := range v {
		if a == id {
			readLocks[val] = removeIDFromSlice(v, i)
			break
		}
	}
	writeLocks[val] = append(writeLocks[val] , id)
}
// abortI rolls back transaction id: releases its locks and throws away its
// buffered writes, then retries queued requests.
func abortI(id int64) (MyRpcProcedure) {
	//var status string
	removeTransactionFromReadLocks(id)
	removeTransactionFromWriteLocks(id)
	// Ethos pre-Go1 dialect: map delete.
	transactionCommits[id] = transactionCommits[id], false
	processQueue()
	return &MyRpcAbortIReply{"1"}
}

// removeTransactionFromReadLocks drops every read lock held by id.
func removeTransactionFromReadLocks(id int64) {
	for k, v := range readLocks {
		for i, a := range v {
			if a == id {
				readLocks[k] = removeIDFromSlice(v, i)
				break
			}
		}
	}
}

// removeTransactionFromWriteLocks drops every write lock held by id.
func removeTransactionFromWriteLocks(id int64) {
	for k, v := range writeLocks {
		for i, a := range v {
			if a == id {
				writeLocks[k] = removeIDFromSlice(v, i)
				break
			}
		}
	}
}

// checkIfTransactionContainsReadLockForVar reports whether id holds a read
// lock on _var.
func checkIfTransactionContainsReadLockForVar(id int64, _var string) bool {
	v := readLocks[_var]
	for _, a := range v {
		if a == id {
			return true
		}
	}
	return false
}

// checkIfTransactionContainsWriteLockForVar reports whether id holds the
// write lock on _var.
func checkIfTransactionContainsWriteLockForVar(id int64, _var string) bool {
	v := writeLocks[_var]
	for _, a := range v {
		if a == id {
			return true
		}
	}
	return false
}

// removeIDFromSlice removes element i by swapping in the last element
// (order is not preserved).
func removeIDFromSlice(s []int64, i int) []int64 {
	s[i] = s[len(s)-1]
	return s[:len(s)-1]
}

// removeStructFromSlice removes element i by swapping in the last element
// (order is not preserved).
func removeStructFromSlice(s []queueStruct, i int) []queueStruct{
	s[i] = s[len(s)-1]
	return s[:len(s)-1]
}
// sendAnRPCReadReply answers a previously queued read on its saved
// connection.
func sendAnRPCReadReply(FDValue syscall.Fd, value string, status string) () {
	status1 := altEthos.WriteStream(FDValue, &MyRpcReadIReply{value, status})
	if status1 != syscall.StatusOk {
		log.Fatalf("Error returning status\n")
	}
}

// sendAnRPCWriteReply answers a previously queued write on its saved
// connection.
func sendAnRPCWriteReply(FDValue syscall.Fd, status string) () {
	status1 := altEthos.WriteStream(FDValue, &MyRpcWriteIReply{status})
	if status1 != syscall.StatusOk {
		log.Fatalf("Error returning status\n")
	}
}
// processQueue retries every parked request after locks were released.
// Requests that now acquire their lock are answered over their saved fd and
// marked "finished"; the rest stay queued.
// NOTE(review): "finished" entries are never removed from the queue, and a
// queued read with value == "" appears to be answered twice (the "-1" reply
// is not followed by a continue) — confirm intent before relying on this.
func processQueue() {
	for _, q := range queue {
		if (q._type == "read"){
			i := writeLocks[q.variableName]
			if len(i) == 0 {
				checkRead := checkIfTransactionContainsReadLockForVar(q.transactionID, q.variableName)
				if (checkRead == false){
					readLocks[q.variableName] = append(readLocks[q.variableName] , q.transactionID)
				}
				q._type = "finished"
				value := readFromFile(path, q.variableName)
				if(value == ""){
					sendAnRPCReadReply(q.FDValue, "-1","-1")
				}
				sendAnRPCReadReply(q.FDValue, value, "1")
			} else {
				continue
			}
		} else if (q._type == "write") {
			//if the transaction holds the write lock
			checkWrite := checkIfTransactionContainsWriteLockForVar(q.transactionID, q.variableName)
			if(checkWrite == true){
				//if the transaction already contains a write lock
				transactionCommits[q.transactionID][q.variableName] = q.variableValue
				sendAnRPCWriteReply(q.FDValue, "1")
			}
			i := writeLocks[q.variableName]
			if len(i) == 0 {
				i = readLocks[q.variableName]
				checkRead := checkIfTransactionContainsReadLockForVar(q.transactionID, q.variableName)
				if len(i) == 0 {
					//Obtain a write lock
					writeLocks[q.variableName] = append(writeLocks[q.variableName] , q.transactionID)
					transactionCommits[q.transactionID][q.variableName] = q.variableValue
					q._type = "finished"
					sendAnRPCWriteReply(q.FDValue , "1")
				}
				if (checkRead == true && (len(i) == 1)){
					//If I am the only one who contains the read lock
					upgradeReadLocktoWriteLock(q.transactionID, q.variableName)
					transactionCommits[q.transactionID][q.variableName] = q.variableValue
					q._type = "finished"
					sendAnRPCWriteReply(q.FDValue, "1")
				}
				// if (checkRead == false || (len(i) > 1)){
				// //Cant obtain a write lock
				// newWriteQ := queueStruct{"write", currentFD, id, val, value}
				// queue = append(queue, newWriteQ)
				// return &MyRpcWriteIReply{nil}
				// }
			} else {
				continue
			}
		}
	}
}
// recoverFromLog rebuilds the store after a restart: it loads every stored
// variable, overlays any surviving write-ahead log records (log wins), wipes
// the log, and rewrites the merged state back to the store.
func recoverFromLog(){
	tmap := make(map[string]string)
	//Recovering from the store
	FileNames, status := altEthos.SubFiles(path + "store/")
	if status != syscall.StatusOk {
		log.Fatalf("Error fetching files in %v\n", path)
	}
	for i := 0; i < len(FileNames); i++ {
		// NOTE(review): log.Printf(path, FileNames[i]) uses path as the
		// format string — likely meant log.Printf("%v %v", path, FileNames[i]).
		log.Printf(path, FileNames[i])
		var newString kernelTypes.String
		status = altEthos.Read(path + "store/" + FileNames[i], &newString)
		if status != syscall.StatusOk {
			log.Fatalf("Error reading box file at %v/%v\n", path, FileNames[i])
		}
		tmap[FileNames[i]] = string(newString)
	}
	//Recovering from the log
	FileNames, status = altEthos.SubFiles(path + "log/")
	if status != syscall.StatusOk {
		log.Fatalf("Error fetching files in %v\n", path)
	}
	for i := 0; i < len(FileNames); i++ {
		log.Printf(path, FileNames[i])
		var newLog Databaselog
		status = altEthos.Read(path + "log/" + FileNames[i], &newLog)
		if status != syscall.StatusOk {
			log.Fatalf("Error reading box file at %v/%v\n", path, FileNames[i])
		}
		tmap[newLog.name] = string(newLog.value)
	}
	//deleting all the files
	for i := 0; i < len(FileNames); i++ {
		altEthos.FileRemove(path + "log/" + FileNames[i])
		if status != syscall.StatusOk {
			log.Fatalf("Error deleting files in %v\n", path)
		}
	}
	//writing to the store
	for k, v := range tmap {
		writeToFile(k,string(v))
		log.Printf("Writing to file\n")
	}
}
// main advertises the "myRpc" service, creates (or recovers) the on-disk
// server/log/store directories, then runs the event loop: block for events,
// dispatch each completed event's handler, and re-arm the wait tree with
// pending plus newly posted events.
func main () {
	altEthos.LogToDirectory("test/myRpcServer")
	log.Printf("Database Server: Initializing...\n")
	listeningFd, status := altEthos.Advertise("myRpc")
	if status != syscall.StatusOk {
		log.Printf("Advertising service failed: %s\n", status)
		altEthos.Exit(status)
	}
	log.Printf("Database Server: Done advertising...\n")
	checkDirectory := altEthos.IsDirectory(path)
	if checkDirectory == false {
		// First run: build the directory tree.
		log.Println("Directory does not exist ", path, checkDirectory)
		log.Println("Creating Directory")
		log.Printf("Database Server: Creating server directory...\n")
		status = altEthos.DirectoryCreate(path, &pathTypeServer, "all")
		if status != syscall.StatusOk {
			log.Println("Directory Create Failed ", path, status)
			altEthos.Exit(status)
		}
		log.Printf("Database Server: Creating log directory...\n")
		status = altEthos.DirectoryCreate(path + "log/" , &logType, "all")
		if status != syscall.StatusOk {
			log.Println("Directory Create Failed ", path, status)
			altEthos.Exit(status)
		}
		log.Printf("Database Server: Creating store directory...\n")
		status = altEthos.DirectoryCreate(path + "store/" , &storeType, "all")
		if status != syscall.StatusOk {
			log.Println("Directory Create Failed ", path, status)
			altEthos.Exit(status)
		}
	} else {
		// Directories already exist: replay the write-ahead log.
		recoverFromLog()
	}
	var tree altEthos.EventTreeSlice
	var next []syscall.EventId
	t := MyRpc{}
	event, status := altEthos.ImportAsync(listeningFd, &t, CustomHandleImport)
	if status != syscall.StatusOk {
		log.Println("Import failed")
		return
	}
	next = append(next, event)
	tree = altEthos.WaitTreeCreateOr(next)
	for {
		tree, _ = altEthos.Block(tree)
		completed, pending := altEthos.GetTreeEvents(tree)
		for _, eventId := range completed {
			eventInfo, status := altEthos.OnComplete(eventId)
			if status != syscall.StatusOk {
				log.Println("OnComplete failed", eventInfo, status)
				return
			}
			// Remember which event is being served so handlers can map it
			// back to its connection via event_fd.
			currentEventID = eventId
			eventInfo.Do()
		}
		next = nil
		next = append(next, pending...)
		next = append(next, altEthos.RetrievePostedEvents()...)
		tree = altEthos.WaitTreeCreateOr(next)
	}
}
// CustomHandleImport handles multiple connections concurrently.
//
// When an import event occurs:
//  1. start an async RPC read on the newly imported connection, and record
//     its event -> fd mapping so replies can be routed later;
//  2. start the next import so new clients can keep connecting.
func CustomHandleImport(eventInfo altEthos.ImportEventInfo) {
	// start up the read on the imported netFd
	event, status := altEthos.ReadRpcStreamAsync(eventInfo.ReturnedFd, eventInfo.I, altEthos.HandleRpc)
	if status != syscall.StatusOk {
		log.Println("Read Failed")
		return
	}
	event_fd[event] = eventInfo.ReturnedFd
	altEthos.PostEvent(event)
	// start up a new import
	event, status = altEthos.ImportAsync(eventInfo.Fd, eventInfo.I, CustomHandleImport)
	if status != syscall.StatusOk {
		log.Println("Import failed")
		return
	}
	altEthos.PostEvent(event)
}
package camt
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document05200101 is the XML envelope for a camt.052.001.01
// (Bank-to-Customer Account Report) message.
type Document05200101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:camt.052.001.01 Document"`
	Message *BankToCustomerAccountReportV01 `xml:"BkToCstmrAcctRptV01"`
}
// AddMessage allocates the document's payload message and returns it for
// the caller to populate in place.
func (d *Document05200101) AddMessage() *BankToCustomerAccountReportV01 {
	msg := new(BankToCustomerAccountReportV01)
	d.Message = msg
	return msg
}
// Scope
// The Bank-to-Customer Account Report message is sent by the account servicer to an account owner or to a party authorised by the account owner to receive the message. It can be used to inform the account owner, or authorised party, of the entries reported to the account, and/or to provide the owner with balance information on the account at a given point in time.
// Usage
// The Bank-to-Customer Account Report message can contain reports for more than 1 account. It provides information for cash management and/or reconciliation. It can be used to:
// - report pending and booked items;
// - provide balance information.
// It can include underlying details of transactions that have been included in the entry.
// It is possible that the receiver of the message is not the account owner, but a party entitled by the account owner to receive the account information (also known as recipient).
// For a statement that is required due to local legal stipulations, the Bank-to-Customer Account Statement message should be used.
type BankToCustomerAccountReportV01 struct {
	// Common information for the message.
	GroupHeader *iso20022.GroupHeader23 `xml:"GrpHdr"`
	// Reports on a cash account (one element per reported account).
	Report []*iso20022.AccountReport9 `xml:"Rpt"`
}
// AddGroupHeader allocates the message's group header and returns it for
// the caller to populate in place.
func (b *BankToCustomerAccountReportV01) AddGroupHeader() *iso20022.GroupHeader23 {
	header := new(iso20022.GroupHeader23)
	b.GroupHeader = header
	return header
}
// AddReport appends a fresh, empty account report to the message and
// returns it for the caller to fill in.
func (b *BankToCustomerAccountReportV01) AddReport() *iso20022.AccountReport9 {
	report := new(iso20022.AccountReport9)
	b.Report = append(b.Report, report)
	return report
}
|
package caaa
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01500103 is the XML envelope for a caaa.015.001.03
// (Acceptor Rejection) message.
type Document01500103 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:caaa.015.001.03 Document"`
	Message *AcceptorRejectionV03 `xml:"AccptrRjctn"`
}
// AddMessage allocates the document's payload message and returns it for
// the caller to populate in place.
func (d *Document01500103) AddMessage() *AcceptorRejectionV03 {
	msg := new(AcceptorRejectionV03)
	d.Message = msg
	return msg
}
// The AcceptorRejection message is sent by the acquirer (or its agent) to reject a message request or advice sent by an acceptor (or its agent), to indicate that the received message could not be processed.
type AcceptorRejectionV03 struct {
	// Rejection message management information.
	Header *iso20022.Header9 `xml:"Hdr"`
	// Information related to the reject.
	Reject *iso20022.AcceptorRejection2 `xml:"Rjct"`
}
// AddHeader allocates the message header and returns it for the caller to
// populate in place.
func (a *AcceptorRejectionV03) AddHeader() *iso20022.Header9 {
	header := new(iso20022.Header9)
	a.Header = header
	return header
}
// AddReject allocates the rejection details and returns them for the
// caller to populate in place.
func (a *AcceptorRejectionV03) AddReject() *iso20022.AcceptorRejection2 {
	reject := new(iso20022.AcceptorRejection2)
	a.Reject = reject
	return reject
}
|
package main
import "fmt"
// main demonstrates the bool type: its zero value, assignment, and both
// forms of type inference.
func main() {
	// 1. A bool's zero value is false.
	var a bool
	fmt.Println("a = ", a)
	a = true
	fmt.Println("a = ", a)
	// 2. Inferred types: `var` with an initializer, then the short form.
	var b = false
	fmt.Println("b = ", b)
	c := false
	fmt.Println("c = ", c)
}
package type__test
import (
"github.com/chaitya62/noobdb/tests/helpers"
"github.com/chaitya62/noobdb/type"
"testing"
)
// TestVarchar checks that Varchar satisfies the Type interface and that its
// wire format is a 4-byte little-endian length prefix followed by the raw
// bytes (e.g. {4,0,0,0} + "`ABC" below).
func TestVarchar(t *testing.T) {
	varchar := &type_.Varchar{}
	t.Run("Implements Type interface", func(t *testing.T) {
		_, ok := interface{}(varchar).(type_.Type)
		if ok != true {
			t.Errorf("Varchar does not implement type_.Type")
		}
	})
	t.Run("Serialize", func(t *testing.T) {
		// 83 (0x53) is the byte length of the test string; the remaining
		// bytes are its characters in order.
		expected := []byte{83, 0, 0, 0, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 90, 89, 90, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 40, 41, 45, 44, 34, 39, 33, 64, 35, 36, 37, 94, 38, 42, 91, 93, 123, 125, 92, 124, 96}
		varchar.SetValue("ABCDEFGHIJKLMNOPQRSTUVWZYZabcdefghijklmnopqrstuvwxyz1234567890()-,\"'!@#$%^&*[]{}\\|`")
		got := varchar.Serialize()
		helpers.EqualSlices(t, expected, got)
	})
	t.Run("Deserialize", func(t *testing.T) {
		input := []byte{4, 0, 0, 0, 96, 65, 66, 67}
		expected := "`ABC"
		varchar.Deserialize(input)
		got := varchar.GetValue()
		if got != expected {
			t.Errorf("Got %v, Expect: %v", got, expected)
		}
	})
}
|
package sharding
import (
"tantan-demo/util"
"tantan-demo/model"
"fmt"
)
// ListRelations returns every relation row for userId from the shard that
// user maps to.
// The query error is now returned to the caller instead of panicking — the
// function already declared an error return that was unreachable.
func ListRelations(userId int) (model.Relations, error) {
	db, tableName := util.CaculateDbAndTable(userId)
	var relations model.Relations
	query := fmt.Sprintf("SELECT * FROM %s WHERE user_id = ?", tableName)
	if _, err := db.Query(&relations, query, userId); err != nil {
		return relations, err
	}
	return relations, nil
}
// UpdateRelations persists relation.State on the shard relation.UserId maps
// to and echoes the relation back.
// The exec error is now returned instead of panicking.
func UpdateRelations(relation *model.Relation) (*model.Relation, error) {
	db, tableName := util.CaculateDbAndTable(relation.UserId)
	query := fmt.Sprintf("UPDATE %s SET state = ?state WHERE id = ?id", tableName)
	if _, err := db.Exec(query, relation); err != nil {
		return relation, err
	}
	return relation, nil
}
// InsertRelations stores a new relation row on the shard relation.UserId
// maps to and echoes the relation back.
// The exec error is now returned instead of panicking.
func InsertRelations(relation *model.Relation) (*model.Relation, error) {
	db, tableName := util.CaculateDbAndTable(relation.UserId)
	query := fmt.Sprintf("INSERT INTO %s (id, user_id, other_user_id, state) VALUES (?id, ?user_id, ?other_user_id, ?state)", tableName)
	if _, err := db.Exec(query, relation); err != nil {
		return relation, err
	}
	return relation, nil
}
// QueryOneRelation looks up the relation matching relation's UserId and
// OtherUserId; when exactly one row matches, its Id and State are copied
// into relation.
// The query error is now propagated — the old code panicked and the
// declared error return was always nil.
func QueryOneRelation(relation *model.Relation) error {
	db, tableName := util.CaculateDbAndTable(relation.UserId)
	query := fmt.Sprintf("SELECT * FROM %s WHERE user_id = ?user_id AND other_user_id = ?other_user_id", tableName)
	var relations model.Relations
	if _, err := db.Query(&relations, query, relation); err != nil {
		return err
	}
	if len(relations) == 1 {
		relation.Id = relations[0].Id
		relation.State = relations[0].State
	}
	return nil
}
|
package app
import (
"fmt"
"log"
"net"
"github.com/piotrpersona/saga/broker"
"github.com/piotrpersona/saga/config"
"github.com/piotrpersona/saga/order"
"github.com/piotrpersona/saga/service"
"google.golang.org/grpc"
)
// createBroker constructs a broker with the given name from the supplied
// configuration.
func createBroker(cfg config.Config, brokerName string) (broker.Broker, error) {
	// cfg avoids shadowing the imported config package.
	return broker.New(brokerName, cfg)
}
// runOrderService binds the order gRPC service to the configured host:port
// and serves until the listener fails. It does not return on success.
func runOrderService(config config.Config, broker broker.Broker) {
	grpcListenAddress := fmt.Sprintf("%s:%d", config.OrdersConfig.Host, config.OrdersConfig.Port)
	lis, err := net.Listen("tcp", grpcListenAddress)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	log.Printf("gRPC listening on: %s\n", grpcListenAddress)
	var opts []grpc.ServerOption
	grpcServer := grpc.NewServer(opts...)
	order.RegisterOrderServer(grpcServer, service.NewOrderService(broker))
	// Previously Serve's error was silently dropped, hiding listener
	// failures from the operator.
	if err := grpcServer.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
|
package RateLimiter
// LimiterGroupOr combines several limiters with OR semantics: a tally is
// accepted when any member limiter accepts it.
type LimiterGroupOr struct {
	limiters []ILimiter
}

// NewLimiterGroupOr builds an OR-group over the given limiters.
func NewLimiterGroupOr(limiters ...ILimiter) ILimiter {
	return &LimiterGroupOr{limiters: limiters}
}
// AddTally records a tally on the first member limiter that accepts one;
// when all are exhausted it forces the tally onto the first member so the
// overage is still counted. It reports whether the group has capacity left.
func (l LimiterGroupOr) AddTally() bool {
	for _, lim := range l.limiters {
		if lim.TryAddTally() {
			return l.HasRemainingTally()
		}
	}
	// Guard the forced-add: the old code indexed l.limiters[0]
	// unconditionally and panicked on an empty group.
	if len(l.limiters) > 0 {
		l.limiters[0].AddTally()
	}
	return l.HasRemainingTally()
}
// TryAddTally attempts to record a tally on the first member limiter with
// spare capacity. It reports whether any member accepted the tally.
func (l LimiterGroupOr) TryAddTally() bool {
	// Fast path: nothing to do when the whole group is exhausted.
	if !l.HasRemainingTally() {
		return false
	}
	for _, member := range l.limiters {
		if member.TryAddTally() {
			return true
		}
	}
	return false
}
// HasRemainingTally reports whether at least one member limiter still has
// capacity left.
func (l LimiterGroupOr) HasRemainingTally() bool {
	for _, member := range l.limiters {
		if member.HasRemainingTally() {
			return true
		}
	}
	return false
}
// RemainingTally returns the summed remaining capacity across all members.
func (l LimiterGroupOr) RemainingTally() int {
	total := 0
	for _, member := range l.limiters {
		total += member.RemainingTally()
	}
	return total
}
// CleanUp delegates cleanup to every member limiter.
func (l LimiterGroupOr) CleanUp() {
	for _, ls := range l.limiters {
		ls.CleanUp()
	}
}

// GetGroups returns the member limiters of this group.
func (l LimiterGroupOr) GetGroups() []ILimiter {
	return l.limiters
}
// RemainingTallyAsGroup returns each member's remaining capacity, one entry
// per member, in member order.
func (l LimiterGroupOr) RemainingTallyAsGroup() []int {
	var remaining []int
	for _, member := range l.limiters {
		remaining = append(remaining, member.RemainingTally())
	}
	return remaining
}
// GetStorableData returns the group's persistable state via the shared
// common helper.
func (l *LimiterGroupOr) GetStorableData() interface{} {
	return commonGetStorable(l)
}

// SetStorableData restores state previously produced by GetStorableData;
// it reports whether the data was accepted.
func (l *LimiterGroupOr) SetStorableData(i interface{}) bool {
	return commonSetStorable(l, i)
}

// GetDebug returns debug information for this group, tagged with its "OR"
// combination semantics.
func (l *LimiterGroupOr) GetDebug() interface{} {
	return commonGetDebug(l, "OR")
}
|
// Copyright 2018 Kuei-chun Chen. All rights reserved.
package mdb
import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"time"
"github.com/simagix/gox"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// IndexStats holder indexes reader struct
type IndexStats struct {
	Databases []Database  `bson:"databases"` // collected per-database index details
	Logger    *gox.Logger `bson:"keyhole"`
	fastMode  bool   // when true, skips the config.collections shard-key lookup
	filename  string // output file name (colons replaced by underscores)
	nocolor   bool   // disable ANSI color codes in printed output
	verbose   bool   // enables debug-level logging
	version   string // tool version, passed to the logger
}
// Accesses stores index accesses
type Accesses struct {
	Ops   int       `json:"ops" bson:"ops"`     // number of operations that used the index
	Since time.Time `json:"since" bson:"since"` // start of the measurement window
}

// IndexUsage stores index accesses
type IndexUsage struct {
	Accesses Accesses `json:"accesses" bson:"accesses"`
	Host     string   `json:"host" bson:"host"`
	Name     string   `json:"name" bson:"name"`
	Shard    string   `json:"shard" bson:"shard"`
}
// Index stores indexes stats
type Index struct {
	Background              bool   `json:"background" bson:"background"`
	Collation               bson.D `json:"collation" bson:"collation"`
	ExpireAfterSeconds      int32  `json:"expireAfterSeconds" bson:"expireAfterSeconds,truncate,omitempty"`
	Key                     bson.D `json:"key" bson:"key"`
	Name                    string `json:"name" bson:"name,truncate"`
	PartialFilterExpression bson.D `json:"partialFilterExpression" bson:"partialFilterExpression"`
	Sparse                  bool   `json:"sparse" bson:"sparse"`
	Unique                  bool   `json:"unique" bson:"unique"`
	Version                 int32  `json:"v" bson:"v,truncate"`
	Weights                 bson.D `json:"weights" bson:"weights"`

	// Derived fields, computed by GetIndexesFromCollection:
	EffectiveKey string       `json:"effectiveKey" bson:"effectiveKey"` // KeyString with braces stripped and "-1" normalized to "1"
	Fields       []string     `json:"fields" bson:"fields"`             // key field names, in key order
	IsDupped     bool         `json:"isDupped" bson:"isDupped"`         // true when another index makes this one redundant
	IsShardKey   bool         `json:"isShardkey" bson:"isShardkey"`     // true when the key matches the shard key in config.collections
	KeyString    string       `json:"keyString" bson:"keyString"`       // printable "{ field: dir, ... }" form of Key
	TotalOps     int          `json:"totalOps" bson:"totalOps"`         // sum of Accesses.Ops over all usage entries
	Usage        []IndexUsage `json:"usage" bson:"usage"`               // per-host/shard usage from $indexStats
}

const (
	indexExt = "-index.bson.gz" // suffix for index stats output files
)
// NewIndexStats establish seeding parameters
func NewIndexStats(version string) *IndexStats {
	hostname, _ := os.Hostname()
	stats := IndexStats{
		version:   version,
		Logger:    gox.GetLogger(version),
		filename:  hostname + indexExt,
		Databases: []Database{},
	}
	return &stats
}
// SetFastMode sets fastMode mode
func (ix *IndexStats) SetFastMode(fastMode bool) {
	ix.fastMode = fastMode
}

// SetFilename sets output file name
func (ix *IndexStats) SetFilename(filename string) {
	// colons (e.g. from a host:port-based name) are replaced so the name is
	// filesystem-friendly
	ix.filename = strings.Replace(filename, ":", "_", -1)
}
// SetClusterDetailsFromFile File sets cluster details from a file
func (ix *IndexStats) SetClusterDetailsFromFile(filename string) error {
	// Only previously written keyhole outputs are accepted.
	if !strings.HasSuffix(filename, indexExt) && !strings.HasSuffix(filename, "-stats.bson.gz") {
		return errors.New("unsupported file type")
	}
	var data []byte
	var err error
	var fd *bufio.Reader
	// gox.NewFileReader transparently handles the gzipped input.
	if fd, err = gox.NewFileReader(filename); err != nil {
		return err
	}
	if data, err = ioutil.ReadAll(fd); err != nil {
		return err
	}
	// NOTE(review): &ix is a **IndexStats here; the bson driver appears to
	// follow the double pointer, but passing ix directly would be clearer —
	// confirm before changing.
	return bson.Unmarshal(data, &ix)
}
// SetNoColor set nocolor flag
func (ix *IndexStats) SetNoColor(nocolor bool) {
	ix.nocolor = nocolor
}

// SetVerbose sets verbose level
func (ix *IndexStats) SetVerbose(verbose bool) {
	ix.verbose = verbose
	// verbose also raises the logger to debug level when a logger is present
	if verbose && ix.Logger != nil {
		ix.Logger.SetLoggerLevel(gox.Debug)
	}
}
// GetIndexes list all indexes of collections of databases
func (ix *IndexStats) GetIndexes(client *mongo.Client) ([]Database, error) {
	ix.Databases = []Database{}
	var databases []Database
	dbNames, err := GetDatabaseNames(client)
	if err != nil {
		return databases, err
	}
	checked := 0
	for _, name := range dbNames {
		// internal databases are never scanned
		switch name {
		case "admin", "config", "local":
			ix.Logger.Debug("skip ", name)
			continue
		}
		checked++
		ix.Logger.Debug("checking ", name)
		collections, cerr := ix.GetIndexesFromDB(client, name)
		if cerr != nil {
			return ix.Databases, cerr
		}
		ix.Databases = append(ix.Databases, Database{Name: name, Collections: collections})
	}
	if checked == 0 && ix.verbose {
		ix.Logger.Info("No database is available")
	}
	ix.Logger.Info(`GetIndexes ends`)
	return ix.Databases, nil
}
// GetIndexesFromDB list all indexes of collections of a database
func (ix *IndexStats) GetIndexesFromDB(client *mongo.Client, db string) ([]Collection, error) {
	var err error
	var cur *mongo.Cursor
	var ctx = context.Background()
	var collections []Collection
	ix.Logger.Debugf(`GetIndexesFromDB(%v)`, db)
	if cur, err = client.Database(db).ListCollections(ctx, bson.M{}); err != nil {
		return collections, err
	}
	defer cur.Close(ctx)
	collectionNames := []string{}
	for cur.Next(ctx) {
		var elem struct {
			Name string `bson:"name"`
			Type string `bson:"type"`
		}
		// entries that fail to decode are silently skipped
		if err = cur.Decode(&elem); err != nil {
			continue
		}
		// ignore system collections and non-collection namespaces (e.g. views)
		if strings.HasPrefix(elem.Name, "system.") || (elem.Type != "" && elem.Type != "collection") {
			ix.Logger.Debug("skip ", elem.Name)
			continue
		}
		collectionNames = append(collectionNames, elem.Name)
	}
	// sort for deterministic output order
	sort.Strings(collectionNames)
	for _, v := range collectionNames {
		var collection = Collection{NS: db + "." + v, Name: v}
		if collection.Indexes, err = ix.GetIndexesFromCollection(client, client.Database(db).Collection(v)); err != nil {
			return collections, err
		}
		collections = append(collections, collection)
	}
	return collections, nil
}
// GetIndexesFromCollection gets indexes from a collection.
// It merges listIndexes output with $indexStats usage data, marks shard keys
// (unless fastMode), computes duplicate-index flags, and warns about indexes
// that appear in $indexStats but not in listIndexes (shard inconsistency).
func (ix *IndexStats) GetIndexesFromCollection(client *mongo.Client, collection *mongo.Collection) ([]Index, error) {
	var err error
	var ctx = context.Background()
	var pipeline = MongoPipeline(`{"$indexStats": {}}`)
	var list []Index
	var icur *mongo.Cursor
	var scur *mongo.Cursor
	db := collection.Database().Name()
	ix.Logger.Debugf(`GetIndexesFromCollection from %v.%v`, db, collection.Name())
	if strings.HasPrefix(collection.Name(), "system.") {
		ix.Logger.Debug("skip ", collection.Name())
		return list, nil
	}
	// Collect per-host/shard usage counters; aggregation failures are logged
	// but not fatal (usage data is optional).
	var indexStats = []IndexUsage{}
	if scur, err = collection.Aggregate(ctx, pipeline); err != nil {
		ix.Logger.Error(err)
	} else {
		for scur.Next(ctx) {
			var result IndexUsage
			if err = scur.Decode(&result); err != nil {
				ix.Logger.Error(err)
				continue
			}
			indexStats = append(indexStats, result)
		}
		scur.Close(ctx)
	}
	cmd := bson.D{{Key: "listIndexes", Value: collection.Name()}}
	if icur, err = client.Database(db).RunCommandCursor(ctx, cmd); err != nil {
		ix.Logger.Error(err)
		return list, err
	}
	defer icur.Close(ctx)
	// Tracks which indexStats entries were matched to a listIndexes index.
	indexesFound := map[int]bool{}
	for icur.Next(ctx) {
		// -1 marks "no TTL"; 0 is a legal expireAfterSeconds value.
		o := Index{ExpireAfterSeconds: -1}
		if err = icur.Decode(&o); err != nil {
			ix.Logger.Error(err)
			continue
		}
		// Build the printable "{ field: dir, ... }" form and the field list.
		var strbuf bytes.Buffer
		fields := []string{}
		for n, value := range o.Key {
			fields = append(fields, value.Key)
			if n == 0 {
				strbuf.WriteString("{ ")
			}
			strbuf.WriteString(value.Key + ": " + fmt.Sprint(value.Value))
			if n == len(o.Key)-1 {
				strbuf.WriteString(" }")
			} else {
				strbuf.WriteString(", ")
			}
		}
		o.Fields = fields
		o.KeyString = strbuf.String()
		// Check shard keys
		var v map[string]interface{}
		ns := collection.Database().Name() + "." + collection.Name()
		ix.Logger.Debug("GetIndexesFromCollection ", ns, o.KeyString)
		// In fastMode the (potentially slow) config.collections lookup is skipped.
		if !ix.fastMode {
			if err = client.Database("config").Collection("collections").FindOne(ctx, bson.M{"_id": ns, "key": o.Key}).Decode(&v); err == nil {
				o.IsShardKey = true
			}
		}
		// Strip the surrounding "{ " / " }" and normalize direction to 1.
		o.EffectiveKey = strings.Replace(o.KeyString[2:len(o.KeyString)-2], ": -1", ": 1", -1)
		o.Usage = []IndexUsage{}
		for i, result := range indexStats {
			if result.Name == o.Name {
				indexesFound[i] = true
				o.TotalOps += result.Accesses.Ops
				o.Usage = append(o.Usage, result)
			}
		}
		list = append(list, o)
	}
	sort.Slice(list, func(i, j int) bool { return (list[i].EffectiveKey < list[j].EffectiveKey) })
	// The _id index and shard keys are never flagged as duplicates.
	for i, o := range list {
		if o.KeyString != "{ _id: 1 }" && !o.IsShardKey {
			list[i].IsDupped = checkIfDupped(o, list)
		}
	}
	// Any unmatched $indexStats entry indicates an index that exists on some
	// shard but not in listIndexes output — warn about the inconsistency.
	if len(indexesFound) != len(indexStats) {
		for i := 0; i < len(indexStats); i++ {
			if !indexesFound[i] {
				ns := collection.Database().Name() + "." + collection.Name()
				ix.Logger.Warnf(`inconsistent index '%v' of namespace '%v' on shard '%v'`,
					indexStats[i].Name, ns, indexStats[i].Shard)
			}
		}
	}
	return list, nil
}
// checkIfDupped reports whether doc is redundant given the other indexes in
// list: another non-dupped index shares the same leading field, has at least
// as many fields, and covers all of doc's remaining fields. Indexes whose key
// contains "2dsphere" are never considered duplicates.
func checkIfDupped(doc Index, list []Index) bool {
	if strings.Index(doc.KeyString, "2dsphere") > 0 {
		return false
	}
	for _, candidate := range list {
		if strings.Index(candidate.KeyString, "2dsphere") > 0 {
			continue
		}
		// Skip candidates that cannot subsume doc: already marked dupped,
		// different leading field, identical key (doc itself), or fewer fields.
		if candidate.IsDupped || doc.Fields[0] != candidate.Fields[0] ||
			doc.KeyString == candidate.KeyString || len(candidate.Fields) < len(doc.Fields) {
			continue
		}
		// Count how many of doc's non-leading fields appear anywhere in the
		// candidate's non-leading fields.
		matched := 0
		for i, fld := range doc.Fields {
			if i == 0 {
				continue
			}
			for j, field := range candidate.Fields {
				if j > 0 && fld == field {
					matched++
					break
				}
			}
		}
		if matched == len(doc.Fields)-1 {
			return true
		}
	}
	return false
}
// OutputBSON writes index stats bson to a file under outdir, choosing a
// non-clashing name, and returns the output file name with the marshaled data.
func (ix *IndexStats) OutputBSON() (string, []byte, error) {
	var err error
	var bsond bson.D
	var data []byte
	var ofile string
	if data, err = bson.Marshal(ix); err != nil {
		return ofile, data, err
	}
	// Round-trip through bson.D; report the unmarshal error instead of
	// ignoring it as before.
	if err = bson.Unmarshal(data, &bsond); err != nil {
		return ofile, data, err
	}
	if data, err = bson.Marshal(bsond); err != nil {
		return ofile, data, err
	}
	os.Mkdir(outdir, 0755) // best effort; the directory may already exist
	// TrimSuffix avoids the slice panic the old strings.Index-based code hit
	// when the filename did not contain indexExt (index would be -1).
	basename := strings.TrimSuffix(ix.filename, indexExt)
	ofile = fmt.Sprintf(`%v/%v%v`, outdir, basename, indexExt)
	// Append .1, .2, ... until an unused name is found.
	i := 1
	for DoesFileExist(ofile) {
		ofile = fmt.Sprintf(`%v/%v.%d%v`, outdir, basename, i, indexExt)
		i++
	}
	if err = gox.OutputGzipped(data, ofile); err == nil {
		fmt.Println("Index stats is written to", ofile)
	}
	return ofile, data, err
}
// OutputJSON writes json data to a file under outdir, derived from the
// configured filename with the bson.gz suffix swapped for json.
func (ix *IndexStats) OutputJSON() error {
	var err error
	var data []byte
	if data, err = bson.MarshalExtJSON(ix, false, false); err != nil {
		return err
	}
	os.Mkdir(outdir, 0755) // best effort; the directory may already exist
	ofile := fmt.Sprintf("%v/%v", outdir, strings.ReplaceAll(filepath.Base(ix.filename), "bson.gz", "json"))
	// Report the write failure instead of silently dropping it as before.
	if err = ioutil.WriteFile(ofile, data, 0644); err != nil {
		return err
	}
	fmt.Println("json data written to", ofile)
	return nil
}
// Print prints indexes of all collected databases to stdout.
func (ix *IndexStats) Print() {
	ix.PrintIndexesOf(ix.Databases)
}
// PrintIndexesOf prints indexes, one section per namespace. Markers:
//
//	(none)  _id or ordinary used index
//	*       shard key
//	x       duplicate of another index (red unless nocolor)
//	?       unused (zero ops) non-TTL index (blue unless nocolor)
func (ix *IndexStats) PrintIndexesOf(databases []Database) {
	for _, db := range databases {
		for _, coll := range db.Collections {
			var buffer bytes.Buffer
			ns := coll.NS
			buffer.WriteString("\n")
			buffer.WriteString(ns)
			buffer.WriteString(":\n")
			for _, o := range coll.Indexes {
				font := CodeDefault
				tailCode := CodeDefault
				// suppress ANSI escapes entirely in nocolor mode
				if ix.nocolor {
					font = ""
					tailCode = ""
				}
				if o.KeyString == "{ _id: 1 }" {
					buffer.WriteString(fmt.Sprintf("%v %v%v", font, o.KeyString, tailCode))
				} else if o.IsShardKey {
					buffer.WriteString(fmt.Sprintf("%v* %v%v", font, o.KeyString, tailCode))
				} else if o.IsDupped {
					if !ix.nocolor {
						font = CodeRed
					}
					buffer.WriteString(fmt.Sprintf("%vx %v%v", font, o.KeyString, tailCode))
				} else if o.TotalOps == 0 && o.ExpireAfterSeconds < 0 {
					if !ix.nocolor {
						font = CodeBlue
					}
					buffer.WriteString(fmt.Sprintf("%v? %v%v", font, o.KeyString, tailCode))
				} else {
					buffer.WriteString(fmt.Sprintf("  %v", o.KeyString))
				}
				// per-host usage lines beneath each index
				for _, u := range o.Usage {
					buffer.Write([]byte("\n\thost: " + u.Host + ", ops: " + fmt.Sprintf("%v", u.Accesses.Ops) + ", since: " + fmt.Sprintf("%v", u.Accesses.Since)))
				}
				buffer.WriteString("\n")
			}
			fmt.Println(buffer.String())
		}
	}
}
// IndexNS defines from and to namespaces for copying indexes between
// (possibly renamed) namespaces.
type IndexNS struct {
	From string // source namespace ("db.collection")
	To   string // destination namespace ("db.collection")
}
// CreateIndexes creates indexes on the target cluster for the given
// namespaces (or all collected namespaces when none are given), always
// dropping existing indexes first.
func (ix *IndexStats) CreateIndexes(client *mongo.Client, namespaces ...[]string) error {
	nsList := []IndexNS{}
	if len(namespaces) > 0 {
		for _, from := range namespaces[0] {
			nsList = append(nsList, IndexNS{From: from, To: from})
		}
	}
	return ix.CopyIndexesWithDest(client, nsList, true)
}
// CopyIndexes copies indexes for the given namespaces (or all collected
// namespaces when none are given); isDrop controls whether existing indexes
// are dropped first.
func (ix *IndexStats) CopyIndexes(client *mongo.Client, isDrop bool, namespaces ...[]string) error {
	nsList := []IndexNS{}
	if len(namespaces) > 0 {
		for _, from := range namespaces[0] {
			nsList = append(nsList, IndexNS{From: from, To: from})
		}
	}
	return ix.CopyIndexesWithDest(client, nsList, isDrop)
}
// CopyIndexesWithDest copies indexes for the collected databases onto the
// target cluster, optionally renaming namespaces (IndexNS.From -> .To) and
// optionally dropping existing indexes first.
func (ix *IndexStats) CopyIndexesWithDest(client *mongo.Client, namespaces []IndexNS, isDrop bool) error {
	var ctx = context.Background()
	var err error
	// namespaceMap filters which source namespaces to process; indexMap
	// records the destination rename for each source namespace.
	namespaceMap := map[string]bool{}
	indexMap := map[string]string{}
	if len(namespaces) > 0 {
		for _, ns := range namespaces {
			namespaceMap[ns.From] = true
			indexMap[ns.From] = ns.To
		}
	}
	for _, db := range ix.Databases {
		for _, coll := range db.Collections {
			dbName := db.Name
			collName := coll.Name
			ns := dbName + "." + collName
			if SkipNamespace(ns, namespaceMap) {
				continue
			}
			// apply the destination rename, if any
			if indexMap[ns] != "" {
				dbName, collName = SplitNamespace(indexMap[ns])
				ns = dbName + "." + collName
			}
			// drop all existing indexes on the destination first (best effort)
			if isDrop {
				var doc bson.M
				cmd := bson.D{{Key: "dropIndexes", Value: collName}, {Key: "index", Value: "*"}}
				client.Database(dbName).RunCommand(ctx, cmd).Decode(&doc)
			}
			collection := client.Database(dbName).Collection(collName)
			indexes := []mongo.IndexModel{}
			for _, o := range coll.Indexes {
				// translate the recorded Index into driver index options
				opt := options.Index()
				// opt.SetVersion(o.Version)
				opt.SetName(o.Name)
				if o.Background {
					opt.SetBackground(o.Background)
				}
				// -1 is the "no TTL" sentinel set at decode time
				if o.ExpireAfterSeconds >= 0 {
					opt.SetExpireAfterSeconds(o.ExpireAfterSeconds)
				}
				if o.Unique {
					opt.SetUnique(o.Unique)
				}
				if o.Sparse {
					opt.SetSparse(o.Sparse)
				}
				var collation *options.Collation
				if o.Collation != nil {
					// convert the stored bson.D collation into the driver type
					data, err := bson.Marshal(o.Collation)
					if err != nil {
						return err
					}
					bson.Unmarshal(data, &collation)
					opt.SetCollation(collation)
				}
				if o.PartialFilterExpression != nil {
					opt.SetPartialFilterExpression(o.PartialFilterExpression)
				}
				if o.Weights != nil {
					opt.SetWeights(o.Weights)
				}
				if ix.verbose {
					ix.Logger.Info(fmt.Sprintf(`creating index %v on %v `, o.KeyString, ns))
				}
				indexes = append(indexes, mongo.IndexModel{Keys: o.Key, Options: opt})
				// When the _id index is present we may need to create the
				// destination collection itself (with matching collation)
				// before building indexes on it.
				if o.Key.Map()["_id"] != nil {
					collNames, _ := client.Database(dbName).ListCollectionNames(ctx, bson.D{})
					var exists bool
					for _, name := range collNames {
						if name == coll.Name {
							exists = true
							break
						}
					}
					if !exists {
						collOpts := options.CreateCollection()
						if collation != nil {
							collOpts.SetCollation(collation)
						}
						if err = client.Database(dbName).CreateCollection(ctx, collName, collOpts); err != nil {
							return err
						}
						if _, err = collection.Indexes().CreateOne(ctx, mongo.IndexModel{Keys: o.Key, Options: opt}); err != nil {
							return err
						}
					}
				}
			}
			if _, err = collection.Indexes().CreateMany(ctx, indexes); err != nil {
				return err
			}
		}
	}
	return err
}
// GetDatabaseNames gets all database names
func GetDatabaseNames(client *mongo.Client) ([]string, error) {
	result, err := client.ListDatabases(context.Background(), bson.M{})
	if err != nil {
		return nil, err
	}
	var names []string
	for _, db := range result.Databases {
		names = append(names, db.Name)
	}
	return names, nil
}
|
package main
import (
"fmt"
"pribadi/reflect/library"
"reflect"
)
/*
Reflect
teknik untuk inspeksi sebuah variabel,
mengambil informasi dari variabel tersebut atau bahkan memanipulasinya.
Cakupan informasi yang bisa didapatkan lewat reflection sangat luas,
seperti melihat struktur variabel, tipe, nilai pointer, dan banyak lagi.
reflect.ValueOf() => mengembalikan objek dalam tipe reflect.Value , yang berisikan informasi
yang berhubungan dengan nilai pada variabel yang dicari
reflect.TypeOf() => mengembalikan objek dalam tipe reflect.Type . Objek tersebut berisikan informasi
yang berhubungan dengan tipe data variabel yang dicari
Berikut adalah konstanta tipe data dan method yang bisa digunakan dalam refleksi di Golang:
Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr,
Float32, Float64, Complex64, Complex128, Array, Chan, Func, Interface, Map, Ptr, Slice, String,
Struct, UnsafePointer
*/
// main demonstrates Go's reflect package: inspecting a variable's type,
// kind, and value, and invoking a struct method dynamically by name.
func main() {
	number := 23
	reflectValue := reflect.ValueOf(number)
	var tipeVariableNumber = reflectValue.Type() // the variable's type descriptor
	fmt.Println("Tipe Variable :", tipeVariableNumber)
	// Kind guards the Int() accessor, which panics on non-integer kinds.
	if reflectValue.Kind() == reflect.Int {
		fmt.Println("Nilai Variable :", reflectValue.Int()) // reflectValue.Int() returns the underlying int value
	}
	/*
		Accessing the value as an interface{}
	*/
	fmt.Println()
	fmt.Println(reflectValue.Interface())
	fmt.Println(reflectValue.Interface().(int))
	var s1 = &library.Student{Name: "Rahmatulah Sidik", Age: 27, Class: 12}
	s1.GetPropertyInfo()
	s2 := library.Student{}
	// calling the method directly
	s2.SetName("John Druce")
	// reflect.ValueOf is taken on the pointer s1; presumably SetName has a
	// pointer receiver so it must be looked up on the pointer value — confirm
	// against library.Student's definition.
	var reflectValue2 = reflect.ValueOf(s1)
	// calling the method dynamically via reflection
	var methodSetName = reflectValue2.MethodByName("SetName")
	methodSetName.Call([]reflect.Value{
		reflect.ValueOf("John Wick"),
	})
	fmt.Println(s1.Name)
	fmt.Println(s2.Name)
}
|
// Copyright (c) 2014-2015 José Carlos Nieto, https://menteslibres.net/xiam
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package cache
import (
"math/rand"
"sync"
"time"
)
const (
maxCachedObjects = 1024 * 8
mapCleanDivisor = 1000
mapCleanProbability = 1
)
// init seeds the global math/rand source so the probabilistic cache cleanup
// in Write differs between process runs.
// NOTE(review): rand.Seed is deprecated as of Go 1.20 (the global source is
// auto-seeded); harmless to keep for older toolchains.
func init() {
	rand.Seed(time.Now().UnixNano())
}
// Cache holds a map of volatile key -> values.
type Cache struct {
	cache map[string]interface{} // hash -> cached value
	mu    sync.RWMutex           // guards cache
}
// NewCache initializes a new caching space.
func NewCache() (c *Cache) {
	c = &Cache{}
	c.cache = make(map[string]interface{})
	return c
}
// Read attempts to retrieve a cached string value from memory. It returns
// an empty string and false when the key is absent or the stored value is
// not a string.
func (c *Cache) Read(ob Hashable) (string, bool) {
	c.mu.RLock()
	data, ok := c.cache[ob.Hash()]
	c.mu.RUnlock()
	if !ok {
		return "", false
	}
	s, isString := data.(string)
	if !isString {
		return "", false
	}
	return s, true
}
// ReadRaw retrieves a cached value without any type assertion; the boolean
// reports whether the key was present.
func (c *Cache) ReadRaw(ob Hashable) (interface{}, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	value, found := c.cache[ob.Hash()]
	return value, found
}
// Write stores a value in memory. If the value already exists its overwritten.
func (c *Cache) Write(ob Hashable, v interface{}) {
	c.mu.Lock()
	l := len(c.cache)
	c.mu.Unlock()
	// Bound memory: wipe the whole map once it grows past maxCachedObjects,
	// and also wipe probabilistically (~mapCleanProbability+1 in
	// mapCleanDivisor writes) so stale entries eventually disappear even
	// below the cap.
	if maxCachedObjects > 0 && maxCachedObjects < l {
		c.Clear()
	} else if rand.Intn(mapCleanDivisor) <= mapCleanProbability {
		c.Clear()
	}
	// NOTE(review): the size check and the insert are separate critical
	// sections, so concurrent writers can briefly exceed the cap — this looks
	// like an intentional best-effort bound; confirm before tightening.
	c.mu.Lock()
	c.cache[ob.Hash()] = v
	c.mu.Unlock()
}
// Clear generates a new memory space, leaving the old map unreferenced so
// the garbage collector can reclaim it.
func (c *Cache) Clear() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.cache = make(map[string]interface{})
}
|
package controller
import (
"context"
"image-clone-controller/pkg/utility"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"testing"
)
// TestReconcileDeployment_Reconcile verifies that reconciling a Deployment
// rewrites its container image to point at the backup registry.
func TestReconcileDeployment_Reconcile(t *testing.T) {
	image := "image:latest"
	dep := &appsv1.Deployment{
		ObjectMeta: v12.ObjectMeta{
			Name:      "dep",
			Namespace: "default",
		},
		Spec: appsv1.DeploymentSpec{
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{Containers: []v1.Container{{Image: "docker.io/repo/" + image}}},
			},
		},
	}
	obj := []runtime.Object{dep}
	cl := fake.NewClientBuilder().WithRuntimeObjects(obj...).Build()
	r := ReconcileDeployment{cl}
	req := reconcile.Request{
		NamespacedName: types.NamespacedName{
			Name:      "dep",
			Namespace: "default",
		},
	}
	// Stub out the image-copy side effect and the target registry; restore
	// both afterwards so other tests are unaffected.
	old := utility.CacheFunc
	defer func() { utility.CacheFunc = old }()
	utility.CacheFunc = func(src string, dst string) error {
		return nil
	}
	temp := utility.NewRegistry
	defer func() { utility.NewRegistry = temp }()
	utility.NewRegistry = "quay.io/repo"
	_, err := r.Reconcile(context.TODO(), req)
	if err != nil {
		t.Fatalf("reconcile: (%v)", err)
	}
	dep1 := &appsv1.Deployment{}
	err = r.Client.Get(context.TODO(), req.NamespacedName, dep1)
	if err != nil {
		t.Fatalf("get deployment: (%v)", err)
	}
	actualImage := dep1.Spec.Template.Spec.Containers[0].Image
	// Message fixed: "acuaul" typo, and the expected value now shows the
	// full expected image, not just the registry prefix.
	if actualImage != utility.NewRegistry+"/"+image {
		t.Errorf("Expected image %q is not the actual image %q", utility.NewRegistry+"/"+image, actualImage)
	}
}
// TestReconcileDaemonSet_Reconcile verifies that reconciling a DaemonSet
// rewrites its container image to point at the backup registry.
func TestReconcileDaemonSet_Reconcile(t *testing.T) {
	image := "image:latest"
	daemon := &appsv1.DaemonSet{
		ObjectMeta: v12.ObjectMeta{
			Name:      "daemon",
			Namespace: "default",
		},
		Spec: appsv1.DaemonSetSpec{
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{Containers: []v1.Container{{Image: "docker.io/repo/" + image}}},
			},
		},
	}
	obj := []runtime.Object{daemon}
	cl := fake.NewClientBuilder().WithRuntimeObjects(obj...).Build()
	r := ReconcileDaemonSet{cl}
	req := reconcile.Request{
		NamespacedName: types.NamespacedName{
			Name:      "daemon",
			Namespace: "default",
		},
	}
	// Stub out the image-copy side effect and the target registry; restore
	// both afterwards so other tests are unaffected.
	old := utility.CacheFunc
	defer func() { utility.CacheFunc = old }()
	utility.CacheFunc = func(src string, dst string) error {
		return nil
	}
	temp := utility.NewRegistry
	defer func() { utility.NewRegistry = temp }()
	utility.NewRegistry = "quay.io/repo"
	_, err := r.Reconcile(context.TODO(), req)
	if err != nil {
		t.Fatalf("reconcile: (%v)", err)
	}
	daemon1 := &appsv1.DaemonSet{}
	err = r.Client.Get(context.TODO(), req.NamespacedName, daemon1)
	if err != nil {
		// Message fixed: this fetches the DaemonSet, not a Deployment.
		t.Fatalf("get daemonset: (%v)", err)
	}
	actualImage := daemon1.Spec.Template.Spec.Containers[0].Image
	// Message fixed: "acuaul" typo, and the expected value now shows the
	// full expected image, not just the registry prefix.
	if actualImage != utility.NewRegistry+"/"+image {
		t.Errorf("Expected image %q is not the actual image %q", utility.NewRegistry+"/"+image, actualImage)
	}
}
|
// Copyright 2017 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package notify
import (
"context"
"testing"
"time"
"go.chromium.org/gae/impl/memory"
"go.chromium.org/gae/service/datastore"
"go.chromium.org/gae/service/mail"
"go.chromium.org/gae/service/user"
"go.chromium.org/luci/appengine/tq"
"go.chromium.org/luci/appengine/tq/tqtesting"
"go.chromium.org/luci/buildbucket/proto"
"go.chromium.org/luci/common/clock"
"go.chromium.org/luci/common/clock/testclock"
"go.chromium.org/luci/common/data/stringset"
notifyConfig "go.chromium.org/luci/luci_notify/api/config"
"go.chromium.org/luci/luci_notify/config"
"go.chromium.org/luci/luci_notify/internal"
"go.chromium.org/luci/luci_notify/testutil"
. "github.com/smartystreets/goconvey/convey"
)
// extractNotifiers builds datastore-backed Notifier entities (parented under
// the project's key) for every notifier declared in the project config.
func extractNotifiers(c context.Context, projectID string, cfg *notifyConfig.ProjectConfig) []*config.Notifier {
	parentKey := datastore.MakeKey(c, "Project", projectID)
	var result []*config.Notifier
	for _, notifier := range cfg.Notifiers {
		result = append(result, config.NewNotifier(parentKey, notifier))
	}
	return result
}
// notifyDummyBuild constructs a test Build with the given status and the
// e-mail addresses to notify.
func notifyDummyBuild(status buildbucketpb.Status, notifyEmails ...string) *Build {
	b := &Build{}
	b.Build = *testutil.TestBuild("test", "hello", "test-builder", status)
	b.EmailNotify = notifyEmails
	return b
}
// verifyStringSliceResembles asserts that two string slices contain the same
// elements regardless of order.
func verifyStringSliceResembles(actual, expect []string) {
	// Put the strings into sets so prevent flakiness.
	actualSet := stringset.NewFromSlice(actual...)
	expectSet := stringset.NewFromSlice(expect...)
	So(actualSet, ShouldResemble, expectSet)
}
// createMockTaskQueue wires up a tq.Dispatcher with a testable task queue
// whose queues are already created.
func createMockTaskQueue(c context.Context) (*tq.Dispatcher, tqtesting.Testable) {
	d := &tq.Dispatcher{}
	InitDispatcher(d)
	testable := tqtesting.GetTestable(c, d)
	testable.CreateQueues()
	return d, testable
}
// verifyTasksAndMessages asserts that exactly the expected e-mail task was
// scheduled (or none, when emailExpect is empty), simulates running the
// queue, and checks the resulting sent message.
func verifyTasksAndMessages(c context.Context, taskqueue tqtesting.Testable, emailExpect []string) {
	// Make sure a task either was or wasn't scheduled.
	tasks := taskqueue.GetScheduledTasks()
	if len(emailExpect) == 0 {
		So(len(tasks), ShouldEqual, 0)
		return
	}
	So(len(tasks), ShouldEqual, 1)
	// Extract and check the task.
	task, ok := tasks[0].Payload.(*internal.EmailTask)
	So(ok, ShouldEqual, true)
	verifyStringSliceResembles(task.Recipients, emailExpect)
	// Simulate running the tasks; done/pending counts were only inspected by
	// a branch that was unreachable (the empty case returns above), so only
	// the error matters here.
	_, _, err := taskqueue.RunSimulation(c, nil)
	So(err, ShouldBeNil)
	// Check the outgoing message.
	messages := mail.GetTestable(c).SentMessages()
	verifyStringSliceResembles(messages[0].To, emailExpect)
	// Reset messages sent for other tasks.
	mail.GetTestable(c).Reset()
}
// TestNotify exercises Notify end-to-end against an in-memory GAE
// environment: it loads the "basic" test config, runs Notify for various
// build-status / previous-builder-status combinations, and checks which
// recipients are scheduled and e-mailed.
func TestNotify(t *testing.T) {
	t.Parallel()
	Convey(`Test Environment for Notify`, t, func() {
		cfgName := "basic"
		cfg, err := testutil.LoadProjectConfig(cfgName)
		So(err, ShouldBeNil)
		c := memory.UseWithAppID(context.Background(), "luci-notify-test")
		c = clock.Set(c, testclock.New(time.Now()))
		user.GetTestable(c).Login("noreply@luci-notify-test.appspotmail.com", "", false)
		// Get notifiers from test config.
		notifiers := extractNotifiers(c, cfgName, cfg)
		// Re-usable builds and builders for running Notify.
		goodBuild := notifyDummyBuild(buildbucketpb.Status_SUCCESS)
		goodEmailBuild := notifyDummyBuild(buildbucketpb.Status_SUCCESS, "property@google.com", "bogus@gmail.com")
		badBuild := notifyDummyBuild(buildbucketpb.Status_FAILURE)
		badEmailBuild := notifyDummyBuild(buildbucketpb.Status_FAILURE, "property@google.com", "bogus@gmail.com")
		goodBuilder := &Builder{
			StatusBuildTime: time.Date(2015, 2, 3, 12, 54, 3, 0, time.UTC),
			Status:          buildbucketpb.Status_SUCCESS,
		}
		badBuilder := &Builder{
			StatusBuildTime: time.Date(2015, 2, 3, 12, 54, 3, 0, time.UTC),
			Status:          buildbucketpb.Status_FAILURE,
		}
		dispatcher, taskqueue := createMockTaskQueue(c)
		// test runs Notify once and verifies exactly emailExpect recipients
		// get scheduled/e-mailed.
		test := func(notifiers []*config.Notifier, build *Build, builder *Builder, emailExpect ...string) {
			// Test Notify.
			err := Notify(c, dispatcher, notifiers, builder.Status, build)
			So(err, ShouldBeNil)
			// Verify sent messages.
			verifyTasksAndMessages(c, taskqueue, emailExpect)
		}
		// With no notifiers configured, only per-build EmailNotify
		// recipients (restricted to google.com) receive mail.
		Convey(`empty`, func() {
			var noNotifiers []*config.Notifier
			test(noNotifiers, goodBuild, goodBuilder)
			test(noNotifiers, goodBuild, badBuilder)
			test(noNotifiers, badBuild, goodBuilder)
			test(noNotifiers, badBuild, badBuilder)
			test(noNotifiers, goodEmailBuild, goodBuilder, "property@google.com")
			test(noNotifiers, badEmailBuild, goodBuilder, "property@google.com")
		})
		Convey(`on success`, func() {
			test(
				notifiers,
				goodBuild,
				goodBuilder,
				"test-example-success@google.com",
			)
			test(
				notifiers,
				goodEmailBuild,
				goodBuilder,
				"test-example-success@google.com",
				"property@google.com",
			)
		})
		Convey(`on failure`, func() {
			test(
				notifiers,
				badBuild,
				badBuilder,
				"test-example-failure@google.com",
			)
			test(
				notifiers,
				badEmailBuild,
				badBuilder,
				"test-example-failure@google.com",
				"property@google.com",
			)
		})
		Convey(`on change to failure`, func() {
			test(
				notifiers,
				badBuild,
				goodBuilder,
				"test-example-failure@google.com",
				"test-example-change@google.com",
			)
			test(
				notifiers,
				badEmailBuild,
				goodBuilder,
				"test-example-failure@google.com",
				"test-example-change@google.com",
				"property@google.com",
			)
		})
		Convey(`on change to success`, func() {
			test(
				notifiers,
				goodBuild,
				badBuilder,
				"test-example-success@google.com",
				"test-example-change@google.com",
			)
			test(
				notifiers,
				goodEmailBuild,
				badBuilder,
				"test-example-success@google.com",
				"test-example-change@google.com",
				"property@google.com",
			)
		})
	})
}
|
// +build rpi
package main
import (
// Modules
_ "github.com/djthorpe/gopi-hw/sys/gpio"
_ "github.com/djthorpe/gopi-hw/sys/hw"
_ "github.com/djthorpe/gopi-hw/sys/metrics"
_ "github.com/djthorpe/gopi-hw/sys/spi"
_ "github.com/djthorpe/gopi/sys/logger"
_ "github.com/djthorpe/sensors/protocol/ook"
_ "github.com/djthorpe/sensors/protocol/openthings"
_ "github.com/djthorpe/sensors/sys/ener314rt"
_ "github.com/djthorpe/sensors/sys/mihome"
_ "github.com/djthorpe/sensors/sys/rfm69"
_ "github.com/djthorpe/sensors/sys/sensordb"
)
|
package template
import (
"reflect"
"testing"
"github.com/AlecAivazis/survey"
)
// TestNewTemplate verifies that New loads a template directory, populating
// Meta (and Questions, when present), and errors for missing/empty/invalid
// template directories.
func TestNewTemplate(t *testing.T) {
	testCases := [...]struct {
		name     string
		input    string   // template directory passed to New
		expected Template // expected (partial) result; Meta is always compared
		hasError bool     // whether New should fail
	}{
		{
			name:  "Basic Template",
			input: "test_fixture/basic",
			expected: Template{
				baseDir: "test_fixture/basic",
				Meta: meta{
					Description: "A basic template",
				},
			},
		},
		{
			name:     "Invalid Template - Missing Directory",
			input:    "no/such/template",
			expected: Template{baseDir: "no/such/template"},
			hasError: true,
		},
		{
			name:     "Invalid Template - Empty Dir",
			input:    "test_fixture/empty",
			expected: Template{baseDir: "test_fixture/empty"},
			hasError: true,
		},
		{
			name:     "Invalid Config - Missing fields",
			input:    "test_fixture/invalid_config",
			expected: Template{baseDir: "test_fixture/invalid_config"},
			hasError: true,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := New(tc.input)
			if tc.hasError {
				if err == nil {
					t.Fatalf("Expected an error but didn't get one!")
				}
			} else {
				if err != nil {
					t.Fatalf("Got an unexpected error %q", err)
				}
			}
			if !reflect.DeepEqual(result.Meta, tc.expected.Meta) {
				t.Fatalf("Meta Data is different. Expected %+v but got %+v",
					tc.expected.Meta,
					result.Meta,
				)
			}
			// Questions are only compared when the case declares some.
			if !reflect.DeepEqual(result.Questions, tc.expected.Questions) &&
				len(tc.expected.Questions) > 0 {
				t.Fatalf("Questions are different. Expected %+v but got %+v",
					tc.expected.Questions,
					result.Questions,
				)
			}
		})
	}
}
// TestGetQuestions verifies that GetQuestions converts a template's question
// definitions into survey.Question values, skipping unknown question types.
func TestGetQuestions(t *testing.T) {
	testCases := [...]struct {
		name     string
		template Template           // template under test
		expected []*survey.Question // questions GetQuestions should produce
	}{
		{
			name: "Single Question",
			template: Template{
				Questions: []question{
					{
						Name:    "Test",
						Type:    "input",
						Message: "Message",
					},
				},
			},
			expected: []*survey.Question{
				{
					Name: "Test",
					Prompt: &survey.Input{
						Message: "Message",
					},
				},
			},
		},
		{
			name: "Multiple Question",
			template: Template{
				Questions: []question{
					{
						Name:    "Test 1",
						Type:    "input",
						Message: "Message",
					},
					{
						Name:    "Test 2",
						Type:    "input",
						Message: "Message",
					},
				},
			},
			expected: []*survey.Question{
				{
					Name: "Test 1",
					Prompt: &survey.Input{
						Message: "Message",
					},
				},
				{
					Name: "Test 2",
					Prompt: &survey.Input{
						Message: "Message",
					},
				},
			},
		},
		{
			// Unknown question types are dropped rather than erroring.
			name: "Dummy Type",
			template: Template{
				Questions: []question{
					{
						Type: "Dummy",
					},
				},
			},
			expected: []*survey.Question{},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result := tc.template.GetQuestions()
			if len(tc.expected) != len(result) {
				t.Fatalf("Expected %d questions, but got %d", len(tc.expected), len(result))
			}
			for i, q := range tc.expected {
				if result[i].Name != q.Name {
					t.Errorf("Names do not match got %q but wanted %q", result[i].Name, q.Name)
				}
				if !reflect.DeepEqual(result[i].Prompt, q.Prompt) {
					t.Errorf("Prompts do not match got %+v but wanted %+v", result[i].Prompt, q.Prompt)
				}
			}
		})
	}
}
|
package main
import (
. "github.com/protosam/go-libnss"
. "github.com/protosam/go-libnss/structs"
)
// Placeholder main() stub is necessary for compiling this package as a
// shared library; it is never invoked by libnss.
func main() {}
// init registers TestImpl as the active implementation so that go-libnss
// dispatches NSS lookups to the methods defined below.
func init(){
	// We set our implementation to "TestImpl", so that go-libnss will use the methods we create
	SetImpl(TestImpl{})
}
// TestImpl implements the LIBNSS stub methods, backed by the in-memory
// dbtest_* databases defined elsewhere in this package.
type TestImpl struct { LIBNSS }
////////////////////////////////////////////////////////////////
// Passwd Methods
////////////////////////////////////////////////////////////////
// PasswdAll returns every passwd entry known to this source, or
// StatusUnavail when the in-memory database is empty.
func (t TestImpl) PasswdAll() (Status, []Passwd) {
	// Idiomatic short receiver name replaces the non-Go "self".
	if len(dbtest_passwd) == 0 {
		return StatusUnavail, []Passwd{}
	}
	return StatusSuccess, dbtest_passwd
}
// PasswdByName returns the single passwd entry matching name, or
// StatusNotfound with a zero Passwd when no entry matches.
func (t TestImpl) PasswdByName(name string) (Status, Passwd) {
	for _, entry := range dbtest_passwd {
		if entry.Username == name {
			return StatusSuccess, entry
		}
	}
	return StatusNotfound, Passwd{}
}
// PasswdByUid returns the single passwd entry matching uid, or
// StatusNotfound with a zero Passwd when no entry matches.
func (t TestImpl) PasswdByUid(uid uint) (Status, Passwd) {
	for _, entry := range dbtest_passwd {
		if entry.UID == uid {
			return StatusSuccess, entry
		}
	}
	return StatusNotfound, Passwd{}
}
////////////////////////////////////////////////////////////////
// Group Methods
////////////////////////////////////////////////////////////////
// GroupAll returns every group entry (backs setgrent/getgrent/endgrent
// enumeration), or StatusUnavail when the in-memory database is empty.
func (t TestImpl) GroupAll() (Status, []Group) {
	if len(dbtest_group) == 0 {
		return StatusUnavail, []Group{}
	}
	return StatusSuccess, dbtest_group
}
// GroupByName returns the group entry matching name (backs getgrnam).
// Note: the old comment said "getgrent", which is the enumeration call.
func (t TestImpl) GroupByName(name string) (Status, Group) {
	for _, entry := range dbtest_group {
		if entry.Groupname == name {
			return StatusSuccess, entry
		}
	}
	return StatusNotfound, Group{}
}
// GroupByGid returns the group entry matching gid (backs getgrgid).
// Note: the old comment said "getgrnam", which is the by-name call.
func (t TestImpl) GroupByGid(gid uint) (Status, Group) {
	for _, entry := range dbtest_group {
		if entry.GID == gid {
			return StatusSuccess, entry
		}
	}
	return StatusNotfound, Group{}
}
////////////////////////////////////////////////////////////////
// Shadow Methods
////////////////////////////////////////////////////////////////
// ShadowAll returns every shadow entry (backs setspent/getspent/endspent
// enumeration), or StatusUnavail when the in-memory database is empty.
func (t TestImpl) ShadowAll() (Status, []Shadow) {
	if len(dbtest_shadow) == 0 {
		return StatusUnavail, []Shadow{}
	}
	return StatusSuccess, dbtest_shadow
}
// ShadowByName returns the shadow entry matching name (backs getspnam).
func (t TestImpl) ShadowByName(name string) (Status, Shadow) {
	for _, entry := range dbtest_shadow {
		if entry.Username == name {
			return StatusSuccess, entry
		}
	}
	return StatusNotfound, Shadow{}
}
|
package main
import (
"fmt"
"time"
"github.com/tideland/golib/redis"
)
var (
	// rds is the shared redis connection pool, created by initRedis.
	rds *redis.Database
	// SHA1 handles of the server-side Lua scripts loaded by initRedis,
	// invoked later via EVALSHA.
	rdsCircularBuffer string
	rdsGetIPCache string
	rdsSetIPCache string
)
// CHATLOGLINES is how many chat log lines to keep buffered for the scrollback.
const CHATLOGLINES = 150
// redisGetConn obtains a connection from the pool, retrying forever with a
// 500ms pause between attempts, so callers always receive a usable connection.
func redisGetConn() *redis.Connection {
	for {
		conn, err := rds.Connection()
		if err == nil {
			return conn
		}
		D("Error getting a redis connection", err)
		// Return a half-initialized connection to the pool before retrying.
		if conn != nil {
			conn.Return()
		}
		time.Sleep(500 * time.Millisecond)
	}
}
// initRedis opens the global redis pool (rds) and preloads the three Lua
// scripts used by this module, storing their SHA1 handles in
// rdsCircularBuffer, rdsGetIPCache and rdsSetIPCache for later EVALSHA use.
// Any failure is fatal (F aborts the process).
func initRedis(addr string, db int64, pw string) {
	var err error
	rds, err = redis.Open(
		redis.TcpConnection(addr, 1*time.Second),
		redis.Index(int(db), pw),
		redis.PoolSize(50),
	)
	if err != nil {
		F("Error making the redis pool", err)
	}
	conn := redisGetConn()
	defer conn.Return()
	// Script 1: append payload to the list at KEYS[1] and trim it from the
	// front so it never exceeds ARGV[1] entries; returns the trim count.
	rdsCircularBuffer, err = conn.DoString("SCRIPT", "LOAD", `
		local key = KEYS[1]
		local maxlength = tonumber(ARGV[1])
		local payload = ARGV[2]
		if not key then
			return {err = "INVALID KEY"}
		end
		if not payload then
			return {err = "INVALID PAYLOAD"}
		end
		if not maxlength then
			return {err = "INVALID MAXLENGTH"}
		end
		-- push the payload onto the end
		redis.call("RPUSH", key, payload)
		local delcount = 0
		-- get rid of excess lines from the front
		local numlines = redis.call("LLEN", key)
		for _ = numlines - 1, maxlength, -1 do
			redis.call("LPOP", key)
			delcount = delcount + 1
		end
		return delcount
	`)
	if err != nil {
		F("Circular buffer script loading error", err)
	}
	// Script 2: read back the bounded per-user IP cache (scores 1..3).
	rdsGetIPCache, err = conn.DoString("SCRIPT", "LOAD", `
		local key = KEYS[1]
		return redis.call("ZRANGEBYSCORE", key, 1, 3)
	`)
	if err != nil {
		F("Get IP Cache script loading error", err)
	}
	// Script 3: insert ARGV[1] into a 3-entry LRU-like sorted set; existing
	// values are moved to the end, and the oldest entry is evicted when full.
	rdsSetIPCache, err = conn.DoString("SCRIPT", "LOAD", `
		local key, value, maxlength = KEYS[1], ARGV[1], 3
		local count = redis.call("ZCOUNT", key, 1, maxlength)
		local existingscore = redis.call("ZSCORE", key, value)
		if existingscore then
			-- renumber all the elements and make this one the last
			local elements = redis.call("ZRANGEBYSCORE", key, 1, maxlength)
			local i = 1
			for _, v in ipairs(elements) do
				if v == value then
					redis.call("ZADD", key, count, v)
				else
					redis.call("ZADD", key, i, v)
					i = i + 1
				end
			end
			return
		end
		if count == maxlength then
			-- delete the first element, modify the other elements score down
			-- and add the new one to the end
			redis.call("ZREMRANGEBYSCORE", key, 1, 1)
			local elements = redis.call("ZRANGEBYSCORE", key, 2, maxlength)
			local i = 1
			for _, v in ipairs(elements) do
				redis.call("ZADD", key, i, v)
				i = i + 1
			end
			return redis.call("ZADD", key, count, value)
		else
			-- otherwise just insert it with the next score
			return redis.call("ZADD", key, count + 1, value)
		end
	`)
	if err != nil {
		F("Set IP Cache script loading error", err)
	}
}
// cacheIPForUser records ip in the user's bounded (max 3 entries) IP cache
// in redis via the preloaded rdsSetIPCache script. Loopback connections are
// skipped. Errors are logged, not returned.
func cacheIPForUser(userid Userid, ip string) {
	if ip == "127.0.0.1" {
		return
	}
	conn := redisGetConn()
	defer conn.Return()
	_, err := conn.Do("EVALSHA", rdsSetIPCache, 1, fmt.Sprintf("CHAT:userips-%d", userid), ip)
	if err != nil {
		D("cacheIPForUser redis error", err)
	}
}
// getIPCacheForUser returns the cached IPs for userid via the preloaded
// rdsGetIPCache script. On error it logs and returns whatever (possibly
// nil) slice the call produced.
func getIPCacheForUser(userid Userid) []string {
	conn := redisGetConn()
	defer conn.Return()
	ips, err := conn.DoStrings("EVALSHA", rdsGetIPCache, 1, fmt.Sprintf("CHAT:userips-%d", userid))
	if err != nil {
		D("getIPCacheForUser redis error", err)
	}
	return ips
}
// isSubErr reports whether err is non-nil. On error it also logs the
// failure, closes the subscription if one exists, and sleeps 500ms so the
// caller's retry loop doesn't spin.
func isSubErr(sub *redis.Subscription, err error) bool {
	if err == nil {
		return false
	}
	D("Getting a subscription failed with error", err)
	if sub != nil {
		sub.Close()
	}
	time.Sleep(500 * time.Millisecond)
	return true
}
// setupRedisSubscription subscribes to the "<channel>-<redisdb>" pubsub
// channel and invokes cb for every non-nil published value. It never
// returns: any subscription failure tears everything down and restarts
// from scratch (isSubErr closes the sub and sleeps between attempts).
func setupRedisSubscription(channel string, redisdb int64, cb func(*redis.PublishedValue)) {
again:
	sub, err := rds.Subscription()
	if isSubErr(sub, err) {
		goto again
	}
	err = sub.Subscribe(fmt.Sprintf("%s-%d", channel, redisdb))
	if isSubErr(sub, err) {
		goto again
	}
	for {
		result, err := sub.Pop()
		if isSubErr(sub, err) {
			goto again
		}
		// Skip keep-alive/nil payloads.
		if result.Value.IsNil() {
			continue
		}
		cb(result)
	}
}
// redisGetBytes GETs key and returns the raw bytes of the first value in
// the reply; on any error it returns an empty slice alongside the error.
func redisGetBytes(key string) ([]byte, error) {
	conn := redisGetConn()
	defer conn.Return()
	result, err := conn.Do("GET", key)
	if err != nil {
		return []byte{}, err
	}
	value, err := result.ValueAt(0)
	if err != nil {
		return []byte{}, err
	}
	return value.Bytes(), err
}
// cacheChatEvent packs msg and appends it to the CHAT:chatlog circular
// buffer in redis, trimming the list to CHATLOGLINES entries via the
// preloaded rdsCircularBuffer script. Errors are logged, not returned.
func cacheChatEvent(msg *message) {
	conn := redisGetConn()
	defer conn.Return()
	data, err := Pack(msg.event, msg.data.([]byte))
	if err != nil {
		D("cacheChatEvent pack error", err)
		return
	}
	_, err = conn.Do(
		"EVALSHA",
		rdsCircularBuffer,
		1,
		"CHAT:chatlog",
		CHATLOGLINES,
		data,
	)
	if err != nil {
		D("cacheChatEvent redis error", err)
	}
}
// cacheConnectedUsers stores the pre-marshalled connected-user list under
// the CHAT:connectedUsers key. Errors are logged, not returned.
func cacheConnectedUsers(marshallednames []byte) {
	conn := redisGetConn()
	defer conn.Return()
	_, err := conn.DoOK("SET", "CHAT:connectedUsers", marshallednames)
	if err != nil {
		D("Error caching connected users.", err)
	}
}
// All material is licensed under the Apache License Version 2.0, January 2004
// http://www.apache.org/licenses/LICENSE-2.0
// This program shows how to launch a web server then shut it down gracefully.
package main
import (
"context"
"log"
"math/rand"
"net/http"
"os"
"os/signal"
"time"
)
// app is our application handler. It logs when a request starts and again
// when it finishes, sleeping a random 800-1200 milliseconds in between so
// requests are slow enough to watch happening.
func app(res http.ResponseWriter, req *http.Request) {
	id := time.Now().Nanosecond()
	log.Printf("app : Start %d", id)
	delay := time.Duration(800+rand.Intn(400)) * time.Millisecond
	time.Sleep(delay)
	log.Printf("app : End %d", id)
}
// main starts an HTTP server on :3000 and blocks until an interrupt
// triggers a graceful shutdown via shutdownOnInterrupt.
func main() {
	log.Println("main : Started")
	// Create a new server and set timeout values.
	server := http.Server{
		Addr:           ":3000",
		Handler:        http.HandlerFunc(app),
		ReadTimeout:    5 * time.Second,
		WriteTimeout:   10 * time.Second,
		MaxHeaderBytes: 1 << 20,
	}
	// Start listening for requests. We do this in a goroutine so our main func
	// can be blocked waiting on the shutdown code.
	go func() {
		log.Println("listener : Started : Listening on :3000")
		// ListenAndServe always returns a non-nil error once the server stops
		// (http.ErrServerClosed after a graceful Shutdown).
		err := server.ListenAndServe()
		log.Printf("listener : Completed : %v", err)
	}()
	// Block until there's an interrupt then shut the server down. The main
	// func must not return before this process is complete or in-flight
	// requests will be aborted.
	shutdownOnInterrupt(&server)
	log.Println("main : Completed")
}
// shutdownOnInterrupt blocks until the process receives an interrupt, then
// attempts a graceful shutdown with a 5 second deadline, falling back to a
// forceful Close if the deadline is exceeded.
func shutdownOnInterrupt(server *http.Server) {
	// Set up channel to receive interrupt signals.
	// We must use a buffered channel or risk missing the signal
	// if we're not ready to receive when the signal is sent.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	// Block until a signal is received.
	log.Println("closer : Waiting for a shutdown signal")
	<-c
	log.Println("closer : Signal received. Attempting graceful shut down...")
	// Create a context with a 5 second timeout. If the server can't
	// gracefully shut down in that time we'll kill it.
	timeout := 5 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	// Try a graceful shutdown. If there's no error we're done.
	err := server.Shutdown(ctx)
	if err == nil {
		return
	}
	// Try a forceful shutdown
	log.Printf("closer : Graceful shutdown did not complete in %v : %v", timeout, err)
	log.Println("closer : Killing server.")
	err = server.Close()
	if err != nil {
		log.Printf("closer : Errors killing server : %v", err)
	}
}
|
package origin
import (
"testing"
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address/signaturescheme"
valuetransaction "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
"github.com/iotaledger/goshimmer/dapps/waspconn/packages/utxodb"
"github.com/iotaledger/wasp/packages/sctransaction"
"github.com/stretchr/testify/assert"
)
const (
scAddrStr = "behcN8An9CQDU3rcHSUucamZreVWfJepmiuBoSpomfi1"
dscr = "test test sc"
)
// TestReadWrite builds an origin transaction against a fresh in-memory
// UTXO DB, serializes it to bytes, parses it back, and checks the
// round-tripped transaction keeps the same ID.
func TestReadWrite(t *testing.T) {
	u := utxodb.New()
	scAddr, err := address.FromBase58(scAddrStr)
	assert.NoError(t, err)
	ownerSigScheme := signaturescheme.RandBLS()
	// Fund the owner address so the origin transaction has inputs to spend.
	_, err = u.RequestFunds(ownerSigScheme.Address())
	assert.NoError(t, err)
	tx, err := NewOriginTransaction(NewOriginTransactionParams{
		OriginAddress:             scAddr,
		OriginatorSignatureScheme: ownerSigScheme,
		AllInputs:                 u.GetAddressOutputs(ownerSigScheme.Address()),
	})
	assert.NoError(t, err)
	t.Logf("created transaction txid = %s", tx.ID().String())
	// Round-trip: bytes -> value transaction -> sc transaction.
	data := tx.Bytes()
	vtx, _, err := valuetransaction.FromBytes(data)
	assert.NoError(t, err)
	txback, err := sctransaction.ParseValueTransaction(vtx)
	assert.NoError(t, err)
	assert.EqualValues(t, tx.ID(), txback.ID())
}
|
package main
import (
"context"
"fmt"
"log"
"net"
"net/http"
pb "proto-example/pb"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
const (
	grpcAddress = 10000 // TCP port served by the gRPC server
	httpAddress = 9000  // TCP port served by the HTTP/JSON gateway
)
// server implements pb.AuthServer. Embedding UnimplementedAuthServer keeps
// it forward-compatible when new RPCs are added to the proto service.
type server struct {
	pb.UnimplementedAuthServer
}
// Login logs the requested username and returns a hard-coded access token
// (stub implementation).
func (s *server) Login(ctx context.Context, in *pb.UserRequest) (*pb.UserResponse, error) {
	log.Printf("Received: %v", in.GetUsername())
	return &pb.UserResponse{AccessToken: "123"}, nil
}
// GetUser returns a fixed user id (stub implementation).
func (s *server) GetUser(ctx context.Context, in *pb.GetUserRequest) (*pb.GetUserResponse, error) {
	return &pb.GetUserResponse{
		UserId: int64(1000),
	}, nil
}
// RunGRPCGateway serves an HTTP/JSON gateway on httpAddress that proxies
// requests to the gRPC Auth service on grpcAddress, forwarding inbound
// HTTP headers as gRPC metadata. It blocks until the HTTP server stops.
func RunGRPCGateway() (err error) {
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	// Marshal responses using original proto field names and emit zero values.
	mux := runtime.NewServeMux(runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{OrigName: true, EmitDefaults: true}))
	opts := []grpc.DialOption{grpc.WithInsecure()}
	err = pb.RegisterAuthHandlerFromEndpoint(ctx, mux, fmt.Sprintf(":%d", grpcAddress), opts)
	if err != nil {
		return err
	}
	muxHttp := http.NewServeMux()
	muxHttp.Handle("/", forwardAccessToken(mux))
	return http.ListenAndServe(fmt.Sprintf(":%d", httpAddress), muxHttp)
}
// forwardAccessToken copies every inbound HTTP header (lower-cased key,
// single value) into gRPC metadata attached to the request context, then
// delegates to the wrapped handler.
// NOTE(review): metadata.NewIncomingContext is normally a server-side
// construct; confirm the gateway consumes these values as intended
// (vs. NewOutgoingContext).
func forwardAccessToken(next http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		md := make(metadata.MD)
		for k := range r.Header {
			k2 := strings.ToLower(k)
			md[k2] = []string{r.Header.Get(k)}
		}
		ctx := metadata.NewIncomingContext(r.Context(), md)
		r = r.WithContext(ctx)
		next.ServeHTTP(w, r)
	}
}
// main starts the HTTP gateway in the background and then serves the gRPC
// Auth service on grpcAddress, blocking until the server stops.
func main() {
	go func() {
		// Previously the gateway's error was silently discarded; surface it.
		if err := RunGRPCGateway(); err != nil {
			log.Fatalf("failed to run gRPC gateway: %v", err)
		}
	}()
	s := grpc.NewServer()
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", grpcAddress))
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	pb.RegisterAuthServer(s, &server{})
	if err := s.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-08-16 13:16
# @File : buble_sort.go
# @Description :
# @Attention :
*/
package sort
// BubbleSort sorts data in place in ascending order using bubble sort.
// Each outer pass bubbles the largest remaining element to the end; the
// pass terminates early once an inner sweep performs no swaps, which makes
// already-sorted input O(n) instead of O(n^2).
func BubbleSort(data []int) {
	for i := 0; i < len(data); i++ {
		swapped := false
		for j := 0; j < len(data)-1-i; j++ {
			if data[j] > data[j+1] {
				data[j], data[j+1] = data[j+1], data[j]
				swapped = true
			}
		}
		// No swaps means the slice is fully sorted already.
		if !swapped {
			return
		}
	}
}
|
package main
import (
"fmt"
"os"
"github.com/Cloud-Foundations/Dominator/imageserver/client"
"github.com/Cloud-Foundations/Dominator/lib/log"
)
// checkImageSubcommand reports whether the image named by args[0] exists
// on the image server: it returns nil when the image exists and exits the
// process with status 1 when it does not.
func checkImageSubcommand(args []string, logger log.DebugLogger) error {
	imageSClient, _ := getClients()
	imageExists, err := client.CheckImage(imageSClient, args[0])
	if err != nil {
		return fmt.Errorf("error checking image: %s", err)
	}
	if imageExists {
		return nil
	}
	os.Exit(1)
	// Unreachable: os.Exit never returns, but the compiler still requires a
	// terminating statement here.
	panic("impossible")
}
|
package main
/*
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <netinet/in.h>
#include <linux/if.h>
#include <linux/if_tun.h>
int tun_alloc(char *dev, int flags) {
struct ifreq ifr;
int fd, err;
char *clonedev = "/dev/net/tun";
if ((fd = open(clonedev, O_RDWR)) < 0) {
printf("cannot open clone device\n");
return fd;
}
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_flags = flags;
if (*dev) {
strncpy(ifr.ifr_name, dev, IFNAMSIZ);
}
if ((err = ioctl(fd, TUNSETIFF, (void *)&ifr)) < 0) {
close(fd);
printf("cannot create tun device\n");
return err;
}
strcpy(dev, ifr.ifr_name);
return fd;
}
char* make_empty_name() {
char *name = malloc(IFNAMSIZ);
name[0] = '\0';
return name;
}
*/
import "C"
import (
"flag"
"fmt"
"log"
"net"
"os"
"os/exec"
"strconv"
"strings"
"time"
"unsafe"
)
// MTU of the tun interface; UDP read/write buffers are sized against it.
const MTU = 1400

var node = flag.Int("node", 1, "node number")
var listen = flag.String("listen", ":38848", "listen address")
var remote = flag.String("remote", "none", "remote address")
var gateway = flag.Bool("gateway", false, "set as default gateway")
var allow = flag.String("allow", "127.0.0.1", "allowed ips")

// NOTE(review): calling flag.Parse from init is unconventional; flags are
// normally parsed at the top of main.
func init() {
	flag.Parse()
}
// NewTun allocates a tun device through the cgo helper tun_alloc, brings
// it up, assigns the address 10.10.10.<node>/24 and the configured MTU.
// With -gateway it also points the subnet route at this node's address.
// Returns the device file descriptor and the kernel-assigned name;
// any failure is fatal.
func NewTun() (fd C.int, name string) {
	tun_name := C.make_empty_name()
	defer C.free(unsafe.Pointer(tun_name))
	fd = C.tun_alloc(tun_name, C.IFF_TUN)
	if fd < C.int(0) {
		log.Fatal("cannot create tun device")
	}
	name = C.GoString(tun_name)
	fmt.Printf("fd %d, name %s\n", fd, name)
	run("ip", "link", "set", name, "up")
	ip := fmt.Sprintf("10.10.10.%d", *node)
	run("ip", "addr", "add", ip+"/24", "dev", name)
	run("ip", "link", "set", "dev", name, "mtu", strconv.Itoa(MTU))
	if *gateway {
		// Zero the last octet to derive the subnet address from our own IP.
		split := strings.Split(ip, ".")
		split[len(split)-1] = "0"
		network := strings.Join(split, ".")
		run("ip", "route", "change", network+"/24", "via", ip)
	}
	return fd, name
}
// run executes cmd with args and aborts the program on failure. The log
// now includes the underlying error (previously dropped), alongside the
// command line and its captured stdout.
func run(cmd string, args ...string) {
	out, err := exec.Command(cmd, args...).Output()
	if err != nil {
		log.Fatalf("error on running command %s %s: %v\n>> %s <<",
			cmd,
			strings.Join(args, " "),
			err,
			out)
	}
}
// main wires the tun device to a UDP socket: one goroutine forwards UDP
// datagrams from allowed peers into the tun device, while the main loop
// forwards packets read from the tun device to every known remote.
func main() {
	fd, name := NewTun()
	file := os.NewFile(uintptr(fd), name)
	// remotes collects peer addresses: the configured -remote plus every
	// allowed peer we hear from.
	remotes := make(map[string]*net.UDPAddr)
	start := time.Now()
	// allows is the set of source IPs permitted to send us packets
	// (-allow "all" disables the check).
	allows := make(map[string]bool)
	for _, ip := range strings.Split(*allow, ",") {
		allows[ip] = true
	}
	addr, err := net.ResolveUDPAddr("udp", *listen)
	if err != nil {
		log.Fatal(err)
	}
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		log.Fatal(err)
	}
	// UDP -> tun forwarding loop.
	go func() {
		buffer := make([]byte, MTU*2)
		var count int
		var err error
		var remoteAddr *net.UDPAddr
		fmt.Printf("listening %v\n", addr)
		for {
			count, remoteAddr, err = conn.ReadFromUDP(buffer)
			var host string
			// NOTE(review): this assignment clobbers the ReadFromUDP error
			// before the "if err != nil { break }" below can see it; a read
			// failure is therefore never detected here.
			host, _, err = net.SplitHostPort(remoteAddr.String())
			if err != nil || (*allow != "all" && allows[host] != true) {
				fmt.Printf("block %v\n", host)
				continue
			}
			fmt.Printf("%v read from udp %v %v\n",
				time.Now().Sub(start),
				remoteAddr,
				count)
			if err != nil {
				break
			}
			// Learn the peer so tun->UDP traffic can be sent back to it.
			if remotes[remoteAddr.String()] == nil {
				remotes[remoteAddr.String()] = remoteAddr
			}
			fmt.Printf("write to tun %d\n", count)
			file.Write(buffer[:count])
		}
	}()
	// Seed the remotes map with the explicitly configured peer, if any.
	if *remote != "none" {
		remoteAddr, err := net.ResolveUDPAddr("udp", *remote)
		if err != nil {
			log.Fatal(err)
		}
		remotes[remoteAddr.String()] = remoteAddr
	}
	// tun -> UDP forwarding loop (fan-out to every known remote).
	buffer := make([]byte, MTU*2)
	var count int
	for {
		count, err = file.Read(buffer)
		fmt.Printf("%v read from tun %v\n",
			time.Now().Sub(start),
			count)
		if err != nil {
			break
		}
		for _, remoteAddr := range remotes {
			fmt.Printf("write to %v\n", remoteAddr)
			conn.WriteToUDP(buffer[:count], remoteAddr)
		}
	}
}
|
package models
import (
"GOLANG/entities"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"errors"
)
var (
	// listUser is the in-memory user store backing the CRUD helpers below;
	// it starts out empty (length 0).
	listUser = make([]*entities.User, 0)
)
// HashString returns the hex-encoded SHA-256 digest of s.
func HashString(s string) string {
	digest := sha256.Sum256([]byte(s))
	return hex.EncodeToString(digest[:])
}
// EncodeString returns the standard base64 encoding of s.
func EncodeString(s string) string {
	return base64.StdEncoding.EncodeToString([]byte(s))
}
// CreateUser appends user to the in-memory store. It succeeds (returns
// true) only when Id, Name and Password are all non-empty and no user with
// the same Id already exists.
func CreateUser(user *entities.User) bool {
	if user.Id != "" && user.Name != "" && user.Password != "" {
		// FindUser's error is deliberately ignored: "not found" is exactly
		// the condition that allows creation.
		if userF, _ := FindUser(user.Id); userF == nil {
			listUser = append(listUser, user)
			return true
		}
	}
	return false
}
// UpdateUser replaces the stored user whose Id matches eUser.Id, reporting
// whether a replacement happened.
func UpdateUser(eUser *entities.User) bool {
	for i := range listUser {
		if listUser[i].Id == eUser.Id {
			listUser[i] = eUser
			return true
		}
	}
	return false
}
// FindUser returns the stored user whose Id equals id, or an error when no
// such user exists.
func FindUser(id string) (*entities.User, error) {
	for i := range listUser {
		if listUser[i].Id == id {
			return listUser[i], nil
		}
	}
	return nil, errors.New("User does not exist")
}
// DeleteUser removes the user with the given id, preserving the order of
// the remaining users. It returns false when the id is unknown.
func DeleteUser(id string) bool {
	for index, user := range listUser {
		if user.Id == id {
			// Shift the tail left over the removed slot...
			copy(listUser[index:], listUser[index+1:])
			// ...replace the now-duplicated last pointer so the removed
			// user can be garbage collected...
			listUser[len(listUser)-1] = &entities.User{}
			// ...and shrink the slice by one.
			listUser = listUser[:len(listUser)-1]
			return true
		}
	}
	return false
}
// GetAllUser returns all stored users.
// NOTE(review): this exposes the internal backing slice, so callers can
// mutate the store directly.
func GetAllUser() []*entities.User {
	return listUser
}
|
// myAtoi converts a string to a 32-bit signed integer, mimicking C's atoi:
// it skips leading spaces, accepts an optional '+'/'-' sign, consumes
// digits until the first non-digit, and clamps the result to the int32
// range. Empty or non-numeric input yields 0.
//
// Fix: the previous version indexed str[i] after the space-skipping loop
// without checking i < n, panicking on empty or all-space input.
func myAtoi(str string) int {
	i, n := 0, len(str)
	// Skip leading spaces.
	for i < n && str[i] == ' ' {
		i++
	}
	// Nothing left after the spaces: no number to parse.
	if i == n {
		return 0
	}
	signal, re := 1, 0
	switch str[i] {
	case '+':
		i++
	case '-':
		i++
		signal = -1
	}
	for i < n {
		c := str[i]
		if c < '0' || c > '9' {
			break
		}
		re = re*10 + signal*int(c-'0')
		// Clamp as soon as the accumulated value leaves the int32 range
		// (the intermediate cannot overflow a 64-bit int because the
		// previous value was within range).
		if re > 0x7fffffff {
			return 0x7fffffff
		} else if re < -0x80000000 {
			return -0x80000000
		}
		i++
	}
	return re
}
|
package telex
import (
"strconv"
)
type TelexParseError struct {
err error
line int
context string
}
func (tpe TelexParseError) Error() string {
return strconv.Itoa(tpe.line) + ": " + tpe.err.Error() + " While parsing: " + tpe.context
}
|
package service
import (
"fmt"
"io/ioutil"
"os"
"reflect"
"sort"
"strings"
e "sample.com/book/error"
"sample.com/book/model"
"sample.com/book/util"
)
// boolToInteger maps true -> 1 and false -> -1, used to turn string
// comparisons into sortable signs when ordering []Book. It is built by an
// immediately-invoked closure so the lookup map is allocated exactly once.
var boolToInteger = func() func(bool) int {
	innerMap := map[bool]int{
		true:  1,
		false: -1,
	}
	return func(b bool) int {
		return innerMap[b]
	}
}()
// Align describes an optional sort order for FindAllBooks.
type Align struct {
	As    string // asc (default), desc
	Field string // field of Book
}
// FindAllBooks reads every book file under util.DataPath and returns them,
// optionally ordered according to align. A nil align, an empty Field, or a
// Field that is not a known Book column returns the books unsorted.
//
// Fix: align is now checked for nil before dereferencing (the previous
// version panicked when callers passed nil, as the removed commented-out
// guard suggests was intended).
func FindAllBooks(align *Align) ([]model.Book, error) {
	books := make([]model.Book, 0)
	txts, err := ioutil.ReadDir(util.DataPath)
	if err != nil {
		return books, err
	}
	for _, txt := range txts {
		// Per-file read errors are ignored; the zero Book is still parsed.
		bs, _ := ioutil.ReadFile(util.DataPath + txt.Name())
		book := model.Book{}
		book.FromString(string(bs), txt.Name())
		books = append(books, book)
	}
	// No (valid) sort requested: return in directory order.
	if align == nil || align.Field == "" {
		return books, nil
	}
	if !util.Includes(model.BookColumns, align.Field) {
		return books, nil
	}
	// c flips the comparison direction: 1 for "asc" (default), -1 for "desc".
	// NOTE(review): with c == 1 the comparators below order larger values
	// first; confirm that "asc" is intended to mean this direction.
	var c int
	switch align.As {
	case "desc":
		c = -1
	default:
		c = 1
	}
	switch align.Field {
	case "Id":
		// Numeric comparison for the Id column.
		sort.Slice(books, func(i, j int) bool {
			return books[i].Id*c > books[j].Id*c
		})
	default:
		// All other columns are compared as strings via reflection.
		sort.Slice(books, func(i, j int) bool {
			fi := reflect.Indirect(reflect.ValueOf(books[i])).FieldByName(align.Field).String()
			fj := reflect.Indirect(reflect.ValueOf(books[j])).FieldByName(align.Field).String()
			return boolToInteger(fi > fj)*c > 0
		})
	}
	return books, nil
}
// FileNotFoundError reports that a book's data file does not exist.
type FileNotFoundError struct {
	Message string
}

// Error returns the stored message, satisfying the error interface.
func (e *FileNotFoundError) Error() string {
	return e.Message
}
// fileExists reports whether filename exists and is a regular file (not a
// directory).
//
// Fixes two defects: the previous version returned err != nil — i.e. true
// exactly when the file could NOT be opened — and it leaked the *os.File
// returned by os.Open. os.Stat avoids opening the file entirely.
func fileExists(filename string) bool {
	info, err := os.Stat(filename)
	if err != nil {
		return false
	}
	return !info.IsDir()
}
// FindBook loads the book with the given id from its data file under
// util.DataPath.
// NOTE(review): because the return values are unnamed, the deferred
// closure's assignment to the local e runs after the results have been
// copied and never reaches the caller; its only practical effect is to
// swallow a panic (e.g. from FromIdAndString), in which case the caller
// receives zero values and a nil error. Consider named results if the
// FileNotFoundError is meant to propagate.
func FindBook(id int) (model.Book, error) {
	filename := util.PaddedFilename(id)
	path := util.DataPath + filename
	book := model.Book{}
	var e error
	defer func() {
		recover()
		e = &FileNotFoundError{
			Message: path + " not found",
		}
	}()
	bs, err := ioutil.ReadFile(path)
	if err != nil {
		return book, err
	}
	book.FromIdAndString(string(bs), id)
	return book, e
}
// todo: apply goroutine
// todo: implement a singleton to back this function instead of file i/o
// FindBookByName scans every data file for a book whose first line (the
// book's name) equals name, skipping the book whose id equals except
// (useful to exclude the record being updated when checking duplicates).
// Returns NameNotFoundError when no other book has that name.
func FindBookByName(name string, except int) (model.Book, error) {
	txts, err := ioutil.ReadDir(util.DataPath)
	book := model.Book{}
	if err != nil {
		return book, err
	}
	for _, txt := range txts {
		bs, _ := ioutil.ReadFile(util.DataPath + txt.Name())
		s := util.Trim(string(bs))
		// get the name of this book
		// first line means name
		// NOTE(review): strings.Index returns -1 when the file contains no
		// newline, which would make this slice expression panic.
		firstLine := s[:strings.Index(s, "\n")]
		if firstLine == name {
			id := util.IdFromFilename(txt.Name())
			if id != except {
				book.FromIdAndString(s, id)
				return book, nil
			}
		}
	}
	return book, &e.NameNotFoundError{Name: name}
}
// todo: apply utf8
// CreateBook allocates the next sequential id (one past the id of the last
// existing file, or 1 for an empty store), writes the book to a new data
// file, and stores the new id back into book.Id.
func CreateBook(book *model.Book) error {
	// Directory read errors are ignored; an empty listing simply yields id 1.
	txts, _ := ioutil.ReadDir(util.DataPath)
	var newId int
	switch len(txts) {
	case 0:
		// set first id to 1
		newId = 1
	default:
		// add 1 to the latest filename
		newId = util.IdFromFilename(txts[len(txts)-1].Name()) + 1
	}
	// create new filename
	filename := util.PaddedFilename(newId)
	f, err := os.Create(util.DataPath + filename)
	if err != nil {
		return err
	}
	defer f.Close()
	// NOTE(review): the write error from Fprint is discarded.
	fmt.Fprint(f, book.String())
	book.Id = newId
	return nil
}
// ModifyBook overwrites the data file for book.Id with the book's current
// serialized form.
//
// Fix: the previous version opened the file with os.Open, which is
// read-only, so fmt.Fprint's writes failed — and the write error was
// discarded, hiding the failure. os.Create truncates and opens for
// writing, and the write error is now propagated.
func ModifyBook(book *model.Book) error {
	filename := util.PaddedFilename(book.Id)
	f, err := os.Create(util.DataPath + filename)
	if err != nil {
		return err
	}
	defer f.Close()
	if _, err := fmt.Fprint(f, book.String()); err != nil {
		return err
	}
	return nil
}
// RemoveBook deletes the data file backing the book with the given id.
func RemoveBook(id int) error {
	filename := util.PaddedFilename(id)
	return os.Remove(util.DataPath + filename)
}
|
package main
import "log"
// myLog writes a formatted message to the standard logger, tagging every
// entry with the "[my]" prefix.
func myLog(format string, args ...interface{}) {
	log.Printf("[my]"+format, args...)
}
|
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attributes
import (
"go.opentelemetry.io/collector/consumer/pdata"
"go.opentelemetry.io/collector/translator/conventions"
)
// TagsFromAttributes converts a selected list of attributes
// to a tag list that can be added to metrics.
func TagsFromAttributes(attrs pdata.AttributeMap) []string {
	tags := make([]string, 0, attrs.Len())
	// Collect recognized semantic-convention keys into typed holders;
	// any attribute not matched below is ignored.
	var processAttributes processAttributes
	var systemAttributes systemAttributes
	attrs.ForEach(func(key string, value pdata.AttributeValue) {
		switch key {
		// Process attributes
		case conventions.AttributeProcessExecutableName:
			processAttributes.ExecutableName = value.StringVal()
		case conventions.AttributeProcessExecutablePath:
			processAttributes.ExecutablePath = value.StringVal()
		case conventions.AttributeProcessCommand:
			processAttributes.Command = value.StringVal()
		case conventions.AttributeProcessCommandLine:
			processAttributes.CommandLine = value.StringVal()
		case conventions.AttributeProcessID:
			processAttributes.PID = value.IntVal()
		case conventions.AttributeProcessOwner:
			processAttributes.Owner = value.StringVal()
		// System attributes
		case conventions.AttributeOSType:
			systemAttributes.OSType = value.StringVal()
		}
	})
	// Flatten both holders into the final tag list.
	tags = append(tags, processAttributes.extractTags()...)
	tags = append(tags, systemAttributes.extractTags()...)
	return tags
}
|
package encio
import (
"crypto/aes"
"crypto/cipher"
"fmt"
"io/ioutil"
"log"
"os"
)
// ExampleHello demonstrates round-tripping data through an encrypted
// appender: it builds an AES-GCM AEAD over an all-zero key, backs an
// Appender with a temp file, writes "Hello" then " world" at consecutive
// offsets, and reads the combined plaintext back.
//
// Cleanups: removed a duplicate err check left over after the TempFile
// error was already handled, and corrected the copy-pasted "Failed to
// write" message on the ReadAt error path.
func ExampleHello() {
	block, err := aes.NewCipher(make([]byte, 16))
	if err != nil {
		log.Fatalf("Failed to make AES: %s", err)
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		log.Fatalf("Failed to make AEAD: %s", err)
	}
	backend, err := ioutil.TempFile("", "test-encio-hello-")
	if err != nil {
		log.Fatalf("Failed to create temp file: %s.", err)
	}
	defer os.Remove(backend.Name())
	defer backend.Close()
	frontend := NewAppender(aead, backend, 4096)
	_, err = frontend.WriteAt([]byte("Hello"), 0)
	if err != nil {
		log.Fatalf("Failed to write: %s", err)
	}
	_, err = frontend.WriteAt([]byte(" world"), 5)
	if err != nil {
		log.Fatalf("Failed to write: %s", err)
	}
	b := make([]byte, 11)
	_, err = frontend.ReadAt(b, 0)
	if err != nil {
		log.Fatalf("Failed to read: %s", err)
	}
	fmt.Println(string(b))
	// Output: Hello world
}
|
package models
import (
"testing"
"github.com/google/go-cmp/cmp"
)
// TestRenderCommitStatusTemplates checks CommitStatusTemplate.Render with
// and without template variables.
//
// Fix: cmp.Diff(x, y) reports x with '-' and y with '+', so the expected
// value must be the first argument to match the "(-expected +got)" legend
// in the failure message (previously the arguments were swapped).
func TestRenderCommitStatusTemplates(t *testing.T) {
	cases := []struct {
		name           string
		inputTemplate  CommitStatusTemplate
		inputData      NotificationData
		expectedOutput *RenderedCommitStatus
	}{
		{
			name: "No template variables",
			inputTemplate: CommitStatusTemplate{
				Description: "Hello world",
				TargetURL:   "https://google.com",
			},
			inputData: NotificationData{
				EnvName: "test",
			},
			expectedOutput: &RenderedCommitStatus{
				Description: "Hello world",
				TargetURL:   "https://google.com",
			},
		},
		{
			name: "One template variable",
			inputTemplate: CommitStatusTemplate{
				Description: "Hello {{.EnvName}}",
				TargetURL:   "https://{{.EnvName}}-google.com",
			},
			inputData: NotificationData{
				EnvName: "world",
			},
			expectedOutput: &RenderedCommitStatus{
				Description: "Hello world",
				TargetURL:   "https://world-google.com",
			},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			out, err := c.inputTemplate.Render(c.inputData)
			if err != nil {
				t.Errorf("Didn't expect error: %v", err)
			}
			if diff := cmp.Diff(c.expectedOutput, out); diff != "" {
				t.Errorf("Render() mismatch (-expected +got):\n%s", diff)
			}
		})
	}
}
|
// Package blockingreader implements an io.Reader that can block the first
// read for an arbitrary amount of time.
package blockingreader
import (
"context"
"errors"
"io"
"math/rand"
"sync"
"time"
)
// BlockingReader is a reader that blocks on read calls until a receive
// completes from a given channel.
type BlockingReader struct {
	once      sync.Once          // ensures only the first Read blocks
	cancelErr error              // set to errCancelled when ctx was cancelled rather than timed out
	ctx       context.Context    // governs how long the first Read blocks
	cancel    context.CancelFunc // releases the block early (see Cancel)
	mu        sync.Mutex         // serializes access to r and cancelErr
	r         io.Reader          // the wrapped data source
}
// init seeds the global math/rand source.
// NOTE(review): nothing in the code shown here uses math/rand; confirm the
// seeding (and the import) are still required.
func init() {
	rand.Seed(time.Now().UnixNano())
}
// Read reads from the underlying reader once a receive completes from the
// channel.
func (br *BlockingReader) Read(p []byte) (int, error) {
	// Only block the first time.
	br.once.Do(func() {
		br.mu.Lock()
		defer br.mu.Unlock()
		// Wait for the context to end: either the allowAfter timeout
		// expires or Cancel is called.
		<-br.ctx.Done()
		// Explicit cancellation (as opposed to deadline expiry) poisons
		// this and all future reads.
		if br.ctx.Err() == context.Canceled {
			br.cancelErr = errCancelled
		}
	})
	br.mu.Lock()
	defer br.mu.Unlock()
	if br.cancelErr != nil {
		return 0, br.cancelErr
	}
	return br.r.Read(p)
}
// errCancelled is returned by Read after Cancel stops the reader early.
var errCancelled = errors.New("cancelled")

// Cancel stops the BlockingReader early and makes all reads return.
func (br *BlockingReader) Cancel() {
	br.cancel()
}
// NewBlockingReader wraps r in a BlockingReader whose first Read blocks
// until allowAfter elapses (or until Cancel is called).
func NewBlockingReader(allowAfter time.Duration, r io.Reader) *BlockingReader {
	ctx, cancel := context.WithTimeout(context.Background(), allowAfter)
	br := &BlockingReader{
		ctx:    ctx,
		cancel: cancel,
		r:      r,
	}
	return br
}
|
package 深度优先搜索
import "sort"
// makesquare reports whether the matchsticks in nums can be arranged into
// a square, using every stick exactly once.
func makesquare(nums []int) bool {
	total := 0
	for _, v := range nums {
		total += v
	}
	// A square needs a positive total length divisible by four.
	if total == 0 || total%4 != 0 {
		return false
	}
	// Four sides, each starting at length zero.
	return makeSequareExec(nums, []int{0, 0, 0, 0})
}
// makeSequareExec is the DFS worker behind makesquare.
// nums holds the sticks still to place (consumed from the back);
// lengthSet holds the current length of each of the four sides (mutated
// during the search and restored before returning).
// NOTE: without pruning this brute-force search can exceed time limits.
func makeSequareExec(nums []int, lengthSet []int) bool {
	// Every stick placed: succeed only when all four sides are equal.
	if len(nums) == 0 {
		for i := 1; i < len(lengthSet); i++ {
			if lengthSet[i] != lengthSet[i-1] {
				return false
			}
		}
		return true
	}
	last := nums[len(nums)-1]
	found := false
	for i := range lengthSet {
		// Try the last stick on side i, recurse, then undo the placement.
		lengthSet[i] += last
		found = found || makeSequareExec(nums[:len(nums)-1], lengthSet)
		lengthSet[i] -= last
	}
	return found
}
// ---------------------------------------------------- pruned variant ------------------------------
// makesquare (pruned variant): same contract as the version above, but it
// sorts the sticks first and passes the target side length down so the DFS
// can prune placements that would overflow a side.
func makesquare(nums []int) bool {
	total := 0
	for _, v := range nums {
		total += v
	}
	if total == 0 || total%4 != 0 {
		return false
	}
	// Sorting dramatically cuts the search time.
	sort.Ints(nums)
	return makeSequareExec(nums, total/4, []int{0, 0, 0, 0})
}
// makeSequareExec is the pruned DFS worker: targetLength is the required
// side length of the square, so any placement that would push a side past
// it is skipped outright.
func makeSequareExec(nums []int, targetLength int, lengthSet []int) bool {
	// Every stick placed: succeed only when all four sides are equal.
	if len(nums) == 0 {
		for i := 1; i < len(lengthSet); i++ {
			if lengthSet[i] != lengthSet[i-1] {
				return false
			}
		}
		return true
	}
	last := nums[len(nums)-1]
	found := false
	for i := range lengthSet {
		// Prune: this stick would overflow side i.
		if lengthSet[i]+last > targetLength {
			continue
		}
		lengthSet[i] += last
		found = found || makeSequareExec(nums[:len(nums)-1], targetLength, lengthSet)
		lengthSet[i] -= last
	}
	return found
}
/*
总结
1. 这个递归代码更加简洁,而且也没使用到外部变量,我觉得很好。 (这是借鉴了大佬的代码后自己写出来的)
2. 使用这个解法速度不太宽,但是内存消耗少了一些。 时空花费: 368 ms 2.1 MB
在添加了sort.Ints(nums)函数后,时间花费少了很多。 时空花费: 20 ms 2.1 MB
*/
|
package routes
import (
"controller"
"reflect"
"regexp"
)
// Route associates a URL pattern with per-HTTP-method controller actions.
type Route struct {
	Regex          *regexp.Regexp    // compiled URL pattern
	Methods        map[string]string // HTTP method -> controller action name
	Params         map[int]string    // positional index -> named URL parameter
	ControllerType reflect.Type      // controller type to dispatch to
}
// app is the minimal router surface AddRoutes needs: anything able to
// register a URL pattern with method-to-action mappings for a controller.
type app interface {
	AddRoute(pattern string, m map[string]string, c controller.ControllerInterface)
}
// AddRoutes registers this application's URL routes on a.
func AddRoutes(a app) {
	// Site index.
	a.AddRoute("/", map[string]string{
		"GET": "Index",
	}, &controller.PostController{})
	// Single post addressed by numeric id.
	a.AddRoute("/posts/:post_id([0-9]+)", map[string]string{
		"GET": "Show",
	}, &controller.PostController{})
}
|
package crawler
import (
"github.com/PuerkitoBio/goquery"
"fmt"
"log"
"regexp"
"strconv"
)
// NovelContentType discriminates entries in a novel's table of contents.
type NovelContentType int

// Novel is a syosetu.com novel: its text-download code plus the parsed
// table of contents.
type Novel struct {
	Tcode       string         `json:"tcode"`
	ContentList []NovelContent `json:"content_list"`
}

const ( // NovelContent Type
	Chapter NovelContentType = iota // a chapter heading in the index
	Sublist                         // an individual episode entry
)

// NovelContent is one entry of a novel's index — a chapter title or an
// episode — or, when fetched individually, an episode with its body text.
type NovelContent struct {
	Ctype     NovelContentType `json:"ctype"`
	Text      string           `json:"text"`
	SublistId int              `json:"sublist_id"`
	Content   string           `json:"content"`
}
// GetNovel fetches the index page of novel ncode from ncode.syosetu.com
// and returns its text-download code (Tcode) plus the parsed index
// entries. Fetch errors currently abort the process via log.Fatal.
func GetNovel(ncode string) (Novel, error) {
	doc, err := goquery.NewDocument(fmt.Sprintf("http://ncode.syosetu.com/%s/", ncode));
	if err != nil {
		log.Fatal(err);
	}
	n := Novel{}
	// The footer link labelled "TXTダウンロード" carries the 6-digit
	// download code in its href.
	doc.Find("#novel_footer ul li").Each( func(i int, s *goquery.Selection) {
		if s.Find("a").Text() == "TXTダウンロード" {
			href,_ := s.Find("a").Attr("href")
			re, _ := regexp.Compile("[0-9]{6}")
			tcode := string(re.Find([]byte(href)))
			n.Tcode = tcode
		} else {
		}
	})
	// Walk the index box: chapter headings and episode ("sublist") rows.
	doc.Find("div.index_box").Children().Each(func(i int, s *goquery.Selection) {
		if s.HasClass("chapter_title") {
			c := NovelContent{}
			c.Ctype = Chapter
			c.Text = s.Text()
			n.ContentList = append(n.ContentList, c)
		}
		if s.HasClass("novel_sublist2") {
			subtitle := s.Find(".novel_sublist2 dd.subtitle")
			url,_ := s.Find(".novel_sublist2 a").Attr("href")
			// The episode id is the numeric path segment of the link.
			re, _ := regexp.Compile("/([0-9]+)/")
			sublist_id,_ := strconv.Atoi(re.FindStringSubmatch(url)[1])
			c := NovelContent{}
			c.Ctype = Sublist
			c.Text = subtitle.Text()
			c.SublistId = sublist_id
			n.ContentList = append(n.ContentList, c)
		}
	});
	return n, err
}
// GetNovelContent fetches a single episode page for the given ncode/chapter
// id and returns its subtitle (Text) and body (Content).
// NOTE(review): log.Fatal aborts the whole process on a fetch error; a
// library function should return the error, but the signature has no error
// result — left as-is to preserve the interface.
func GetNovelContent(ncode string ,chapter_id int) NovelContent {
	url := fmt.Sprintf("http://ncode.syosetu.com/%s/%d", ncode, chapter_id)
	doc, err := goquery.NewDocument(url)
	log.Println("get content " + url)
	if err != nil {
		log.Fatal(err);
	}
	// Scrape the episode subtitle and main body out of the page.
	content_title := doc.Find(".novel_subtitle").Text()
	content := doc.Find("#novel_honbun").Text()
	c := NovelContent{}
	c.Text = content_title
	c.Content = content
	return c
}
|
// Cobra commandline console driver
package main
import (
"fmt"
"log"
"os"
"github.com/grrtrr/clcv2/examples/clconsole/cmd"
"github.com/spf13/cobra"
)
// main configures logging, enables alphabetical command sorting, and executes
// the root Cobra command; execution errors are reported on stderr.
func main() {
	// Timestamps only — date/file add no value for an interactive console.
	log.SetFlags(log.Ltime)
	cobra.EnableCommandSorting = true
	defer cmd.ExitHandler()
	err := cmd.Root.Execute()
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
	}
}
|
package tls
import (
"crypto/x509"
"crypto/x509/pkix"
"github.com/openshift/installer/pkg/asset"
)
// RootCA contains the private key and the cert that acts as a certificate
// authority, which is in turn really only used to generate a certificate
// for the Machine Config Server. More in
// https://docs.openshift.com/container-platform/4.13/security/certificate_types_descriptions/machine-config-operator-certificates.html
// and
// https://github.com/openshift/api/tree/master/tls/docs/MachineConfig%20Operator%20Certificates
// This logic dates back to the very creation of OpenShift 4 and the initial code for this project.
// The private key is (as best we know) completely discarded after an installation is complete.
type RootCA struct {
	SelfSignedCertKey
}

// Compile-time check that RootCA satisfies asset.WritableAsset.
var _ asset.WritableAsset = (*RootCA)(nil)

// Dependencies returns the empty dependency list: the root CA is generated
// from scratch.
func (c *RootCA) Dependencies() []asset.Asset {
	return []asset.Asset{}
}

// Generate creates the self-signed MCS/Ignition root CA certificate, valid
// for ten years and usable for signing child certificates.
func (c *RootCA) Generate(parents asset.Parents) error {
	cfg := CertCfg{
		Subject:   pkix.Name{CommonName: "root-ca", OrganizationalUnit: []string{"openshift"}},
		KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		IsCA:      true,
		Validity:  ValidityTenYears,
	}
	return c.SelfSignedCertKey.Generate(&cfg, "root-ca")
}

// Name returns the human-friendly name of the asset.
func (c *RootCA) Name() string {
	return "Machine Config Server Root CA"
}
|
/*
Tencent is pleased to support the open source community by making Basic Service Configuration Platform available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions and
limitations under the License.
*/
package search
import (
"fmt"
"strings"
"gorm.io/gen/field"
"bscp.io/pkg/dal/gen"
)
// TableName is table name which support search.
type TableName string

const (
	// TemplateSpace is template space table
	TemplateSpace TableName = "template_spaces"
	// Template is template table
	Template TableName = "templates"
	// TemplateRevision is template revision table
	TemplateRevision TableName = "template_revisions"
	// TemplateSet is template set table
	TemplateSet TableName = "template_sets"
	// TemplateVariable is template variable table
	TemplateVariable TableName = "template_variables"
	// ReleasedAppTemplate is released app template table
	ReleasedAppTemplate TableName = "released_app_templates"
)
// supportedFields is supported search fields of tables.
// Keep in sync with supportedFieldsMap below — both describe the same set.
var supportedFields = map[TableName][]string{
	TemplateSpace:       {"name", "memo", "creator", "reviser"},
	Template:            {"name", "path", "memo", "creator", "reviser"},
	TemplateRevision:    {"revision_name", "revision_memo", "name", "path", "creator"},
	TemplateSet:         {"name", "memo", "creator", "reviser"},
	TemplateVariable:    {"name", "memo", "creator", "reviser"},
	ReleasedAppTemplate: {"revision_name", "revision_memo", "name", "path", "creator"},
}
// supportedFieldsMap is supported search fields map of tables — the set form
// of supportedFields above, used for O(1) field validation. Keep in sync.
var supportedFieldsMap = map[TableName]map[string]struct{}{
	TemplateSpace:       {"name": {}, "memo": {}, "creator": {}, "reviser": {}},
	Template:            {"name": {}, "path": {}, "memo": {}, "creator": {}, "reviser": {}},
	TemplateRevision:    {"revision_name": {}, "revision_memo": {}, "name": {}, "path": {}, "creator": {}},
	TemplateSet:         {"name": {}, "memo": {}, "creator": {}, "reviser": {}},
	TemplateVariable:    {"name": {}, "memo": {}, "creator": {}, "reviser": {}},
	ReleasedAppTemplate: {"revision_name": {}, "revision_memo": {}, "name": {}, "path": {}, "creator": {}},
}
// defaultFields is default search fields used when the caller does not
// specify any field explicitly.
var defaultFields = map[TableName][]string{
	TemplateSpace:       {"name"},
	Template:            {"name"},
	TemplateRevision:    {"revision_name"},
	TemplateSet:         {"name"},
	TemplateVariable:    {"name"},
	ReleasedAppTemplate: {"revision_name"},
}
// getGenFieldsMap get the map for `table column name` => `gorm/gen field object`.
// NOTE: the whole nested map literal is rebuilt on every call — callers in a
// loop should invoke this once and reuse the result.
func getGenFieldsMap(q *gen.Query) map[TableName]map[string]field.String {
	return map[TableName]map[string]field.String{
		TemplateSpace: {
			"name":    q.TemplateSpace.Name,
			"memo":    q.TemplateSpace.Memo,
			"creator": q.TemplateSpace.Creator,
			"reviser": q.TemplateSpace.Reviser,
		},
		Template: {
			"name":    q.Template.Name,
			"path":    q.Template.Path,
			"memo":    q.Template.Memo,
			"creator": q.Template.Creator,
			"reviser": q.Template.Reviser,
		},
		TemplateRevision: {
			"revision_name": q.TemplateRevision.RevisionName,
			"revision_memo": q.TemplateRevision.RevisionMemo,
			"name":          q.TemplateRevision.Name,
			"path":          q.TemplateRevision.Path,
			"creator":       q.TemplateRevision.Creator,
		},
		TemplateSet: {
			"name":    q.TemplateSet.Name,
			"memo":    q.TemplateSet.Memo,
			"creator": q.TemplateSet.Creator,
			"reviser": q.TemplateSet.Reviser,
		},
		TemplateVariable: {
			"name":    q.TemplateVariable.Name,
			"memo":    q.TemplateVariable.Memo,
			"creator": q.TemplateVariable.Creator,
			"reviser": q.TemplateVariable.Reviser,
		},
		ReleasedAppTemplate: {
			// released tables store the revision fields under prefixed names
			"revision_name": q.ReleasedAppTemplate.TemplateRevisionName,
			"revision_memo": q.ReleasedAppTemplate.TemplateRevisionMemo,
			"name":          q.ReleasedAppTemplate.Name,
			"path":          q.ReleasedAppTemplate.Path,
			"creator":       q.ReleasedAppTemplate.Creator,
		},
	}
}
// Searcher is the interface for search
type Searcher interface {
	// SearchExprs builds the gorm/gen match expressions for the search.
	SearchExprs(q *gen.Query) []field.Expr
	// SearchFields returns the effective field names being searched.
	SearchFields() []string
}

// searcher implements the Searcher interface
type searcher struct {
	fields    []string // requested fields; empty means "use defaults"
	value     string   // search value; empty disables search
	tableName TableName
	// supportedFields is never populated by NewSearcher — TODO confirm it is
	// intentionally unused.
	supportedFields []string
}
// NewSearcher new a Searcher for the given table, validating that every
// requested field is searchable on that table.
//
// fieldsStr is a comma-separated field list; empty means "use the table's
// default fields". value is the (case-insensitive) value to search for.
func NewSearcher(fieldsStr string, value string, table TableName) (Searcher, error) {
	fields := make([]string, 0)
	if fieldsStr != "" {
		fields = strings.Split(fieldsStr, ",")
	}
	// validate the fields
	// Fix: the loop variable is named f (was `field`) so it no longer shadows
	// the imported gorm/gen "field" package.
	supported := supportedFieldsMap[table]
	var badFields []string
	for _, f := range fields {
		if _, ok := supported[f]; !ok {
			badFields = append(badFields, f)
		}
	}
	if len(badFields) > 0 {
		return nil, fmt.Errorf("not support field in %v, supported fields is %v", badFields, supportedFields[table])
	}
	return &searcher{
		fields:    fields,
		value:     value,
		tableName: table,
	}, nil
}
// SearchExprs implements the interface method. It returns one case-insensitive
// regexp-match expression per searched field (falling back to the table's
// default fields), or no expressions when no search value is set.
func (s *searcher) SearchExprs(q *gen.Query) []field.Expr {
	// if search value is not set, no need to search
	if s.value == "" {
		return []field.Expr{}
	}
	// Fix: build the column map once — the original called getGenFieldsMap(q)
	// inside the loop, rebuilding the entire nested map per field.
	cols := getGenFieldsMap(q)[s.tableName]
	fields := s.fields
	// if search fields is not specified, use the default search fields
	if len(fields) == 0 {
		fields = defaultFields[s.tableName]
	}
	exprs := make([]field.Expr, 0, len(fields))
	for _, f := range fields {
		exprs = append(exprs, cols[f].Regexp("(?i)"+s.value))
	}
	return exprs
}
// SearchFields implements the interface method. It reports the field names
// the search will effectively run against.
func (s *searcher) SearchFields() []string {
	// No search value means no search at all.
	if s.value == "" {
		return []string{}
	}
	// Explicit fields win; otherwise fall back to the table's defaults.
	if len(s.fields) > 0 {
		return s.fields
	}
	return defaultFields[s.tableName]
}
|
package web
import (
"fmt"
"log"
"math/rand"
"os"
"os/exec"
"time"
)
// failOnError aborts the process with msg and err when err is non-nil;
// otherwise it is a no-op.
func failOnError(err error, msg string) {
	if err == nil {
		return
	}
	log.Fatalf("%s: %s", msg, err)
}
// letters is the alphabet RandString draws from.
const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

// RandString returns a random string of length n built from ASCII letters,
// using the package-global math/rand source.
func RandString(n uint8) string {
	out := make([]byte, n)
	for i := 0; i < len(out); i++ {
		out[i] = letters[rand.Intn(len(letters))]
	}
	return string(out)
}
// CreateTmpScript creates a shell-script file named tmp_script_<random>.sh in
// the current directory and returns the open handle; the caller owns closing
// and removing the file.
func CreateTmpScript() (*os.File, error) {
	name := fmt.Sprintf("tmp_script_%s.sh", RandString(9))
	return os.Create(name)
}
// main generates a throw-away shell script that writes "hello world" into
// a.txt, runs it with sh, and removes the script afterwards.
func main() {
	rand.Seed(time.Now().UnixNano())
	script := fmt.Sprintf("tmp_script_%s.sh", RandString(9))
	file, err := os.Create(script)
	// Fix: check the error before registering cleanup so we never defer
	// Close/Remove for a file that was not created (the original deferred
	// first and checked the error afterwards).
	failOnError(err, "create script")
	defer file.Close()
	defer os.Remove(script)
	// Fix: the WriteString error was silently discarded.
	_, err = file.WriteString("cat<<EOF>a.txt\nhello world\nEOF\n")
	failOnError(err, "write script")
	cmd := exec.Command("sh", script)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr // surface script errors too
	failOnError(cmd.Run(), "run script")
}
|
package main
import "fmt"
// Usb is the interface a USB device must satisfy.
type Usb interface {
	// Two method declarations with no implementation.
	Start()
	Stop()
	// Test() is deliberately commented out: neither Phone nor Camer
	// implements it, so enabling it would make both types stop satisfying
	// Usb and the calls below would fail to compile.
	//Test()
}
// Phone satisfies Usb implicitly by defining Start and Stop — Go has no
// explicit "implements" declaration.
type Phone struct {
}

// Start reports the phone starting up.
func (ph Phone) Start() {
	fmt.Println("Phone start.......")
}

// Stop reports the phone shutting down.
func (ph Phone) Stop() {
	fmt.Println("Phone stop.......")
}
// Camer satisfies Usb implicitly by defining Start and Stop.
type Camer struct {
}

// Start reports the camera starting up.
func (cm Camer) Start() {
	fmt.Println("Camer start.......")
}

// Stop reports the camera shutting down.
func (cm Camer) Stop() {
	fmt.Println("Camer stop.......")
}
// Computer can drive any attached Usb device.
type Computer struct {
}

// Working accepts any value whose type satisfies Usb — i.e. implements every
// method the interface declares — and runs its start/stop cycle.
func (cop Computer) Working(usb Usb) {
	usb.Start()
	usb.Stop()
}
// main plugs a Phone and a Camer into a Computer; both calls compile because
// each type implements the Usb interface.
func main() {
	cop := Computer{}
	cop.Working(Phone{})
	cop.Working(Camer{})
}
|
package eod
import (
_ "embed"
"strings"
"sync"
eodb "github.com/Nv7-Github/Nv7Haven/db"
"github.com/Nv7-Github/Nv7Haven/eod/base"
"github.com/Nv7-Github/Nv7Haven/eod/basecmds"
"github.com/Nv7-Github/Nv7Haven/eod/categories"
"github.com/Nv7-Github/Nv7Haven/eod/elements"
"github.com/Nv7-Github/Nv7Haven/eod/polls"
"github.com/Nv7-Github/Nv7Haven/eod/treecmds"
"github.com/Nv7-Github/Nv7Haven/eod/types"
"github.com/bwmarrin/discordgo"
)
const (
	// clientID is the Discord application (bot) ID.
	clientID = "819076922867712031"
	// status is the presence text shown for the bot.
	status = "Use /help to view the bot's commands!"
)

// token is the Discord bot token, embedded from token.txt at build time.
//
//go:embed token.txt
var token string

// bot is the process-wide EoD instance created by InitEoD.
var bot EoD

// lock guards shared bot state.
var lock = &sync.RWMutex{}
// EoD contains the data for an EoD bot
type EoD struct {
	dg  *discordgo.Session          // live Discord session
	db  *eodb.DB                    // element database
	dat map[string]types.ServerData // map[guild]data
	// Subsystems
	base       *base.Base
	treecmds   *treecmds.TreeCmds
	polls      *polls.Polls
	basecmds   *basecmds.BaseCmds
	categories *categories.Categories
	elements   *elements.Elements
}
// InitEoD initializes the EoD bot: it opens a Discord session with the
// embedded token, sets the gateway intents, stores the global bot instance,
// sets the presence text, and runs subsystem init. It panics if the session
// cannot be created or opened.
func InitEoD(db *eodb.DB) EoD {
	// Discord bot
	dg, err := discordgo.New("Bot " + strings.TrimSpace(token))
	if err != nil {
		panic(err)
	}
	dg.Identify.Intents = discordgo.MakeIntent(discordgo.IntentsGuildMessages | discordgo.IntentsGuildMessageReactions | discordgo.IntentsGuildMembers | discordgo.IntentsGuilds)
	err = dg.Open()
	if err != nil {
		panic(err)
	}
	bot = EoD{
		dg:  dg,
		db:  db,
		dat: make(map[string]types.ServerData),
	}
	// NOTE(review): UpdateGameStatus's error is discarded — presumably the
	// presence is best-effort; confirm that is intended.
	dg.UpdateGameStatus(0, status)
	bot.init()
	// FOOLS: april-fools data; foolsRaw is defined elsewhere in the package.
	bot.base.InitFools(foolsRaw)
	if base.IsFoolsMode {
		types.MaxComboLength = 2
	}
	return bot
}
// Close cleans up the bot's resources: the element database first, then the
// Discord session. NOTE(review): both Close errors are dropped — confirm
// that is acceptable for shutdown.
func (b *EoD) Close() {
	b.db.Close()
	b.dg.Close()
}
|
package graph
// Connected components of a graph.
// I is the minimal graph interface this package needs (interface{}-style
// stand-in for generics): vertex/edge counts, edge insertion, adjacency.
type I interface {
	VersNum() int
	EdgeNum() int
	AddEdge(v1, v2 int)
	//hasEdge(v1, v2 int) bool
	AdjVertexs(v int) (slice []int)
}

// Component computes the connected components of a graph via depth-first
// search: vertices in the same component share the same id.
type Component struct {
	graph   I      // the graph under inspection
	visited []bool // whether each vertex has been visited by the DFS
	ccount  int    // number of connected components found
	id      []int  // component id per vertex; equal ids mean connected
}

// NewComponent runs a full DFS over every vertex of graph and returns the
// computed component structure.
func NewComponent(graph I) *Component {
	vers := graph.VersNum()
	g := &Component{
		graph:   graph,
		visited: make([]bool, vers),
		id:      make([]int, vers),
	}
	for i := range g.id {
		g.id[i] = -1
	}
	for i := 0; i < vers; i++ {
		if !g.visited[i] {
			// An unvisited vertex starts a new component.
			g.dfs(i)
			g.ccount++
		}
	}
	return g
}

// dfs marks every vertex reachable from i as visited and stamps it with the
// current component id.
func (c *Component) dfs(i int) {
	c.visited[i] = true
	c.id[i] = c.ccount
	for _, v := range c.graph.AdjVertexs(i) {
		if !c.visited[v] {
			c.dfs(v)
		}
	}
}

// Count returns the number of connected components.
func (c Component) Count() int {
	return c.ccount
}

// IsConnected reports whether vertices m and n belong to the same component.
// Bug fix: the original range check tested `m < VersNum()` twice and never
// bounded n from above, allowing an out-of-range panic on c.id[n].
func (c Component) IsConnected(m, n int) bool {
	vers := c.graph.VersNum()
	if m < 0 || m >= vers || n < 0 || n >= vers {
		return false
	}
	return c.id[m] == c.id[n]
}
|
package main
import "fmt"
const (
	_ = iota // skip 0 so the menu options start at 1
	alcool
	gasolina
	diesel
	fim // sentinel choice (4): ends the sales loop
)

// nomes maps the fuel constants to display names; index 0 is a placeholder.
var nomes = []string{" ", "Alcool", "Gasolina", "Diesel"}
// main reads fuel-sale choices from stdin (1=alcool, 2=gasolina, 3=diesel,
// 4=end), tallies them, and prints the totals.
func main() {
	var escolha int
	var qntd [4]int
	for {
		// Fix: stop on read failure — the original ignored the Scanf error,
		// so EOF (or a non-numeric token) left escolha unchanged and made
		// the loop spin forever.
		if _, err := fmt.Scanf("%d", &escolha); err != nil {
			break
		}
		if escolha < 1 || escolha > 4 {
			continue
		}
		if escolha == fim {
			break
		}
		// escolha is now 1..3; the fuel constants double as tally indexes.
		qntd[escolha]++
	}
	// Apresenta resultados
	fmt.Println("MUITO OBRIGADO")
	for i := 1; i < 4; i++ {
		fmt.Printf("%s: %d\n", nomes[i], qntd[i])
	}
}
|
/**
* @Author: DollarKiller
* @Description:
* @Github: https://github.com/dollarkillerx
* @Date: Create in 23:05 2019-09-17
*/
package config
import (
"gopkg.in/yaml.v2"
"io/ioutil"
)
// basisConf mirrors config.yml: app-level settings (cron spec and the
// notification email) plus MySQL connection settings.
type basisConf struct {
	App struct {
		Corn  string `yaml:"corn"` // cron expression; yaml key is spelled "corn"
		Email string `yaml:"email"`
	}
	Mysql struct {
		Dsn   string `yaml:"dsn"`
		Cache bool   `yaml:"cache"`
	}
}
var (
	// Basis is the process-wide configuration, loaded once at package init.
	Basis *basisConf
)

// init eagerly loads ./config.yml into Basis and panics when the file is
// missing or not valid YAML — the program cannot run unconfigured.
func init() {
	Basis = &basisConf{}
	bytes, e := ioutil.ReadFile("./config.yml")
	if e != nil {
		panic(e.Error())
	}
	e = yaml.Unmarshal(bytes, Basis)
	if e != nil {
		panic(e.Error())
	}
}
|
package main
import (
"fmt"
"io"
"log"
"net"
"os"
"os/exec"
"os/signal"
"syscall"
"github.com/creack/pty"
)
// handleConn bridges a TCP connection to an interactive /bin/bash running on
// a pseudo-terminal: bytes from the connection feed the pty master and pty
// output streams back over the connection. It returns when the pty's output
// side closes (i.e. after the shell exits).
func handleConn(conn net.Conn) {
	// NOTE(review): the error from pty.Open is discarded — ptmx/tty would be
	// nil on failure; confirm that is acceptable for this demo.
	ptmx, tty, _ := pty.Open()
	// Handle pty size changes; without this, full-screen programs such as
	// htop cannot render correctly.
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGWINCH)
	go func() {
		for range ch {
			if err := pty.InheritSize(os.Stdin, ptmx); err != nil {
				log.Printf("error resizing pty: %s", err)
			}
		}
	}()
	ch <- syscall.SIGWINCH // Initial resize.
	go func() {
		// Run bash as a session leader with the pty slave as its controlling
		// terminal and stdio; tear everything down once it exits.
		c := exec.Command("/bin/bash")
		c.SysProcAttr = &syscall.SysProcAttr{}
		c.SysProcAttr.Setsid = true
		c.SysProcAttr.Setctty = true
		c.Stdin = tty
		c.Stdout = tty
		c.Stderr = tty
		_ = c.Start()
		_ = c.Wait()
		_ = ptmx.Close()
		signal.Stop(ch)
		close(ch)
		conn.Close()
	}()
	// Pump client input into the shell...
	go func() {
		_, _ = io.Copy(ptmx, conn)
	}()
	// ...and shell output back to the client; blocks until ptmx closes.
	_, _ = io.Copy(conn, ptmx)
}
// main listens on TCP :30000, accepts a single connection, and serves an
// interactive shell over it via handleConn.
func main() {
	const addr = "0.0.0.0:30000"
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		fmt.Println("listen failed, err:", err)
		return
	}
	defer listener.Close()
	conn, acceptErr := listener.Accept()
	if acceptErr != nil {
		fmt.Println("accept failed, err:", acceptErr)
		return
	}
	handleConn(conn)
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
)
// ComponentTemplateSummary type.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/cluster/_types/ComponentTemplate.ts#L42-L54
// Generated struct — the json tags map fields to the Elasticsearch API keys;
// pointer/omitempty fields are optional in the API payload.
type ComponentTemplateSummary struct {
	Aliases   map[string]AliasDefinition `json:"aliases,omitempty"`
	Lifecycle *DataLifecycleWithRollover `json:"lifecycle,omitempty"`
	Mappings  *TypeMapping               `json:"mappings,omitempty"`
	Meta_     Metadata                   `json:"_meta,omitempty"`
	Settings  map[string]IndexSettings   `json:"settings,omitempty"`
	Version   *int64                     `json:"version,omitempty"`
}
// UnmarshalJSON decodes a ComponentTemplateSummary by walking the JSON token
// stream and dispatching on top-level keys, lazily allocating the aliases and
// settings maps before decoding into them.
func (s *ComponentTemplateSummary) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}
		// NOTE(review): this switch matches any string token, not only object
		// keys, so a string VALUE equal to a key name could be misread. This
		// mirrors the upstream generated pattern — do not hand-tune.
		switch t {
		case "aliases":
			if s.Aliases == nil {
				s.Aliases = make(map[string]AliasDefinition, 0)
			}
			if err := dec.Decode(&s.Aliases); err != nil {
				return err
			}
		case "lifecycle":
			if err := dec.Decode(&s.Lifecycle); err != nil {
				return err
			}
		case "mappings":
			if err := dec.Decode(&s.Mappings); err != nil {
				return err
			}
		case "_meta":
			if err := dec.Decode(&s.Meta_); err != nil {
				return err
			}
		case "settings":
			if s.Settings == nil {
				s.Settings = make(map[string]IndexSettings, 0)
			}
			if err := dec.Decode(&s.Settings); err != nil {
				return err
			}
		case "version":
			if err := dec.Decode(&s.Version); err != nil {
				return err
			}
		}
	}
	return nil
}
// NewComponentTemplateSummary returns a ComponentTemplateSummary with its
// aliases and settings maps pre-allocated.
func NewComponentTemplateSummary() *ComponentTemplateSummary {
	return &ComponentTemplateSummary{
		Aliases:  make(map[string]AliasDefinition, 0),
		Settings: make(map[string]IndexSettings, 0),
	}
}
|
// Note: the example only works with the code within the same release/branch.
package others
import (
"context"
"flag"
"fmt"
"k8s.io/apimachinery/pkg/fields"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"os"
"path/filepath"
"time"
)
// main builds a Kubernetes clientset from the local kubeconfig (flag
// -kubeconfig, defaulting to $HOME/.kube/config) and runs the shared-informer
// demo; the low-level ListWatch variant is left commented out.
func main() {
	var (
		clientset *kubernetes.Clientset
	)
	kubeconfig := flag.String("kubeconfig", filepath.Join(os.Getenv("HOME"), ".kube", "config"), "(optional) absolute path to the kubeconfig file")
	flag.Parse()
	// use the current context in kubeconfig
	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err.Error())
	}
	if clientset, err = kubernetes.NewForConfig(config); err != nil {
		panic(err.Error())
	}
	useInformer(clientset)
	//useNewListWatchFromClient(clientset)
}
// useNewListWatchFromClient lists all pods once, prints their phases, then
// runs a low-level ListWatch-based informer on the default namespace that
// logs every pod add/update/delete event. Runs forever.
func useNewListWatchFromClient(clientset *kubernetes.Clientset) {
	podList, err := clientset.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err.Error())
	}
	fmt.Printf("There are %d pods in the cluster\n", len(podList.Items))
	for _, pod := range podList.Items {
		// Fix: Phase is a string-typed value, so %s is the correct verb —
		// %d printed "%!d(string=Running)".
		fmt.Printf("Pod %s\n", pod.Status.Phase)
	}
	watchlist := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", v1.NamespaceDefault,
		fields.Everything())
	_, controller := cache.NewInformer(
		watchlist,
		&v1.Pod{},
		time.Second*0, // no periodic resync
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				fmt.Printf("add: %s \n", obj)
			},
			DeleteFunc: func(obj interface{}) {
				fmt.Printf("delete: %s \n", obj)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				fmt.Printf("old: %s, new: %s \n", oldObj, newObj)
			},
		},
	)
	stop := make(chan struct{})
	go controller.Run(stop)
	// Block forever; the informer goroutine does the work.
	for {
		time.Sleep(time.Second)
	}
}
/*
Preferred way to use Informer instead of watcher
*/
// useInformer uses a SharedInformerFactory so the watch/cache is shared,
// registers add/update/delete handlers, waits for the cache to sync, then
// looks one pod up from the local lister.
func useInformer(clientset *kubernetes.Clientset) {
	informerFactory := informers.NewSharedInformerFactory(clientset, time.Second*30)
	podInformer := informerFactory.Core().V1().Pods()
	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			fmt.Printf("add: %s \n", obj)
		},
		DeleteFunc: func(obj interface{}) {
			fmt.Printf("delete: %s \n", obj)
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			fmt.Printf("old: %s, new: %s \n", oldObj, newObj)
		},
	})
	informerFactory.Start(wait.NeverStop)
	informerFactory.WaitForCacheSync(wait.NeverStop)
	// Fix: the lookup error was discarded and pod.Name dereferenced a nil
	// pointer whenever the named pod did not exist in the cluster.
	pod, err := podInformer.Lister().Pods(v1.NamespaceDefault).Get("kubia-hbcbk")
	if err != nil {
		fmt.Println("pod lookup failed:", err)
		return
	}
	fmt.Println(pod.Name)
}
|
// countPrimes returns the number of primes strictly less than n.
func countPrimes(n int) int {
	return sol1(n)
}

// sol1 counts primes below n using a sieve of Eratosthenes.
// time: O(n log log n), space: O(n)
func sol1(n int) int {
	if n < 2 {
		return 0
	}
	// composite[i] == true means i is known to be non-prime.
	composite := make([]bool, n)
	composite[0], composite[1] = true, true
	for i := 2; i*i < n; i++ {
		if composite[i] {
			continue
		}
		// Start at i*i: smaller multiples were marked by smaller primes.
		for j := i * i; j < n; j += i {
			composite[j] = true
		}
	}
	count := 0
	for _, isComposite := range composite {
		if !isComposite {
			count++
		}
	}
	return count
}
|
package main
import (
"github.com/lnhote/hello-thrift/gen-go/bill"
"git.apache.org/thrift.git/lib/go/thrift"
"log"
"fmt"
"os"
"context"
)
const (
	// NetworkAddr is the host:port the Thrift server binds to.
	NetworkAddr = "127.0.0.1:9090"
)

// BillImpl implements the generated BillService handler interface.
type BillImpl struct {
}
// GetBillList returns a fixed demo list of two bills for the given user and
// logs the call. NOTE(review): the unkeyed bill.BillInfo literals rely on the
// generated struct's field order (apparently id, month, amount, userID) —
// confirm against the gen-go code.
func (b *BillImpl) GetBillList(ctx context.Context, userID string) ([]*bill.BillInfo, error) {
	bills := []*bill.BillInfo{{"1", "201803", 100, userID}, {"2", "201804", 200, userID}}
	log.Printf("GetBillList(%s)", userID)
	return bills, nil
}
func main() {
transportFactory := thrift.NewTFramedTransportFactory(thrift.NewTTransportFactory())
protocolFactory := thrift.NewTBinaryProtocolFactoryDefault()
serverTransport, err := thrift.NewTServerSocket(NetworkAddr)
if err != nil {
fmt.Println("Error!", err)
os.Exit(1)
}
handler := &BillImpl{}
processor := bill.NewBillServiceProcessor(handler)
server := thrift.NewTSimpleServer4(processor, serverTransport, transportFactory, protocolFactory)
fmt.Println("thrift server in", NetworkAddr)
server.Serve()
} |
package spa
import (
"errors"
"os"
)
// Config describes how to serve a single-page application: the directory
// holding the SPA and the npm script used to run it.
type Config struct {
	SPADirectory string
	NPMScript    string
}

var (
	// defaultScript is the npm script used when the caller specifies none.
	defaultScript = "start"
)

// validate checks that the configured SPA directory exists and is reachable.
func (c *Config) validate() error {
	if _, err := os.Stat(c.SPADirectory); err != nil {
		if os.IsNotExist(err) {
			return errors.New("spa directory does not exist")
		}
		// Fix: a stat failure other than "not exist" (e.g. a permission
		// error) previously passed validation silently.
		return err
	}
	return nil
}

// newConfig builds a validated Config from the caller's input, filling in the
// default npm script when none was given.
func newConfig(config *Config) (*Config, error) {
	spaConfig := &Config{NPMScript: defaultScript, SPADirectory: config.SPADirectory}
	if config.NPMScript != "" {
		spaConfig.NPMScript = config.NPMScript
	}
	if err := spaConfig.validate(); err != nil {
		return nil, err
	}
	return spaConfig, nil
}
|
//go:build go1.21
// +build go1.21
package log
import (
"context"
runtimeext "github.com/go-playground/pkg/v5/runtime"
"log/slog"
"runtime"
)
// Compile-time check that slogHandler satisfies slog.Handler.
var _ slog.Handler = (*slogHandler)(nil)

// slogHandler adapts Go's log/slog records onto this package's Entry/Field
// based logging pipeline.
type slogHandler struct {
	// List of Groups, each subsequent group belongs to the previous group,
	// except the first, which holds the top-level fields before any grouping.
	groups []Field
}
// Enabled reports whether logging is enabled for the given slog level, i.e.
// whether this package has a handler registered for the mapped Level. The
// package-level logHandlers map is read under the shared rw lock.
func (s *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
	rw.RLock()
	_, enabled := logHandlers[convertSlogLevel(level)]
	rw.RUnlock()
	return enabled
}
// Handle converts one slog.Record into this package's Entry and dispatches it
// via HandleEntry. Fields accumulated via WithAttrs/WithGroup are merged with
// the record's own attrs; error-and-above records with a caller PC get the
// source location attached under slog.SourceKey.
func (s *slogHandler) Handle(ctx context.Context, record slog.Record) error {
	var current Field
	if len(s.groups) == 0 {
		// No grouping yet: collect fields under an anonymous group.
		current = G("")
	} else {
		// Start from a copy of the innermost group's fields so the shared
		// handler state is never mutated.
		group := s.groups[len(s.groups)-1]
		last := group.Value.([]Field)
		fields := make([]Field, len(last), len(last)+record.NumAttrs()+1)
		copy(fields, last)
		current = F(group.Key, fields)
	}
	if record.NumAttrs() > 0 {
		record.Attrs(func(attr slog.Attr) bool {
			current.Value = s.convertAttrToField(current.Value.([]Field), attr)
			return true
		})
	}
	if record.Level >= slog.LevelError && record.PC != 0 {
		// Resolve the caller frame and append it as a source field; the
		// final byte is stripped (assumes extractSource appends a trailing
		// separator — TODO confirm).
		fs := runtime.CallersFrames([]uintptr{record.PC})
		f, _ := fs.Next()
		sourceBuff := BytePool().Get()
		sourceBuff.B = extractSource(sourceBuff.B, runtimeext.Frame{Frame: f})
		current.Value = append(current.Value.([]Field), F(slog.SourceKey, string(sourceBuff.B[:len(sourceBuff.B)-1])))
		BytePool().Put(sourceBuff)
	}
	// Re-nest the current group inside its ancestors, copying each level so
	// the handler stays safe for reuse.
	for i := len(s.groups) - 2; i >= 0; i-- {
		group := s.groups[i]
		gf := group.Value.([]Field)
		copied := make([]Field, len(gf), len(gf)+1)
		copy(copied, gf)
		current = G(group.Key, append(copied, current)...)
	}
	var e Entry
	if current.Key == "" {
		e = Entry{Fields: current.Value.([]Field)}
	} else {
		e = Entry{Fields: []Field{current}}
	}
	e.Message = record.Message
	e.Level = convertSlogLevel(record.Level)
	e.Timestamp = record.Time
	HandleEntry(e)
	return nil
}
// WithAttrs returns a new handler whose current (innermost) group also
// contains the given attributes; existing group state is copied, never
// mutated, as required by the slog.Handler contract.
func (s *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	var groups []Field
	if len(s.groups) == 0 {
		// First attributes ever: seed an anonymous top-level group.
		groups = []Field{G("", s.convertAttrsToFields(nil, attrs)...)}
	} else {
		// Copy the group stack, then extend a copy of the innermost group's
		// field slice with the converted attrs.
		groups = make([]Field, len(s.groups))
		copy(groups, s.groups)
		l := len(groups) - 1
		current := groups[l]
		currentFields := current.Value.([]Field)
		copiedFields := make([]Field, len(currentFields), len(currentFields)+len(attrs))
		copy(copiedFields, currentFields)
		groups[l].Value = s.convertAttrsToFields(copiedFields, attrs)
	}
	return &slogHandler{
		groups: groups,
	}
}
// convertAttrsToFields appends the given attrs to fields — skipping empty
// keys and zero-valued time attributes — and returns the extended slice.
func (s *slogHandler) convertAttrsToFields(fields []Field, attrs []slog.Attr) []Field {
	for _, attr := range attrs {
		skip := attr.Key == "" || (attr.Key == slog.TimeKey && attr.Value.Time().IsZero())
		if skip {
			continue
		}
		fields = s.convertAttrToField(fields, attr)
	}
	return fields
}
// convertAttrToField appends one slog.Attr to fields, resolving LogValuer
// values first and converting group attrs into a nested []Field value.
func (s *slogHandler) convertAttrToField(fields []Field, attr slog.Attr) []Field {
	var value any
	switch attr.Value.Kind() {
	case slog.KindLogValuer:
		// Resolve the lazy value, then convert the result recursively.
		return s.convertAttrToField(fields, slog.Attr{Key: attr.Key, Value: attr.Value.LogValuer().LogValue()})
	case slog.KindGroup:
		attrs := attr.Value.Group()
		groupedFields := make([]Field, 0, len(attrs))
		value = s.convertAttrsToFields(groupedFields, attrs)
	default:
		value = attr.Value.Any()
	}
	return append(fields, F(attr.Key, value))
}
// WithGroup returns a new handler whose subsequent attributes are nested
// under the named group; the receiver's state is copied, not mutated.
func (s *slogHandler) WithGroup(name string) slog.Handler {
	next := make([]Field, 0, len(s.groups)+1)
	next = append(next, s.groups...)
	next = append(next, G(name))
	return &slogHandler{groups: next}
}
// convertSlogLevel maps slog levels — including this package's extended
// Slog*Level constants — onto this package's Level values. Unmatched levels
// fall into the nearest custom band; anything else becomes InfoLevel.
// NOTE(review): levels strictly between LevelWarn and LevelError fall through
// to InfoLevel — confirm that is intended.
func convertSlogLevel(level slog.Level) Level {
	switch level {
	case slog.LevelDebug:
		return DebugLevel
	case slog.LevelInfo:
		return InfoLevel
	case SlogNoticeLevel:
		return NoticeLevel
	case slog.LevelWarn:
		return WarnLevel
	case slog.LevelError:
		return ErrorLevel
	case SlogPanicLevel:
		return PanicLevel
	case SlogAlertLevel:
		return AlertLevel
	case SlogFatalLevel:
		return FatalLevel
	default:
		// Bucket in-between custom levels into the band they fall inside.
		switch {
		case level > slog.LevelInfo && level < slog.LevelWarn:
			return NoticeLevel
		case level > slog.LevelError && level <= SlogPanicLevel:
			return PanicLevel
		case level > SlogPanicLevel && level <= SlogAlertLevel:
			return AlertLevel
		case level > SlogAlertLevel && level <= SlogFatalLevel:
			return FatalLevel
		}
		return InfoLevel
	}
}
var (
	// prevSlogLogger remembers the slog default logger that was active before
	// RedirectGoStdLog(true), so RedirectGoStdLog(false) can restore it.
	prevSlogLogger *slog.Logger
)

// RedirectGoStdLog is used to redirect Go's internal std log output to this logger AND registers a handler for slog
// that redirects slog output to this logger.
//
// If you intend to use this log interface with another slog handler then you should not use this function and instead
// register a handler with slog directly and register the slog redirect, found under the handlers package or other
// custom redirect handler with this logger.
func RedirectGoStdLog(redirect bool) {
	if redirect {
		prevSlogLogger = slog.Default()
		slog.SetDefault(slog.New(&slogHandler{}))
	} else if prevSlogLogger != nil {
		// Restore the previous default and clear the saved pointer so a
		// second disable call is a no-op.
		slog.SetDefault(prevSlogLogger)
		prevSlogLogger = nil
	}
}
// slog log levels. The first four alias the standard slog levels; the rest
// slot custom severities between/above them.
const (
	SlogDebugLevel  slog.Level = slog.LevelDebug
	SlogInfoLevel   slog.Level = slog.LevelInfo
	SlogWarnLevel   slog.Level = slog.LevelWarn
	SlogErrorLevel  slog.Level = slog.LevelError
	SlogNoticeLevel slog.Level = slog.LevelInfo + 2
	SlogPanicLevel  slog.Level = slog.LevelError + 4
	SlogAlertLevel  slog.Level = SlogPanicLevel + 4
	SlogFatalLevel  slog.Level = SlogAlertLevel + 4 // same as syslog CRITICAL
)
|
package state
import "errors"
// Process executes a single WorkRequest and returns its WorkResponse,
// dispatching on the request's Operation. Division by zero and unknown
// operations are reported through the response's Err field.
func Process(wr *WorkRequest) *WorkResponse {
	resp := &WorkResponse{Wr: wr}
	switch wr.Operation {
	case Add:
		resp.Result = wr.Value1 + wr.Value2
	case Subtract:
		resp.Result = wr.Value1 - wr.Value2
	case Multiply:
		resp.Result = wr.Value1 * wr.Value2
	case Divide:
		if wr.Value2 == 0 {
			resp.Err = errors.New("divide by 0")
		} else {
			resp.Result = wr.Value1 / wr.Value2
		}
	default:
		resp.Err = errors.New("unsupported operation")
	}
	return resp
}
|
package mockgen
import (
"fmt"
"testing"
gomock "github.com/golang/mock/gomock"
mock_mockgen "github.com/mitooos/thesis/mockgen/mocks"
"github.com/mitooos/thesis/mockgen/model"
)
// TestInsertUser covers the service's InsertUser flow using gomock doubles
// for the security helper and the repository.
func TestInsertUser(t *testing.T) {
	t.Run("inserts user succesfully", func(t *testing.T) {
		usr := &model.User{
			Email:    "email@email.com",
			Username: "mock-usrname",
			Password: "pwd",
		}
		ctrl := gomock.NewController(t)
		mockSecurityHelper := mock_mockgen.NewMockSecurityHelper(ctrl)
		mockSecurityHelper.EXPECT().HashPassword(usr.Password).Return("hashed", nil)
		mockRepository := mock_mockgen.NewMockUserRepository(ctrl)
		mockRepository.EXPECT().InsertUser(usr).Return(nil)
		service := new(service)
		service.securtiyHelper = mockSecurityHelper
		service.repository = mockRepository
		got := service.InsertUser(usr)
		if got != nil {
			t.Errorf("got not nil reponse: %v", got)
		}
		if "hashed" != usr.Password {
			t.Errorf("got %v, want %v", usr.Password, "hashed")
		}
	})
	t.Run("returns error if hashing password fail", func(t *testing.T) {
		usr := &model.User{
			Email:    "email@email.com",
			Username: "mock-usrname",
			Password: "pwd",
		}
		ctrl := gomock.NewController(t)
		// Fix: this subtest previously stubbed HashPassword to SUCCEED
		// (returning "hashed", nil), so the error path it claims to test was
		// never exercised.
		mockSecurityHelper := mock_mockgen.NewMockSecurityHelper(ctrl)
		mockSecurityHelper.EXPECT().HashPassword(usr.Password).Return("", fmt.Errorf("error hashing"))
		mockRepository := mock_mockgen.NewMockUserRepository(ctrl)
		// The repository must not be reached when hashing fails.
		mockRepository.EXPECT().InsertUser(gomock.Any()).Times(0)
		service := new(service)
		service.securtiyHelper = mockSecurityHelper
		service.repository = mockRepository
		got := service.InsertUser(usr)
		if got == nil || got.Error() != "error hashing" {
			t.Errorf("got %v, want %v", got, "error hashing")
		}
	})
	t.Run("returns an error if InsertUser from repository returns one", func(t *testing.T) {
		usr := &model.User{
			Email:    "email@email.com",
			Username: "mock-usrname",
			Password: "pwd",
		}
		ctrl := gomock.NewController(t)
		mockSecurityHelper := mock_mockgen.NewMockSecurityHelper(ctrl)
		mockSecurityHelper.EXPECT().HashPassword(usr.Password).Return("hashed", nil)
		mockRepository := mock_mockgen.NewMockUserRepository(ctrl)
		mockRepository.EXPECT().InsertUser(usr).Return(fmt.Errorf("error inserting"))
		service := new(service)
		service.securtiyHelper = mockSecurityHelper
		service.repository = mockRepository
		got := service.InsertUser(usr)
		if got.Error() != "error inserting" {
			t.Errorf("got %v, want %v", got.Error(), "error inserting")
		}
	})
}
|
package config
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestConfig loads the project configuration named "config" from the parent
// directory, fails on any load error, and logs the loaded value.
func TestConfig(t *testing.T) {
	conf, err := NewConfiguration("config", "..")
	assert.NoError(t, err)
	t.Log(conf)
}
|
package solution_test
import (
"testing"
"github.com/dnogueir/golang-dojo/solution"
"github.com/stretchr/testify/require"
)
// TestSolution checks that the multi-threaded solution computes the expected
// aggregate sum of 319600.
func TestSolution(t *testing.T) {
	sum := solution.Multi_threaded()
	require.Equal(t, 319600, sum)
}
|
package boshio_test
import (
"errors"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"time"
"github.com/concourse/bosh-io-stemcell-resource/boshio"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// tempError wraps an error and reports itself as temporary (net.Error-style),
// so retry logic in the client under test treats it as retryable.
type tempError struct {
	error
}

// Temporary always reports true.
func (te tempError) Temporary() bool { return true }

// Timeout always reports false.
func (te tempError) Timeout() bool { return false }

// fakeTransport fails the first request with a temporary error and succeeds
// with 200 OK from the second request on, exercising the client retry path.
type fakeTransport struct {
	count int
}

// RoundTrip implements http.RoundTripper with the fail-once behavior above.
func (f *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	if f.count == 1 {
		return &http.Response{StatusCode: http.StatusOK}, nil
	}
	f.count++
	return nil, tempError{errors.New("boom")}
}
// HTTPClient suite: covers request forwarding, preservation of an
// already-set host, retry on temporary errors, and URL parse failures.
var _ = Describe("HTTPClient", func() {
	const waitTime = 10 * time.Millisecond
	Describe("Do", func() {
		It("makes an http request", func() {
			var (
				receivedRequest *http.Request
				requestBody     []byte
			)
			server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
				receivedRequest = req
				var err error
				requestBody, err = ioutil.ReadAll(req.Body)
				Expect(err).NotTo(HaveOccurred())
			}))
			client := boshio.NewHTTPClient(server.URL, waitTime)
			request, err := http.NewRequest("POST", "/more/path", strings.NewReader(`{"test": "something"}`))
			Expect(err).NotTo(HaveOccurred())
			request.Header.Add("something", "some-value")
			response, err := client.Do(request)
			Expect(err).NotTo(HaveOccurred())
			Expect(response.StatusCode).To(Equal(http.StatusOK))
			Expect(receivedRequest.Method).To(Equal("POST"))
			Expect(receivedRequest.URL.String()).To(Equal("/more/path"))
			Expect(receivedRequest.Header.Get("something")).To(Equal("some-value"))
			Expect(requestBody).To(MatchJSON(`{"test": "something"}`))
		})
		// Typo fix in the spec description: "sett" -> "set".
		Context("when the request already has its host set", func() {
			It("doesn't modify the host", func() {
				stemcells := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {}))
				amazon := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
					w.WriteHeader(http.StatusTeapot)
				}))
				client := boshio.NewHTTPClient(stemcells.URL, waitTime)
				request, err := http.NewRequest("POST", amazon.URL, strings.NewReader(`{"test": "something"}`))
				Expect(err).NotTo(HaveOccurred())
				response, err := client.Do(request)
				Expect(err).NotTo(HaveOccurred())
				Expect(response.StatusCode).To(Equal(http.StatusTeapot))
			})
		})
		Context("when the request has a temporary error", func() {
			It("retries the request", func() {
				// fakeTransport fails once with a temporary error, then
				// succeeds; a 200 here proves the retry happened.
				client := boshio.HTTPClient{
					Host:   "example.com",
					Wait:   waitTime,
					Client: &http.Client{Transport: &fakeTransport{}},
				}
				request, err := http.NewRequest("GET", "/different/path", nil)
				Expect(err).NotTo(HaveOccurred())
				response, err := client.Do(request)
				Expect(err).NotTo(HaveOccurred())
				Expect(response.StatusCode).To(Equal(http.StatusOK))
			})
		})
		Context("when an error occurs", func() {
			Context("when the host cannot be parsed", func() {
				It("returns an error", func() {
					client := boshio.NewHTTPClient("%%%%%%", waitTime)
					_, err := client.Do(&http.Request{})
					Expect(err).To(MatchError(ContainSubstring("failed to parse URL")))
				})
			})
		})
	})
})
|
package cfglite
import (
"bufio"
"os"
"strings"
)
// smplConfStruct holds a parsed key=value configuration file.
type smplConfStruct struct {
	cfgPath string            // config file path (currently left unset by ReadConf)
	params  map[string]string // parsed key -> value pairs
}

// ReadConf opens the file at path and parses every "key=value" line
// into the returned config. It returns a non-nil error when the file
// cannot be opened or read. Lines without an '=' separator are skipped.
func ReadConf(path string) (*smplConfStruct, error) {
	config := new(smplConfStruct)
	cfgFile, err := os.Open(path)
	if err != nil {
		// Bug fix: the original returned a nil error here, silently
		// handing callers an empty config for a missing/unreadable file.
		return config, err
	}
	defer cfgFile.Close()
	config.params = make(map[string]string)
	scanner := bufio.NewScanner(cfgFile)
	for scanner.Scan() {
		config.addParam(scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		// Bug fix: propagate scanner errors instead of discarding them.
		return config, err
	}
	return config, nil
}

// addParam splits a single "key=value" line and stores it in params.
// Bug fix: malformed lines (no '=') are now ignored; the original
// sliced with index -1 and panicked on them.
func (conf *smplConfStruct) addParam(line string) {
	nPos := strings.Index(line, "=")
	if nPos < 0 {
		return
	}
	conf.params[line[:nPos]] = line[nPos+1:]
}
|
package parsing
import (
"github.com/s2gatev/sqlmorph/ast"
)
// UpdateWithoutTargetError is the panic message used when UPDATE is not
// followed by a table literal.
const UpdateWithoutTargetError = "UPDATE statement must be followed by a target class."

// UpdateState parses UPDATE SQL clauses along with the target table.
// UPDATE User u ...
type UpdateState struct {
	BaseState
}
// Name identifies this parser state; it handles the UPDATE keyword.
func (s *UpdateState) Name() string {
	return "UPDATE"
}
// Parse consumes an "UPDATE <table> [<alias>]" clause from the
// tokenizer. On success it returns a new ast.Update node with its table
// set and true; otherwise it unreads the token and returns the input
// result unchanged with false. It panics (via wrongTokenPanic) when
// UPDATE is not followed by a table literal.
func (s *UpdateState) Parse(result ast.Node, tokenizer *Tokenizer) (ast.Node, bool) {
	if token, _ := tokenizer.ReadToken(); token != UPDATE {
		tokenizer.UnreadToken()
		return result, false
	}
	// Only allocate the target node once we know this is an UPDATE
	// (the original allocated it before the token check, wasting the
	// node on every non-UPDATE clause).
	target := ast.NewUpdate()
	table := &ast.Table{}
	if token, tableName := tokenizer.ReadToken(); token == LITERAL {
		table.Name = tableName
	} else {
		wrongTokenPanic(UpdateWithoutTargetError, tableName)
	}
	// The alias is optional; push the token back if it is not a literal.
	if token, tableAlias := tokenizer.ReadToken(); token == LITERAL {
		table.Alias = tableAlias
	} else {
		tokenizer.UnreadToken()
	}
	target.SetTable(table)
	return target, true
}
|
package SLB
import (
"fmt"
"testing"
)
// TestRandom draws from a three-node pool ten times and prints how
// often each node was selected.
func TestRandom(t *testing.T) {
	nodes := map[int]string{0: "0", 1: "1", 2: "2"}
	counts := make(map[string]int)
	for i := 0; i < 10; i++ {
		counts[Random(nodes)]++
	}
	for k, v := range counts {
		fmt.Printf("key = %s; volume =%d \n", k, v)
	}
}
func TestRandomWeight(t *testing.T) {
nodes := make(map[int]map[string]interface{})
nodes[0] = map[string]interface{}{
"ip": "0",
"weight": 5,
}
nodes[1] = map[string]interface{}{
"ip": "1",
"weight": 1,
}
nodes[2] = map[string]interface{}{
"ip": "2",
"weight": 4,
}
next := RandomWeight(nodes)
count := map[string]int{}
for i := 0; i < 10000; i++ {
count[next()]++
}
fmt.Printf("%#v", count)
} |
/*
Copyright (C) 2018 Expedia Group.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package events
import (
"github.com/HotelsDotCom/flyte-client/flyte"
)
// Event definitions emitted by the add-events flow.
// NOTE(review): the variable names use singular "Event" while the event
// names are "AddEventsSuccess"/"AddEventsFailure" — presumably
// historical; renaming the exported vars would break callers.
var AddEventSuccessEventDef = flyte.EventDef{Name: "AddEventsSuccess"}
var AddEventErrorEventDef = flyte.EventDef{Name: "AddEventsFailure"}
// NewAddEventsSuccessEvent builds the success event, echoing back the
// tags, what, and data inputs in its payload.
func NewAddEventsSuccessEvent(tags, what, data string) flyte.Event {
	return flyte.Event{
		EventDef: AddEventSuccessEventDef,
		Payload:  AddEventsSuccessPayload{tags, what, data},
	}
}
// NewAddEventsFailureEvent builds the failure event, echoing back the
// tags, what, and data inputs together with the error message.
// The last parameter was renamed from "error" to errMsg so it no longer
// shadows the predeclared error type; arguments are positional, so this
// is backward compatible for callers.
func NewAddEventsFailureEvent(tags, what, data, errMsg string) flyte.Event {
	return flyte.Event{
		EventDef: AddEventErrorEventDef,
		Payload:  AddEventsFailurePayload{tags, what, data, errMsg},
	}
}
// AddEventsSuccessPayload is the JSON payload attached to the success event.
type AddEventsSuccessPayload struct {
	Tags string `json:"tags"`
	What string `json:"what"`
	Data string `json:"data"`
}

// AddEventsFailurePayload is the JSON payload attached to the failure
// event; it additionally carries the error message.
type AddEventsFailurePayload struct {
	Tags  string `json:"tags"`
	What  string `json:"what"`
	Data  string `json:"data"`
	Error string `json:"error"`
}
|
package entity
import (
"bytes"
"fmt"
"io"
"strings"
// "io/ioutil"
"github.com/fabric-lab/hyperledger-fabric-manager/server/pkg/client"
"github.com/fabric-lab/hyperledger-fabric-manager/server/pkg/util"
"os"
"os/exec"
"path/filepath"
"strconv"
)
// Output selector for run(): combined report, stdout only, or stderr only.
const (
	ALLINFO = iota // combined environment/command/stderr/stdout report
	OUTINFO        // stdout only
	ERRINFO        // stderr only
)

// CMD is implemented by command executors that take a string map of
// parameters and return a textual result or status/error token.
type CMD interface {
	Exec(map[string]string) string
}
// ExecChannel runs channel-level peer CLI commands described by cmdInfo.
// Only "CHANNEL_CREATE" is handled: it creates the channel via the peer
// binary, copies the resulting genesis block into the channel directory,
// and removes the local copy. Returns the command output, or "" for
// unknown commands.
func ExecChannel(cmdInfo map[string]string) string {
	cmd := cmdInfo["Cmd"]
	channelId := cmdInfo["ChannelId"]
	ordererEndpoint := cmdInfo["OrdererEndpoint"]
	//seek :=cmdInfo["Seek"]
	channelPath := filepath.Join(channelDir, channelId)
	// Shadows the (presumably package-level) peerBin with its
	// platform-adjusted form.
	peerBin := WindowsBin(peerBin)
	switch cmd {
	case "CHANNEL_CREATE":
		genesisBlock := channelId + ".block"
		dest := filepath.Join(channelPath, genesisBlock)
		cmd := exec.Command(peerBin, "channel", "create", "-c", channelId, "-o", ordererEndpoint)
		msg := run(true, ALLINFO, cmd, channelPath)
		// NOTE(review): errors from Copy/RemoveAll are ignored here —
		// confirm this best-effort cleanup is intended.
		util.Copy(genesisBlock, dest)
		os.RemoveAll(genesisBlock)
		return msg
	}
	return ""
}
// ExecPeer runs peer-level CLI commands (node lifecycle, channel
// membership, chaincode operations) described by cmdInfo, returning the
// command output or a status/error token such as "node_already_run".
func ExecPeer(cmdInfo map[string]string) string {
	cmd := cmdInfo["Cmd"]
	nodeName := cmdInfo["NodeName"]
	channelId := cmdInfo["ChannelId"]
	version := cmdInfo["Version"]
	lang := cmdInfo["Lang"]
	path := cmdInfo["Path"]
	name := cmdInfo["Name"]
	json := cmdInfo["Json"]
	ordererEndpoint := cmdInfo["OrdererEndpoint"]
	ordererName := cmdInfo["OrdererName"]
	peerPath := filepath.Join(peerDir, nodeName)
	channelPath := filepath.Join(channelDir, channelId)
	cacheNodeName := peers + "." + nodeName
	cache := util.Caches.Get(cacheNodeName)
	// The cache entry doubles as the "is the peer running" flag: guard
	// every command against the node's current run state.
	if cache != nil && cmd == "NODE_START" {
		return "node_already_run"
	} else if cache == nil && cmd == "NODE_STOP" {
		return "node_already_stop"
	} else if cache == nil && cmd != "NODE_START" && cmd != "NODE_STOP" {
		return "node_must_run"
	}
	ordererCacheNodeName := orderers + "." + ordererName
	orderCache := util.Caches.Get(ordererCacheNodeName)
	// Shadows the (presumably package-level) peerBin with its
	// platform-adjusted form.
	peerBin := WindowsBin(peerBin)
	switch cmd {
	case "NODE_START":
		// Started asynchronously; the *exec.Cmd is cached so NODE_STOP
		// can kill the process later.
		cmd := exec.Command(peerBin, "node", "start", "--peer-chaincodedev=true")
		util.Caches.Set(cacheNodeName, cmd)
		return run(false, ALLINFO, cmd, peerPath)
	case "NODE_STOP":
		v := cache.Value
		if _, ok := v.(*exec.Cmd); ok {
			err := v.(*exec.Cmd).Process.Kill()
			if err != nil {
				return err.Error()
			}
		}
		util.Caches.Delete(cacheNodeName)
		return "node_stop_ok"
	case "CHANNEL_LIST":
		cmd := exec.Command(peerBin, "channel", "list")
		return run(true, ALLINFO, cmd, peerPath)
	case "CHANNEL_JOIN":
		genesisBlock := channelId + ".block"
		genesisBlock = filepath.Join(channelPath, genesisBlock)
		cmd := exec.Command(peerBin, "channel", "join", "-b", genesisBlock)
		msg := run(true, ALLINFO, cmd, peerPath)
		return msg
	case "CHANNEL_GETINFO":
		cmd := exec.Command(peerBin, "channel", "getinfo", "-c", channelId)
		msg := run(true, ALLINFO, cmd, peerPath)
		return msg
	case "CHAINCODE_INSTALL":
		dir := filepath.Dir(path)
		cmd := exec.Command(peerBin, "chaincode", "install", "-n", name, "-v", version, "-l", lang, "-p", dir)
		msg := run(true, ALLINFO, cmd, peerPath)
		return msg
	case "CHAINCODE_LIST":
		cmd := exec.Command(peerBin, "chaincode", "list", "--installed")
		msg := run(true, ALLINFO, cmd, peerPath)
		return msg
	case "CHAINCODE_INIT":
		// Instantiate/invoke/query all require a running orderer.
		if orderCache == nil {
			return "desc_3" + "|" + ordererName
		}
		cmd := exec.Command(peerBin, "chaincode", "instantiate", "-n", name, "-v", version, "-c", json, "-C", channelId, "-o", ordererEndpoint)
		msg := run(true, ALLINFO, cmd, peerPath)
		return msg
	case "CHAINCODE_INVOKE":
		if orderCache == nil {
			return "desc_3" + "|" + ordererName
		}
		cmd := exec.Command(peerBin, "chaincode", "invoke", "-n", name, "-c", json, "-C", channelId, "-o", ordererEndpoint)
		msg := run(true, ALLINFO, cmd, peerPath)
		return msg
	case "CHAINCODE_QUERY":
		if orderCache == nil {
			return "desc_3" + "|" + ordererName
		}
		cmd := exec.Command(peerBin, "chaincode", "query", "-n", name, "-c", json, "-C", channelId, "-o", ordererEndpoint)
		msg := run(true, ALLINFO, cmd, peerPath)
		return msg
	}
	return ""
}
// ExecOrderer runs orderer-level commands: start/stop the orderer node
// and SEEK (fetch a block via the deliver service). The cached
// *exec.Cmd tracks whether the node is currently running.
func ExecOrderer(cmdInfo map[string]string) string {
	cmd := cmdInfo["Cmd"]
	nodeName := cmdInfo["NodeName"]
	cacheNodeName := orderers + "." + nodeName
	ordererPath := filepath.Join(OrdererDir, nodeName)
	// Shadows the (presumably package-level) ordererBin with its
	// platform-adjusted form.
	ordererBin := WindowsBin(ordererBin)
	switch cmd {
	case "NODE_START":
		cache := util.Caches.Get(cacheNodeName)
		if cache != nil {
			return "node_already_run"
		}
		// Started asynchronously; cached so NODE_STOP can kill it.
		cmd := exec.Command(ordererBin, "start")
		util.Caches.Set(cacheNodeName, cmd)
		return run(false, ALLINFO, cmd, ordererPath)
	case "NODE_STOP":
		cache := util.Caches.Get(cacheNodeName)
		if cache == nil {
			return "node_already_stop"
		}
		v := cache.Value
		if _, ok := v.(*exec.Cmd); ok {
			err := v.(*exec.Cmd).Process.Kill()
			if err != nil {
				return err.Error()
			}
		}
		util.Caches.Delete(cacheNodeName)
		return "node_stop_ok"
	case "SEEK":
		// Seeking blocks requires the orderer to be running.
		cache := util.Caches.Get(cacheNodeName)
		if cache == nil {
			return "node_must_run"
		}
		return Seek(cmdInfo)
	}
	return ""
}
// ExecChainCode starts or stops a chaincode process in dev mode. The
// peer it attaches to must already be running (tracked via the cache).
func ExecChainCode(cmdInfo map[string]string) string {
	cmd := cmdInfo["Cmd"]
	nodeName := cmdInfo["NodeName"]
	path := cmdInfo["Path"]
	peerEndPoint := cmdInfo["PeerEndPoint"]
	name := cmdInfo["Name"]
	peerNodeName := cmdInfo["PeerNodeName"]
	peerCacheNodeName := peers + "." + peerNodeName
	cacheNodeName := chaincodes + "." + nodeName + "." + peerNodeName
	switch cmd {
	case "NODE_START":
		// The backing peer must be started first.
		cache := util.Caches.Get(peerCacheNodeName)
		if cache == nil {
			return "desc_4" + "|" + peerNodeName
		}
		cache = util.Caches.Get(cacheNodeName)
		if cache != nil {
			return "node_already_run"
		}
		// Resolve the chaincode binary under GOPATH and launch it with
		// the dev-mode environment pointing back at the peer.
		path = filepath.Join(os.Getenv("GOPATH"), "src", path)
		cmd := exec.Command(path)
		env := "CORE_CHAINCODE_LOGLEVEL=debug"
		cmd.Env = append(os.Environ(), env)
		env = fmt.Sprintf("CORE_PEER_ADDRESS=%s", peerEndPoint)
		cmd.Env = append(cmd.Env, env)
		env = fmt.Sprintf("CORE_CHAINCODE_ID_NAME=%s", name)
		cmd.Env = append(cmd.Env, env)
		// Asynchronous start: an empty message is treated as success.
		msg := run(false, OUTINFO, cmd, "")
		if msg == "" {
			util.Caches.Set(cacheNodeName, cmd)
			return "ok"
		}
		return msg
	case "NODE_STOP":
		cache := util.Caches.Get(cacheNodeName)
		if cache == nil {
			return "node_already_stop"
		}
		v := cache.Value
		util.Caches.Delete(cacheNodeName)
		if _, ok := v.(*exec.Cmd); ok {
			err := v.(*exec.Cmd).Process.Kill()
			if err != nil {
				return err.Error()
			}
		}
		return "ok"
	}
	return ""
}
// run executes cmd with FABRIC_CFG_PATH pointing at config, teeing
// stdout and stderr both to this process and to in-memory buffers.
//
// When isSync is true it waits for both output copies and then the
// command to finish before returning; otherwise the command keeps
// running in the background and the captured output is whatever has
// arrived so far (usually nothing).
//
// outType selects the return value: OUTINFO -> stdout, ERRINFO ->
// stderr, anything else -> a combined report.
func run(isSync bool, outType int, cmd *exec.Cmd, config string) string {
	var stdoutBuf, stderrBuf bytes.Buffer
	stdoutIn, _ := cmd.StdoutPipe()
	stderrIn, _ := cmd.StderrPipe()
	stdout := io.MultiWriter(os.Stdout, &stdoutBuf)
	stderr := io.MultiWriter(os.Stderr, &stderrBuf)
	cmd.Env = append(cmd.Env, os.Environ()...)
	fabricCFGPath := "FABRIC_CFG_PATH=" + config
	cmd.Env = append(cmd.Env, fabricCFGPath)
	if err := cmd.Start(); err != nil {
		return err.Error()
	}
	// Each copier signals completion; the channel is buffered so
	// neither goroutine blocks on send.
	copied := make(chan struct{}, 2)
	go func() {
		// Copy errors are intentionally dropped, as in the original;
		// whatever output arrived is still in the buffer.
		io.Copy(stdout, stdoutIn)
		copied <- struct{}{}
	}()
	go func() {
		io.Copy(stderr, stderrIn)
		copied <- struct{}{}
	}()
	if isSync {
		// Bug fix: wait for both copiers to drain the pipes BEFORE
		// reading the buffers. The original called cmd.Wait() and then
		// read the buffers while the copy goroutines could still be
		// writing — a data race that could also truncate the output.
		<-copied
		<-copied
		cmd.Wait()
	} else {
		// Reap the process in the background once the pipes drain.
		go func() {
			<-copied
			<-copied
			cmd.Wait()
		}()
	}
	// NOTE(review): in the async case the buffers may still be written
	// concurrently; callers rely only on the immediate (typically
	// empty) snapshot, matching the original behaviour.
	outStr, errStr := stdoutBuf.String(), stderrBuf.String()
	if outType == OUTINFO {
		return outStr
	} else if outType == ERRINFO {
		return errStr
	}
	return fmt.Sprintf("Environment:%s\nCommand:%s\n\n\n%s\n\n\n%s", fabricCFGPath, strings.Join(cmd.Args, " "), errStr, outStr)
}
// Seek fetches one block from the orderer's delivery service for the
// given channel. cmdInfo["Seek"] selects which block: "-2" oldest, "-1"
// newest, otherwise a specific block number. Errors are returned as
// their string form, consistent with the other Exec* helpers.
func Seek(cmdInfo map[string]string) string {
	nodeName := cmdInfo["NodeName"]
	channelId := cmdInfo["ChannelId"]
	seek := cmdInfo["Seek"]
	ordererPath := filepath.Join(OrdererDir, nodeName)
	// The deliver client reads its configuration from FABRIC_CFG_PATH.
	os.Setenv("FABRIC_CFG_PATH", ordererPath)
	deliverClient, err := client.NewDeliverClient(channelId)
	if err != nil {
		return err.Error()
	}
	var block string
	if seek == "-2" {
		block, err = deliverClient.GetOldestBlock()
		if err != nil {
			return err.Error()
		}
	} else if seek == "-1" {
		block, err = deliverClient.GetNewestBlock()
		if err != nil {
			return err.Error()
		}
	} else {
		i, err := strconv.ParseUint(seek, 10, 64)
		if err != nil {
			return err.Error()
		}
		block, err = deliverClient.GetSpecifiedBlock(i)
		if err != nil {
			return err.Error()
		}
	}
	return block
}
|
package dependency
import (
"context"
"errors"
"log"
"net"
"net/http"
"os"
"cloud.google.com/go/firestore"
"github.com/99designs/gqlgen/graphql"
"github.com/99designs/gqlgen/graphql/handler"
"github.com/99designs/gqlgen/graphql/playground"
"github.com/dwaynelavon/es-loyalty-program/config"
"github.com/dwaynelavon/es-loyalty-program/graph"
"github.com/dwaynelavon/es-loyalty-program/graph/generated"
"github.com/dwaynelavon/es-loyalty-program/internal/app/eventsource"
"github.com/dwaynelavon/es-loyalty-program/internal/app/user"
"github.com/vektah/gqlparser/v2/gqlerror"
"go.uber.org/zap"
)
// defaultPort is used when the PORT environment variable is unset.
var defaultPort = "8080"
// RegisterRoutes builds the gqlgen GraphQL server (resolvers wired to
// the read model and command dispatcher), mounts the playground at "/"
// and the query endpoint at "/query", then serves on $PORT (default
// 8080). It blocks until the listener fails, then exits via log.Fatal.
// NOTE(review): firestoreClient is accepted but unused in this function
// — presumably kept for wiring symmetry; confirm before removing.
func RegisterRoutes(
	logger *zap.Logger,
	firestoreClient *firestore.Client,
	dispatcher eventsource.CommandDispatcher,
	userReadModel user.ReadModel,
) {
	port := os.Getenv("PORT")
	if port == "" {
		port = defaultPort
	}
	// Build server
	graphResolver := &graph.Resolver{
		UserReadModel: userReadModel,
		Dispatcher:    dispatcher,
	}
	generatedConfig := generated.Config{
		Resolvers: graphResolver,
	}
	schema := generated.NewExecutableSchema(generatedConfig)
	srv := handler.NewDefaultServer(schema)
	// All resolver errors pass through the logging presenter.
	srv.SetErrorPresenter(errorPresenterWithLogger(logger))
	// Handlers
	http.Handle("/", playground.Handler("GraphQL playground", "/query"))
	http.Handle("/query", srv)
	log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
	log.Fatal(http.ListenAndServe(":"+port, nil))
}
// LoadEnv loads environment variables from ../config/.env.
// Bug fix: the original discarded the underlying error and returned a
// generic message; the cause is now included so callers can see why
// loading failed.
func LoadEnv() error {
	if err := config.LoadEnvWithPath("../config/.env"); err != nil {
		return errors.New("unable to load environment variables: " + err.Error())
	}
	return nil
}
// errorPresenterWithLogger returns a gqlgen error presenter that logs
// every error through logger and converts network timeouts into a
// user-friendly message; all other errors use the default presenter.
func errorPresenterWithLogger(
	logger *zap.Logger,
) func(ctx context.Context, err error) *gqlerror.Error {
	return func(ctx context.Context, err error) *gqlerror.Error {
		logger.Error(err.Error())
		netErr, isNetErr := err.(net.Error)
		if isNetErr && netErr.Timeout() {
			return gqlerror.ErrorPathf(
				graphql.GetFieldContext(ctx).Path(),
				"Request timeout. Check network connection")
		}
		return graphql.DefaultErrorPresenter(ctx, err)
	}
}
|
package recurring
import (
mgo "gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"time"
)
// MONGO_URL is the MongoDB host used by ConnectDB.
const (
	MONGO_URL = "localhost"
)

// DB wraps a single mgo collection handle.
type DB struct {
	coll *mgo.Collection
}

// KeyValue is a generic key/value document stored in the same
// collection as the payments (see SetKeyValue/GetValue).
type KeyValue struct {
	Key   string
	Value interface{}
}
// ConnectDB dials the local MongoDB instance and returns a handle to
// the named collection in the named database.
//
// NOTE(review): a dial failure is swallowed and nil is returned, so
// callers that do not nil-check will panic on first use; the session is
// also never closed. Consider returning (*DB, error) in a follow-up.
func ConnectDB(dbName, collectionName string) *DB {
	sess, err := mgo.Dial(MONGO_URL)
	if err != nil {
		return nil
	}
	// sess.SetSafe(&mgo.Safe{})
	db := sess.DB(dbName)
	collection := db.C(collectionName)
	return &DB{
		collection,
	}
}
// Add inserts a recurring payment document.
func (db *DB) Add(rp RecurringPayment) error {
	return db.coll.Insert(rp)
}

// Delete removes the payment with the given id (keyed on "rpid").
func (db *DB) Delete(id uint64) error {
	return db.coll.Remove(bson.M{"rpid": id})
}

// UpdateNext sets the next execution time of payment id. It first
// verifies the payment exists so a missing id surfaces as a lookup
// error rather than an update error.
func (db *DB) UpdateNext(id uint64, next time.Time) error {
	_, err := db.Get(id)
	if err != nil {
		return err
	}
	return db.coll.Update(bson.M{"rpid": id}, bson.M{"$set": bson.M{"next": next}})
}

// Get fetches a single payment by id.
func (db *DB) Get(id uint64) (RecurringPayment, error) {
	rp := RecurringPayment{}
	err := db.coll.Find(bson.M{"rpid": id}).One(&rp)
	return rp, err
}
// GetAllFrom lists all payments originating from the given account.
func (db *DB) GetAllFrom(aid AccountID) ([]RecurringPayment, error) {
	rps := []RecurringPayment{}
	err := db.coll.Find(bson.M{"from": aid}).All(&rps)
	return rps, err
}

// GetAll lists every payment document — anything with a "from" field,
// which filters out the KeyValue documents in the same collection.
func (db *DB) GetAll() ([]RecurringPayment, error) {
	rps := []RecurringPayment{}
	err := db.coll.Find(bson.M{"from": bson.M{"$exists": true}}).All(&rps)
	return rps, err
}

// SetKeyValue stores value under key as a KeyValue document.
// NOTE(review): this always inserts; repeated calls with the same key
// create duplicates — confirm whether an upsert was intended.
func (db *DB) SetKeyValue(key string, value interface{}) error {
	kv := KeyValue{key, value}
	return db.coll.Insert(kv)
}

// GetValue returns the value stored under key.
func (db *DB) GetValue(key string) (interface{}, error) {
	kv := KeyValue{}
	err := db.coll.Find(bson.M{"key": key}).One(&kv)
	return kv.Value, err
}

// DropCollection removes the entire backing collection.
func (db *DB) DropCollection() error {
	return db.coll.DropCollection()
}
|
package get
import (
"errors"
"os"
"os/exec"
"path/filepath"
"strings"
. "github.com/xeha-gmbh/homelab/shared"
"github.com/spf13/cobra"
)
// Flag names, defaults, and the download URL for each supported flavor.
const (
	flagFlavor    = "flavor"
	flagTargetDir = "target-dir"
	flagReuse     = "reuse"

	defaultTargetDir = "/tmp"
	defaultReuse     = false

	flavorUbuntuBionic64Live       = "ubuntu/bionic64.live"
	flavorUbuntuBionic64LiveUrl    = "http://releases.ubuntu.com/bionic/ubuntu-18.04.2-live-server-amd64.iso"
	flavorUbuntuBionic64NonLive    = "ubuntu/bionic64"
	flavorUbuntuBionic64NonLiveUrl = "http://cdimage.ubuntu.com/ubuntu/releases/18.04/release/ubuntu-18.04.2-server-amd64.iso"
	flavorUbuntuXenial64           = "ubuntu/xenial64"
	flavorUbuntuXenial64Url        = "http://releases.ubuntu.com/xenial/ubuntu-16.04.5-server-amd64.iso"

	// noDefault marks string flags that have no default value.
	noDefault = ""
)
// IsoGetPayload carries the parsed command-line flags of "iso get".
type IsoGetPayload struct {
	ExtraArgs
	Flavor    string // which image flavor to download (--flavor)
	TargetDir string // where to store the image (--target-dir)
	Reuse     bool   // reuse an existing download if present (--reuse)
}
// NewIsoGetCommand builds the "get" cobra command, which downloads the
// requested OS ISO with wget into the target directory, optionally
// reusing an existing file.
func NewIsoGetCommand() *cobra.Command {
	payload := new(IsoGetPayload)
	cmd := &cobra.Command{
		Use:   "get",
		Short: "get system iso",
		PreRunE: func(cmd *cobra.Command, args []string) error {
			cmd.SetOutput(os.Stdout)
			return cmd.ParseFlags(args)
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			var (
				filename    string
				downloadUrl string
			)
			// Map the flavor to its URL; the local filename is the
			// last path segment of that URL joined onto TargetDir.
			switch payload.Flavor {
			case flavorUbuntuBionic64Live:
				downloadUrl = flavorUbuntuBionic64LiveUrl
				filename = filepath.Join(payload.TargetDir, flavorUbuntuBionic64LiveUrl[strings.LastIndex(flavorUbuntuBionic64LiveUrl, "/")+1:])
			case flavorUbuntuBionic64NonLive:
				downloadUrl = flavorUbuntuBionic64NonLiveUrl
				filename = filepath.Join(payload.TargetDir, flavorUbuntuBionic64NonLiveUrl[strings.LastIndex(flavorUbuntuBionic64NonLiveUrl, "/")+1:])
			case flavorUbuntuXenial64:
				downloadUrl = flavorUbuntuXenial64Url
				filename = filepath.Join(payload.TargetDir, flavorUbuntuXenial64Url[strings.LastIndex(flavorUbuntuXenial64Url, "/")+1:])
			default:
				WithConfig(cmd, &payload.ExtraArgs).Fatal(
					1,
					"Flavor {{index .flavor}} is not supported.",
					map[string]interface{}{
						"event":     "unsupported_flavor",
						"flavor":    payload.Flavor,
						"exit-code": 1,
					})
				return errors.New("unsupported_flavor")
			}
			// Skip the download entirely when the file exists and
			// --reuse was requested.
			if _, err := os.Stat(filename); !os.IsNotExist(err) && payload.Reuse {
				WithConfig(cmd, &payload.ExtraArgs).Info(
					"Reused file at {{index .file}}, no download was executed.",
					map[string]interface{}{
						"event": "reused_file",
						"file":  filename,
						"reuse": payload.Reuse,
					})
				return nil
			}
			// Delegate the actual transfer to wget; quiet unless --debug.
			wgetArgs := []string{"-O", filename, downloadUrl}
			if !payload.Debug {
				wgetArgs = append([]string{"-q"}, wgetArgs...)
			}
			wget := exec.Command("wget", wgetArgs...)
			wget.Stdout = cmd.OutOrStdout()
			wget.Stderr = cmd.OutOrStderr()
			WithConfig(cmd, &payload.ExtraArgs).Debug(
				"Downloading from {{index .url}}, please wait.",
				map[string]interface{}{
					"event": "download_in_progress",
					"url":   downloadUrl,
				})
			if err := wget.Run(); err != nil {
				WithConfig(cmd, &payload.ExtraArgs).Fatal(
					2,
					"Download from {{index .url}} failed. Cause: {{index .cause}}",
					map[string]interface{}{
						"event":     "download_error",
						"url":       downloadUrl,
						"cause":     err.Error(),
						"exit-code": 2,
					})
				return errors.New("download_error")
			}
			WithConfig(cmd, &payload.ExtraArgs).Info(
				"Image {{index .flavor}} downloaded to {{index .file}}.",
				map[string]interface{}{
					"event":  "download_success",
					"flavor": payload.Flavor,
					"file":   filename,
				})
			return nil
		},
	}
	parseIsoGetCommandFlags(cmd, payload)
	markIsoGetCommandRequiredFlags(cmd)
	(&payload.ExtraArgs).InjectExtraArgs(cmd)
	return cmd
}
// markIsoGetCommandRequiredFlags marks --flavor as mandatory. The error
// return of MarkFlagRequired (unknown flag name) is ignored.
func markIsoGetCommandRequiredFlags(cmd *cobra.Command) {
	cmd.MarkFlagRequired(flagFlavor)
}
// parseIsoGetCommandFlags registers the get command's flags on cmd and
// binds them to payload.
func parseIsoGetCommandFlags(cmd *cobra.Command, payload *IsoGetPayload) {
	cmd.Flags().StringVar(&payload.Flavor, flagFlavor, noDefault,
		"flavor of the image to download. ["+strings.Join([]string{
			flavorUbuntuBionic64Live,
			flavorUbuntuBionic64NonLive,
			flavorUbuntuXenial64,
		}, "|")+"]")
	// Fixed garbled help text ("to put the downloaded put into").
	cmd.Flags().StringVar(&payload.TargetDir, flagTargetDir, defaultTargetDir,
		"directory to put the downloaded image into.")
	cmd.Flags().BoolVar(&payload.Reuse, flagReuse, defaultReuse,
		"whether to use an existing image in the target directory if one is found.")
}
|
package compatibility
import (
"fmt"
"strings"
"time"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/random"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/kumahq/kuma/pkg/config/core"
. "github.com/kumahq/kuma/test/framework"
)
// OldChart is the upstream Kuma helm chart version used as the "old"
// side of the compatibility tests.
var OldChart = "0.7.0"

// UpstreamImageRegistry is the registry released Kuma images are pulled from.
var UpstreamImageRegistry = "kumahq"
// CpCompatibilityMultizoneKubernetes defines the multizone control-plane
// compatibility suite: a global CP and a zone CP are installed from the
// OldChart helm release, the global CP is upgraded in-place, and
// resource synchronization is verified in both directions over KDS.
func CpCompatibilityMultizoneKubernetes() {
	var globalCluster Cluster
	var globalReleaseName string
	var globalDeployOptsFuncs = KumaK8sDeployOpts
	var zoneCluster Cluster
	var zoneDeployOptsFuncs = KumaZoneK8sDeployOpts
	var zoneReleaseName string
	// Ensure that the upstream Kuma helm repository is configured
	// and refreshed. This is needed for helm to be able to pull the
	// OldChart version of the Kuma helm chart.
	BeforeSuite(func() {
		t := NewTestingT()
		opts := helm.Options{}
		// Adding the same repo multiple times is idempotent. The
		// `--force-update` flag prevents helm emitting an error
		// in this case.
		_, err := helm.RunHelmCommandAndGetOutputE(t, &opts,
			"repo", "add", "--force-update", "kuma", "https://kumahq.github.io/charts")
		Expect(err).To(Succeed())
		_, err = helm.RunHelmCommandAndGetOutputE(t, &opts, "repo", "update")
		Expect(err).To(Succeed())
	})
	// NOTE(review): the append targets below reuse slices initialized
	// at function scope, so options accumulate across repeated
	// BeforeEach runs — confirm each It only runs once per invocation.
	BeforeEach(func() {
		// Global CP
		c, err := NewK8sClusterWithTimeout(
			NewTestingT(),
			Kuma1,
			Silent,
			6*time.Second)
		Expect(err).ToNot(HaveOccurred())
		globalCluster = c.WithRetries(60)
		globalReleaseName = fmt.Sprintf(
			"kuma-%s",
			strings.ToLower(random.UniqueId()),
		)
		globalDeployOptsFuncs = append(globalDeployOptsFuncs,
			WithEnv("KUMA_API_SERVER_AUTH_ALLOW_FROM_LOCALHOST", "true"),
			WithInstallationMode(HelmInstallationMode),
			WithHelmChartPath(HelmRepo),
			WithHelmReleaseName(globalReleaseName),
			WithHelmChartVersion(OldChart),
			WithoutHelmOpt("global.image.tag"),
			WithHelmOpt("global.image.registry", UpstreamImageRegistry))
		err = NewClusterSetup().
			Install(Kuma(core.Global, globalDeployOptsFuncs...)).
			Setup(globalCluster)
		Expect(err).ToNot(HaveOccurred())
		Expect(globalCluster.VerifyKuma()).To(Succeed())
		// Zone CP
		c, err = NewK8sClusterWithTimeout(
			NewTestingT(),
			Kuma2,
			Silent,
			6*time.Second)
		Expect(err).ToNot(HaveOccurred())
		zoneCluster = c.WithRetries(60)
		zoneReleaseName = fmt.Sprintf(
			"kuma-%s",
			strings.ToLower(random.UniqueId()),
		)
		zoneDeployOptsFuncs = append(zoneDeployOptsFuncs,
			WithEnv("KUMA_API_SERVER_AUTH_ALLOW_FROM_LOCALHOST", "true"),
			WithInstallationMode(HelmInstallationMode),
			WithHelmChartPath(HelmRepo),
			WithHelmReleaseName(zoneReleaseName),
			WithHelmChartVersion(OldChart),
			WithoutHelmOpt("global.image.tag"),
			WithHelmOpt("global.image.registry", UpstreamImageRegistry),
			WithGlobalAddress(globalCluster.GetKuma().GetKDSServerAddress()),
			WithHelmOpt("ingress.enabled", "true"),
		)
		err = NewClusterSetup().
			Install(Kuma(core.Zone, zoneDeployOptsFuncs...)).
			Install(NamespaceWithSidecarInjection(TestNamespace)).
			Setup(zoneCluster)
		Expect(err).ToNot(HaveOccurred())
		Expect(zoneCluster.VerifyKuma()).To(Succeed())
	})
	AfterEach(func() {
		if ShouldSkipCleanup() {
			return
		}
		Expect(zoneCluster.DeleteKuma(zoneDeployOptsFuncs...)).To(Succeed())
		Expect(zoneCluster.DismissCluster()).To(Succeed())
		Expect(globalCluster.DeleteKuma(globalDeployOptsFuncs...)).To(Succeed())
		Expect(globalCluster.DismissCluster()).To(Succeed())
	})
	It("should sync resources between new global and old zone", func() {
		// when global is upgraded
		upgradeOptsFuncs := append(KumaK8sDeployOpts, WithHelmReleaseName(globalReleaseName))
		err := globalCluster.(*K8sCluster).UpgradeKuma(core.Global, upgradeOptsFuncs...)
		Expect(err).ToNot(HaveOccurred())
		// and new resource is created on Global
		err = YamlK8s(`
apiVersion: kuma.io/v1alpha1
kind: Mesh
metadata:
  name: demo
`)(globalCluster)
		// then the resource is synchronized when old remote is connected (KDS is backwards compatible)
		Expect(err).ToNot(HaveOccurred())
		Eventually(func() (string, error) {
			return k8s.RunKubectlAndGetOutputE(zoneCluster.GetTesting(), zoneCluster.GetKubectlOptions(), "get", "meshes")
		}, "30s", "1s").Should(ContainSubstring("demo"))
		// when new resources is created on Zone
		err = DemoClientK8s("default")(zoneCluster)
		// then resource is synchronized to Global
		Expect(err).ToNot(HaveOccurred())
		Eventually(func() (string, error) {
			return k8s.RunKubectlAndGetOutputE(globalCluster.GetTesting(), globalCluster.GetKubectlOptions("default"), "get", "dataplanes")
		}, "30s", "1s").Should(ContainSubstring("demo-client"))
	})
}
|
package version
// Base version information.
//
// This is the fallback data used when version information from git is not
// provided via go ldflags.
var (
	version   = "dev"     // overridden via -ldflags at build time
	commit    = "none"    // git commit, overridden via -ldflags
	buildDate = "unknown" // build timestamp, overridden via -ldflags
)
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"fmt"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/statistics/handle"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/logutil"
"go.uber.org/zap"
)
// globalStatsKey identifies one global-stats merge target: a
// partitioned table plus either a specific index or, when indexID is
// -1, the table's columns.
type globalStatsKey struct {
	tableID int64
	indexID int64
}

// globalStatsInfo carries the extra information needed to build the
// global stats for one globalStatsKey.
type globalStatsInfo struct {
	isIndex int
	// When the `isIndex == 0`, histIDs will be the column IDs.
	// Otherwise, histIDs will only contain the index ID.
	histIDs []int64
	// statsVersion is forwarded to SaveStatsToStorage when persisting
	// the merged stats.
	statsVersion int
}

// globalStatsMap is a map used to store which partition tables and the corresponding indexes need global-level stats.
// The meaning of key in map is the structure that used to store the tableID and indexID.
// The meaning of value in map is some additional information needed to build global-level stats.
type globalStatsMap map[globalStatsKey]globalStatsInfo
// handleGlobalStats merges partition-level statistics into global-level
// statistics for every entry in globalStatsMap, persists the merged
// stats, and finally dumps historical stats for each affected table.
// It is a no-op when needGlobalStats is false. Merge failures are
// logged (and surfaced as warnings for missing partition stats) but do
// not abort processing of the remaining entries.
func (e *AnalyzeExec) handleGlobalStats(ctx context.Context, needGlobalStats bool, globalStatsMap globalStatsMap) error {
	if !needGlobalStats {
		return nil
	}
	// Distinct table IDs appearing in the merge map.
	globalStatsTableIDs := make(map[int64]struct{})
	for globalStatsID := range globalStatsMap {
		globalStatsTableIDs[globalStatsID.tableID] = struct{}{}
	}
	statsHandle := domain.GetDomain(e.Ctx()).StatsHandle()
	// NOTE(review): tableIDs is filled with exactly the keys of
	// globalStatsTableIDs below and could be replaced by it directly.
	tableIDs := map[int64]struct{}{}
	for tableID := range globalStatsTableIDs {
		tableIDs[tableID] = struct{}{}
		// Partition stats cache shared by all merges of this table.
		tableAllPartitionStats := make(map[int64]*statistics.Table)
		for globalStatsID, info := range globalStatsMap {
			if globalStatsID.tableID != tableID {
				continue
			}
			job := e.newAnalyzeHandleGlobalStatsJob(globalStatsID)
			if job == nil {
				logutil.BgLogger().Warn("cannot find the partitioned table, skip merging global stats", zap.Int64("tableID", globalStatsID.tableID))
				continue
			}
			AddNewAnalyzeJob(e.Ctx(), job)
			StartAnalyzeJob(e.Ctx(), job)
			mergeStatsErr := func() error {
				// Per-table analyze options (v2) override the
				// statement-level options when present.
				globalOpts := e.opts
				if e.OptionsMap != nil {
					if v2Options, ok := e.OptionsMap[globalStatsID.tableID]; ok {
						globalOpts = v2Options.FilledOpts
					}
				}
				globalStats, err := statsHandle.MergePartitionStats2GlobalStatsByTableID(e.Ctx(), globalOpts, e.Ctx().GetInfoSchema().(infoschema.InfoSchema),
					globalStatsID.tableID, info.isIndex, info.histIDs,
					tableAllPartitionStats)
				if err != nil {
					logutil.BgLogger().Warn("merge global stats failed",
						zap.String("info", job.JobInfo), zap.Error(err), zap.Int64("tableID", tableID))
					if types.ErrPartitionStatsMissing.Equal(err) || types.ErrPartitionColumnStatsMissing.Equal(err) {
						// When we find some partition-level stats are missing, we need to report warning.
						e.Ctx().GetSessionVars().StmtCtx.AppendWarning(err)
					}
					return err
				}
				for i := 0; i < globalStats.Num; i++ {
					hg, cms, topN := globalStats.Hg[i], globalStats.Cms[i], globalStats.TopN[i]
					if hg == nil {
						// All partitions have no stats so global stats are not created.
						continue
					}
					// fms for global stats doesn't need to dump to kv.
					err = statsHandle.SaveStatsToStorage(globalStatsID.tableID,
						globalStats.Count,
						globalStats.ModifyCount,
						info.isIndex,
						hg,
						cms,
						topN,
						info.statsVersion,
						1,
						true,
						handle.StatsMetaHistorySourceAnalyze,
					)
					if err != nil {
						logutil.Logger(ctx).Error("save global-level stats to storage failed", zap.String("info", job.JobInfo),
							zap.Int64("histID", hg.ID), zap.Error(err), zap.Int64("tableID", tableID))
					}
				}
				return err
			}()
			FinishAnalyzeMergeJob(e.Ctx(), job, mergeStatsErr)
		}
	}
	for tableID := range tableIDs {
		// Dump stats to historical storage.
		if err := recordHistoricalStats(e.Ctx(), tableID); err != nil {
			logutil.BgLogger().Error("record historical stats failed", zap.Error(err))
		}
	}
	return nil
}
// newAnalyzeHandleGlobalStatsJob builds the bookkeeping job used while
// merging partition-level stats into global stats for the table/index
// identified by key. It returns nil when the table or its schema is no
// longer present in the info schema (e.g. dropped mid-analyze).
func (e *AnalyzeExec) newAnalyzeHandleGlobalStatsJob(key globalStatsKey) *statistics.AnalyzeJob {
	dom := domain.GetDomain(e.Ctx())
	is := dom.InfoSchema()
	table, ok := is.TableByID(key.tableID)
	if !ok {
		return nil
	}
	db, ok := is.SchemaByTable(table.Meta())
	if !ok {
		return nil
	}
	dbName := db.Name.String()
	tableName := table.Meta().Name.String()
	jobInfo := fmt.Sprintf("merge global stats for %v.%v columns", dbName, tableName)
	// indexID == -1 denotes column stats; otherwise it names an index.
	if key.indexID != -1 {
		idxName := table.Meta().FindIndexNameByID(key.indexID)
		jobInfo = fmt.Sprintf("merge global stats for %v.%v's index %v", dbName, tableName, idxName)
	}
	// Reuse the already-computed names instead of re-stringifying them
	// (the original called db.Name.String()/table.Meta().Name.String()
	// a second time here).
	return &statistics.AnalyzeJob{
		DBName:    dbName,
		TableName: tableName,
		JobInfo:   jobInfo,
	}
}
|
package g2gin
import (
"github.com/gin-gonic/gin"
"github.com/atcharles/gof/v2/j2rpc"
)
// ItfGinRouter is implemented by modules that can both register plain
// gin routes and attach handlers to a j2rpc server.
type ItfGinRouter interface {
	// Router registers the module's HTTP routes on the given group.
	Router(g *gin.RouterGroup)
	// J2rpc attaches the module to the given RPC server.
	J2rpc(jsv j2rpc.RPCServer)
}
|
package main
import (
"fmt"
"html/template"
"io/ioutil"
"net/http"
)
// myMux is a minimal hand-rolled request multiplexer.
// NOTE(review): it is not registered anywhere in this file — main uses
// http.HandleFunc instead; confirm whether it is still needed.
type myMux struct {
}

// ServeHTTP serves only the root path. Any other path falls through and
// receives an empty 200 response (no 404 is written).
func (m myMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path == "/" {
		myHello(w, r)
	}
}
// myHello writes a greeting to the client and logs the request URL and
// form values to stdout.
func myHello(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, "hello vegetable540")
	fmt.Println(r.URL)
	// NOTE(review): r.Form is only populated after ParseForm, which is
	// not called here — this likely always prints an empty map.
	fmt.Println(r.Form)
}
// favicon serves the site icon from logo.ico.
func favicon(w http.ResponseWriter, r *http.Request) {
	icon, err := ioutil.ReadFile("logo.ico")
	if err != nil {
		fmt.Println(err)
		// Bug fix: execution previously fell through and "wrote" the
		// nil slice; now the handler reports 404 and stops.
		http.Error(w, "favicon not found", http.StatusNotFound)
		return
	}
	// Bug fix: fmt.Fprint(w, icon) rendered the bytes as decimal text
	// like "[137 80 78 ...]" instead of sending the binary image.
	w.Header().Set("Content-Type", "image/x-icon")
	w.Write(icon)
}
// login renders the login page on GET and dumps the submitted form
// fields on any other method.
func login(w http.ResponseWriter, r *http.Request) {
	if r.Method == "GET" {
		t, err := template.ParseFiles("login/login.html")
		if err != nil {
			fmt.Print(err)
		}
		// NOTE(review): t is nil when ParseFiles failed above, so this
		// Execute would panic — confirm the template always exists.
		err = t.Execute(w, nil)
		if err != nil {
			fmt.Print(err)
		}
	} else {
		// ParseForm's error is ignored (best effort).
		r.ParseForm()
		// NOTE(review): "usename" looks like a typo for "username" —
		// verify against the input name in login.html before changing.
		fmt.Println(r.Form["usename"])
		fmt.Println(r.Form["pwd"])
	}
}
// main wires up the handlers and serves HTTP on :8080 until the
// listener fails.
func main() {
	http.HandleFunc("/", myHello)
	// http.HandleFunc("/favicon.ico", favicon)
	http.HandleFunc("/login", login)
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Println(err)
	}
}
|
package main
import (
"bufio"
"fmt"
"net/http"
"os"
"strings"
"github.com/gorilla/websocket"
"github.com/mrWinston/knuffon/backend/manager"
log "github.com/sirupsen/logrus"
)
// readStdinLine prints question, then reads a single line from stdin and
// returns it with surrounding whitespace trimmed, along with any read error.
func readStdinLine(question string) (string, error) {
	fmt.Println(question)
	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	return strings.TrimSpace(line), err
}
// checkOrigin accepts every origin, disabling the websocket upgrader's
// same-origin protection. NOTE(review): deliberate here, but this exposes
// the endpoint to cross-site websocket hijacking on a public deployment.
func checkOrigin(_ *http.Request) bool {
	return true
}
// upgrader promotes plain HTTP connections to websocket connections.
// CheckOrigin is overridden to accept any origin (see checkOrigin).
var upgrader = websocket.Upgrader{
	EnableCompression: true,
	CheckOrigin: checkOrigin,
}

// gm is the single game manager shared by all websocket connections.
var gm = manager.NewGameManager()
// HandleWS upgrades the HTTP request to a websocket connection, then pumps
// every received message into the shared game manager until the connection
// fails or closes.
func HandleWS(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
		}).Error("Got an Error during Connection Upgrade")
		return
	}
	// The original never closed the connection, leaking it on every exit.
	defer conn.Close()
	for {
		_, msg, err := conn.ReadMessage()
		if err != nil {
			// Fixed copy-pasted log text: this failure happens while
			// reading a message, not during the upgrade.
			log.WithFields(log.Fields{
				"error": err,
			}).Error("Got an Error while Reading a Message")
			return
		}
		log.WithFields(log.Fields{"msg": msg}).Debug("Got a new Message")
		gm.AddMessage(&manager.RawMessage{Raw: msg, Conn: conn})
	}
}
// main configures debug logging to stdout, registers the websocket endpoint
// and blocks serving on :8000; the game manager is stopped on the way out.
func main() {
	log.SetOutput(os.Stdout)
	log.SetLevel(log.DebugLevel)
	log.SetReportCaller(true)
	defer gm.Stop()
	http.HandleFunc("/ws", HandleWS)
	if err := http.ListenAndServe(":8000", nil); err != nil {
		log.Error(err)
	}
}
//func main_old() {
//
// g := game.CreateGame([]*game.Player{
// game.NewPlayer("Alf", "1"),
// game.NewPlayer("Rudi", "2"),
// game.NewPlayer("Christin", "3"),
// game.NewPlayer("Rachel", "4"),
// })
// fmt.Println("Commands: roll, reset, exit")
// var result *game.RollResult
// for {
// currPlayer := g.GetCurrentPlayer().Name
// input, err := readStdinLine(fmt.Sprintf("What to Do?(%s)", currPlayer))
// if err != nil {
// log.Fatal(err)
// }
//
// input = strings.TrimSpace(input)
//
// if input == "roll" {
// diceToRoll := queryDiceToRoll()
// result, err = g.Roll(diceToRoll)
// if err != nil {
// fmt.Println(err)
// } else {
// printResult(result)
// }
//
// } else if input == "done" {
// err := g.TurnDone(queryResultToChoose(result))
// if err != nil {
// log.Fatal(err)
// }
// } else if input == "exit" {
// log.Fatal("Let's get outta here")
// } else {
// fmt.Println("Whut?")
// }
// }
//}
//
//func queryDiceToRoll() []bool {
// diceToRollRaw, _ := readStdinLine("Which ones? 1 - 5, comma separated")
// return []bool{
// strings.Contains(diceToRollRaw, "1"),
// strings.Contains(diceToRollRaw, "2"),
// strings.Contains(diceToRollRaw, "3"),
// strings.Contains(diceToRollRaw, "4"),
// strings.Contains(diceToRollRaw, "5"),
// }
//}
//
//func printDice(dice []int) {
// res := ""
// dieToString := []string{
// "⚀", "⚁", "⚂", "⚃", "⚄", "⚅",
// }
//
// for _, die := range dice {
// res += " " + dieToString[die-1]
// }
//
// fmt.Printf("Dice Are: %s\n", res)
//}
//
//func printResult(res *game.RollResult) {
// printDice(res.Dice)
// fmt.Printf("Results Are: \n")
// for rt, score := range res.Result {
// fmt.Printf("%s: %d\n", rt, score)
// }
//}
//
//func queryResultToChoose(res *game.RollResult) game.ResultType {
// var intToResultType map[int]game.ResultType = map[int]game.ResultType{}
// i := 0
// for rt, score := range res.Result {
// fmt.Printf("%v - %s: %d\n", i, rt, score)
// intToResultType[i] = rt
// i++
// }
// numRaw, _ := readStdinLine("Select a Number")
// num, err := strconv.Atoi(numRaw)
// for err != nil {
// fmt.Printf("'%s' is not a number\n", numRaw)
// numRaw, _ = readStdinLine("Select a Number")
// num, err = strconv.Atoi(numRaw)
// }
// return intToResultType[num]
//
//}
|
package main
/**
95. 不同的二叉搜索树 II
给定一个整数 n,生成所有由 1 ... n 为节点所组成的 二叉搜索树 。
示例1:
```
输入:3
输出:
[
[1,null,3,2],
[3,2,null,1],
[3,1,null,null,2],
[2,1,3],
[1,null,2,null,3]
]
解释:
以上的输出对应以下 5 种不同结构的二叉搜索树:
1 3 3 2 1
\ / / / \ \
3 2 1 1 3 2
/ / \ \
2 1 2 3
```
提示:
- `0 <= n <= 8`
*/
// GenerateTrees returns every structurally unique binary search tree whose
// nodes hold the values 1..n. For n == 0 it returns nil. The number of
// trees is the n-th Catalan number (5 for n == 3).
//
// The original was an unimplemented stub that always returned nil.
func GenerateTrees(n int) []*TreeNode {
	if n < 1 {
		return nil
	}
	return buildTrees(1, n)
}

// buildTrees generates all BSTs containing exactly the values lo..hi.
// An empty interval yields a single nil tree so the cartesian product in
// the loop still produces one combination. Subtrees are shared between
// results (the trees are read-only views), as in the standard solution.
func buildTrees(lo, hi int) []*TreeNode {
	if lo > hi {
		return []*TreeNode{nil}
	}
	var result []*TreeNode
	for v := lo; v <= hi; v++ {
		lefts := buildTrees(lo, v-1)
		rights := buildTrees(v+1, hi)
		for _, l := range lefts {
			for _, r := range rights {
				result = append(result, &TreeNode{Val: v, Left: l, Right: r})
			}
		}
	}
	return result
}

// TreeNode is a binary (search) tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
|
package kata
import ("fmt"
// "strings"
)
// CreatePhoneNumber formats ten digits as a US phone number,
// e.g. [1 2 3 4 5 6 7 8 9 0] -> "(123) 456-7890".
//
// Replaces the original's manual += loop with positional if-checks
// (non-gofmt, parenthesized conditions) by a single format string.
func CreatePhoneNumber(numbers [10]uint) string {
	return fmt.Sprintf("(%d%d%d) %d%d%d-%d%d%d%d",
		numbers[0], numbers[1], numbers[2],
		numbers[3], numbers[4], numbers[5],
		numbers[6], numbers[7], numbers[8], numbers[9])
}
|
package function
import (
"fmt"
)
// ErrFunctionNotFound occurs when a function couldn't be found in the discovery.
type ErrFunctionNotFound struct {
	ID ID
}

// Error implements the error interface.
func (e ErrFunctionNotFound) Error() string {
	id := string(e.ID)
	return fmt.Sprintf("Function %q not found.", id)
}
// ErrFunctionAlreadyRegistered occurs when a function with the specified
// name is already registered.
type ErrFunctionAlreadyRegistered struct {
	ID ID
}

// Error implements the error interface.
func (e ErrFunctionAlreadyRegistered) Error() string {
	id := string(e.ID)
	return fmt.Sprintf("Function %q already registered.", id)
}
// ErrFunctionValidation occurs when a function payload doesn't validate.
type ErrFunctionValidation struct {
	Message string
}

// Error implements the error interface, embedding the validation message.
func (e ErrFunctionValidation) Error() string {
	const format = "Function doesn't validate. Validation error: %s"
	return fmt.Sprintf(format, e.Message)
}
// ErrFunctionIsAuthorizer occurs when a function cannot be deleted because
// it is used as an authorizer.
type ErrFunctionIsAuthorizer struct {
	ID        ID
	EventType string
}

// Error implements the error interface.
func (e ErrFunctionIsAuthorizer) Error() string {
	const format = "Function %s cannot be deleted because is used as an authorizer for %s event type."
	return fmt.Sprintf(format, e.ID, e.EventType)
}
// ErrFunctionCallFailed occurs when a function call failed.
type ErrFunctionCallFailed struct {
	Original error
}

// Error implements the error interface, embedding the underlying error text.
func (e ErrFunctionCallFailed) Error() string {
	const format = "Function call failed. Error: %s"
	return fmt.Sprintf(format, e.Original)
}
// ErrFunctionAccessDenied occurs when the Event Gateway doesn't have access
// to call a function.
type ErrFunctionAccessDenied struct {
	Original error
}

// Error implements the error interface, embedding the underlying error text.
func (e ErrFunctionAccessDenied) Error() string {
	const format = "Function access denied. Error: %s"
	return fmt.Sprintf(format, e.Original)
}
// ErrFunctionProviderError occurs when a function call failed because of a
// provider error.
type ErrFunctionProviderError struct {
	Original error
}

// Error implements the error interface, embedding the underlying error text.
func (e ErrFunctionProviderError) Error() string {
	const format = "Function call failed because of provider error. Error: %s"
	return fmt.Sprintf(format, e.Original)
}
// ErrFunctionError occurs when a function call failed because of a runtime
// error inside the function itself.
type ErrFunctionError struct {
	Original error
}

// Error implements the error interface, embedding the underlying error text.
func (e ErrFunctionError) Error() string {
	const format = "Function call failed because of runtime error. Error: %s"
	return fmt.Sprintf(format, e.Original)
}
// ErrFunctionHasSubscriptions occurs when a function that still has at least
// one subscription is being deleted.
type ErrFunctionHasSubscriptions struct{}

// Error implements the error interface. The original used fmt.Sprintf with a
// constant format string and no arguments (flagged by go vet/staticcheck) and
// misspelled "at least" as "a least".
func (e ErrFunctionHasSubscriptions) Error() string {
	return "Function cannot be deleted because it's subscribed to at least one event."
}
|
//go:build amd64 || arm64
// +build amd64 arm64
package as_test
import (
"math"
"testing"
"github.com/lunemec/as"
)
// TestInt16 checks as.Int16 against every integer type: values that fit in
// int16 must convert without error, values that can exceed its range must
// error.
func TestInt16(t *testing.T) {
	assertNoError(t, as.Int16, int8(math.MinInt8))
	assertNoError(t, as.Int16, int8(math.MaxInt8))
	pointerToMaxInt8 := int8(math.MaxInt8)
	// Fixed copy-paste bug: this line previously called as.Int, which
	// belongs in TestInt — the pointer path of as.Int16 was never covered.
	assertNoError(t, as.Int16, &pointerToMaxInt8)
	assertNoError(t, as.Int16, int16(math.MinInt16))
	assertNoError(t, as.Int16, int16(math.MaxInt16))
	assertError(t, as.Int16, int32(math.MinInt32))
	assertError(t, as.Int16, int32(math.MaxInt32))
	assertError(t, as.Int16, int64(math.MinInt64))
	assertError(t, as.Int16, int64(math.MaxInt64))
	assertError(t, as.Int16, int(math.MinInt64))
	assertError(t, as.Int16, int(math.MaxInt64))
	assertNoError(t, as.Int16, uint8(0))
	assertNoError(t, as.Int16, uint8(math.MaxUint8))
	assertNoError(t, as.Int16, uint16(0))
	assertError(t, as.Int16, uint16(math.MaxUint16))
	assertNoError(t, as.Int16, uint32(0))
	assertError(t, as.Int16, uint32(math.MaxUint32))
	assertNoError(t, as.Int16, uint64(0))
	assertError(t, as.Int16, uint64(math.MaxUint64))
	assertNoError(t, as.Int16, uint(0))
	assertError(t, as.Int16, uint(math.MaxUint64))
}
// out16 is a package-level sink; assigning the benchmark result here keeps
// the compiler from eliminating the loop body as dead code.
var out16 int16

// BenchmarkAs16 measures the cost of the checked conversion as.Int16.
// Recorded run:
// BenchmarkAs16-8 36798218 32.61 ns/op 39 B/op 1 allocs/op
// (presumably the alloc comes from boxing n into an interface — TODO confirm)
func BenchmarkAs16(b *testing.B) {
	var t int16
	for n := 0; n < b.N; n++ {
		t, _ = as.Int16(n)
	}
	// Publish the result so the loop above cannot be optimized away.
	out16 = t
}
// BenchmarkInt16 is the baseline: a raw, unchecked int16() conversion, for
// comparison against BenchmarkAs16. Recorded run:
// BenchmarkInt16-8 1000000000 0.3211 ns/op 0 B/op 0 allocs/op
func BenchmarkInt16(b *testing.B) {
	var t int16
	for n := 0; n < b.N; n++ {
		t = int16(n)
	}
	// Publish the result so the loop above cannot be optimized away.
	out16 = t
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.