text stringlengths 11 4.05M |
|---|
package main
import (
"fmt"
"strings"
)
// main demonstrates Go's variable declaration forms, integer casting, and a
// few helpers from the strings package. The printed output is unchanged.
func main() {
	// Classic two-step declaration followed by assignment.
	var x int
	var who string
	x = 2
	who = "Budi"
	fmt.Println(x)
	fmt.Println(who)

	// Grouped declaration with inferred types.
	var (
		p = 3
		q = 3
	)
	fmt.Println(p + q)

	// Mixed-width integers must be converted to a common type before adding.
	var small int32
	var big int64
	fmt.Println(int64(small) + big)

	// Short declarations — only legal inside a function body.
	lhs := 2
	rhs := 8
	girl := "Dina"
	fmt.Println(lhs + rhs)
	fmt.Println(girl)

	// strings-package helpers.
	phrase := "My Name"
	word := "Name"
	fmt.Println(strings.Contains(phrase, word))        // substring search
	fmt.Println(strings.ReplaceAll(phrase, "m", "LA")) // replace every lowercase "m"
	fmt.Println(strings.Split(phrase, " "))            // split on spaces
	fmt.Println(phrase + word)                         // concatenation
}
|
package main_test
import (
"github.com/stretchr/testify/assert"
"go-restapi/routes"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"testing"
)
/* REST API TESTS */
// HttpTestCase describes one table-driven HTTP test: the request to issue,
// the status the handler should produce, and the path of a JSON fixture
// file holding the expected response body.
type HttpTestCase struct {
method string // HTTP verb, e.g. "POST"
path string // request target passed to httptest.NewRequest
jsonParams string // request payload (not used by the visible tests)
expectedStatus int // expected HTTP status code
expectedResponseBody string // path to the mock JSON fixture on disk
handler func(http.ResponseWriter, *http.Request) // optional direct handler (unused by TestAPI)
}
// globalT holds the *testing.T of the currently running test so free
// functions such as handlerBody can log/fail through it.
var globalT *testing.T
// handlerBody loads the JSON fixture at source and returns it as a string.
// Any I/O failure now aborts the current test via globalT instead of being
// silently ignored (the old version dropped both errors and also leaked the
// open file handle).
func handlerBody(source string) string {
	jsonFile, err := os.Open(source)
	if err != nil {
		globalT.Fatalf("cannot open fixture %q: %v", source, err)
	}
	defer jsonFile.Close() // previously never closed
	body, err := ioutil.ReadAll(jsonFile)
	if err != nil {
		globalT.Fatalf("cannot read fixture %q: %v", source, err)
	}
	globalT.Log("Load Test JSON Source", source)
	return string(body)
}
// httpCases is the table of requests exercised by TestAPI. Each entry pairs
// a request with an expected status code and a fixture file whose contents
// are the expected response body.
var httpCases = []HttpTestCase{
{
method: "POST",
// NOTE(review): ":user" looks like a route *pattern* segment leaked into a
// concrete request URL — every sibling case uses the literal "user"
// segment. Confirm whether this is intentional.
path: "http://localhost:8080/:user/register",
expectedStatus: http.StatusOK,
expectedResponseBody: "../mockdata/register200.json",
},
{
method: "POST",
path: "http://localhost:8080/user/register",
expectedStatus: http.StatusBadRequest,
expectedResponseBody: "../mockdata/register400.json",
},
{
method: "POST",
path: "http://localhost:8080/user/register",
expectedStatus: http.StatusUnprocessableEntity,
expectedResponseBody: "../mockdata/register422.json",
},
{
method: "POST",
// NOTE(review): missing scheme/host and leading slash compared with every
// other case ("http://localhost:8080/user/find") — confirm intent.
path: "user/find",
expectedStatus: http.StatusOK,
expectedResponseBody: "../mockdata/find200.json",
},
{
method: "POST",
path: "http://localhost:8080/user/find",
expectedStatus: http.StatusBadRequest,
expectedResponseBody: "../mockdata/find400.json",
},
{
method: "POST",
path: "http://localhost:8080/user/find",
expectedStatus: http.StatusUnprocessableEntity,
expectedResponseBody: "../mockdata/find422.json",
},
{
method: "POST",
path: "http://localhost:8080/user/find2",
expectedStatus: http.StatusOK,
expectedResponseBody: "../mockdata/find200.json",
},
{
method: "POST",
path: "http://localhost:8080/user/find2",
expectedStatus: http.StatusBadRequest,
expectedResponseBody: "../mockdata/find400.json",
},
{
method: "POST",
path: "http://localhost:8080/user/find2",
expectedStatus: http.StatusUnprocessableEntity,
expectedResponseBody: "../mockdata/find422.json",
},
}
// performRequest dispatches req through r and returns the recorder holding
// the handler's actual response, checking the status code along the way.
//
// Fix: the previous version called writer.WriteHeader(statusCode) *before*
// dispatching (silencing the handler's own status) and then appended the
// expected fixture body into the recorder after dispatching — so TestAPI's
// body comparison mixed expected data into the "actual" response. The
// recorder now contains only what the handler itself wrote.
func performRequest(r http.Handler, req *http.Request, statusCode int, expectedJSON string) *httptest.ResponseRecorder {
	globalT.Log("PerformRequest:", expectedJSON)
	writer := httptest.NewRecorder()
	r.ServeHTTP(writer, req)
	if writer.Code != statusCode {
		globalT.Errorf("status = %d, want %d (path %s)", writer.Code, statusCode, req.URL.Path)
	}
	return writer
}
// TestAPI drives every entry of httpCases through the real router returned
// by routes.SetupRouter and asserts the response body equals the on-disk
// JSON fixture.
func TestAPI(t *testing.T) {
t.Log("\n\n\n\n\n\n########### ################ ############### TestAPI")
// globalT must be set before any helper (handlerBody/performRequest) runs.
globalT = t
router := routes.SetupRouter()
for _, testCase := range httpCases {
t.Log("\n\n\n########### NEW REQUEST ############### ")
t.Log("ResponseBody: Method: ", testCase.method, " Path: ", testCase.path, " StatusCode: ", testCase.expectedStatus)
request := httptest.NewRequest(testCase.method, testCase.path, nil)
writer := performRequest(router, request, testCase.expectedStatus, testCase.expectedResponseBody)
response := writer.Result()
body, _ := ioutil.ReadAll(response.Body)
t.Log("ResponseBody: ", string(body))
expectedJSON := handlerBody(testCase.expectedResponseBody)
// NOTE(review): performRequest (as written) injects the fixture body into
// the recorder itself, so this equality does not exercise the handler —
// see the note on performRequest.
assert.Equal(t, expectedJSON, string(body))
//var response map[string]string
//json.Unmarshal([]byte(writer.Result()))
}
}
/*
func NotTestRestAPI(t *testing.T) {
t.Log("\n\n\n\n\n\n\n\n\n\n\n\n########### ################ ############### TestRESTAPI()")
globalT = t
for _, testCase := range httpCases {
t.Log("\n\n\n\n\n\n########### NEW REQUEST ############### ")
request := httptest.NewRequest(testCase.method, testCase.path, nil)
writer := httptest.NewRecorder()
testCase.handler(writer, request)
response := writer.Result()
body, _ := ioutil.ReadAll(response.Body)
t.Log(fmt.Sprintf("%+v\n", response))
if testCase.expectedStatus != response.StatusCode {
t.Fatal("Status Code Failed at ", testCase.path, " with Code: ", testCase.expectedStatus)
}
expectedJSON := handlerBody(testCase.expectedResponseBody)
t.Log("\n\n#####\n - compare?")
t.Log("ResponseBody: ", string(body))
t.Log(" Expected: ", expectedJSON)
if string(body) != expectedJSON {
t.Fatal("Response Failed on ", testCase.path, " with Status Code: ", testCase.expectedStatus)
}
}
}
func registerTestHandlers200(writer http.ResponseWriter, request *http.Request) {
globalT.Log("\n\n\n\n\n##################################")
globalT.Log("registerTestHandlers200()")
writer.WriteHeader(http.StatusOK)
body := handlerBody("../mockdata/register200.json")
io.WriteString(writer, string(body))
}
func registerTestHandlers400(writer http.ResponseWriter, request *http.Request) {
globalT.Log("\n\n\n\n\n##################################")
globalT.Log("registerTestHandlers400()")
writer.WriteHeader(http.StatusBadRequest)
body := handlerBody("../mockdata/register400.json")
io.WriteString(writer, body)
}
func registerTestHandlers422(writer http.ResponseWriter, request *http.Request) {
globalT.Log("\n\n\n\n\n##################################")
globalT.Log("registerTestHandlers422()")
writer.WriteHeader(http.StatusUnprocessableEntity)
body := handlerBody("../mockdata/register422.json")
io.WriteString(writer, body)
}
func findTestHandlers200(writer http.ResponseWriter, request *http.Request) {
globalT.Log("\n\n\n\n\n##################################")
globalT.Log("findTestHandlers200()")
writer.WriteHeader(http.StatusOK)
body := handlerBody("../mockdata/find200.json")
io.WriteString(writer, body)
}
func findTestHandlers400(writer http.ResponseWriter, request *http.Request) {
globalT.Log("\n\n\n\n\n##################################")
globalT.Log("findTestHandlers400()")
writer.WriteHeader(http.StatusBadRequest)
body := handlerBody("../mockdata/find400.json")
io.WriteString(writer, body)
}
func findTestHandlers422(writer http.ResponseWriter, request *http.Request) {
globalT.Log("\n\n\n\n\n##################################")
globalT.Log("findTestHandlers422()")
writer.WriteHeader(http.StatusUnprocessableEntity)
body := handlerBody("../mockdata/find422.json")
io.WriteString(writer, body)
}
func find2TestHandlers200(writer http.ResponseWriter, request *http.Request) {
globalT.Log("\n\n\n\n\n##################################")
globalT.Log("find2TestHandlers200()")
writer.WriteHeader(http.StatusOK)
body := handlerBody("../mockdata/find200.json")
io.WriteString(writer, body)
}
func find2TestHandlers400(writer http.ResponseWriter, request *http.Request) {
globalT.Log("\n\n\n\n\n##################################")
globalT.Log("find2TestHandlers400()")
writer.WriteHeader(http.StatusBadRequest)
body := handlerBody("../mockdata/find400.json")
io.WriteString(writer, body)
}
func find2TestHandlers422(writer http.ResponseWriter, request *http.Request) {
globalT.Log("\n\n\n\n\n##################################")
globalT.Log("findTestHandlers422()")
writer.WriteHeader(http.StatusUnprocessableEntity)
body := handlerBody("../mockdata/find422.json")
io.WriteString(writer, body)
}
*/
|
//go:build linux || darwin || freebsd || openbsd
// +build linux darwin freebsd openbsd

package pb
const sys_ioctl = 16
|
package global
import (
"encoding/json"
"net/http"
"github.com/garyburd/redigo/redis"
"github.com/felipeguilhermefs/restis/router"
)
// PipelineCommandPayload is one redis command within a pipeline request.
type PipelineCommandPayload struct {
Command string `json:"command"`
Args []interface{} `json:"args,omitempty"`
}
// PipelinePayload is the ordered list of commands to pipeline.
type PipelinePayload []PipelineCommandPayload
// PipelineRoute wires PipelineHandler up to POST /pipeline.
func PipelineRoute(conn redis.Conn) router.Route {
// NOTE(review): unkeyed struct literal — field order must match
// router.Route's declaration; keyed fields would be more robust.
return router.Route{
"/pipeline",
"POST",
PipelineHandler(conn),
}
}
// PipelineHandler returns an http.HandlerFunc that decodes a PipelinePayload
// from the request body, pipelines every command on conn (queue all, flush
// once), and writes the per-command results back as a JSON array.
//
// Fixes: a malformed request body now yields 400 and redis transport
// failures 500, instead of panicking the serving goroutine; errors from
// Send/Flush are no longer ignored.
func PipelineHandler(conn redis.Conn) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var payload PipelinePayload
		if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		// Queue every command first; the single Flush below is what makes
		// this a pipeline rather than N round trips.
		for _, command := range payload {
			var err error
			if command.Args != nil {
				err = conn.Send(command.Command, command.Args...)
			} else {
				err = conn.Send(command.Command)
			}
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
		if err := conn.Flush(); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// One reply per queued command, in order. A per-command failure is
		// recorded in its slot, as before (note: plain errors marshal to "{}").
		result := make([]interface{}, 0, len(payload))
		for range payload {
			res, err := conn.Receive()
			if err != nil {
				result = append(result, err)
			} else {
				result = append(result, res)
			}
		}
		// The status line is already committed at this point; an encode
		// failure has no sane client-visible remedy, so it is dropped.
		_ = json.NewEncoder(w).Encode(result)
	}
}
|
package leet524
import "fmt"
// main is a small manual driver for findLongestWord.
func main() {
	source := "abpcplea"
	dict := []string{"a", "b", "c"}
	fmt.Println("----------", findLongestWord(source, dict))
}
// findLongestWord returns the longest word in d that is a subsequence of s;
// ties on length are broken lexicographically (the smaller string wins).
func findLongestWord(s string, d []string) string {
	best := ""
	for _, candidate := range d {
		longer := len(candidate) > len(best)
		sameLenSmaller := len(candidate) == len(best) && candidate < best
		if (longer || sameLenSmaller) && isSubStr(s, candidate) {
			best = candidate
		}
	}
	return best
}
// isSubStr reports whether t is a subsequence of s, using a two-pointer
// scan: advance through s, matching characters of t in order.
func isSubStr(s string, t string) bool {
	j := 0
	for i := 0; i < len(s) && j < len(t); i++ {
		if s[i] == t[j] {
			j++
		}
	}
	return j == len(t)
}
|
package leetcode
/*给定一个二叉搜索树, 找到该树中两个指定节点的最近公共祖先。
百度百科中最近公共祖先的定义为:“对于有根树 T 的两个结点 p、q,最近公共祖先表示为一个结点 x,满足 x 是 p、q 的祖先且 x 的深度尽可能大(一个节点也可以是它自己的祖先)。”
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/er-cha-sou-suo-shu-de-zui-jin-gong-gong-zu-xian-lcof
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/
/**
* Definition for a binary tree node.
* public class TreeNode {
* int val;
* TreeNode left;
* TreeNode right;
* TreeNode(int x) { val = x; }
* }
*/
// Finds the lowest common ancestor of p and q by collecting each node's
// path from the target up to the root, then returning the first node of
// p's path that also appears on q's path.
// NOTE(review): nodes are matched by value (root.val == target.val), so
// this assumes all node values are distinct — which holds for the BST
// problem referenced above.
class Solution {
public TreeNode lowestCommonAncestor(TreeNode root, TreeNode p, TreeNode q) {
TreeNode rlt = null;
// After treeSearch, each list holds the path target -> ... -> root.
List<TreeNode> listP = new ArrayList<>();
List<TreeNode> listQ = new ArrayList<>();
treeSearch(listP, root, p);
treeSearch(listQ, root, q);
// Walk p's path upward; the first node shared with q's path is the LCA.
for(int i = 0; i < listP.size(); i++) {
if(listQ.contains(listP.get(i))) {
rlt = listP.get(i);
break;
}
}
return rlt;
}
// treeSearch performs a DFS for target; on the way back up it appends each
// ancestor, so list ends up ordered from target to root. Returns whether
// target was found in this subtree.
private boolean treeSearch(List<TreeNode> list, TreeNode root, TreeNode target) {
if(root == null) {
return false;
}
if(root.val == target.val) {
list.add(root);
return true;
}
boolean rlt = treeSearch(list, root.left, target) || treeSearch(list, root.right, target);
if(rlt) {
list.add(root);
}
return rlt;
}
}
package Problem0393
// validUtf8 reports whether data — one byte per int — forms a valid UTF-8
// byte sequence.
func validUtf8(data []int) bool {
	pending := 0 // continuation bytes still expected for the current rune
	for _, b := range data {
		if pending > 0 {
			// Every continuation byte must look like 0b10xxxxxx.
			if b>>6 != 2 {
				return false
			}
			pending--
			continue
		}
		// Leading byte: the high bits encode the total sequence length.
		switch {
		case b>>7 == 0: // 0xxxxxxx — single-byte rune
		case b>>5 == 6: // 110xxxxx — 2-byte rune
			pending = 1
		case b>>4 == 14: // 1110xxxx — 3-byte rune
			pending = 2
		case b>>3 == 30: // 11110xxx — 4-byte rune
			pending = 3
		default: // 10xxxxxx or 11111xxx can never lead a sequence
			return false
		}
	}
	// A trailing incomplete sequence is invalid.
	return pending == 0
}
|
package todoController
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/user/gogo/models"
)
// FetchAllTodo handles GET: it loads every todo from the database and
// responds with the transformed (public) representation, or 404 with a
// message when none exist.
func FetchAllTodo(c *gin.Context) {
	var todos []models.TodoModel
	db.Find(&todos)
	if len(todos) == 0 { // was `<= 0`; len is never negative
		c.JSON(http.StatusNotFound, gin.H{"status": http.StatusNotFound, "message": "No todo found!"})
		return
	}
	// Transform DB rows into the API shape; the numeric Completed column
	// (1/0) maps directly onto a boolean, replacing the old if/else.
	_todos := make([]models.TransformedTodo, 0, len(todos))
	for _, item := range todos {
		_todos = append(_todos, models.TransformedTodo{
			ID:        item.ID,
			Title:     item.Title,
			Completed: item.Completed == 1,
		})
	}
	c.JSON(http.StatusOK, gin.H{"status": http.StatusOK, "data": _todos})
}
|
package filters
import (
"bufio"
"io"
)
// asciiCleaner is an io.Reader filter that replaces every byte from its
// source that is neither printable ASCII nor a whitelisted control
// character (see `safe`) with '_'.
type asciiCleaner struct {
buf []byte // staging buffer between the source and the caller
src *bufio.Reader // buffered source stream
index int // start of unconsumed bytes within buf
next int // end of valid bytes within buf (next fill position)
}
// NewAsciiCleaner wraps reader in a filter that substitutes '_' for every
// byte outside printable ASCII that is not a whitelisted control character.
func NewAsciiCleaner(reader io.Reader) io.Reader {
	return &asciiCleaner{
		buf: make([]byte, 512),
		src: bufio.NewReader(reader),
	}
}
// Read fills p with cleaned bytes from the source.
//
// Fix: the old version returned the source error (typically io.EOF)
// immediately, discarding any bytes still buffered from a previous call.
// Buffered data is now drained first; the error surfaces on a later call
// once the buffer is empty.
func (ac *asciiCleaner) Read(p []byte) (int, error) {
	n, err := ac.src.Read(ac.buf[ac.next:])
	if err != nil && ac.index == ac.next {
		// Nothing buffered to deliver — propagate the error.
		return 0, err
	}
	ac.next += n
	n = ac.cleanAscii(ac.buf[ac.index:ac.next], p)
	if ac.index == ac.next {
		ac.index = 0
		ac.next = 0
	}
	// Compact the unconsumed tail to the front of buf.
	copy(ac.buf, ac.buf[ac.index:ac.next])
	ac.next -= ac.index
	ac.index = 0
	return n, nil
}
// safe whitelists control characters that pass through unmodified; every
// other byte outside the printable range [32,127) is replaced with '_'.
var safe = map[byte]bool{
'\a': true, // alert/bell
'\b': true, // backspace
'\f': true, // form feed
'\n': true, // line feed/newline
'\r': true, // carriage return
'\t': true, // horizontal tab
'\v': true, // vertical tab
}
// cleanAscii copies bytes from in to out, replacing each byte that is not
// printable ASCII (32..126) and not whitelisted in `safe` with '_'. It
// advances ac.index once per input byte consumed and returns the number of
// bytes written.
//
// Fix: the output bound must be len(out), not cap(out) — indexing out[cnt]
// with cnt >= len(out) panics even when spare capacity exists. The bound is
// also checked before writing, so a zero-length out no longer panics.
func (ac *asciiCleaner) cleanAscii(in, out []byte) int {
	outsz := len(out)
	cnt := 0
	for _, c := range in {
		if cnt >= outsz {
			break
		}
		ac.index++
		if !safe[c] && (c < 32 || c >= 127) {
			c = '_'
		}
		out[cnt] = c
		cnt++
	}
	return cnt
}
|
// START ONE OMIT
// SendMessage sends an SMS through Twilio using App Engine's urlfetch HTTP
// client for the given request's context. The client error, if any, is
// logged and returned to the caller.
func SendMessage(req *http.Request, to, from, message string) (err error) {
c := appengine.NewContext(req)
client := urlfetch.Client(c)
t := twilioclient.NewTwilioClient(twilioSID, twilioSecret)
err = t.SendMessage(*client, to, from, message)
c.Debugf("SENDING %s %s %s", to, from, message)
if err != nil {
c.Debugf("Twilio Error: %s", err.Error())
}
return
}
// STOP ONE OMIT
// START TWO OMIT
// SendMessage posts the message to Twilio's REST API. It returns the
// transport error, or a synthesized error for any non-2xx HTTP status.
func (t *TwilioClient) SendMessage(client http.Client, toNumber, fromNumber, message string) (err error) {
	...
	twilioRequest.SetBasicAuth(t.AccountSid, t.AuthToken)
	twilioRequest.Header.Add("Content-type", "application/x-www-form-urlencoded")
	resp, clientError := client.Do(twilioRequest)
	// Fix: check the transport error before touching resp — on failure resp
	// is nil, and the old unconditional `defer resp.Body.Close()` panicked.
	if clientError != nil {
		return clientError
	}
	defer resp.Body.Close()
	if resp.StatusCode > 299 {
		err = errors.New(fmt.Sprintf("TWILIO ERROR: %d", resp.StatusCode))
	}
	// Fix: the old version returned clientError (always nil here), silently
	// discarding the status-code error assigned above.
	return err
}
// STOP TWO OMIT |
/*package main
import (
//"encoding/json"
"fmt"
)
func main(){
//json.MarshalIndent()
}
*/
package main
import (
"strings"
"fmt"
)
// Rule-string layout constants.
const (
ONE_IPTABLES_RULES = int(1) // the rule string contains exactly one rule
IPTABLES_PARAMS_NUM = int(8) // comma-separated fields required per rule
)
// IptablesRules holds one parsed packet-loss rule.
type IptablesRules struct {
PackageLossType string
ExecCmdIp string
ExecCmdPort string
PeerIp string
PeerPort string
PacketLossRate string
RunningPackageLoss bool // whether the packet-loss rule is currently running
RunningTime int // how long to run packet loss, i.e. the period after which it stops
StartPackageLoss int // cycle at which the loss command starts (greatest common divisor)
FuncTime string
RunTime string
}
// SplitIptablesRules parses allRules — one rule, or several joined with
// ",end" — where each rule is IPTABLES_PARAMS_NUM comma-separated fields.
// The result is keyed by "<PackageLossType>_<ExecCmdIp>".
//
// Fix: a malformed rule with too few fields previously only printed an
// error and then fell through to the field indexing below, panicking with
// an index-out-of-range; such rules are now skipped.
func SplitIptablesRules(allRules string, rulesNum int) (resolveRes map[string]IptablesRules) {
	var splitRes []string
	if rulesNum == ONE_IPTABLES_RULES {
		splitRes = append(splitRes, allRules)
	} else {
		splitRes = strings.Split(allRules, ",end")
	}
	resolveRes = make(map[string]IptablesRules, rulesNum+1)
	for _, v := range splitRes {
		oneIptablesRules := strings.Split(v, ",")
		if len(oneIptablesRules) < IPTABLES_PARAMS_NUM {
			fmt.Println("IpTables Rules Params Error")
			continue // was missing: the indexing below would panic
		}
		key := oneIptablesRules[0] + "_" + oneIptablesRules[1]
		fmt.Println(key)
		resolveRes[key] = IptablesRules{
			PackageLossType: oneIptablesRules[0],
			ExecCmdIp:       oneIptablesRules[1],
			ExecCmdPort:     oneIptablesRules[2],
			PacketLossRate:  oneIptablesRules[3],
			FuncTime:        oneIptablesRules[4],
			RunTime:         oneIptablesRules[5],
			PeerPort:        oneIptablesRules[6],
			PeerIp:          oneIptablesRules[7],
		}
	}
	return
}
func main(){
var test ="INPUT1,ExecCmdIp,ExecCmdPort,PackageLossRate,FuncTime,RunTime,PeerPort,PeerIp,endINPUT2,ExecCmdIp,ExecCmdPort,PackageLossRate,FuncTime,RunTime,PeerPort,PeerIp,endINPUT3,ExecCmdIp,ExecCmdPort,PackageLossRate,FuncTime,RunTime,PeerPort,PeerIp,endINPUT4,ExecCmdIp,ExecCmdPort,PackageLossRate,FuncTime,RunTime,PeerPort,PeerIp,endOUTPUT5,ExecCmdIp,ExecCmdPort,PackageLossRate,FuncTime,RunTime,PeerPort,PeerIp"
re := SplitIptablesRules(test,5)
for k,v:=range re {
fmt.Printf("key : %v , value : %v\n", k,v )
}
fmt.Println(strings.Split("192.168.2.9_172.3.4.5","_"))
} |
package main
import (
"fmt"
)
// Declare package-level variables (zero-valued until main assigns them).
var name string
var age int
var isOk bool
// Grouped declaration with inferred types.
var (
a = 1
b = 3
c = "dds"
)
// Batch constant declaration.
const (
OK = 200
notFound = 404
)
const (
n1 = 100
n2 // repeats the previous expression: 100
n3 // repeats the previous expression: 100
)
// iota counter: starts at 0 and increments on each ConstSpec line.
const (
a1 = iota //0
a2 = iota //1
a3 //2
)
const (
b1 = iota //0
b2 = iota //1
_ = iota //2 (discarded)
b3 = iota //3
)
// Interrupting the sequence: iota still advances on every line.
const (
c1 = iota //0
c2 = 100 //100
c3 = iota //2
c4 //3
)
// Defining byte-unit orders of magnitude.
const (
_ = iota
KB = 1 << (10 * iota)
MB = 1 << (10 * iota)
GB = 1 << (10 * iota)
TB = 1 << (10 * iota)
PB = 1 << (10 * iota)
)
// Multiple constants on one line: iota is per-line, not per-value.
const (
d1, d2 = iota + 1, iota + 2 //d1:1 d2:2
d3, d4 = iota + 1, iota + 2 //d3:2 d4:3
)
// main exercises the package-level variables and constants declared above,
// then demonstrates formatted integer output in several bases.
func main() {
	fmt.Println("hello world")

	// Assign the package-level variables.
	name = "tom"
	age = 22
	isOk = true
	fmt.Println(name, age, isOk)
	fmt.Println(a, b, c)

	var wang string = "王" // explicit type plus initializer
	var greet = "hai"     // type inferred; cannot later hold another type
	flag := true          // short form — function scope only
	fmt.Println(wang)
	fmt.Println(greet)
	fmt.Println(flag)

	fmt.Println("a1=", a1)
	fmt.Println("a2=", a2)
	fmt.Println("a3=", a3)
	fmt.Println("KB=", KB)
	fmt.Println()

	// A decimal literal, printed in decimal, binary, octal and hex.
	num := 101
	fmt.Printf("%d\n", num)
	fmt.Printf("i1的%%b=%b\n", num)
	fmt.Printf("%o\n", num)
	fmt.Printf("%X\n", num)
	fmt.Printf("%T\n", num)

	var tiny int8 = 9
	tiny = int8(22)
	fmt.Println(tiny)
}
|
package optionsgen
import (
"fmt"
"log"
"os"
"github.com/kazhuravlev/options-gen/internal/generator"
)
// DefaultsFrom selects where default values for option fields come from.
type DefaultsFrom string
const (
DefaultsFromTag DefaultsFrom = "tag" // defaults read from a struct tag
DefaultsFromNone DefaultsFrom = "none" // no defaults
DefaultsFromVar DefaultsFrom = "var" // defaults read from a package variable
DefaultsFromFunc DefaultsFrom = "func" // defaults produced by a function
)
// Defaults configures how default option values are discovered.
type Defaults struct {
From DefaultsFrom `json:"from"`
// Param is function name/variable name for func and var accordingly
Param string `json:"param"`
}
// Run generates the options file: it parses inFilename for structName,
// renders the options template, and writes the result to outFilename.
// showWarnings controls whether parser warnings are logged.
func Run(inFilename, outFilename, structName, packageName string, defaults Defaults, showWarnings bool) error {
// Parse the source file so we get not only the struct but also the tokens
// related to defaults; i.e. `defaults` is a parsing modifier that tells the
// parser to extract the extra information.
var tagName, varName, funcName string
switch defaults.From {
case DefaultsFromNone:
case DefaultsFromTag:
tagName = defaults.Param
if tagName == "" {
tagName = "default"
}
case DefaultsFromVar:
varName = defaults.Param
if varName == "" {
varName = fmt.Sprintf("default%s", structName)
}
case DefaultsFromFunc:
funcName = defaults.Param
if funcName == "" {
funcName = fmt.Sprintf("getDefault%s", structName)
}
}
optionSpec, warnings, err := generator.GetOptionSpec(inFilename, structName, tagName)
if err != nil {
return fmt.Errorf("cannot get options spec: %w", err)
}
imports, err := generator.GetFileImports(inFilename)
if err != nil {
return fmt.Errorf("cannot get imports: %w", err)
}
res, err := generator.RenderOptions(packageName, structName, imports, optionSpec, tagName, varName, funcName)
if err != nil {
return fmt.Errorf("cannot renderOptions template: %w", err)
}
const perm = 0o644
if err := os.WriteFile(outFilename, res, perm); err != nil {
return fmt.Errorf("cannot write result: %w", err)
}
if showWarnings {
for _, warning := range warnings {
log.Println(warning)
}
}
return nil
}
|
package blobstore
import (
"crypto"
"crypto/rand"
"fmt"
"io"
"os"
"path/filepath"
"strings"
)
const (
defaultPerms = 0750 // permissions for created blob files and directories
vfsRoot = "" // sentinel dir value meaning "start at vfs.dir"
filesAtOnce = 10 // directory entries fetched per Readdir batch
)
// NewFileBlobServer returns a VFSBlobServer using a fileBlobs, that is on top of the os files.
// dir is the root directory for all blobs; hash selects the content-hash
// used to derive key names.
func NewFileBlobServer(dir string, hash crypto.Hash) *VFSBlobServer {
return &VFSBlobServer{fileBlobs{dir}, hash}
}
// fileBlobs is the VirtualFS implementation backed by the OS filesystem,
// rooted at dir.
type fileBlobs struct {
dir string
}
// Open a file contents for reading. key is expected to already be a full
// path (see Keyname).
func (vfs fileBlobs) Open(key string) (io.ReadCloser, error) {
return os.Open(key)
}
// Create a file to write a key's contents for the first time.
// NOTE(review): no O_TRUNC — re-creating an existing, longer file would
// leave stale trailing bytes; confirm keys are only ever created fresh.
func (vfs fileBlobs) Create(key string) (io.WriteCloser, error) {
return os.OpenFile(key, os.O_CREATE|os.O_WRONLY, defaultPerms)
}
// Delete a key & contents from the FS.
func (vfs fileBlobs) Delete(key string) error {
return os.Remove(key)
}
// Does the given key exists in disk?
// NOTE(review): any Stat error other than not-exist (e.g. permission
// denied) is reported as "exists" — confirm that is the intended behavior.
func (vfs fileBlobs) Exists(key string) bool {
_, err := os.Stat(key)
return !os.IsNotExist(err)
}
// Rename moves a blob from oldkey to newkey, creating the destination's
// directory hierarchy first. Usually used once per blob, when writing is
// done and the final hash-derived key is known.
func (vfs fileBlobs) Rename(oldkey, newkey string) error {
	if err := os.MkdirAll(filepath.Dir(newkey), defaultPerms); err != nil {
		return err
	}
	return os.Rename(oldkey, newkey)
}
// ListTo lists all present keys to the keys channel; only filenames the
// acceptor maps to a non-nil Key are sent. Returns false if the walk failed.
// NOTE(review): the original comment promised "sort order", but Readdir
// does not guarantee ordering — confirm whether callers rely on it.
func (vfs fileBlobs) ListTo(keys chan<- KeyOrError, acceptor func(string) Key) bool {
return vfs.listTo(keys, acceptor, vfsRoot)
}
// listTo is the internal recursive implementation of ListTo: it walks dir
// (vfsRoot means vfs.dir), sends each accepted key to keys, and returns
// false if any error was reported through the channel.
//
// Fix: the opened directory handle was never closed, leaking one file
// descriptor per directory visited — and this function recurses into every
// subdirectory.
func (vfs fileBlobs) listTo(keys chan<- KeyOrError, acceptor func(string) Key, dir string) bool {
	if dir == vfsRoot { // start at the root dir
		dir = vfs.dir
	}
	root, err := os.Open(dir)
	if err != nil {
		return failKeyOrError(keys, err)
	}
	defer root.Close() // was missing: fd leak per directory
	for {
		fileInfos, err := root.Readdir(filesAtOnce)
		if err == io.EOF { // on EOF we are done
			return true
		} else if err != nil {
			return failKeyOrError(keys, err)
		}
		for _, fileInfo := range fileInfos {
			if fileInfo.IsDir() {
				// Recurse into the subdirectory; abort the whole walk if
				// that branch failed.
				if !vfs.listTo(keys, acceptor, filepath.Join(dir, fileInfo.Name())) {
					return false
				}
				continue
			}
			// Regular file: strip the extension (".blob") so the acceptor
			// sees the bare key name.
			filename := fileInfo.Name()
			if strings.Contains(filename, ".") {
				filename = strings.Split(filename, ".")[0]
			}
			// A nil key means the acceptor rejected this filename.
			if key := acceptor(filename); key != nil {
				keys <- KeyOrError{key, nil}
			}
		}
	}
}
// Keyname returns the full path where the key's blob lives: four two-hex-digit
// directory levels derived from the key prefix, then "<hexkey>.blob".
func (vfs fileBlobs) Keyname(key Key) string {
hexKey := key.String()
return filepath.Join(vfs.dir, hexKey[0:2], hexKey[2:4], hexKey[4:6], hexKey[6:8], fmt.Sprintf("%s.blob", hexKey))
}
// TmpKeyname returns a temporary filename ("<random>.new" under the root)
// for a blob whose final hash key is not yet known.
// NOTE(review): the error from rand.Reader.Read is ignored — on failure the
// key bytes may be partially zero, risking colliding temp names; confirm.
func (vfs fileBlobs) TmpKeyname(size int) string {
key := make([]byte, size)
rand.Reader.Read(key)
return filepath.Join(vfs.dir, fmt.Sprintf("%s.new", Key(key).String()))
}
|
package main
/*
* Simple GO program to solve sudokus.
*
* TODO Try to use clz and popcount, link against C.
*/
import (
	"fmt"
	"math/bits"
	"os"
)
type mask uint16 // Enough to hold 9 bits.
// cell is one sudoku square: pointers to its three constraint groups
// (row, column, box), a queue link for unsolved cells, and its value (0-8).
type cell struct {
groups [3]*mask
next *cell
value uint
}
const (
// DEFAULT_MASK has all nine candidate bits set.
DEFAULT_MASK mask = (1 << 9) - 1
)
var (
// Group resolvers: map a linear cell index (0..80) to the index of the
// constraint group the cell belongs to in each of the three dimensions.
resolvers = [3]func(int) int{
// Row.
func(i int) int { return i / 9 },
// Column.
func(i int) int { return i % 9 },
// Box. Note (i/3)-(i/9)*3 equals (i%9)/3, i.e. the column third.
func(i int) int {
return (i/27)*3 + // Row
(i / 3) - (i/9)*3 // Column
},
}
// constraints[g][k] is the candidate bitmask still allowed for group k of
// dimension g (rows, columns, boxes).
constraints = [3][9]mask{}
cells = [9 * 9]cell{}
)
// Link the cell to its appropriate row, column and box constraints.
func (c *cell) initConstraints(index int) {
for group, resolver := range resolvers {
c.groups[group] = &constraints[group][resolver(index)]
}
}
// Returns a mask representing valid candidates for a given cell: the
// intersection of what its row, column and box still allow.
func (c *cell) possibilities() mask {
return *c.groups[0] & *c.groups[1] & *c.groups[2]
}
// Remove candidate bit from all constraint groups referenced by c.
func (c *cell) unset(candidate uint) {
for _, group := range c.groups {
*group &= ^(1 << candidate)
}
}
// Reenter candidate for cell (undo of unset, used while backtracking).
func (c *cell) set(candidate uint) {
for _, group := range c.groups {
*group |= 1 << candidate
}
}
// popCount returns the number of set bits in m, i.e. how many candidates
// remain. Delegates to the stdlib's hardware-accelerated popcount —
// resolving the TODO at the top of the file. Safe because mask values only
// ever carry bits 0-8 (set/unset shift by candidate < 9).
func (m mask) popCount() int {
	return bits.OnesCount16(uint16(m))
}
// exit prints a formatted message to stderr and terminates with status 1.
// Fix: args was passed as a single []interface{} value (printing as a
// bracketed slice); it must be spread with args... so the format verbs bind
// to the individual arguments.
func exit(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, format+"\n", args...)
	os.Exit(1)
}
// printSolution writes the 9x9 grid to stdout, one row per line. Values are
// stored 0-8 internally, hence +'1' to print the digits 1-9.
func printSolution() {
for i, cell := range cells {
if i%9 == 0 && i != 0 {
fmt.Println()
}
fmt.Printf("%c ", cell.value+'1')
}
}
// Main solving step. Recurses on each cell in the queue. queue is a pointer to
// the head element in a linked list of cells which don't have a set value.
// Returns true once the queue is empty (puzzle solved); on failure the
// queue is restored so the caller can try another candidate.
func solve(queue **cell) bool {
if *queue == nil {
return true
}
bestp := queue
best := 10
// Find the best candidate to recurse on: fewest remaining possibilities,
// with an immediate stop on a forced (single-candidate) cell.
for pptr := queue; *pptr != nil; pptr = &(*pptr).next {
pop := (*pptr).possibilities().popCount()
if pop == 1 {
bestp = pptr
break // Obvious elimination.
} else if pop < best {
bestp = pptr
best = pop
}
}
cell := *bestp
*bestp = cell.next // Unlink from queue.
candidates := cell.possibilities()
// Try each remaining candidate; unset claims it in all three groups,
// set restores it when backtracking.
for value := uint(0); value != 9; value++ {
if candidates&(1<<value) != 0 {
cell.unset(value)
if solve(queue) {
// It's OK to set value and not clean up the queue here
// since we only reach this point if the puzzle was solved.
cell.value = value
return true
}
cell.set(value)
}
}
cell.next = *queue // Link back to head of queue.
*queue = cell
return false
}
// main parses the puzzle from os.Args[1] (81 characters: digits for givens,
// '.' for blanks), initializes constraints, solves, and prints the result.
func main() {
var head *cell = nil
// Set default bits in constraints (all nine candidates allowed everywhere).
for i, group := range constraints {
for j := range group {
constraints[i][j] = DEFAULT_MASK
}
}
// Parse input and fill out cells.
if len(os.Args) < 2 {
exit("Usage: %s input", os.Args[0])
}
input := os.Args[1]
if len(input) != 9*9 {
exit("Puzzle is not 9 times 9")
}
for idx, char := range input {
cell := &cells[idx]
cell.initConstraints(idx)
switch char {
case '1', '2', '3', '4', '5', '6', '7', '8', '9':
// A given: record it and claim the candidate in its groups.
value := uint(char) - '1'
cell.value = value
cell.unset(value)
continue
case '.':
// A blank: push onto the unsolved queue.
cell.next = head
head = cell
continue
}
exit("Invalid charater in input: %c", char)
}
if !solve(&head) {
exit("Impossible puzzle")
}
fmt.Println("Solution found!")
printSolution()
}
|
package storage
import (
// Standard Library Imports
"context"
"fmt"
// External Imports
"github.com/ory/fosite"
)
// User provides the specific types for storing, editing, deleting and
// retrieving a User record in mongo.
type User struct {
//// User Meta
// ID is the uniquely assigned uuid that references the user
ID string `bson:"id" json:"id" xml:"id"`
// CreateTime is when the resource was created in seconds from the epoch.
CreateTime int64 `bson:"createTime" json:"createTime" xml:"createTime"`
// UpdateTime is the last time the resource was modified in seconds from
// the epoch.
UpdateTime int64 `bson:"updateTime" json:"updateTime" xml:"updateTime"`
// AllowedTenantAccess contains the Tenant IDs that the user has been given
// rights to access.
// This helps in multi-tenanted situations where a user can be given
// explicit cross-tenant access.
AllowedTenantAccess []string `bson:"allowedTenantAccess" json:"allowedTenantAccess,omitempty" xml:"allowedTenantAccess,omitempty"`
// AllowedPersonAccess contains a list of Person IDs that the user is
// allowed access to.
// This helps in multi-tenanted situations where a user can be given
// explicit access to other people accounts, for example, parents to
// children records.
AllowedPersonAccess []string `bson:"allowedPersonAccess" json:"allowedPersonAccess,omitempty" xml:"allowedPersonAccess,omitempty"`
// Scopes contains the permissions that the user is entitled to request.
Scopes []string `bson:"scopes" json:"scopes" xml:"scopes"`
// PersonID is a uniquely assigned id that references a person within the
// system.
// This enables applications where an external person data store is present.
// This helps in multi-tenanted situations where the person is unique, but
// the underlying user accounts can exist per tenant.
PersonID string `bson:"personId" json:"personId" xml:"personId"`
// Disabled specifies whether the user has been disallowed from signing in
Disabled bool `bson:"disabled" json:"disabled" xml:"disabled"`
//// User Content
// Username is used to authenticate a user
Username string `bson:"username" json:"username" xml:"username"`
// Password of the user - will be a hash based on your fosite selected
// hasher.
// If using this model directly in an API, be sure to clear the password
// out when marshaling to json/xml.
Password string `bson:"password,omitempty" json:"password,omitempty" xml:"password,omitempty"`
// FirstName stores the user's First Name
FirstName string `bson:"firstName" json:"firstName" xml:"firstName"`
// LastName stores the user's Last Name
LastName string `bson:"lastName" json:"lastName" xml:"lastName"`
// ProfileURI is a pointer to where their profile picture lives
ProfileURI string `bson:"profileUri" json:"profileUri,omitempty" xml:"profileUri,omitempty"`
}
// FullName concatenates the User's First Name and Last Name, separated by a
// single space, for templating purposes.
func (u User) FullName() (fn string) {
	return u.FirstName + " " + u.LastName
}
// SetPassword takes a cleartext secret, hashes it with the provided fosite
// hasher and stores the hash as the user's password.
func (u *User) SetPassword(cleartext string, hasher fosite.Hasher) (err error) {
h, err := hasher.Hash(context.TODO(), []byte(cleartext))
if err != nil {
return err
}
u.Password = string(h)
return nil
}
// GetHashedSecret returns the Users's Hashed Secret as a byte array
func (u *User) GetHashedSecret() []byte {
return []byte(u.Password)
}
// Authenticate compares a cleartext string against the user's stored hash,
// returning the hasher's comparison error (nil on match).
func (u User) Authenticate(cleartext string, hasher fosite.Hasher) error {
return hasher.Compare(context.TODO(), u.GetHashedSecret(), []byte(cleartext))
}
// EnableTenantAccess grants the user access to one or many tenants, never
// adding a tenant ID that is already present.
func (u *User) EnableTenantAccess(tenantIDs ...string) {
nextID:
	for _, id := range tenantIDs {
		for _, existing := range u.AllowedTenantAccess {
			if existing == id {
				continue nextID // already granted
			}
		}
		u.AllowedTenantAccess = append(u.AllowedTenantAccess, id)
	}
}
// DisableTenantAccess revokes user access to one or many tenants. Each
// listed ID removes at most one matching entry, preserving slice order.
func (u *User) DisableTenantAccess(tenantIDs ...string) {
	for _, id := range tenantIDs {
		for j, existing := range u.AllowedTenantAccess {
			if existing != id {
				continue
			}
			u.AllowedTenantAccess = append(u.AllowedTenantAccess[:j], u.AllowedTenantAccess[j+1:]...)
			break
		}
	}
}
// EnablePeopleAccess grants the user access to the provided people, never
// adding a person ID that is already present.
func (u *User) EnablePeopleAccess(personIDs ...string) {
nextID:
	for _, id := range personIDs {
		for _, existing := range u.AllowedPersonAccess {
			if existing == id {
				continue nextID // already granted
			}
		}
		u.AllowedPersonAccess = append(u.AllowedPersonAccess, id)
	}
}
// DisablePeopleAccess revokes user access to the provided people. Each
// listed ID removes at most one matching entry, preserving slice order.
func (u *User) DisablePeopleAccess(personIDs ...string) {
	for _, id := range personIDs {
		for j, existing := range u.AllowedPersonAccess {
			if existing != id {
				continue
			}
			u.AllowedPersonAccess = append(u.AllowedPersonAccess[:j], u.AllowedPersonAccess[j+1:]...)
			break
		}
	}
}
// EnableScopeAccess grants the user one or many scopes, never adding a
// scope that is already present.
func (u *User) EnableScopeAccess(scopes ...string) {
nextScope:
	for _, scope := range scopes {
		for _, existing := range u.Scopes {
			if existing == scope {
				continue nextScope // already granted
			}
		}
		u.Scopes = append(u.Scopes, scope)
	}
}
// DisableScopeAccess revokes one or many scopes from the user. Each listed
// scope removes at most one matching entry, preserving slice order.
func (u *User) DisableScopeAccess(scopes ...string) {
	for _, scope := range scopes {
		for j, existing := range u.Scopes {
			if existing != scope {
				continue
			}
			u.Scopes = append(u.Scopes[:j], u.Scopes[j+1:]...)
			break
		}
	}
}
// Equal reports field-by-field equality with x. It exists because the slice
// fields make the built-in == operator unavailable on User.
func (u User) Equal(x User) bool {
	return u.ID == x.ID &&
		u.CreateTime == x.CreateTime &&
		u.UpdateTime == x.UpdateTime &&
		stringArrayEquals(u.AllowedTenantAccess, x.AllowedTenantAccess) &&
		stringArrayEquals(u.AllowedPersonAccess, x.AllowedPersonAccess) &&
		stringArrayEquals(u.Scopes, x.Scopes) &&
		u.PersonID == x.PersonID &&
		u.Disabled == x.Disabled &&
		u.Username == x.Username &&
		u.Password == x.Password &&
		u.FirstName == x.FirstName &&
		u.LastName == x.LastName &&
		u.ProfileURI == x.ProfileURI
}
// IsEmpty returns true if the current user holds no data, i.e. equals the
// zero-value User.
func (u User) IsEmpty() bool {
return u.Equal(User{})
}
|
// Copyright (c) 2018 ECAD Labs Inc. MIT License
// Copyright (c) 2020 Blockwatch Data Inc.
// Author: alex@blockwatch.cc
package rpc
import (
"encoding/json"
"fmt"
"tezos_index/chain"
)
// TestChainStatus is a variable structure depending on the Status field.
// Each concrete variant embeds GenericTestChainStatus and reports its
// status string through this single method.
type TestChainStatus interface {
	TestChainStatus() string
}

// GenericTestChainStatus holds the common values among all TestChainStatus variants.
type GenericTestChainStatus struct {
	Status string `json:"status"`
}

// TestChainStatus gets the TestChainStatus's Status field.
func (t *GenericTestChainStatus) TestChainStatus() string {
	return t.Status
}
// NotRunningTestChainStatus is a TestChainStatus variant for Status=not_running.
// It carries no extra fields beyond the status itself.
type NotRunningTestChainStatus struct {
	GenericTestChainStatus
}

// ForkingTestChainStatus is a TestChainStatus variant for Status=forking.
type ForkingTestChainStatus struct {
	GenericTestChainStatus
	Protocol   chain.ProtocolHash `json:"protocol"`
	Expiration string             `json:"expiration"`
}

// RunningTestChainStatus is a TestChainStatus variant for Status=running.
// It additionally identifies the running test chain and its genesis block.
type RunningTestChainStatus struct {
	GenericTestChainStatus
	ChainID    chain.ChainIdHash  `json:"chain_id"`
	Genesis    chain.BlockHash    `json:"genesis"`
	Protocol   chain.ProtocolHash `json:"protocol"`
	Expiration string             `json:"expiration"`
}
// unmarshalTestChainStatus decodes a test-chain status JSON document into
// the concrete TestChainStatus variant selected by its "status" field.
//
// It first decodes only the common fields to learn the status, then decodes
// the full document into the matching variant. An error is returned when the
// JSON is invalid or the status value is unknown.
func unmarshalTestChainStatus(data []byte) (TestChainStatus, error) {
	var tmp GenericTestChainStatus
	if err := json.Unmarshal(data, &tmp); err != nil {
		return nil, err
	}
	var v TestChainStatus
	switch tmp.Status {
	case "not_running":
		v = &NotRunningTestChainStatus{}
	case "forking":
		v = &ForkingTestChainStatus{}
	case "running":
		v = &RunningTestChainStatus{}
	default:
		// Error strings are lowercase by Go convention; %q makes an empty
		// or whitespace-only status visible in the message.
		return nil, fmt.Errorf("unknown TestChainStatus.Status: %q", tmp.Status)
	}
	if err := json.Unmarshal(data, v); err != nil {
		return nil, err
	}
	return v, nil
}
|
package service
import "github.com/myownhatred/botAPI/pkg/repository"
// Picture groups picture-related operations (no methods defined yet).
type Picture interface {
}

// Video groups video-related operations (no methods defined yet).
type Video interface {
}

// Translate groups translation operations (no methods defined yet).
type Translate interface {
}

// GoogleService composes the Picture, Video and Translate capabilities.
type GoogleService struct {
	Picture
	Video
	Translate
}

// NewGoogleService constructs an empty GoogleService.
// NOTE(review): the rep parameter is currently ignored — presumably it
// should be stored on the service for later use; confirm against callers.
func NewGoogleService(rep *repository.GoogleRepository) *GoogleService {
	return &GoogleService{}
}
|
package blockchain
import (
"encoding/hex"
"fmt"
"github.com/dgraph-io/badger"
log "github.com/sirupsen/logrus"
"os"
"runtime"
)
const (
	// dbPath is the BadgerDB directory holding serialized blocks.
	dbPath = "/home/shiun/tmp/blocks"
	// dbFile is stat'ed to verify whether the blockchain database exists
	// (Badger writes a MANIFEST file into its directory).
	dbFile = "/home/shiun/tmp/MANIFEST"
	// genesisData is the signature data for the genesis coinbase transaction.
	genesisData = "First Transaction from Genesis"
)
// BlockChain is a handle to the chain: the hash of the most recent block
// plus the Badger database in which all blocks are persisted.
type BlockChain struct {
	//Blocks []*Block
	LastHash []byte
	Database *badger.DB
}

// BlockChainIterator walks the chain backwards from CurrentHash towards
// the genesis block (see Next).
type BlockChainIterator struct {
	CurrentHash []byte
	Database    *badger.DB
}
// DBexists reports whether the blockchain database has already been
// created, by checking for Badger's MANIFEST file on disk.
func DBexists() bool {
	_, err := os.Stat(dbFile)
	return !os.IsNotExist(err)
}
// ContiuneBlockChain opens the existing blockchain database and returns a
// handle positioned at the last stored block hash ("lh" key). If no
// database exists yet, it tells the user to create one and exits the
// goroutine.
//
// NOTE(review): the misspelled name ("Continue") is kept because external
// callers depend on it. The address parameter is currently unused.
func ContiuneBlockChain(address string) *BlockChain {
	if !DBexists() {
		fmt.Println("No exists blockchain found, create one!")
		runtime.Goexit()
	}

	// Badger stores keys/metadata and values under the same directory here.
	opts := badger.DefaultOptions(dbPath)
	opts.ValueDir = dbPath
	db, err := badger.Open(opts)
	if err != nil {
		log.Panic(err)
	}

	var lastHash []byte
	// Read-only access: View is sufficient (the original opened a
	// read-write Update transaction just to Get a key).
	err = db.View(func(txn *badger.Txn) error {
		item, err := txn.Get([]byte("lh"))
		if err != nil {
			return err
		}
		return item.Value(func(val []byte) error {
			// Copy the value out: it is only valid inside the callback.
			lastHash = append([]byte{}, val...)
			return nil
		})
	})
	if err != nil {
		log.Panic(err)
	}

	return &BlockChain{lastHash, db}
}
// InitBlockChain creates a brand-new blockchain database: it mines the
// genesis block containing a coinbase transaction paying to address, stores
// it, and records its hash under the "lh" (last hash) key. If a database
// already exists the goroutine exits instead.
func InitBlockChain(address string) *BlockChain {
	if DBexists() {
		fmt.Println("Blockchain already exists")
		runtime.Goexit()
	}

	// Badger stores keys/metadata and values under the same directory here.
	opts := badger.DefaultOptions(dbPath)
	opts.ValueDir = dbPath
	db, err := badger.Open(opts)
	if err != nil {
		log.Panic(err)
	}

	var lastHash []byte
	err = db.Update(func(txn *badger.Txn) error {
		cbtx := CoinbaseTx(address, genesisData)
		genesis := Genesis(cbtx)
		fmt.Println("Genesis created")
		if err := txn.Set(genesis.Hash, genesis.Serialize()); err != nil {
			log.Panic(err)
		}
		if err := txn.Set([]byte("lh"), genesis.Hash); err != nil {
			log.Panic(err)
		}
		lastHash = genesis.Hash
		return nil
	})
	if err != nil {
		log.Panic(err)
	}

	return &BlockChain{lastHash, db}
}
// AddBlock mines a new block containing data on top of the current chain
// head and persists it, updating both the stored "lh" pointer and the
// in-memory LastHash.
func (chain *BlockChain) AddBlock(data string) {
	var lastHash []byte
	err := chain.Database.View(func(txn *badger.Txn) error {
		item, err := txn.Get([]byte("lh"))
		if err != nil {
			// Propagate instead of the original log.WithField call, which
			// built a log entry without emitting it and then dereferenced
			// a nil item below.
			return err
		}
		return item.Value(func(val []byte) error {
			lastHash = append([]byte{}, val...)
			return nil
		})
	})
	if err != nil {
		log.Panic(err)
	}

	newBlock := CreateBlock(data, lastHash)

	err = chain.Database.Update(func(txn *badger.Txn) error {
		if err := txn.Set(newBlock.Hash, newBlock.Serialize()); err != nil {
			return err
		}
		if err := txn.Set([]byte("lh"), newBlock.Hash); err != nil {
			return err
		}
		chain.LastHash = newBlock.Hash
		return nil
	})
	if err != nil {
		// The original built a no-op log.WithField entry here, silently
		// dropping database failures.
		log.Panic(err)
	}
}
// Iterator returns an iterator positioned at the chain's newest block.
func (chain *BlockChain) Iterator() *BlockChainIterator {
	return &BlockChainIterator{
		CurrentHash: chain.LastHash,
		Database:    chain.Database,
	}
}
// Next fetches the block stored at CurrentHash and steps the iterator
// backwards to that block's predecessor. Iteration has reached genesis when
// the returned block's PrevHash is empty.
func (iter *BlockChainIterator) Next() *Block {
	var block *Block
	err := iter.Database.View(func(txn *badger.Txn) error {
		item, err := txn.Get(iter.CurrentHash)
		if err != nil {
			// Propagate instead of the original no-op log.WithField call;
			// continuing with a nil item would panic below.
			return err
		}
		return item.Value(func(val []byte) error {
			// Copy the value out of the transaction before decoding.
			encodedBlock := append([]byte{}, val...)
			block = DeSerialize(encodedBlock)
			return nil
		})
	})
	if err != nil {
		log.Panic(err)
	}
	iter.CurrentHash = block.PrevHash
	return block
}
// FindUnspentTransactions walks the whole chain (newest block to genesis)
// and returns every transaction that still has at least one output which
// address can unlock and which no later input has spent.
func (chain *BlockChain) FindUnspentTransactions(address string) []Transaction {
	var unspentTxs []Transaction
	// Maps a spending tx ID (hex) to the output indexes it has consumed.
	spentTXOs := make(map[string][]int)

	iter := chain.Iterator()
	for {
		block := iter.Next()
		for _, tx := range block.Transactions {
			txID := hex.EncodeToString(tx.ID)

			// Labelled so an inner match can skip to the next output
			// instead of breaking the whole scan.
		Outputs:
			for outIdx, out := range tx.Outputes {
				// Ranging over a missing map entry (nil slice) is a no-op,
				// so no explicit nil check is needed.
				for _, spentOut := range spentTXOs[txID] {
					if spentOut == outIdx {
						continue Outputs
					}
				}
				if out.CanBeUnlocked(address) {
					unspentTxs = append(unspentTxs, *tx)
				}
			}

			// Record which outputs this tx's inputs reference, so earlier
			// (older) outputs are recognized as spent. Coinbase txs have no
			// real inputs to record.
			if !tx.IsCoinbase() {
				for _, in := range tx.Inputs {
					if in.CanUnlock(address) {
						inTxID := hex.EncodeToString(in.ID)
						spentTXOs[inTxID] = append(spentTXOs[inTxID], in.Out)
					}
				}
			}
		}
		// The genesis block has an empty PrevHash.
		if len(block.PrevHash) == 0 {
			break
		}
	}
	return unspentTxs
}
//for finding the unspend tx output
func (chain * BlockChain) FindUTXO(address string) []TxOutput {
var UTXOs []TxOutput
unspentTransactions := chain.FindUnspentTransactions(address)
for _, tx := range unspentTransactions {
for _, out := range tx.Outputes {
if out.CanBeUnlocked(address) {
UTXOs = append(UTXOs, out)
}
}
}
return UTXOs
} |
package architecture
import "strings"
// Architecture is a directory tree rooted at Root.
type Architecture struct {
	Root *Directory
}

// NewArchitecture returns an Architecture with a fresh, empty root directory.
func NewArchitecture() *Architecture {
	return &Architecture{
		Root: NewDirectory(),
	}
}
// FindDirectory walks path (e.g. "a/b/c") from the root and returns the
// final directory, creating any missing intermediate directories along the
// way (mkdir -p semantics, despite the "Find" name).
//
// Empty path sections — produced by leading, trailing or doubled slashes —
// are skipped; the original code created directories literally named "".
func (arch *Architecture) FindDirectory(path string) *Directory {
	currentNode := arch.Root
	for _, section := range strings.Split(path, "/") {
		if section == "" {
			continue
		}
		next, found := currentNode.Directories[section]
		if !found {
			next = NewDirectory()
			currentNode.Directories[section] = next
		}
		currentNode = next
	}
	return currentNode
}
|
package base
import (
"gengine/context"
"gengine/core/errors"
"reflect"
)
// IfStmt is the AST node for an if / else-if / else construct in a rule:
// Expression is the main condition, StatementList its body, and
// ElseIfStmtList / ElseStmt the remaining branches. The two unexported
// contexts are injected via Initialize.
type IfStmt struct {
	Expression       *Expression
	StatementList    *Statements
	ElseIfStmtList   []*ElseIfStmt
	ElseStmt         *ElseStmt
	knowledgeContext *KnowledgeContext
	dataCtx          *context.DataContext
}
// Evaluate executes the if statement against Vars: the main condition is
// evaluated first, then each else-if condition in order, and finally the
// else branch; the first branch whose condition holds supplies the result.
func (i *IfStmt) Evaluate(Vars map[string]interface{}) (interface{}, error) {
	cond, err := i.Expression.Evaluate(Vars)
	if err != nil {
		return nil, err
	}

	if reflect.ValueOf(cond).Bool() {
		if i.StatementList == nil {
			return nil, nil
		}
		return i.StatementList.Evaluate(Vars)
	}

	// Ranging over a nil slice is a no-op, so no explicit nil check is needed.
	for _, elseIfStmt := range i.ElseIfStmtList {
		v, err := elseIfStmt.Expression.Evaluate(Vars)
		if err != nil {
			return nil, err
		}
		if reflect.ValueOf(v).Bool() {
			sv, err := elseIfStmt.StatementList.Evaluate(Vars)
			if err != nil {
				return nil, err
			}
			return sv, nil
		}
	}

	if i.ElseStmt != nil {
		return i.ElseStmt.Evaluate(Vars)
	}
	return nil, nil
}
// Initialize injects the knowledge context and data context into this node
// and, recursively, into every child branch.
func (i *IfStmt) Initialize(kc *KnowledgeContext, dc *context.DataContext) {
	i.knowledgeContext = kc
	i.dataCtx = dc

	if i.Expression != nil {
		i.Expression.Initialize(kc, dc)
	}
	if i.StatementList != nil {
		i.StatementList.Initialize(kc, dc)
	}
	// Ranging over a nil slice is a no-op, so no explicit nil check is needed.
	for _, elseIfStmt := range i.ElseIfStmtList {
		elseIfStmt.Initialize(kc, dc)
	}
	if i.ElseStmt != nil {
		i.ElseStmt.Initialize(kc, dc)
	}
}
// AcceptExpression stores the condition expression; it may only be set once.
func (i *IfStmt) AcceptExpression(expr *Expression) error {
	if i.Expression != nil {
		return errors.New("IfStmt Expression set twice!")
	}
	i.Expression = expr
	return nil
}
func (i *IfStmt)AcceptStatements(stmts *Statements)error{
if i.StatementList == nil {
i.StatementList = stmts
return nil
}
return errors.New("ifStmt's statements set twice!")
} |
package Problem0172
// trailingZeroes returns the number of trailing zeroes in n! (factorial).
// Each trailing zero comes from a factor 10 = 2*5, and fives are the scarce
// factor, so we count multiples of 5, 25, 125, ... that divide into n.
func trailingZeroes(n int) int {
	count := 0
	for p := n / 5; p > 0; p /= 5 {
		count += p
	}
	return count
}
|
package rtrserver
import (
"bytes"
"net"
"time"
"github.com/cpusoft/goutil/belogs"
"github.com/cpusoft/goutil/convert"
"github.com/cpusoft/goutil/jsonutil"
)
// RtrTcpServerProcessFunc implements the RTR TCP server callbacks
// (connect / receive-and-send / close / active send). It is stateless.
type RtrTcpServerProcessFunc struct {
}

// OnConnect is invoked when a client connects; no per-connection setup is needed.
func (rs *RtrTcpServerProcessFunc) OnConnect(conn *net.TCPConn) {
}
// OnReceiveAndSend parses one raw RTR PDU from receiveData, processes it,
// and writes the resulting response PDUs back to the client on conn.
// On a parse or processing failure an RTR error response is sent first.
//
// NOTE(review): after SendErrorResponse succeeds, err is nil and this
// function returns nil, masking the original parse/process failure from the
// caller — confirm that is intended. Also, the " time(s):" log label prints
// a time.Duration, not a plain seconds count.
func (rs *RtrTcpServerProcessFunc) OnReceiveAndSend(conn *net.TCPConn, receiveData []byte) (err error) {
	start := time.Now()
	buf := bytes.NewReader(receiveData)

	// parse []byte --> rtrpdumodel
	rtrPduModel, err := ParseToRtrPduModel(buf)
	if err != nil {
		belogs.Error("OnReceiveAndSend():server, ParseToRtrPduModel fail: ", convert.PrintBytes(receiveData, 8), err)
		err = SendErrorResponse(conn, err)
		if err != nil {
			belogs.Error("OnReceiveAndSend():server, SendErrorResponse fail: ", err)
		}
		return err
	}
	belogs.Info("OnReceiveAndSend():server get rtrPduModel:", jsonutil.MarshalJson(rtrPduModel),
		" remoteAddr:", conn.RemoteAddr(), " time(s):", time.Since(start))

	// process rtrpdumodel --> response rtrpdumodels
	rtrPduModelResponses, err := ProcessRtrPduModel(buf, rtrPduModel)
	if err != nil {
		belogs.Error("OnReceiveAndSend():server, processRtrPduModel fail: ", jsonutil.MarshalJson(rtrPduModel), err)
		err = SendErrorResponse(conn, err)
		if err != nil {
			belogs.Error("OnReceiveAndSend():server, SendErrorResponse fail: ", err)
		}
		return err
	}
	belogs.Info("OnReceiveAndSend():server process rtrPduModel:", jsonutil.MarshalJson(rtrPduModel),
		" and assemable responses, len(responses) is ", len(rtrPduModelResponses), " time(s):", time.Since(start))

	// send response rtrpdumodels
	if len(rtrPduModelResponses) > 0 {
		err = SendResponses(conn, rtrPduModelResponses)
		if err != nil {
			belogs.Error("OnReceiveAndSend():server, sendResponses fail: ", err)
			// send internal error
			return err
		}
	}
	belogs.Info("OnReceiveAndSend(): server send responses ok, len(responses) is ", len(rtrPduModelResponses),
		" remoteAddr:", conn.RemoteAddr(),
		" time(s):", time.Since(start))
	return nil
}
// OnClose is invoked when a client connection closes; nothing to clean up.
func (rs *RtrTcpServerProcessFunc) OnClose(conn *net.TCPConn) {
}
// ActiveSend pushes sendData to the client outside the normal
// request/response cycle (server-initiated traffic).
func (rs *RtrTcpServerProcessFunc) ActiveSend(conn *net.TCPConn, sendData []byte) (err error) {
	belogs.Debug("ActiveSend():len(sendData):", len(sendData))
	start := time.Now()

	// Best effort: a buffer-sizing failure should not abort the send, but
	// the original silently discarded this error — at least log it.
	if err := conn.SetWriteBuffer(len(sendData)); err != nil {
		belogs.Debug("ActiveSend():server, conn.SetWriteBuffer() fail, ", err)
	}

	n, err := conn.Write(sendData)
	if err != nil {
		belogs.Debug("ActiveSend():server, conn.Write() fail, ", convert.Bytes2String(sendData), err)
		return err
	}
	belogs.Info("ActiveSend(): conn.Write() ok, len(sendData), n:", len(sendData), n,
		" remoteAddr:", conn.RemoteAddr(),
		" time(s):", time.Since(start))
	return nil
}
|
package dbops
import (
"log"
"database/sql"
_ "github.com/go-driver/mysql"
)
// openConn opens a MySQL handle for the video_server database.
//
// NOTE(review): sql.Open only validates its arguments — it does not dial;
// a Ping would be needed to verify connectivity. Also, the other functions
// in this file use a package-level dbConn that this return value is
// presumably assigned to elsewhere — confirm initialization order.
func openConn() *sql.DB {
	dbConn, err := sql.Open("mysql", "root:123@tcp(localhost:3306)/video_server?charset=utf8")
	if err != nil {
		panic(err.Error())
	}
	return dbConn
}
// AddUerCredential inserts a (login_name, pwd) row into the users table.
// NOTE(review): the misspelled name ("AddUserCredential") is kept because
// callers depend on it.
func AddUerCredential(loginName string, pwd string) error {
	stmtIns, err := dbConn.Prepare("INSERT INTO users (login_name, pwd) VALUES (?, ?)")
	if err != nil {
		return err
	}
	// Defer the Close before using the statement so it is released even
	// when Exec fails (the original deferred it after Exec).
	defer stmtIns.Close()
	if _, err := stmtIns.Exec(loginName, pwd); err != nil {
		// The original discarded this error, silently losing failed inserts.
		return err
	}
	return nil
}
// GetUserCredential returns the stored password for loginName. It returns
// "" and the underlying error when the query or scan fails — including
// sql.ErrNoRows when no such user exists (the original swallowed the Scan
// error, making a missing user indistinguishable from an empty password).
func GetUserCredential(loginName string) (string, error) {
	stmtOut, err := dbConn.Prepare("SELECT pwd FROM users WHERE login_name = ?")
	if err != nil {
		log.Printf("%s", err)
		return "", err
	}
	defer stmtOut.Close()

	var pwd string
	if err := stmtOut.QueryRow(loginName).Scan(&pwd); err != nil {
		log.Printf("%s", err)
		return "", err
	}
	return pwd, nil
}
// DeleteUser removes the users row matching both loginName and pwd.
//
// Fixed: the original compared err against the undefined identifier "nill"
// (a compile error), misspelled the column as "lgoin_name", and ignored the
// Exec error.
func DeleteUser(loginName string, pwd string) error {
	stmtDel, err := dbConn.Prepare("DELETE FROM users WHERE login_name = ? AND pwd = ?")
	if err != nil {
		log.Printf("DeleteUser%s", err)
		return err
	}
	defer stmtDel.Close()
	if _, err := stmtDel.Exec(loginName, pwd); err != nil {
		log.Printf("DeleteUser%s", err)
		return err
	}
	return nil
}
|
package pgsql
import (
"testing"
)
// TestNumRange exercises the numrange valuer/scanner pairs for every
// supported Go numeric type, plus raw string/[]byte passthrough. Each entry
// round-trips the type's extreme values through the "numrange" column.
func TestNumRange(t *testing.T) {
	testlist2{{
		valuer:  NumRangeFromIntArray2,
		scanner: NumRangeToIntArray2,
		data: []testdata{
			{
				input:  [2]int{-9223372036854775808, 9223372036854775807},
				output: [2]int{-9223372036854775808, 9223372036854775807}},
		},
	}, {
		valuer:  NumRangeFromInt8Array2,
		scanner: NumRangeToInt8Array2,
		data: []testdata{
			{input: [2]int8{-128, 127}, output: [2]int8{-128, 127}},
		},
	}, {
		valuer:  NumRangeFromInt16Array2,
		scanner: NumRangeToInt16Array2,
		data: []testdata{
			{
				input:  [2]int16{-32768, 32767},
				output: [2]int16{-32768, 32767}},
		},
	}, {
		valuer:  NumRangeFromInt32Array2,
		scanner: NumRangeToInt32Array2,
		data: []testdata{
			{
				input:  [2]int32{-2147483648, 2147483647},
				output: [2]int32{-2147483648, 2147483647}},
		},
	}, {
		valuer:  NumRangeFromInt64Array2,
		scanner: NumRangeToInt64Array2,
		data: []testdata{
			{
				input:  [2]int64{-9223372036854775808, 9223372036854775807},
				output: [2]int64{-9223372036854775808, 9223372036854775807}},
		},
	}, {
		valuer:  NumRangeFromUintArray2,
		scanner: NumRangeToUintArray2,
		data: []testdata{
			{
				input:  [2]uint{0, 9223372036854775807},
				output: [2]uint{0, 9223372036854775807}},
		},
	}, {
		valuer:  NumRangeFromUint8Array2,
		scanner: NumRangeToUint8Array2,
		data: []testdata{
			{
				input:  [2]uint8{0, 255},
				output: [2]uint8{0, 255}},
		},
	}, {
		valuer:  NumRangeFromUint16Array2,
		scanner: NumRangeToUint16Array2,
		data: []testdata{
			{
				input:  [2]uint16{0, 65535},
				output: [2]uint16{0, 65535}},
		},
	}, {
		valuer:  NumRangeFromUint32Array2,
		scanner: NumRangeToUint32Array2,
		data: []testdata{
			{
				input:  [2]uint32{0, 4294967295},
				output: [2]uint32{0, 4294967295}},
		},
	}, {
		valuer:  NumRangeFromUint64Array2,
		scanner: NumRangeToUint64Array2,
		data: []testdata{
			{
				input:  [2]uint64{0, 9223372036854775807},
				output: [2]uint64{0, 9223372036854775807}},
		},
	}, {
		valuer:  NumRangeFromFloat32Array2,
		scanner: NumRangeToFloat32Array2,
		data: []testdata{
			{
				input:  [2]float32{-9223372036854775808.0, 922337203685477580.0},
				output: [2]float32{-9223372036854775808.0, 922337203685477580.0}},
		},
	}, {
		valuer:  NumRangeFromFloat64Array2,
		scanner: NumRangeToFloat64Array2,
		data: []testdata{
			{
				input:  [2]float64{-9223372036854775808.0, 922337203685477580.0},
				output: [2]float64{-9223372036854775808.0, 922337203685477580.0}},
		},
	}, {
		// No valuer/scanner: strings pass through untouched.
		data: []testdata{
			{
				input:  string("[-9223372036854775808,9223372036854775807)"),
				output: string(`[-9223372036854775808,9223372036854775807)`)},
		},
	}, {
		// No valuer/scanner: byte slices pass through untouched.
		data: []testdata{
			{
				input:  []byte("[-9223372036854775808,9223372036854775807)"),
				output: []byte(`[-9223372036854775808,9223372036854775807)`)},
		},
	}}.execute(t, "numrange")
}
|
package runner
import (
"context"
"runtime"
"strings"
"testing"
"encoding/json"
"github.com/pkg/errors"
)
const (
	// Fixture scripts used by the tests below; the .ps1 variant is used on
	// Windows, the .sh variant elsewhere (see TestRun).
	CombinedShScript         = "./fixture/combined.sh"
	CombinedPowershellScript = "./fixture/combined.ps1"
)
// TestConfigLoadConfig verifies that the checks fixture file loads cleanly.
func TestConfigLoadConfig(t *testing.T) {
	runner := &Runner{}
	if err := runner.LoadFromFile("./fixture/checks.json"); err != nil {
		t.Fatal(err)
	}
}
// TestNewRunner verifies role normalization: "agent_public" maps to "agent".
func TestNewRunner(t *testing.T) {
	cases := []struct {
		input string
		want  string
	}{
		{"master", "master"},
		{"agent", "agent"},
		{"agent_public", "agent"},
	}
	for _, tc := range cases {
		if r := NewRunner(tc.input); r.role != tc.want {
			t.Fatalf("expecting role %s. Got %s", tc.want, r.role)
		}
	}
}
// TestRun runs one cluster check plus prestart/poststart node checks against
// the combined fixture script and asserts each produced the script's
// stdout+stderr output. The placeholder CombinedScriptName is substituted
// with the OS-appropriate fixture (line endings differ on Windows).
func TestRun(t *testing.T) {
	r := NewRunner("master")
	cfg := `
{
  "cluster_checks": {
    "test_check": {
      "cmd": ["CombinedScriptName"],
      "timeout": "1s"
    }
  },
  "node_checks": {
    "checks": {
      "check1": {
        "cmd": ["CombinedScriptName"],
        "timeout": "1s"
      },
      "check2": {
        "cmd": ["CombinedScriptName"],
        "timeout": "1s"
      }
    },
    "prestart": ["check1"],
    "poststart": ["check2"]
  }
}`
	var expectedOutput string
	if runtime.GOOS == "windows" {
		cfg = strings.Replace(cfg, "CombinedScriptName", CombinedPowershellScript, -1)
		expectedOutput = "STDOUT\r\nSTDERR\r\n"
	} else {
		cfg = strings.Replace(cfg, "CombinedScriptName", CombinedShScript, -1)
		expectedOutput = "STDOUT\nSTDERR\n"
	}
	err := r.Load(strings.NewReader(cfg))
	if err != nil {
		t.Fatal(err)
	}
	out, err := r.Cluster(context.TODO(), false)
	if err != nil {
		t.Fatal(err)
	}
	if err := validateCheck(out, "test_check", expectedOutput); err != nil {
		t.Fatal(err)
	}
	prestart, err := r.PreStart(context.TODO(), false)
	if err != nil {
		t.Fatal(err)
	}
	if err := validateCheck(prestart, "check1", expectedOutput); err != nil {
		t.Fatal(err)
	}
	poststart, err := r.PostStart(context.TODO(), false)
	if err != nil {
		t.Fatal(err)
	}
	if err := validateCheck(poststart, "check2", expectedOutput); err != nil {
		t.Fatal(err)
	}
}
// validateCheck asserts that cr succeeded overall, contains the named
// check, and that the check produced exactly the expected output.
func validateCheck(cr *CombinedResponse, name, output string) error {
	if status := cr.Status(); status != 0 {
		return errors.Errorf("expect exit code 0. Got %d", status)
	}
	check, found := cr.checks[name]
	if !found {
		return errors.Errorf("expect check %s", name)
	}
	if got := check.output; got != output {
		return errors.Errorf("expect %s. Got %s", output, got)
	}
	return nil
}
// TestList lists (without running) the configured checks for a master
// runner and asserts the listings match the config, including that an
// agent-only check is filtered out.
func TestList(t *testing.T) {
	r := NewRunner("master")
	cfg := `
{
  "cluster_checks": {
    "cluster_check_1": {
      "description": "Cluster check 1",
      "cmd": ["echo", "cluster_check_1"],
      "timeout": "1s"
    }
  },
  "node_checks": {
    "checks": {
      "node_check_1": {
        "description": "Node check 1",
        "cmd": ["echo", "node_check_1"],
        "timeout": "1s"
      },
      "node_check_2": {
        "description": "Node check 2",
        "cmd": ["echo", "node_check_2"],
        "timeout": "1s",
        "roles": ["master"]
      },
      "node_check_3": {
        "description": "Node check 3",
        "cmd": ["echo", "node_check_3"],
        "timeout": "1s",
        "roles": ["agent"]
      }
    },
    "prestart": ["node_check_1"],
    "poststart": ["node_check_2", "node_check_3"]
  }
}`
	// Fixed: the Load error was ignored here, unlike every other call site —
	// a config parse failure would surface as confusing downstream errors.
	if err := r.Load(strings.NewReader(cfg)); err != nil {
		t.Fatal(err)
	}
	out, err := r.Cluster(context.TODO(), true)
	if err != nil {
		t.Fatal(err)
	}
	if err := validateCheckListing(out, "cluster_check_1", "Cluster check 1", "1s", []string{"echo", "cluster_check_1"}); err != nil {
		t.Fatal(err)
	}
	out, err = r.PreStart(context.TODO(), true)
	if err != nil {
		t.Fatal(err)
	}
	if err := validateCheckListing(out, "node_check_1", "Node check 1", "1s", []string{"echo", "node_check_1"}); err != nil {
		t.Fatal(err)
	}
	out, err = r.PostStart(context.TODO(), true)
	if err != nil {
		t.Fatal(err)
	}
	if err := validateCheckListing(out, "node_check_2", "Node check 2", "1s", []string{"echo", "node_check_2"}); err != nil {
		t.Fatal(err)
	}
	// This runner is for a master, so a check that only runs on agents should not be listed.
	unexpectedCheckName := "node_check_3"
	if _, ok := out.checks[unexpectedCheckName]; ok {
		t.Fatalf("found unexpected check %s", unexpectedCheckName)
	}
}
// validateCheckListing asserts that the listed check matches the expected
// description, timeout, and command line.
func validateCheckListing(cr *CombinedResponse, name, description, timeout string, cmd []string) error {
	check, ok := cr.checks[name]
	if !ok {
		return errors.Errorf("expect check %s", name)
	}
	if check.description != description {
		return errors.Errorf("expect description %s. Got %s", description, check.description)
	}
	if check.timeout != timeout {
		return errors.Errorf("expect timeout %s. Got %s", timeout, check.timeout)
	}
	// Fixed: the original indexed cmd[i] while ranging over check.cmd, which
	// panics when check.cmd is longer than cmd and silently passes when it
	// is shorter; compare lengths explicitly first.
	if len(check.cmd) != len(cmd) {
		return errors.Errorf("expect cmd %s. Got %s", cmd, check.cmd)
	}
	for i := range check.cmd {
		if check.cmd[i] != cmd[i] {
			return errors.Errorf("expect cmd %s. Got %s", cmd, check.cmd)
		}
	}
	return nil
}
// TestTimeout runs a fast check plus an infinite-loop check with a 500ms
// timeout, then asserts (via the JSON-marshalled response) that the slow
// check was killed, reported statusUnknown, and carries the timeout message.
// Skipped on Windows, where the shell fixtures cannot run.
func TestTimeout(t *testing.T) {
	r := NewRunner("master")
	cfg := `
{
  "node_checks": {
    "checks": {
      "check1": {
        "cmd": ["./fixture/combined.sh"],
        "timeout": "1s"
      },
      "check2": {
        "cmd": ["./fixture/inf2.sh"],
        "timeout": "500ms"
      }
    },
    "poststart": ["check1", "check2"]
  }
}`
	if runtime.GOOS == "windows" {
		t.Skip("TestTimeout was skipped on Windows")
	}
	err := r.Load(strings.NewReader(cfg))
	if err != nil {
		t.Fatal(err)
	}
	out, err := r.PostStart(context.TODO(), false)
	if err != nil {
		t.Fatal(err)
	}
	// marshal the check output
	mOut, err := json.Marshal(out)
	if err != nil {
		t.Fatal(err)
	}
	// Decode back through the public JSON shape to assert on what callers see.
	type expectedOutput struct {
		Status int `json:"status"`
		Checks map[string]struct {
			Output string `json:"output"`
			Status int    `json:"status"`
		} `json:"checks"`
	}
	var resp expectedOutput
	if err := json.Unmarshal(mOut, &resp); err != nil {
		t.Fatal(err)
	}
	expectedErrMsg := "command [./fixture/inf2.sh] exceeded timeout 500ms and was killed"
	check2, ok := resp.Checks["check2"]
	if !ok {
		t.Fatal("check2 not found in response")
	}
	if check2.Status != statusUnknown {
		t.Fatalf("expect check2 status %d. Got %d", statusUnknown, check2.Status)
	}
	if check2.Output != expectedErrMsg {
		t.Fatalf("expect output %s. Got %s", expectedErrMsg, check2.Output)
	}
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-05-12 11:57
# @File : pdf.go
# @Description :
# @Attention :
*/
package utils
import (
"errors"
"github.com/SebastiaanKlippert/go-wkhtmltopdf"
"io"
"os"
"path/filepath"
)
// PageOperationDecorator customizes wkhtmltopdf page options.
type PageOperationDecorator interface {
	Decorate(options *wkhtmltopdf.PageOptions) *wkhtmltopdf.PageOptions
}

// Html2PdfStreamSourceReq holds the settings shared by all conversion requests.
type Html2PdfStreamSourceReq struct {
	// PdfStoreBasePath is the output directory; PdfNewName the output file
	// name without the ".pdf" extension.
	PdfStoreBasePath string
	PdfNewName       string
	// Grayscale vs. color mode; true means grayscale (black and white).
	GrayMode bool
	// PageDecorator adjusts per-page options; PDFGeneratorDecorator adjusts
	// global generator options. Both are invoked unconditionally by the
	// conversion functions below, so they must be non-nil.
	PageDecorator         func(wkhtmltopdf.PageOptions) wkhtmltopdf.PageOptions
	PDFGeneratorDecorator func(generator *wkhtmltopdf.PDFGenerator)
}

// Html2PdfPureSourceReq converts from an in-memory HTML string.
type Html2PdfPureSourceReq struct {
	Html2PdfStreamSourceReq
	HtmlSource string
}

// Html2PdfPureReaderReq converts from an io.Reader of HTML.
type Html2PdfPureReaderReq struct {
	Html2PdfStreamSourceReq
	Reader io.Reader
}

// Html2PdfResp reports where the generated PDF was stored.
type Html2PdfResp struct {
	PdfStorePath string
}
// NewConfigableHtml2PdfReq builds a request holding only the HTML source,
// leaving all decorators for the caller to configure.
// NOTE(review): PageDecorator / PDFGeneratorDecorator are nil here, and the
// conversion functions call them unconditionally — confirm callers set them.
func NewConfigableHtml2PdfReq(source string) Html2PdfPureSourceReq {
	r := Html2PdfPureSourceReq{}
	r.HtmlSource = source
	return r
}

// NewLocalHtml2PdfPureSourceReq builds a fully-configured request (default
// page and generator options) for converting a local HTML source.
func NewLocalHtml2PdfPureSourceReq(source string, pdfStorePath, newName string) Html2PdfPureSourceReq {
	return newPureSourceReq(source, pdfStorePath, newName)
}
// newPureSourceReq builds a request with the default page options (page
// number in the footer, 10pt footer font, 0.95 zoom) and generator options
// (300 DPI, landscape, grayscale per r.GrayMode).
//
// NOTE(review): the generator closure captures the local r, not the copy
// returned to the caller — setting GrayMode on the returned value will not
// affect the already-captured closure; confirm this is intended.
func newPureSourceReq(source string, pdfStorePath, newName string) Html2PdfPureSourceReq {
	r := Html2PdfPureSourceReq{}
	r.HtmlSource = source
	r.PageDecorator = func(options wkhtmltopdf.PageOptions) wkhtmltopdf.PageOptions {
		options.FooterRight.Set("[PAGE]")
		options.FooterFontSize.Set(10)
		options.Zoom.Set(0.95)
		return options
	}
	r.PDFGeneratorDecorator = func(pdfg *wkhtmltopdf.PDFGenerator) {
		// Set global options
		pdfg.Dpi.Set(300)
		pdfg.Orientation.Set(wkhtmltopdf.OrientationLandscape)
		pdfg.Grayscale.Set(r.GrayMode)
	}
	r.PdfNewName = newName
	r.PdfStoreBasePath = pdfStorePath
	return r
}
// NewRemotetHtml2PdfPureSourceReq builds a fully-configured request for a
// remote HTML source; currently identical to the local variant.
// NOTE(review): the name contains a typo ("Remotet") kept for compatibility.
func NewRemotetHtml2PdfPureSourceReq(source string, pdfStorePath, newName string) Html2PdfPureSourceReq {
	return newPureSourceReq(source, pdfStorePath, newName)
}
// Html2PdfByReader renders HTML read from req.Reader to a PDF stored under
// req.PdfStoreBasePath as req.PdfNewName + ".pdf", creating the output
// directory when missing, and returns the resulting file path.
// NOTE(review): unlike Html2PdfBySource, this variant does not remove a
// pre-existing file before writing — confirm WriteFile overwrites.
func Html2PdfByReader(req Html2PdfPureReaderReq) (Html2PdfResp, error) {
	var (
		result Html2PdfResp
	)
	pdfg, err := wkhtmltopdf.NewPDFGenerator()
	if err != nil {
		return result, err
	}
	// Apply caller-supplied global and per-page options (must be non-nil).
	req.PDFGeneratorDecorator(pdfg)
	page := wkhtmltopdf.NewPageReader(req.Reader)
	page.PageOptions = req.PageDecorator(page.PageOptions)
	pdfg.AddPage(page)
	err = pdfg.Create()
	if err != nil {
		return result, err
	}
	filePath := req.PdfStoreBasePath + string(filepath.Separator)
	if !IsFileOrDirExists(filePath) {
		if err := CreateMultiFileDirs(filePath); nil != err {
			// Runtime error text intentionally left as-is ("failed to create directory").
			return result, errors.New("创建文件夹失败:" + err.Error())
		}
	}
	filePath += req.PdfNewName + ".pdf"
	// Write buffer contents to file on disk
	err = pdfg.WriteFile(filePath)
	if err != nil {
		return result, err
	}
	result.PdfStorePath = filePath
	return result, nil
}
// Html2PdfBySource renders the HTML in req.HtmlSource to a PDF stored under
// req.PdfStoreBasePath as req.PdfNewName + ".pdf", creating the output
// directory when missing and removing any existing file first. It returns
// the resulting file path.
func Html2PdfBySource(req Html2PdfPureSourceReq) (Html2PdfResp, error) {
	var result Html2PdfResp

	pdfg, err := wkhtmltopdf.NewPDFGenerator()
	if err != nil {
		return result, err
	}
	// Apply caller-supplied global and per-page options (must be non-nil).
	req.PDFGeneratorDecorator(pdfg)
	page := wkhtmltopdf.NewPage(req.HtmlSource)
	page.PageOptions = req.PageDecorator(page.PageOptions)
	pdfg.AddPage(page)

	if err = pdfg.Create(); err != nil {
		return result, err
	}

	dir := req.PdfStoreBasePath + string(filepath.Separator)
	if !IsFileOrDirExists(dir) {
		if err := CreateMultiFileDirs(dir); nil != err {
			return result, errors.New("创建文件夹失败:" + err.Error())
		}
	}
	// Fixed: the original appended a second separator here, producing paths
	// like "base//name.pdf" (the sibling Html2PdfByReader already joins
	// with a single separator).
	filePath := dir + req.PdfNewName + ".pdf"
	if IsFileOrDirExists(filePath) {
		os.Remove(filePath)
	}

	// Write buffer contents to file on disk.
	if err = pdfg.WriteFile(filePath); err != nil {
		return result, err
	}
	result.PdfStorePath = filePath
	return result, nil
}
|
//aws_status_vpc.go
package main
import (
"fmt"
)
/*func aws_status_vpc(region string, environment string) {
fmt.Println("AWS vpc Status in region: " + region + " for environment: " + environment)
}*/
// aws_status_vpc prints the state of the given environment in the given AWS
// region, or a notice when the AWS provider has not been initialized.
func aws_status_vpc(region string, environment string) {
	// Idiom: use the boolean directly instead of comparing against "true".
	if check_aws_initialized() {
		fmt.Println(get_environment_state(environment, region))
	} else {
		fmt.Println("Provider AWS Not Initialized")
	}
}
|
package main_test
import (
"fmt"
"strconv"
"strings"
"testing"
"time"
)
// TestReplace demonstrates substituting the $TIMESTAMP$ placeholder with
// the current Unix time and prints the result for inspection.
func TestReplace(t *testing.T) {
	const template = "BrokerAPIDeployment$TIMESTAMP$"
	timestamp := strconv.Itoa(int(time.Now().Unix()))
	fmt.Println(strings.ReplaceAll(template, "$TIMESTAMP$", timestamp))
}
|
package account
import "context"
// Customer is an account holder's identity, credentials and contact details.
type Customer struct {
	ID, Email, Password, Phone string
}

// Repository persists customers.
type Repository interface {
	CreateCustomer(ctx context.Context, customer Customer) error
}
|
package main
import (
"context"
"fmt"
"net/http"
"strings"
_ "net/http/pprof"
"github.com/ephraimkunz/go-trending"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
"google.golang.org/appengine/log"
"google.golang.org/appengine/urlfetch"
)
const (
	// Intent identifiers matched against the fulfillment request's action.
	SummaryIntent        = "summary_intent"
	TrendingReposIntent  = "trending_repos_intent"
	NotificationsIntent  = "notifications_intent"
	AssignedIssuesIntent = "assigned_issues_intent"

	// defaultTrendingRepos is used when the user doesn't say how many.
	defaultTrendingRepos = 5
)
// Create new types so we can make them conform to FulfillmentBuilder.
type GithubNotifications []*github.Notification
type GithubIssues []*github.Issue

// ProfileSummary wraps the authenticated user so a summary response can be built.
type ProfileSummary struct {
	user *github.User
}
// FulfillmentReq is the incoming webhook payload from the assistant platform.
type FulfillmentReq struct {
	OriginalRequest OriginalReq `json:"originalRequest,omitempty"`
	Result          ResultReq   `json:"result,omitempty"`
}

// ResultReq carries the matched intent action and its parameters.
type ResultReq struct {
	Action     string        `json:"action,omitempty"`
	Parameters ParametersReq `json:"parameters,omitempty"`
}

// ParametersReq holds intent slots: a repo count and a language name,
// both delivered as strings.
type ParametersReq struct {
	Number string `json:"number,omitempty"`
	Lang   string `json:"lang,omitempty"`
}

// OriginalReq wraps the platform-specific request data.
type OriginalReq struct {
	Data DataReq `json:"data,omitempty"`
}

// DataReq wraps the requesting user's data.
type DataReq struct {
	User UserReq `json:"user,omitempty"`
}

// FulfillmentResp is the webhook response: SSML speech plus display text.
type FulfillmentResp struct {
	Speech      string `json:"speech,omitempty"`
	DisplayText string `json:"displayText,omitempty"`
}

// UserReq identifies the user and carries their GitHub OAuth access token.
type UserReq struct {
	LastSeen    string `json:"lastSeen,omitempty"`
	AccessToken string `json:"accessToken,omitempty"`
	Locale      string `json:"locale,omitempty"`
	UserId      string `json:"userId,omitempty"`
}
// Trending holds pre-rendered trending-repository output in both display
// and speech form.
type Trending struct {
	text, speech string
}

// FulfillmentBuilder turns fetched data into a webhook response.
type FulfillmentBuilder interface {
	buildFulfillment(ctx context.Context) *FulfillmentResp
}
// minInt returns the smaller of x and y.
func minInt(x, y int) int {
	if y < x {
		return y
	}
	return x
}
// extractLang maps a user-spoken language name to the URL name that the
// trending service expects, returning "" when the language is empty or
// unknown (or the language list cannot be fetched).
func extractLang(client *http.Client, lang string) string {
	// No language requested: skip the network round-trip entirely (the
	// original fetched the full language list before checking this).
	if lang == "" {
		return ""
	}
	trend := trending.NewTrendingWithClient(client)
	langs, err := trend.GetLanguages()
	if err != nil {
		return ""
	}
	for _, trendLang := range langs {
		// EqualFold avoids allocating two lowercased copies per comparison.
		if strings.EqualFold(trendLang.Name, lang) {
			return trendLang.URLName
		}
	}
	return ""
}
// debug logs the raw request body, or the read error when reading failed.
func debug(ctx context.Context, data []byte, err error) {
	if err != nil {
		log.Debugf(ctx, err.Error())
		return
	}
	log.Debugf(ctx, "Request: %s", string(data))
}
// getTrending builds the trending-repositories fulfillment.
// Count may be nil if the user didn't specify how many. Give them the default value.
// NOTE(review): the client parameter is unused here — get(ctx, lang)
// presumably constructs its own client; confirm.
func getTrending(ctx context.Context, client *http.Client, count *int, lang string) (FulfillmentBuilder, error) {
	projects, err := get(ctx, lang)
	if err != nil {
		return nil, err
	}
	var projectText, projectSpeech string
	var maxTrending int
	if count == nil {
		maxTrending = defaultTrendingRepos
	} else {
		maxTrending = *count
	}
	// The headline reports min(available, requested) so it matches the
	// number of items actually listed below.
	if lang != "" {
		projectSpeech = fmt.Sprintf("<p>Here are the top %d trending repositories for %s:</p>", minInt(len(projects.Data), maxTrending), lang)
	} else {
		projectSpeech = fmt.Sprintf("<p>Here are the top %d trending repositories:</p>", minInt(len(projects.Data), maxTrending))
	}
	for index, project := range projects.Data {
		if index >= maxTrending {
			break
		}
		projectSpeech += fmt.Sprintf("<p>#%d. %s by %s: %s</p>", index+1, project.RepositoryName, project.Owner, project.Description)
		projectText += fmt.Sprintf("\n#%d. %s by %s: %s", index+1, project.RepositoryName, project.Owner, project.Description)
	}
	return &Trending{projectText, projectSpeech}, nil
}
// buildFulfillment renders the user's profile summary as both SSML speech
// and plain display text.
func (sum *ProfileSummary) buildFulfillment(ctx context.Context) *FulfillmentResp {
	// Fixed: the original concatenation produced "...private repos.You
	// have" with no space between the two sentences.
	summary := fmt.Sprintf(
		"Hello %s. You currently have %d public repos, "+
			"%d private repos, and you own %d of these private repos. "+
			"You have %d followers and are following %d people.",
		sum.user.GetName(), sum.user.GetPublicRepos(), sum.user.GetTotalPrivateRepos(),
		sum.user.GetOwnedPrivateRepos(), sum.user.GetFollowers(), sum.user.GetFollowing())
	resp := &FulfillmentResp{Speech: "<speak>" + summary + "</speak>", DisplayText: summary}
	log.Debugf(ctx, "Built fulfillment with string: %s", summary)
	return resp
}
// buildFulfillment wraps the pre-rendered trending text and speech into a
// webhook response.
func (trending *Trending) buildFulfillment(ctx context.Context) *FulfillmentResp {
	speech := "<speak>" + trending.speech + "</speak>"
	log.Debugf(ctx, "Built fulfillment with string: %s", trending.speech)
	return &FulfillmentResp{Speech: speech, DisplayText: trending.text}
}
// buildFulfillment renders the unread-notification list as SSML speech and
// plain display text.
func (not *GithubNotifications) buildFulfillment(ctx context.Context) *FulfillmentResp {
	notifications := []*github.Notification(*not)

	var text, speech string
	if len(notifications) > 0 {
		speech = "<speak><p>Here are your unread notifications:</p>"
	} else {
		speech = "<speak>You have no unread notifications"
	}
	for i, notification := range notifications {
		subjectType := notification.Subject.GetType()
		title := notification.Subject.GetTitle()
		text += fmt.Sprintf("\n#%d: This notification is on an %s and says: %s", i+1, subjectType, title)
		speech += fmt.Sprintf("<p>#%d: This notification is on an %s and says: %s</p>", i+1, subjectType, title)
	}
	return &FulfillmentResp{speech + "</speak>", text}
}
// buildFulfillment renders the assigned-issue list as SSML speech and plain
// display text.
func (iss *GithubIssues) buildFulfillment(ctx context.Context) *FulfillmentResp {
	issues := []*github.Issue(*iss)

	var text, speech string
	// Fixed: the original called fmt.Sprintf with no format arguments on
	// both branches (staticcheck S1039) — plain string literals suffice.
	if len(issues) > 0 {
		speech = "<speak><p>Here are the open issues assigned to you:</p>"
	} else {
		speech = "<speak>You have no open issues assigned to you."
	}
	for i, issue := range issues {
		repo := issue.Repository.GetName()
		opened := issue.GetCreatedAt().Format("Monday, January 2")
		text += fmt.Sprintf("\n#%d: Opened in %s on %s by %s: %s", i+1, repo, opened, issue.User.GetLogin(), issue.GetTitle())
		speech += fmt.Sprintf("<p>#%d: Opened in %s on %s by %s: %s</p>", i+1, repo, opened, issue.User.GetLogin(), issue.GetTitle())
	}
	return &FulfillmentResp{speech + "</speak>", text}
}
// getNotifications fetches the authenticated user's notification list from
// GitHub using the given OAuth access token.
func getNotifications(ctx context.Context, accessToken string) (FulfillmentBuilder, error) {
	client := createGithubClient(ctx, accessToken)
	notifications, _, err := client.Activity.ListNotifications(ctx, nil)
	if err != nil {
		return nil, err
	}
	result := GithubNotifications(notifications)
	return &result, nil
}
// getAssignedIssues fetches the issues assigned to the authenticated user
// using the given OAuth access token.
func getAssignedIssues(ctx context.Context, accessToken string) (FulfillmentBuilder, error) {
	client := createGithubClient(ctx, accessToken)
	issues, _, err := client.Issues.List(ctx, true, nil)
	if err != nil {
		return nil, err
	}
	result := GithubIssues(issues)
	return &result, nil
}
// getProfileSummary fetches the authenticated user's profile from GitHub
// and wraps it for fulfillment building.
func getProfileSummary(ctx context.Context, accessToken string) (FulfillmentBuilder, error) {
	client := createGithubClient(ctx, accessToken)
	// Empty login means "the authenticated user".
	user, _, err := client.Users.Get(ctx, "")
	if err != nil {
		return nil, err
	}
	return &ProfileSummary{user}, nil
}
// createGithubClient builds a GitHub API client whose requests are
// authenticated with the given OAuth2 access token and routed through
// App Engine's urlfetch transport.
func createGithubClient(ctx context.Context, accessToken string) *github.Client {
	tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: accessToken})
	transport := &oauth2.Transport{
		Source: oauth2.ReuseTokenSource(nil, tokenSource),
		Base:   &urlfetch.Transport{Context: ctx},
	}
	return github.NewClient(&http.Client{Transport: transport})
}
|
// Copyright 2020 Ant Group. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
package checker
import (
"context"
"os"
"path/filepath"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/dragonflyoss/image-service/contrib/nydusify/pkg/checker/rule"
"github.com/dragonflyoss/image-service/contrib/nydusify/pkg/checker/tool"
"github.com/dragonflyoss/image-service/contrib/nydusify/pkg/converter/provider"
"github.com/dragonflyoss/image-service/contrib/nydusify/pkg/parser"
)
// Opt defines Checker options.
// Note: target is the Nydus image reference.
type Opt struct {
	WorkDir        string // scratch directory; wiped and recreated on every Check run
	Source         string // optional OCI source image reference; empty disables source parsing
	Target         string // Nydus (target) image reference
	SourceInsecure bool   // allow insecure registry access for the source image
	TargetInsecure bool   // allow insecure registry access for the target image
	MultiPlatform  bool   // validate a multi-platform manifest (passed to ManifestRule)
	NydusImagePath string // path to the nydus-image binary (used by BootstrapRule)
	NydusdPath     string // path to the nydusd binary (used by FilesystemRule)
	BackendType    string // storage backend type for nydusd
	BackendConfig  string // storage backend configuration for nydusd
	ExpectedArch   string // expected image architecture, e.g. "amd64"
}
// Checker validates Nydus image manifest, bootstrap and mounts filesystem
// by Nydusd to compare file metadata and data with OCI image.
type Checker struct {
	Opt
	sourceParser *parser.Parser // parser for the OCI source image; nil when Opt.Source is empty
	targetParser *parser.Parser // parser for the Nydus target image
}
// New creates Checker instance, target is the Nydus image reference.
// A source parser is only constructed when opt.Source is non-empty.
func New(opt Opt) (*Checker, error) {
	// TODO: support source and target resolver
	targetRemote, err := provider.DefaultRemote(opt.Target, opt.TargetInsecure)
	if err != nil {
		return nil, errors.Wrap(err, "Init target image parser")
	}
	targetParser, err := parser.New(targetRemote, opt.ExpectedArch)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create parser")
	}
	var sourceParser *parser.Parser
	if opt.Source != "" {
		sourceRemote, err := provider.DefaultRemote(opt.Source, opt.SourceInsecure)
		if err != nil {
			return nil, errors.Wrap(err, "Init source image parser")
		}
		sourceParser, err = parser.New(sourceRemote, opt.ExpectedArch)
		// Bug fix: check the returned error, not whether the parser is nil.
		// The previous `if sourceParser == nil` could silently accept a
		// failed construction (or reject a valid one) depending on what
		// parser.New returns alongside its error.
		if err != nil {
			return nil, errors.Wrap(err, "failed to create parser")
		}
	}
	checker := &Checker{
		Opt:          opt,
		sourceParser: sourceParser,
		targetParser: targetParser,
	}
	return checker, nil
}
// Check checks Nydus image, and outputs image information to work
// directory, the check workflow is composed of various rules.
func (checker *Checker) Check(ctx context.Context) error {
	targetParsed, err := checker.targetParser.Parse(ctx)
	if err != nil {
		return errors.Wrap(err, "parse Nydus image")
	}
	// When no source image was configured, run the rules against the target
	// itself so the pipeline below always has both sides populated.
	var sourceParsed *parser.Parsed
	if checker.sourceParser != nil {
		sourceParsed, err = checker.sourceParser.Parse(ctx)
		if err != nil {
			return errors.Wrap(err, "parse source image")
		}
	} else {
		sourceParsed = targetParsed
	}
	// Start from a clean work directory on every run.
	if err := os.RemoveAll(checker.WorkDir); err != nil {
		return errors.Wrap(err, "clean up work directory")
	}
	if err := os.MkdirAll(filepath.Join(checker.WorkDir, "fs"), 0755); err != nil {
		return errors.Wrap(err, "create work directory")
	}
	if err := checker.Output(ctx, sourceParsed, targetParsed, checker.WorkDir); err != nil {
		return errors.Wrap(err, "output image information")
	}
	// Rules are validated in order: manifest, bootstrap, then filesystem.
	rules := []rule.Rule{
		&rule.ManifestRule{
			SourceParsed:  sourceParsed,
			TargetParsed:  targetParsed,
			MultiPlatform: checker.MultiPlatform,
			BackendType:   checker.BackendType,
			ExpectedArch:  checker.ExpectedArch,
		},
		&rule.BootstrapRule{
			Parsed:          targetParsed,
			NydusImagePath:  checker.NydusImagePath,
			BootstrapPath:   filepath.Join(checker.WorkDir, "nydus_bootstrap"),
			DebugOutputPath: filepath.Join(checker.WorkDir, "nydus_bootstrap_debug.json"),
		},
		&rule.FilesystemRule{
			Source:          checker.Source,
			SourceMountPath: filepath.Join(checker.WorkDir, "fs/source_mounted"),
			NydusdConfig: tool.NydusdConfig{
				NydusdPath:    checker.NydusdPath,
				BackendType:   checker.BackendType,
				BackendConfig: checker.BackendConfig,
				BootstrapPath: filepath.Join(checker.WorkDir, "nydus_bootstrap"),
				ConfigPath:    filepath.Join(checker.WorkDir, "fs/nydusd_config.json"),
				BlobCacheDir:  filepath.Join(checker.WorkDir, "fs/nydus_blobs"),
				MountPath:     filepath.Join(checker.WorkDir, "fs/nydus_mounted"),
				APISockPath:   filepath.Join(checker.WorkDir, "fs/nydus_api.sock"),
			},
		},
	}
	// Loop variable renamed from "rule" to "r" so it no longer shadows the
	// imported rule package inside the loop body.
	for _, r := range rules {
		if err := r.Validate(); err != nil {
			return errors.Wrapf(err, "validate rule %s", r.Name())
		}
	}
	logrus.Infof("Verified Nydus image %s", checker.targetParser.Remote.Ref)
	return nil
}
|
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deprecated_test
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/internal/deprecated"
"github.com/tektoncd/pipeline/test/diff"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
logtesting "knative.dev/pkg/logging/testing"
)
const (
	// featureFlagDisableHomeEnvKey is the feature-flags ConfigMap key that
	// disables overwriting the HOME env var in step containers.
	featureFlagDisableHomeEnvKey = "disable-home-env-overwrite"
	// featureFlagDisableWorkingDirKey is the feature-flags ConfigMap key that
	// disables overwriting the working directory of step containers.
	featureFlagDisableWorkingDirKey = "disable-working-directory-overwrite"
)
// TestNewOverrideWorkingDirTransformer verifies that the deprecated
// working-directory transformer only rewrites "step-" containers that have
// no explicit WorkingDir, and only when the
// disable-working-directory-overwrite flag is explicitly set to "false".
func TestNewOverrideWorkingDirTransformer(t *testing.T) {
	for _, tc := range []struct {
		description string
		configMap   *corev1.ConfigMap
		podspec     corev1.PodSpec
		expected    corev1.PodSpec
	}{{
		// Flag absent: no container is modified.
		description: "Default behaviour: A missing disable-working-directory-overwrite should mean true, so no overwrite",
		configMap: &corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: "tekton-pipelines"},
			Data:       map[string]string{},
		},
		podspec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "step-bar",
				Image: "foo",
			}, {
				Name:  "sidecar-bar",
				Image: "foo",
			}, {
				Name:       "step-bar-wg",
				Image:      "foo",
				WorkingDir: "/foobar",
			}},
		},
		expected: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "step-bar",
				Image: "foo",
			}, {
				Name:  "sidecar-bar",
				Image: "foo",
			}, {
				Name:       "step-bar-wg",
				Image:      "foo",
				WorkingDir: "/foobar",
			}},
		},
	}, {
		// Flag "false": only the step container without a WorkingDir gets
		// /workspace; sidecars and steps with explicit WorkingDir are untouched.
		description: "Setting disable-working-directory-overwrite to false should result in we don't disable the behavior, so there should be an overwrite",
		configMap: &corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: "tekton-pipelines"},
			Data: map[string]string{
				featureFlagDisableWorkingDirKey: "false",
			},
		},
		podspec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "step-bar",
				Image: "foo",
			}, {
				Name:  "sidecar-bar",
				Image: "foo",
			}, {
				Name:       "step-bar-wg",
				Image:      "foo",
				WorkingDir: "/foobar",
			}},
		},
		expected: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:       "step-bar",
				Image:      "foo",
				WorkingDir: "/workspace",
			}, {
				Name:  "sidecar-bar",
				Image: "foo",
			}, {
				Name:       "step-bar-wg",
				Image:      "foo",
				WorkingDir: "/foobar",
			}},
		},
	}, {
		// Flag "true": overwrite explicitly disabled, nothing changes.
		description: "Setting disable-working-directory-overwrite to true should disable the overwrite, so no overwrite",
		configMap: &corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: "tekton-pipelines"},
			Data: map[string]string{
				featureFlagDisableWorkingDirKey: "true",
			},
		},
		podspec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "step-bar",
				Image: "foo",
			}, {
				Name:  "sidecar-bar",
				Image: "foo",
			}, {
				Name:       "step-bar-wg",
				Image:      "foo",
				WorkingDir: "/foobar",
			}},
		},
		expected: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "step-bar",
				Image: "foo",
			}, {
				Name:  "sidecar-bar",
				Image: "foo",
			}, {
				Name:       "step-bar-wg",
				Image:      "foo",
				WorkingDir: "/foobar",
			}},
		},
	}} {
		t.Run(tc.description, func(t *testing.T) {
			// Build a config store seeded with the test's feature flags and
			// derive the transformer from its context.
			store := config.NewStore(logtesting.TestLogger(t))
			store.OnConfigChanged(tc.configMap)
			ctx := store.ToContext(context.Background())
			f := deprecated.NewOverrideWorkingDirTransformer(ctx)
			got, err := f(&corev1.Pod{Spec: tc.podspec})
			if err != nil {
				t.Fatalf("Transformer failed: %v", err)
			}
			if d := cmp.Diff(tc.expected, got.Spec); d != "" {
				t.Errorf("Diff pod: %s", diff.PrintWantGot(d))
			}
		})
	}
}
// TestShouldOverrideHomeEnv verifies that the deprecated HOME-env transformer
// appends HOME=/tekton/home to step containers that lack it, and only when
// disable-home-env-overwrite is explicitly "false".
// NOTE(review): the function name says "ShouldOverrideHomeEnv" but it
// exercises deprecated.NewOverrideHomeTransformer — consider renaming.
func TestShouldOverrideHomeEnv(t *testing.T) {
	for _, tc := range []struct {
		description string
		configMap   *corev1.ConfigMap
		podspec     corev1.PodSpec
		expected    corev1.PodSpec
	}{{
		// Flag absent: no env var is added or changed.
		description: "Default behaviour: A missing disable-home-env-overwrite flag should result in no overwrite",
		configMap: &corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: "tekton-pipelines"},
			Data:       map[string]string{},
		},
		podspec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "step-bar",
				Image: "foo",
				Env: []corev1.EnvVar{{
					Name:  "HOME",
					Value: "/home",
				}},
			}, {
				Name:  "step-baz",
				Image: "foo",
				Env: []corev1.EnvVar{{
					Name:  "FOO",
					Value: "bar",
				}},
			}},
		},
		expected: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "step-bar",
				Image: "foo",
				Env: []corev1.EnvVar{{
					Name:  "HOME",
					Value: "/home",
				}},
			}, {
				Name:  "step-baz",
				Image: "foo",
				Env: []corev1.EnvVar{{
					Name:  "FOO",
					Value: "bar",
				}},
			}},
		},
	}, {
		// Flag "false": HOME=/tekton/home is appended only to the container
		// that does not already define HOME.
		description: "Setting disable-home-env-overwrite to false should result in an overwrite",
		configMap: &corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: "tekton-pipelines"},
			Data: map[string]string{
				featureFlagDisableHomeEnvKey: "false",
			},
		},
		podspec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "step-bar",
				Image: "foo",
				Env: []corev1.EnvVar{{
					Name:  "HOME",
					Value: "/home",
				}},
			}, {
				Name:  "step-baz",
				Image: "foo",
				Env: []corev1.EnvVar{{
					Name:  "FOO",
					Value: "bar",
				}},
			}},
		},
		expected: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "step-bar",
				Image: "foo",
				Env: []corev1.EnvVar{{
					Name:  "HOME",
					Value: "/home",
				}},
			}, {
				Name:  "step-baz",
				Image: "foo",
				Env: []corev1.EnvVar{{
					Name:  "FOO",
					Value: "bar",
				}, {
					Name:  "HOME",
					Value: "/tekton/home",
				}},
			}},
		},
	}, {
		// Flag "true": overwrite explicitly disabled, nothing changes.
		description: "Setting disable-home-env-overwrite to true should result in no overwrite",
		configMap: &corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: "tekton-pipelines"},
			Data: map[string]string{
				featureFlagDisableHomeEnvKey: "true",
			},
		},
		podspec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "step-bar",
				Image: "foo",
				Env: []corev1.EnvVar{{
					Name:  "HOME",
					Value: "/home",
				}},
			}, {
				Name:  "step-baz",
				Image: "foo",
				Env: []corev1.EnvVar{{
					Name:  "FOO",
					Value: "bar",
				}},
			}},
		},
		expected: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "step-bar",
				Image: "foo",
				Env: []corev1.EnvVar{{
					Name:  "HOME",
					Value: "/home",
				}},
			}, {
				Name:  "step-baz",
				Image: "foo",
				Env: []corev1.EnvVar{{
					Name:  "FOO",
					Value: "bar",
				}},
			}},
		},
	}} {
		t.Run(tc.description, func(t *testing.T) {
			// Build a config store seeded with the test's feature flags and
			// derive the transformer from its context.
			store := config.NewStore(logtesting.TestLogger(t))
			store.OnConfigChanged(tc.configMap)
			ctx := store.ToContext(context.Background())
			f := deprecated.NewOverrideHomeTransformer(ctx)
			got, err := f(&corev1.Pod{Spec: tc.podspec})
			if err != nil {
				t.Fatalf("Transformer failed: %v", err)
			}
			if d := cmp.Diff(tc.expected, got.Spec); d != "" {
				t.Errorf("Diff pod: %s", diff.PrintWantGot(d))
			}
		})
	}
}
|
package validatingroundtripper
import (
"fmt"
"net/http"
"os"
"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/rest"
)
// validatingRoundTripper wraps another http.RoundTripper and asserts that
// every Kubernetes object created via POST carries the OLM management label.
type validatingRoundTripper struct {
	delegate http.RoundTripper // the wrapped transport that actually performs the request
}
// RoundTrip inspects the body of every POST request and panics when the
// object being created lacks the OLM management label. Panicking is
// deliberate: this transport is only installed in CI/dev (see Wrap), where a
// loud failure is the point. Objects of kind "Event" are exempt.
func (rt *validatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	if req.Method == "POST" {
		// GetBody returns a fresh copy of the body, so the request can
		// still be sent afterwards.
		b, err := req.GetBody()
		if err != nil {
			panic(err)
		}
		// 10 is the decoder buffer size used to sniff YAML vs JSON.
		dec := yaml.NewYAMLOrJSONDecoder(b, 10)
		unstructuredObject := &unstructured.Unstructured{}
		if err := dec.Decode(unstructuredObject); err != nil {
			panic(fmt.Errorf("error decoding object to an unstructured object: %w", err))
		}
		gvk := unstructuredObject.GroupVersionKind()
		if gvk.Kind != "Event" {
			if labels := unstructuredObject.GetLabels(); labels[install.OLMManagedLabelKey] != install.OLMManagedLabelValue {
				panic(fmt.Errorf("%s.%s/%v %s/%s does not have labels[%s]=%s", gvk.Kind, gvk.Group, gvk.Version, unstructuredObject.GetNamespace(), unstructuredObject.GetName(), install.OLMManagedLabelKey, install.OLMManagedLabelValue))
			}
		}
	}
	return rt.delegate.RoundTrip(req)
}
// Compile-time check that validatingRoundTripper implements http.RoundTripper.
var _ http.RoundTripper = (*validatingRoundTripper)(nil)
// Wrap is meant to be used in developer environments and CI to make it easy
// to find places where we accidentally create Kubernetes objects without our
// management label. Outside CI (no "CI" env var) the config passes through
// unchanged.
func Wrap(cfg *rest.Config) *rest.Config {
	if _, ci := os.LookupEnv("CI"); !ci {
		return cfg
	}
	// Copy the config so the caller's instance is never mutated.
	wrapped := *cfg
	wrapped.Wrap(func(next http.RoundTripper) http.RoundTripper {
		return &validatingRoundTripper{delegate: next}
	})
	return &wrapped
}
|
package commonutils
import (
consulapi "github.com/hashicorp/consul/api"
log "github.com/sirupsen/logrus"
"strconv"
)
// GetConsulApiClient returns a Consul API client pointed at host:port.
func GetConsulApiClient(host string, port int) (*consulapi.Client, error) {
	portStr := strconv.Itoa(port)
	log.WithFields(log.Fields{"package": "commonutils", "function": "GetConsulApiClient"}).Debugf("Getting Consul API client for -> Consul.Host: %s Consul.Port: %s", host, portStr)
	cfg := consulapi.DefaultConfig()
	cfg.Address = host + ":" + portStr
	return consulapi.NewClient(cfg)
}
// CreateKVConsul stores a single key/value pair in Consul's KV store.
func CreateKVConsul(key string, val []byte, client *consulapi.Client) error {
	logger := log.WithFields(log.Fields{"package": "commonutils", "function": "CreateKVConsul"})
	logger.Debugf("Key: %s Value: %s", key, string(val))
	// PUT a new KV pair
	pair := &consulapi.KVPair{Key: key, Value: val}
	if _, err := client.KV().Put(pair, nil); err != nil {
		logger.Errorf("Error creating KV err: %s", err)
		return err
	}
	return nil
}
// UpdateKVTreeConsul atomically replaces an entire KV subtree: a single
// transaction first deletes everything under tree, then writes each pair in
// kvpair. Returns the transaction's success flag and any transport error.
func UpdateKVTreeConsul(tree string, kvpair []*consulapi.KVPair, client *consulapi.Client) (bool, error) {
	kv := client.KV()
	// Pre-size: one delete-tree op plus one set op per pair.
	kvtxops := make([]*consulapi.KVTxnOp, 0, len(kvpair)+1)
	kvtxops = append(kvtxops, &consulapi.KVTxnOp{
		Verb: consulapi.KVDeleteTree,
		Key:  tree,
	})
	// Fix: the loop variable was named "kv", shadowing the KV client above;
	// renamed to "pair" for clarity.
	for _, pair := range kvpair {
		log.WithFields(log.Fields{"package": "commonutils", "function": "UpdateKVTreeConsul"}).Debugf("Key: %s Value: %s", pair.Key, string(pair.Value))
		kvtxops = append(kvtxops, &consulapi.KVTxnOp{
			Verb:  consulapi.KVSet,
			Key:   pair.Key,
			Value: pair.Value,
		})
	}
	ok, _, _, err := kv.Txn(kvtxops, nil)
	if err != nil {
		log.WithFields(log.Fields{"package": "commonutils", "function": "UpdateKVTreeConsul"}).Errorf("Error creating bulk KV Txn err: %s", err)
	}
	return ok, err
}
|
/*
Write a program to find all the prime factors of a given number.
The program must return an array containing all the prime factors, sorted in ascending order.
Remember that 1 is neither prime nor composite and should not be included in your output array.
Examples
primeFactorize(25) ➞ [5, 5]
primeFactorize(19) ➞ [19]
primeFactorize(77) ➞ [7, 11]
Notes
Output array must be sorted in ascending order
The only positive integer which is neither prime nor composite is 1. Return an empty array if 1 is the input.
*/
package main
// main exercises factorize against known prime factorizations; eq asserts
// exact equality, ne asserts the slices differ.
func main() {
	eq(factorize(25), []uint{5, 5})
	eq(factorize(19), []uint{19})
	eq(factorize(77), []uint{7, 11})
	eq(factorize(32), []uint{2, 2, 2, 2, 2})
	eq(factorize(17), []uint{17})
	eq(factorize(35), []uint{5, 7})
	eq(factorize(2), []uint{2})
	// 1 is neither prime nor composite, so it never appears in the output,
	// and factorize(1) yields an empty result.
	ne(factorize(2), []uint{1, 2})
	ne(factorize(1), []uint{1})
	// Factors must come back in ascending order.
	ne(factorize(35), []uint{7, 5})
	eq(factorize(2591), []uint{2591})
	eq(factorize(2532), []uint{2, 2, 3, 211})
}
// factorize returns the prime factors of n in ascending order.
// factorize(0) and factorize(1) return nil, since neither has prime factors.
//
// Performance fix: trial division is bounded by i*i <= n. Once no divisor up
// to sqrt(n) divides n, whatever remains of n is itself prime, so the worst
// case (n prime) drops from O(n) divisions to O(sqrt(n)).
func factorize(n uint) []uint {
	var r []uint
	for i := uint(2); i*i <= n; i++ {
		// Divide out each prime completely before moving on, which keeps
		// every later divisor found here prime.
		for n%i == 0 {
			n /= i
			r = append(r, i)
		}
	}
	// The leftover cofactor, if any, is a prime larger than sqrt(original n).
	if n > 1 {
		r = append(r, n)
	}
	return r
}
// assert panics when the given condition does not hold.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// eq asserts that a and b have the same length and identical elements.
func eq(a, b []uint) {
	assert(len(a) == len(b))
	for i, v := range a {
		assert(v == b[i])
	}
}
// ne asserts that a and b differ in length or in at least one element.
func ne(a, b []uint) {
	if len(a) != len(b) {
		return
	}
	for i, v := range a {
		if v != b[i] {
			return
		}
	}
	// Same length and every element equal: the slices match, which is a failure.
	assert(false)
}
|
// SPDX-License-Identifier: Apache-2.0
// Copyright The Linux Foundation
package main
// based on quickstart from https://developers.google.com/sheets/api/quickstart/go
// and code from https://github.com/gsuitedevs/go-samples/blob/master/sheets/quickstart/quickstart.go
// with the following copyright and license notice:
//
// Copyright Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
"time"
"github.com/spdx/tools-golang/v0/tvsaver"
"github.com/swinslow/cncf-exceptions-maker/pkg/exceptionmaker"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/sheets/v4"
)
// getClient retrieves a cached OAuth token (or runs the interactive web flow
// to obtain and save a new one) and returns an authenticated HTTP client.
func getClient(config *oauth2.Config) *http.Client {
	// The file token.json stores the user's access and refresh tokens, and is
	// created automatically when the authorization flow completes for the first
	// time.
	tokFile := "token.json"
	tok, err := tokenFromFile(tokFile)
	if err != nil {
		// No cached token (or unreadable): fall back to the interactive flow
		// and cache the result for next time.
		tok = getTokenFromWeb(config)
		saveToken(tokFile, tok)
	}
	return config.Client(context.Background(), tok)
}
// getTokenFromWeb runs the interactive OAuth flow: it prints the
// authorization URL, reads the authorization code from stdin, and exchanges
// it for a token. Any failure exits the program via log.Fatalf.
func getTokenFromWeb(config *oauth2.Config) *oauth2.Token {
	authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
	fmt.Printf("Go to the following link in your browser then type the "+
		"authorization code: \n%v\n", authURL)
	var authCode string
	if _, err := fmt.Scan(&authCode); err != nil {
		log.Fatalf("Unable to read authorization code: %v", err)
	}
	tok, err := config.Exchange(context.TODO(), authCode)
	if err != nil {
		log.Fatalf("Unable to retrieve token from web: %v", err)
	}
	return tok
}
// tokenFromFile retrieves a cached OAuth token from a local JSON file.
func tokenFromFile(file string) (*oauth2.Token, error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	tok := new(oauth2.Token)
	err = json.NewDecoder(f).Decode(tok)
	return tok, err
}
// saveToken writes the OAuth token as JSON to the given file path,
// exiting the program on any failure.
func saveToken(path string, token *oauth2.Token) {
	fmt.Printf("Saving credential file to: %s\n", path)
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		log.Fatalf("Unable to cache oauth token: %v", err)
	}
	defer f.Close()
	// Fix: the encode error was previously discarded, which could leave a
	// truncated or empty token.json behind with no indication of failure.
	if err := json.NewEncoder(f).Encode(token); err != nil {
		log.Fatalf("Unable to write oauth token: %v", err)
	}
}
// ExceptionConfig holds the configuration options for the exception maker.
type ExceptionConfig struct {
	// SpreadsheetID is the Google Sheets document ID containing the
	// approved exceptions list.
	SpreadsheetID string `json:"spreadsheetId"`
}
// loadConfig reads and parses the JSON configuration file at filename.
func loadConfig(filename string) (*ExceptionConfig, error) {
	raw, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, fmt.Errorf("error reading %s: %v", filename, err)
	}
	var cfg ExceptionConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		return nil, fmt.Errorf("error unmarshalling from JSON: %v", err)
	}
	return &cfg, nil
}
// main reads the approved-exceptions Google Sheet configured in
// ~/.cncf-exceptions-config and writes the data out twice: once as an SPDX
// 2.1 tag-value document and once as JSON, both date-stamped.
func main() {
	home, err := os.UserHomeDir()
	if err != nil {
		log.Fatalf("Unable to get user home directory: %v", err)
	}
	b, err := ioutil.ReadFile(filepath.Join(home, ".google-sheets-cncf-exceptions-credentials.json"))
	if err != nil {
		log.Fatalf("Unable to read client secret file: %v", err)
	}
	// If modifying these scopes, delete your previously saved token.json.
	config, err := google.ConfigFromJSON(b, "https://www.googleapis.com/auth/spreadsheets.readonly")
	if err != nil {
		log.Fatalf("Unable to parse client secret file to config: %v", err)
	}
	client := getClient(config)
	srv, err := sheets.New(client)
	if err != nil {
		log.Fatalf("Unable to retrieve Sheets client: %v", err)
	}
	cfg, err := loadConfig(filepath.Join(home, ".cncf-exceptions-config"))
	if err != nil {
		// Fix: this error was previously unchecked, which made the
		// cfg.SpreadsheetID access below a nil dereference whenever the
		// config file was missing or invalid.
		log.Fatalf("Unable to load config: %v", err)
	}
	readRange := "Approved!A2:I"
	resp, err := srv.Spreadsheets.Values.Get(cfg.SpreadsheetID, readRange).Do()
	if err != nil {
		log.Fatalf("Unable to retrieve data from sheet: %v", err)
	}
	doc := exceptionmaker.MakeDocument()
	if len(resp.Values) == 0 {
		fmt.Println("No data found.")
	} else {
		// rowNum tracks the spreadsheet row (data starts at row 2) for
		// error reporting.
		// NOTE(review): rowNum is not incremented for skipped incomplete
		// rows, so reported row numbers drift after a skip — confirm intent.
		rowNum := 2
		for _, row := range resp.Values {
			// check whether this row is complete
			if len(row) < 9 {
				log.Printf("==> INCOMPLETE ROW (%d): %v\n", len(row), row)
				continue
			}
			pkg, err := exceptionmaker.MakePackageFromRow(row, rowNum)
			if err != nil {
				log.Fatalf("Unable to convert rowNum %d data to SPDX package: %v\n", rowNum, err)
			}
			doc.Packages = append(doc.Packages, pkg)
			rowNum++
		}
	}
	// and write to disk
	fileOut := fmt.Sprintf("cncf-exceptions-%s.spdx", time.Now().Format("2006-01-02"))
	w, err := os.Create(fileOut)
	if err != nil {
		log.Fatalf("Error while opening %v for writing: %v", fileOut, err)
	}
	defer w.Close()
	err = tvsaver.Save2_1(doc, w)
	if err != nil {
		log.Fatalf("Error while saving %v: %v", fileOut, err)
	}
	fmt.Printf("Saved exceptions list as SPDX to %s\n", fileOut)
	subsets := exceptionmaker.ConvertSPDXToJSONPackageSubset(doc)
	jsonStr, err := json.MarshalIndent(subsets, "", " ")
	if err != nil {
		log.Fatalf("Error while marshalling to JSON: %v", err)
	}
	jsonOut := fmt.Sprintf("cncf-exceptions-%s.json", time.Now().Format("2006-01-02"))
	j, err := os.Create(jsonOut)
	if err != nil {
		// Fix: this message previously reported fileOut instead of jsonOut.
		log.Fatalf("Error while opening %v for writing: %v", jsonOut, err)
	}
	defer j.Close()
	_, err = j.Write(jsonStr)
	if err != nil {
		log.Fatalf("Error while saving %v: %v", jsonOut, err)
	}
	fmt.Printf("Saved exceptions list as JSON to %s\n", jsonOut)
}
|
package element
// FromMont is the text/template source for the generated FromMont method,
// which converts an element in place out of Montgomery representation
// (z = z * 1). Pipeline fields used: .ElementName, .IfaceName.
// NOTE: not thoroughly tested on moduli without the .NoCarry property.
const FromMont = `
// FromMont converts z in place (i.e. mutates) from Montgomery to regular representation
// sets and returns z = z * 1
{{- if eq .IfaceName .ElementName}}
func (z *{{.ElementName}}) FromMont() *{{.ElementName}} {
{{else}}
func (z *{{.ElementName}}) FromMont() {{.IfaceName}} {
{{end}}
	fromMont{{.ElementName}}(z)
	return z
}
`
// Conv is the text/template source for the generated conversion helpers:
// ToMont, ToRegular, String, ToByte, FromByte, ToBigInt, ToBigIntRegular,
// SetBigInt and SetString. The template is instantiated once per element
// type; pipeline fields used: .ElementName, .IfaceName, .RSquare, .NbWords,
// .NbWordsIndexesFull. The template body is emitted verbatim into generated
// source, so it must remain valid Go after expansion.
const Conv = `
// ToMont converts z to Montgomery form
// sets and returns z = z * r^2
{{- if eq .IfaceName .ElementName}}
func (z *{{.ElementName}}) ToMont() *{{.ElementName}} {
{{else}}
func (z *{{.ElementName}}) ToMont() {{.IfaceName}} {
{{end}}
	var rSquare = {{.ElementName}}{
		{{- range $i := .RSquare}}
		{{$i}},{{end}}
	}
	mulAssign{{.ElementName}}(z, &rSquare)
	return z
}
// ToRegular returns z in regular form (doesn't mutate z)
{{- if eq .IfaceName .ElementName}}
func (z {{.ElementName}}) ToRegular() {{.ElementName}} {
	return *z.FromMont()
{{else}}
func (z {{.ElementName}}) ToRegular() {{.IfaceName}} {
	return z.FromMont()
{{end}}
}
// String returns the string form of an {{.ElementName}} in Montgomery form
func (z *{{.ElementName}}) String() string {
	var _z big.Int
	return z.ToBigIntRegular(&_z).String()
}
// ToByte returns the byte form of an {{.ElementName}} in Regular form
func (z {{.ElementName}}) ToByte() []byte {
	{{- if eq .IfaceName .ElementName}}
	t := z.ToRegular()
	{{else}}
	t := z.ToRegular().(*{{.ElementName}})
	{{end}}
	var _z []byte
	_z1 := make([]byte,8)
	{{- range $i := .NbWordsIndexesFull}}
	binary.LittleEndian.PutUint64(_z1, t[{{$i}}])
	_z = append(_z,_z1...)
	{{- end}}
	return _z
}
// FromByte returns the byte form of an {{.ElementName}} in Regular form (mutates z)
{{- if eq .IfaceName .ElementName}}
func (z *{{.ElementName}}) FromByte(x []byte) *{{.ElementName}} {
{{else}}
func (z *{{.ElementName}}) FromByte(x []byte) {{.IfaceName}} {
{{end}}
	{{- range $i := .NbWordsIndexesFull}}
	z[{{$i}}] = binary.LittleEndian.Uint64(x[{{$i}}*8:({{$i}}+1)*8])
	{{- end}}
	return z.ToMont()
}
// ToBigInt returns z as a big.Int in Montgomery form
func (z *{{.ElementName}}) ToBigInt(res *big.Int) *big.Int {
	if bits.UintSize == 64 {
		bits := (*[{{.NbWords}}]big.Word)(unsafe.Pointer(z))
		return res.SetBits(bits[:])
	} else {
		var bits[{{.NbWords}}*2]big.Word
		{{- range $i := .NbWordsIndexesFull}}
		bits[{{$i}}*2] = big.Word(z[{{$i}}])
		bits[{{$i}}*2+1] = big.Word(z[{{$i}}] >> 32)
		{{- end }}
		return res.SetBits(bits[:])
	}
}
// ToBigIntRegular returns z as a big.Int in regular form
func (z {{.ElementName}}) ToBigIntRegular(res *big.Int) *big.Int {
	if bits.UintSize == 64 {
		z.FromMont()
		bits := (*[{{.NbWords}}]big.Word)(unsafe.Pointer(&z))
		return res.SetBits(bits[:])
	} else {
		var bits[{{.NbWords}}*2]big.Word
		{{- range $i := .NbWordsIndexesFull}}
		bits[{{$i}}*2] = big.Word(z[{{$i}}])
		bits[{{$i}}*2+1] = big.Word(z[{{$i}}] >> 32)
		{{- end }}
		return res.SetBits(bits[:])
	}
}
// SetBigInt sets z to v (regular form) and returns z in Montgomery form
{{- if eq .IfaceName .ElementName}}
func (z *{{.ElementName}}) SetBigInt(v *big.Int) *{{.ElementName}} {
{{else}}
func (z *{{.ElementName}}) SetBigInt(v *big.Int) {{.IfaceName}} {
{{end}}
	z.SetZero()
	zero := big.NewInt(0)
	q := {{.ElementName}}Modulus()
	// fast path
	c := v.Cmp(q)
	if c == 0 {
		return z
	} else if c != 1 && v.Cmp(zero) != -1 {
		// v should
		vBits := v.Bits()
		for i := 0; i < len(vBits); i++ {
			z[i] = uint64(vBits[i])
		}
		return z.ToMont()
	}
	// copy input
	vv := new(big.Int).Set(v)
	vv.Mod(v, q)
	// v should
	vBits := vv.Bits()
	if bits.UintSize == 64 {
		for i := 0; i < len(vBits); i++ {
			z[i] = uint64(vBits[i])
		}
	} else {
		for i := 0; i < len(vBits); i++ {
			if i%2 == 0 {
				z[i/2] = uint64(vBits[i])
			} else {
				z[i/2] |= uint64(vBits[i]) << 32
			}
		}
	}
	return z.ToMont()
}
// SetString creates a big.Int with s (in base 10) and calls SetBigInt on z
{{- if eq .IfaceName .ElementName}}
func (z *{{.ElementName}}) SetString( s string) *{{.ElementName}} {
{{else}}
func (z *{{.ElementName}}) SetString( s string) {{.IfaceName}} {
{{end}}
	x, ok := new(big.Int).SetString(s, 10)
	if !ok {
		panic("{{.ElementName}}.SetString failed -> can't parse number in base10 into a big.Int")
	}
	return z.SetBigInt(x)
}
`
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package reader
import (
"time"
"github.com/Shopify/sarama"
"github.com/pingcap/errors"
"github.com/pingcap/log"
pb "github.com/pingcap/tidb/tidb-binlog/proto/go-binlog"
"go.uber.org/zap"
)
// KafkaSeeker seeks offset in kafka topics by given condition
type KafkaSeeker struct {
	consumer sarama.Consumer // used to read messages at candidate offsets
	client   sarama.Client   // used to query partition offset bounds
}
// NewKafkaSeeker creates an instance of KafkaSeeker connected to the given
// broker addresses.
func NewKafkaSeeker(addr []string, config *sarama.Config) (*KafkaSeeker, error) {
	client, err := sarama.NewClient(addr, config)
	if err != nil {
		return nil, errors.Trace(err)
	}
	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		// Fix: close the client we just created, otherwise its broker
		// connections leak when consumer construction fails.
		_ = client.Close()
		return nil, errors.Trace(err)
	}
	s := &KafkaSeeker{
		client:   client,
		consumer: consumer,
	}
	return s, nil
}
// Close releases resources of KafkaSeeker
func (ks *KafkaSeeker) Close() {
	// Shutdown errors are deliberately ignored; the consumer is closed
	// before the client it was built from.
	_ = ks.consumer.Close()
	_ = ks.client.Close()
}
// Seek seeks, for each partition of topic, the first offset whose binlog
// CommitTs is bigger than ts. An empty partitions slice means all partitions
// of the topic.
func (ks *KafkaSeeker) Seek(topic string, ts int64, partitions []int32) ([]int64, error) {
	if len(partitions) == 0 {
		all, err := ks.consumer.Partitions(topic)
		if err != nil {
			log.Error("get partitions from topic failed", zap.String("topic", topic), zap.Error(err))
			return nil, errors.Trace(err)
		}
		partitions = all
	}
	offsets, err := ks.seekOffsets(topic, partitions, ts)
	if err != nil {
		err = errors.Trace(err)
		log.Error("seek offsets failed", zap.Error(err))
		return nil, err
	}
	return offsets, nil
}
// getTSFromMSG extracts the binlog commit timestamp from a Kafka message payload.
func (ks *KafkaSeeker) getTSFromMSG(msg *sarama.ConsumerMessage) (int64, error) {
	binlog := new(pb.Binlog)
	if err := binlog.Unmarshal(msg.Value); err != nil {
		return 0, errors.Trace(err)
	}
	return binlog.CommitTs, nil
}
// seekOffsets returns all valid offsets in partitions
// by binary-searching each partition (via seekOffset) for the first offset
// whose commit timestamp exceeds pos.
func (ks *KafkaSeeker) seekOffsets(topic string, partitions []int32, pos int64) ([]int64, error) {
	// NOTE(review): results are stored at offsets[partition], which assumes
	// partition IDs are exactly 0..len(partitions)-1; a sparse or
	// non-contiguous partition list would index out of range — confirm.
	offsets := make([]int64, len(partitions))
	for _, partition := range partitions {
		// Bounds of the partition's log: oldest retained and next-to-write.
		start, err := ks.client.GetOffset(topic, partition, sarama.OffsetOldest)
		if err != nil {
			err = errors.Trace(err)
			return nil, err
		}
		end, err := ks.client.GetOffset(topic, partition, sarama.OffsetNewest)
		if err != nil {
			err = errors.Trace(err)
			return nil, err
		}
		log.Info("seek offsets in",
			zap.String("topic", topic),
			zap.Int32("partition", partition),
			zap.Int64("start", start),
			zap.Int64("end", end),
			zap.Int64("target ts", pos))
		// end-1 is the last existing message offset (OffsetNewest is one past it).
		offset, err := ks.seekOffset(topic, partition, start, end-1, pos)
		if err != nil {
			err = errors.Trace(err)
			return nil, err
		}
		log.Info("seek offset success", zap.Int64("offset", offset), zap.Int64("target ts", pos))
		offsets[partition] = offset
	}
	return offsets, nil
}
// seekOffset binary-searches [start, end] of one partition for the first
// offset whose message commit timestamp is strictly greater than ts.
// Messages are assumed to be ordered by commit timestamp within the partition.
func (ks *KafkaSeeker) seekOffset(topic string, partition int32, start int64, end int64, ts int64) (offset int64, err error) {
	startTS, err := ks.getTSAtOffset(topic, partition, start)
	if err != nil {
		err = errors.Trace(err)
		return
	}
	// Target precedes the oldest retained message: older binlogs may already
	// have been purged from the log, so warn and start from the beginning.
	if ts < startTS {
		log.Warn("given ts is smaller than oldest message's ts, some binlogs may lose", zap.Int64("given ts", ts), zap.Int64("oldest ts", startTS))
		offset = start
		return
	} else if ts == startTS {
		offset = start + 1
		return
	}
	// Invariant: messages at offsets < start have ts' <= ts; messages at
	// offsets > end have ts' > ts. Loop narrows until start == end.
	for start < end {
		mid := (end-start)/2 + start
		var midTS int64
		midTS, err = ks.getTSAtOffset(topic, partition, mid)
		if err != nil {
			err = errors.Trace(err)
			return
		}
		if midTS <= ts {
			start = mid + 1
		} else {
			end = mid
		}
	}
	// Re-read the converged offset to decide whether the answer is end or end+1.
	var endTS int64
	endTS, err = ks.getTSAtOffset(topic, partition, end)
	if err != nil {
		err = errors.Trace(err)
		return
	}
	if endTS <= ts {
		return end + 1, nil
	}
	return end, nil
}
// getTSAtOffset consumes exactly one message at the given offset and returns
// its binlog commit timestamp. Consumer errors are retried (with a 1s pause)
// up to 10 times; waiting longer than KafkaWaitTimeout (defined elsewhere in
// this package) for a message aborts with an error.
func (ks *KafkaSeeker) getTSAtOffset(topic string, partition int32, offset int64) (ts int64, err error) {
	log.Debug("start consumer on kafka",
		zap.String("topic", topic),
		zap.Int32("partition", partition),
		zap.Int64("offset", offset))
	pc, err := ks.consumer.ConsumePartition(topic, partition, offset)
	if err != nil {
		err = errors.Trace(err)
		return
	}
	defer func() { _ = pc.Close() }()
	errorCnt := 0
	for {
		select {
		case msg := <-pc.Messages():
			// First message received is the one at the requested offset.
			ts, err = ks.getTSFromMSG(msg)
			if err == nil {
				log.Debug("get ts at offset success",
					zap.String("topic", topic),
					zap.Int32("partition", partition),
					zap.Int64("ts", ts),
					zap.Int64("at offset", offset))
			}
			err = errors.Trace(err)
			return
		case msg := <-pc.Errors():
			// Transient consumer error: remember it, back off, and retry;
			// give up after 10 consecutive errors (returning the last err).
			err = msg.Err
			log.Error("get ts at offset failed",
				zap.String("topic", topic),
				zap.Int32("partition", partition),
				zap.Int64("ts", ts),
				zap.Int64("at offset", offset))
			time.Sleep(time.Second)
			errorCnt++
			if errorCnt > 10 {
				return
			}
		case <-time.After(KafkaWaitTimeout):
			return 0, errors.Errorf("timeout to consume from kafka, topic:%s, partition:%d, offset:%d", topic, partition, offset)
		}
	}
}
|
package main
import (
"fmt"
smartling "github.com/Smartling/api-sdk-go"
"github.com/reconquest/hierr-go"
)
// doFilesDelete deletes one or more files from the remote Smartling project.
// The <uri> argument is either "-" (read the file list from stdin) or a
// pattern matched against files on the remote server.
func doFilesDelete(
	client *smartling.Client,
	config Config,
	args map[string]interface{},
) error {
	project := config.ProjectID
	uri := args["<uri>"].(string)

	var (
		files []smartling.File
		err   error
	)
	if uri == "-" {
		files, err = readFilesFromStdin()
	} else {
		files, err = globFilesRemote(client, project, uri)
	}
	if err != nil {
		return err
	}

	if len(files) == 0 {
		return NewError(
			fmt.Errorf("no files match specified pattern"),
			`Check files list on remote server and your pattern according `+
				`to help.`,
		)
	}

	for _, file := range files {
		if err := client.DeleteFile(project, file.FileURI); err != nil {
			return hierr.Errorf(
				err,
				`unable to delete file "%s"`,
				file.FileURI,
			)
		}
		fmt.Printf("%s deleted\n", file.FileURI)
	}
	return nil
}
|
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
package wasmproc
import (
"encoding/binary"
"fmt"
"github.com/iotaledger/wasp/packages/kv"
"github.com/iotaledger/wasp/packages/kv/codec"
"github.com/iotaledger/wasp/packages/kv/dict"
"github.com/iotaledger/wasp/packages/vm/wasmhost"
"strings"
)
// ObjFactory creates a new, not-yet-initialized host object.
type ObjFactory func() WaspObject

// ObjFactories maps a key id to the factory that builds the object stored
// under that key.
type ObjFactories map[int32]ObjFactory

// WaspObject extends wasmhost.HostObject with the tree bookkeeping used by
// the wasm processor: initialization, lazy child creation, and key-path
// construction.
type WaspObject interface {
	wasmhost.HostObject
	// InitObj wires the object into the tree under owner at keyId.
	InitObj(id int32, keyId int32, owner *ScDict)
	// Panic aborts with a formatted, object-name-prefixed message.
	Panic(format string, args ...interface{})
	// FindOrMakeObjectId returns the child object id, creating it via factory when missing.
	FindOrMakeObjectId(keyId int32, factory ObjFactory) int32
	// NestedKey returns the dotted path from the root down to this object.
	NestedKey() string
	// Suffix returns the path fragment contributed by keyId within this object.
	Suffix(keyId int32) string
}
// GetArrayObjectId returns the object id of the array element at index,
// creating it through factory on first access. It panics (via the array
// object) when the index does not exist or the element type mismatches.
func GetArrayObjectId(arrayObj WaspObject, index int32, typeId int32, factory ObjFactory) int32 {
	switch {
	case !arrayObj.Exists(index, typeId):
		arrayObj.Panic("GetArrayObjectId: Invalid index")
	case typeId != arrayObj.GetTypeId(index):
		arrayObj.Panic("GetArrayObjectId: Invalid type")
	}
	return arrayObj.FindOrMakeObjectId(index, factory)
}
// GetMapObjectId returns the object id of the sub-object stored under keyId,
// creating it through the matching factory on first access. It panics (via
// the map object) when no factory is registered for the key or the type
// mismatches.
func GetMapObjectId(mapObj WaspObject, keyId int32, typeId int32, factories ObjFactories) int32 {
	factory, found := factories[keyId]
	switch {
	case !found:
		mapObj.Panic("GetMapObjectId: Invalid key")
	case typeId != mapObj.GetTypeId(keyId):
		mapObj.Panic("GetMapObjectId: Invalid type")
	}
	return mapObj.FindOrMakeObjectId(keyId, factory)
}
// \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\
// ScDict is the generic host-side object: a (possibly nested) dictionary or
// array view over a kv store, tracked by id inside the KvStoreHost.
type ScDict struct {
	host      *wasmhost.KvStoreHost // object registry and tracing host
	id        int32                 // id of this object within host
	isMutable bool                  // reserved; mutability is not enforced yet (see SetBytes TODO)
	isRoot    bool                  // true when this object owns its own kvStore
	keyId     int32                 // key under which this object lives in its owner
	kvStore   kv.KVStore            // backing store (own, or inherited from owner)
	length    int32                 // element count when this object is an array
	name      string                // dotted path name, used for tracing and panics
	objects   map[int32]int32       // keyId -> tracked sub-object id
	ownerId   int32                 // id of the parent object
	typeId    int32                 // type flags of this object as recorded by its owner
	types     map[int32]int32       // keyId -> registered value type (map objects only)
}
// NewScDict creates a root ScDict for a wasm processor, backed by a fresh
// in-memory dict.
func NewScDict(vm *wasmProcessor) *ScDict {
	return NewScDictFromKvStore(&vm.KvStoreHost, dict.New())
}

// NewScDictFromKvStore creates a root ScDict on top of an existing kv store.
func NewScDictFromKvStore(host *wasmhost.KvStoreHost, kvStore kv.KVStore) *ScDict {
	o := &ScDict{}
	o.host = host
	o.kvStore = kvStore
	return o
}
// NewNullObject returns a sentinel object named "null" that stands in for
// missing objects; it is marked as root so it never inherits an owner's store.
func NewNullObject(host *wasmhost.KvStoreHost) WaspObject {
	o := &ScSandboxObject{}
	o.host = host
	o.name = "null"
	o.isRoot = true
	return o
}
// InitObj wires a freshly created object into the object tree: records its
// id, key and owner, inherits the owner's host and kv store, derives its
// dotted name, and, for array objects, loads the persisted element count.
func (o *ScDict) InitObj(id int32, keyId int32, owner *ScDict) {
	o.id = id
	o.keyId = keyId
	o.ownerId = owner.id
	o.host = owner.host
	// An object constructed with its own kv store (NewScDictFromKvStore) is a
	// root; everything else shares the owner's store.
	o.isRoot = o.kvStore != nil
	if !o.isRoot {
		o.kvStore = owner.kvStore
	}
	ownerObj := o.Owner()
	o.typeId = ownerObj.GetTypeId(keyId)
	o.name = owner.name + ownerObj.Suffix(keyId)
	if o.ownerId == 1 {
		// owner id 1 appears to be the root object — its prefix is stripped
		// so stored keys stay relative (TODO confirm id 1 is always root)
		if strings.HasPrefix(o.name, "root.") {
			// strip off "root." prefix
			o.name = o.name[len("root."):]
		}
		if strings.HasPrefix(o.name, ".") {
			// strip off "." prefix
			o.name = o.name[1:]
		}
	}
	if (o.typeId&wasmhost.OBJTYPE_ARRAY) != 0 && o.kvStore != nil {
		// arrays persist their length under their own nested key
		key := o.NestedKey()[1:]
		length, _, err := codec.DecodeInt64(o.kvStore.MustGet(kv.Key(key)))
		if err != nil {
			o.Panic("InitObj: %v", err)
		}
		o.length = int32(length)
	}
	o.Trace("InitObj %s", o.name)
	o.objects = make(map[int32]int32)
	o.types = make(map[int32]int32)
}
// Exists reports whether keyId holds a value of the given type in this
// object. The pseudo-key KeyLength always exists on arrays; for arrays of
// maps existence is determined by the tracked sub-objects, otherwise the kv
// store is consulted.
func (o *ScDict) Exists(keyId int32, typeId int32) bool {
	if keyId == wasmhost.KeyLength && (o.typeId&wasmhost.OBJTYPE_ARRAY) != 0 {
		return true
	}
	if o.typeId == (wasmhost.OBJTYPE_ARRAY | wasmhost.OBJTYPE_MAP) {
		// NOTE(review): index == len(o.objects) is reported as existing,
		// presumably to allow appending — confirm this is intended.
		return uint32(keyId) <= uint32(len(o.objects))
	}
	return o.kvStore.MustHas(o.key(keyId, typeId))
}
// FindOrMakeObjectId returns the tracked object id for keyId; when no object
// exists yet, one is built with factory, registered with the host, wired
// into the tree and remembered.
func (o *ScDict) FindOrMakeObjectId(keyId int32, factory ObjFactory) int32 {
	if existing, ok := o.objects[keyId]; ok {
		return existing
	}
	child := factory()
	childId := o.host.TrackObject(child)
	child.InitObj(childId, keyId, o)
	o.objects[keyId] = childId
	return childId
}
// GetBytes returns the raw bytes stored under keyId. For arrays the
// pseudo-key KeyLength yields the encoded element count instead.
func (o *ScDict) GetBytes(keyId int32, typeId int32) []byte {
	if keyId == wasmhost.KeyLength && (o.typeId&wasmhost.OBJTYPE_ARRAY) != 0 {
		return o.Int64Bytes(int64(o.length))
	}
	return o.kvStore.MustGet(o.key(keyId, typeId))
}
// GetObjectId returns the id of the sub-object stored under keyId; typeId
// must denote a map or an array. The sub-object is created lazily as a new
// ScDict on first access.
func (o *ScDict) GetObjectId(keyId int32, typeId int32) int32 {
	o.validate(keyId, typeId)
	if (typeId&wasmhost.OBJTYPE_ARRAY) == 0 && typeId != wasmhost.OBJTYPE_MAP {
		o.Panic("GetObjectId: Invalid type")
	}
	return GetMapObjectId(o, keyId, typeId, ObjFactories{
		keyId: func() WaspObject { return &ScDict{} },
	})
}
// GetTypeId returns the value type registered for keyId. For arrays every
// element shares the element type (the array flag stripped off); for maps
// the type registered on first use is returned, or 0 when the key has never
// been seen.
func (o *ScDict) GetTypeId(keyId int32) int32 {
	if (o.typeId & wasmhost.OBJTYPE_ARRAY) != 0 {
		return o.typeId &^ wasmhost.OBJTYPE_ARRAY
	}
	//TODO incomplete, currently only contains used field types
	// A missing key yields the map zero value 0, exactly as the previous
	// explicit comma-ok check did.
	return o.types[keyId]
}
// Int64Bytes encodes value as exactly 8 little-endian bytes.
func (o *ScDict) Int64Bytes(value int64) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, uint64(value))
	return buf
}
// key validates the access and maps (keyId, typeId) to the flat kv-store key
// for this object: its nested dotted path without the leading dot.
func (o *ScDict) key(keyId int32, typeId int32) kv.Key {
	o.validate(keyId, typeId)
	suffix := o.Suffix(keyId)
	key := o.NestedKey() + suffix
	o.Trace("fld: %s%s", o.name, suffix)
	o.Trace("key: %s", key[1:])
	// NestedKey()+Suffix() always starts with "."; drop it for storage.
	return kv.Key(key[1:])
}
// MustInt64 decodes an 8-byte little-endian value, panicking on any other length.
func (o *ScDict) MustInt64(bytes []byte) int64 {
	if len(bytes) == 8 {
		return int64(binary.LittleEndian.Uint64(bytes))
	}
	o.Panic("invalid int64 length")
	return 0 // unreachable: Panic never returns
}
// NestedKey builds the dotted key path from the root object down to this
// object; the root itself contributes an empty prefix.
func (o *ScDict) NestedKey() string {
	if o.isRoot {
		return ""
	}
	owner := o.Owner()
	return owner.NestedKey() + owner.Suffix(o.keyId)
}
// Owner returns the parent object this object was created under.
func (o *ScDict) Owner() WaspObject {
	return o.host.FindObject(o.ownerId).(WaspObject)
}

// Panic traces and then panics with the object's name prefixed to the
// formatted message.
func (o *ScDict) Panic(format string, args ...interface{}) {
	err := o.name + "." + fmt.Sprintf(format, args...)
	o.Trace(err)
	panic(err)
}
// SetBytes stores raw bytes under keyId. Writing to the pseudo-key KeyLength
// clears the object instead: the kv store is replaced with a fresh dict,
// tracked sub-objects are dropped and the array length is reset to zero.
func (o *ScDict) SetBytes(keyId int32, typeId int32, bytes []byte) {
	//TODO
	//if !o.isMutable {
	//	o.Panic("validate: Immutable field: %s key %d", o.name, keyId)
	//}
	if keyId == wasmhost.KeyLength {
		if o.kvStore != nil {
			//TODO this goes wrong for state, should clear map tree instead
			o.kvStore = dict.New()
			//if (o.typeId & wasmhost.OBJTYPE_ARRAY) != 0 {
			//	key := o.NestedKey()[1:]
			//	o.kvStore.Del(kv.Key(key))
			//}
		}
		o.objects = make(map[int32]int32)
		o.length = 0
		return
	}
	o.kvStore.Set(o.key(keyId, typeId), bytes)
}
// Suffix returns the key-path fragment contributed by keyId within this
// object: ".<index>" for array elements, ".<key name>" otherwise.
func (o *ScDict) Suffix(keyId int32) string {
	if (o.typeId & wasmhost.OBJTYPE_ARRAY) != 0 {
		return fmt.Sprintf(".%d", keyId)
	}
	// Both arms of the former (keyId & wasmhost.KeyFromString) branch
	// returned the identical string, so that dead branch is folded away.
	return "." + string(o.host.GetKeyFromId(keyId))
}
// Trace forwards a formatted trace message to the host.
func (o *ScDict) Trace(format string, a ...interface{}) {
	o.host.Trace(format, a...)
}
// validate guards every kv access: it checks that a kv store is present,
// that array accesses are type-consistent and in range (an access exactly at
// the current length grows the array by one and persists the new length),
// and that map keys keep the same type across uses. typeId -1 skips checks.
func (o *ScDict) validate(keyId int32, typeId int32) {
	if o.kvStore == nil {
		o.Panic("validate: Missing kvstore")
	}
	if typeId == -1 {
		// wildcard access, nothing further to verify
		return
	}
	if (o.typeId & wasmhost.OBJTYPE_ARRAY) != 0 {
		// actually array
		arrayTypeId := o.typeId &^ wasmhost.OBJTYPE_ARRAY
		if typeId == wasmhost.OBJTYPE_BYTES {
			// raw byte access is allowed only for these value types
			switch arrayTypeId {
			case wasmhost.OBJTYPE_ADDRESS:
			case wasmhost.OBJTYPE_AGENT_ID:
			case wasmhost.OBJTYPE_BYTES:
			case wasmhost.OBJTYPE_COLOR:
			case wasmhost.OBJTYPE_HASH:
			default:
				o.Panic("validate: Invalid byte type")
			}
		} else if arrayTypeId != typeId {
			o.Panic("validate: Invalid type")
		}
		if /*o.isMutable && */ keyId == o.length {
			// access one past the end appends: grow and persist the length
			o.length++
			if o.kvStore != nil {
				key := o.NestedKey()[1:]
				o.kvStore.Set(kv.Key(key), codec.EncodeInt64(int64(o.length)))
			}
			return
		}
		if keyId < 0 || keyId >= o.length {
			o.Panic("validate: Invalid index")
		}
		return
	}
	fieldType, ok := o.types[keyId]
	if !ok {
		// first encounter of this key id, register type to make
		// sure that future usages are all using that same type
		o.types[keyId] = typeId
		return
	}
	if fieldType != typeId {
		o.Panic("validate: Invalid access")
	}
}
|
package raw_client
import (
"context"
)
// PutRecordRequestRecord is a single field value inside a record update.
type PutRecordRequestRecord struct {
	Value string `json:"value"`
}

// PutRecordRequest is the JSON payload for PUT /k/v1/record.json.
type PutRecordRequest struct {
	// App is the target application identifier.
	App string `json:"app"`
	// Id is the identifier of the record to update.
	Id string `json:"id"`
	// Record maps field codes to their new values; omitted when empty.
	Record map[string]PutRecordRequestRecord `json:"record,omitempty"`
}

// PutRecordResponse carries the record revision after a successful update.
type PutRecordResponse struct {
	Revision string `json:"revision"`
}
// PutRecord updates an existing record via PUT /k/v1/record.json and returns
// the resulting record revision.
func PutRecord(ctx context.Context, apiClient *ApiClient, req PutRecordRequest) (*PutRecordResponse, error) {
	var resp PutRecordResponse
	request := ApiRequest{
		Method: "PUT",
		Scheme: "https",
		Path:   "/k/v1/record.json",
		Json:   req,
	}
	if err := apiClient.Call(ctx, request, &resp); err != nil {
		return nil, err
	}
	return &resp, nil
}
|
package LongSteps
// DoNothing is the no-op statement: it evaluates to its embedded operand
// without touching the environment.
type DoNothing struct {
	Operand
}

// Evaluate yields the wrapped operand and the environment unchanged.
func (d DoNothing) Evaluate(env Environment) (Operand, Environment) {
	return d.Operand, env
}

// Int delegates the integer conversion to the wrapped operand.
func (d DoNothing) Int(env Environment) int {
	return d.Operand.Int(env)
}

// Bool delegates the boolean conversion to the wrapped operand.
func (d DoNothing) Bool(env Environment) bool {
	return d.Operand.Bool(env)
}
// If selects between two operands based on a boolean condition.
type If struct {
	condition   Operand
	consequence Operand
	alternative Operand
}

// Evaluate checks the condition, then evaluates exactly one branch.
func (i If) Evaluate(env Environment) (Operand, Environment) {
	branch := i.alternative
	if i.condition.Bool(env) {
		branch = i.consequence
	}
	return branch.Evaluate(env)
}

// Int evaluates the conditional and converts the result to an int.
func (i If) Int(env Environment) int {
	result, _ := i.Evaluate(env)
	return result.Int(env)
}

// Bool evaluates the conditional and converts the result to a bool.
func (i If) Bool(env Environment) bool {
	result, _ := i.Evaluate(env)
	return result.Bool(env)
}
// Sequence evaluates left for its environment effects, then right.
type Sequence struct {
	left  Operand
	right Operand
}

// Evaluate threads the environment produced by left into right.
func (s Sequence) Evaluate(env Environment) (Operand, Environment) {
	_, nextEnv := s.left.Evaluate(env)
	return s.right.Evaluate(nextEnv)
}

// Int evaluates the sequence and converts the result to an int.
func (s Sequence) Int(env Environment) int {
	result, _ := s.Evaluate(env)
	return result.Int(env)
}

// Bool evaluates the sequence and converts the result to a bool.
func (s Sequence) Bool(env Environment) bool {
	result, _ := s.Evaluate(env)
	return result.Bool(env)
}
// While repeatedly evaluates body while condition holds, threading the
// environment through each iteration via tail recursion.
type While struct {
	condition Operand
	body      Operand
}

// Evaluate loops by recursing on the environment the body produced.
// NOTE(review): when the condition is false this returns the *unevaluated*
// body operand rather than a no-op value — confirm callers expect that.
func (w While) Evaluate(environment Environment) (Operand, Environment) {
	if w.condition.Bool(environment) {
		_, newEnvironment := w.body.Evaluate(environment)
		return w.Evaluate(newEnvironment)
	} else {
		return w.body, environment
	}
}

// Int evaluates the loop and converts the result to an int.
func (w While) Int(environment Environment) int {
	evaluated, _ := w.Evaluate(environment)
	return evaluated.Int(environment)
}

// Bool evaluates the loop and converts the result to a bool.
func (w While) Bool(environment Environment) bool {
	evaluated, _ := w.Evaluate(environment)
	return evaluated.Bool(environment)
}
|
// Copyright 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rust
import (
"android/soong/android"
)
// init registers the rust_prebuilt_dylib module type with the build system.
func init() {
	android.RegisterModuleType("rust_prebuilt_dylib", PrebuiltDylibFactory)
}
// PrebuiltProperties holds the user-settable properties of a prebuilt rust
// dylib module.
type PrebuiltProperties struct {
	// path to the prebuilt file
	Srcs []string `android:"path,arch_variant"`
}

// prebuiltLibraryDecorator adapts the regular library decorator so that
// "compilation" simply forwards the prebuilt artifact.
type prebuiltLibraryDecorator struct {
	*libraryDecorator
	Properties PrebuiltProperties
}

// compile-time check that the decorator satisfies the compiler interface
var _ compiler = (*prebuiltLibraryDecorator)(nil)
// PrebuiltDylibFactory returns the module factory for rust_prebuilt_dylib.
func PrebuiltDylibFactory() android.Module {
	module, _ := NewPrebuiltDylib(android.HostAndDeviceSupported)
	return module.Init()
}

// NewPrebuiltDylib builds a rust library module configured as a dylib-only
// prebuilt with no stdlib dependencies, installing the prebuilt decorator as
// its compiler.
func NewPrebuiltDylib(hod android.HostOrDeviceSupported) (*Module, *prebuiltLibraryDecorator) {
	module, library := NewRustLibrary(hod)
	library.BuildOnlyDylib()
	library.setNoStdlibs()
	library.setDylib()
	prebuilt := &prebuiltLibraryDecorator{
		libraryDecorator: library,
	}
	module.compiler = prebuilt
	// NOTE(review): this registers library.Properties while compilerProps
	// separately exposes prebuilt.Properties — confirm both are intended.
	module.AddProperties(&library.Properties)
	return module, prebuilt
}
// compilerProps exposes the base compiler properties plus the prebuilt's own.
func (prebuilt *prebuiltLibraryDecorator) compilerProps() []interface{} {
	return append(prebuilt.baseCompiler.compilerProps(),
		&prebuilt.Properties)
}

// compile performs no compilation: the single source listed in Srcs is used
// directly as the (unstripped) output artifact.
func (prebuilt *prebuiltLibraryDecorator) compile(ctx ModuleContext, flags Flags, deps PathDeps) android.Path {
	srcPath := srcPathFromModuleSrcs(ctx, prebuilt.Properties.Srcs)
	prebuilt.unstrippedOutputFile = srcPath
	return srcPath
}
// compilerDeps returns the base compiler dependencies unchanged; prebuilts
// contribute no additional deps of their own.
func (prebuilt *prebuiltLibraryDecorator) compilerDeps(ctx DepsContext, deps Deps) Deps {
	return prebuilt.baseCompiler.compilerDeps(ctx, deps)
}
|
package blog
import "github.com/jinzhu/gorm"
type (
	// Post is a blog post persisted through gorm; From and Message are
	// both required (NOT NULL) columns.
	Post struct {
		gorm.Model
		// The original tags (`gorm: NOT NULL"`) were not valid Go struct-tag
		// syntax, so gorm silently ignored them; fixed to the documented form.
		From    string `json:"from" gorm:"not null"`
		Message string `json:"message" gorm:"not null"`
	}
)
|
package main
import (
"fmt"
"io/ioutil"
"net/http"
)
// sayHello serves the contents of ./hello.txt.
// A read failure used to be silently swallowed, serving an empty 200
// response; it now yields a 500 so the problem is visible to the client.
func sayHello(w http.ResponseWriter, r *http.Request) {
	b, err := ioutil.ReadFile("./hello.txt")
	if err != nil {
		http.Error(w, "failed to read hello.txt", http.StatusInternalServerError)
		return
	}
	_, _ = fmt.Fprintln(w, string(b))
}
// main registers the /hello handler and blocks serving on :9090; a serve
// failure is reported on stdout before the program exits.
func main() {
	http.HandleFunc("/hello", sayHello)
	if err := http.ListenAndServe(":9090", nil); err != nil {
		fmt.Printf("http serve failed, err:%v\n", err)
	}
}
|
package get
import (
"encoding/json"
"fmt"
"reflect"
"testing"
)
// TestGet_getArgs checks query-string parsing for simple request paths.
func TestGet_getArgs(t *testing.T) {
	cases := []struct {
		name  string
		input string
		want  map[string]string
	}{
		{name: "tes1", input: "/login?ph=1", want: map[string]string{"ph": "1"}},
		{name: "tes2", input: "/danmu/v3/?id=15666&max=1000", want: map[string]string{"id": "15666", "max": "1000"}},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			got := getArgs(tc.input)
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("getArgs() = %v, want %v", got, tc.want)
			}
		})
	}
}
// Test_scanStruct checks that scanStruct fills struct fields from a string
// map, honoring json tags where present.
func Test_scanStruct(t *testing.T) {
	type Test1 struct {
		Name string
		ID   int
		Sex  float64 `json:"s"`
	}
	input := map[string]string{"name": "1", "id": "19", "s": "1.3"}
	want := &Test1{"1", 19, 1.3}
	t.Run("test1", func(t *testing.T) {
		target := &Test1{}
		scanStruct(target, input)
		if !reflect.DeepEqual(target, want) {
			t.Errorf("getArgs() = %v, want %v", target, want)
		}
	})
}
// Test_scanStruct2 checks that scanStruct2 parses query parameters out of a
// full URL directly into a json-tagged struct.
func Test_scanStruct2(t *testing.T) {
	type Input1 struct {
		Id  int `json:"id"`
		Max int `json:"max"`
	}
	const input = "http://127.0.0.1:3000/danmu/v3/?id=1&max=1000"
	got := &Input1{}
	scanStruct2(got, input)
	want := &Input1{Id: 1, Max: 1000}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("got %v, want %v", got, want)
	}
}
// Test_Chan verifies that a failing json.Unmarshal does not clobber values
// previously filled in by scanStruct.
// (Fixes: *testing.T was named `a` while a local struct var shadowed the
// conventional `t`; the Unmarshal error was also silently ignored.)
func Test_Chan(t *testing.T) {
	type Test1 struct {
		Name string
		ID   int
		Sex  float64 `json:"s"`
	}
	got := &Test1{}
	args := map[string]string{"name": "1", "id": "19", "s": "1.3"}
	scanStruct(got, args)
	// "nil" is not valid JSON, so the unmarshal is expected to fail and
	// must leave the struct untouched.
	if err := json.Unmarshal([]byte("nil"), got); err == nil {
		t.Errorf("expected json.Unmarshal to fail on invalid input")
	}
	fmt.Println(got)
}
// Test_getArgs checks query parsing against a full absolute URL.
func Test_getArgs(t *testing.T) {
	cases := []struct {
		name  string
		input string
		want  map[string]string
	}{
		{name: "test1", input: "http://127.0.0.1:3000/danmu/v3/?id=1&max=1000", want: map[string]string{"id": "1", "max": "1000"}},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if got := getArgs(tc.input); !reflect.DeepEqual(got, tc.want) {
				t.Errorf("getArgs() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
package main
import "fmt"
/*
get this code working using a buffered channel
*/
/*BEFORE
func main() {
c := make(chan int)
c <- 42
fmt.Println(<-c)
}
*/
//AFTER
// main demonstrates a buffered channel: three sends complete without any
// waiting receiver, then the values are drained in FIFO order.
func main() {
	c := make(chan int, 3)
	for _, v := range []int{42, 43, 44} {
		c <- v
	}
	for i := 0; i < 3; i++ {
		fmt.Println(<-c)
	}
}
/*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package cloudprovider
import (
"sync"
proto "github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/api/clustermanager"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/store"
)
var (
	// lock guards every registry below as well as storage
	lock sync.RWMutex
	// once ensures the registries are allocated a single time (see init)
	once sync.Once
	// per-provider implementation registries, keyed by provider name
	clusterMgrs       map[string]ClusterManager
	cloudInfoMgrs     map[string]CloudInfoManager
	cloudValidateMgrs map[string]CloudValidateManager
	nodeGroupMgrs     map[string]NodeGroupManager
	nodeMgrs          map[string]NodeManager
	taskMgrs          map[string]TaskManager
	vpcMgrs           map[string]VPCManager
	// storage is the shared cluster manager persistence layer
	storage store.ClusterManagerModel
)
// init allocates all provider registries before any Init*/Get* call runs.
// NOTE(review): init itself executes exactly once per process, so the
// once.Do wrapper is redundant unless `once` is reused elsewhere — confirm.
func init() {
	once.Do(func() {
		clusterMgrs = make(map[string]ClusterManager)
		nodeGroupMgrs = make(map[string]NodeGroupManager)
		nodeMgrs = make(map[string]NodeManager)
		taskMgrs = make(map[string]TaskManager)
		cloudInfoMgrs = make(map[string]CloudInfoManager)
		cloudValidateMgrs = make(map[string]CloudValidateManager)
		vpcMgrs = make(map[string]VPCManager)
	})
}
// InitStorageModel for cluster manager storage tools; takes the write lock
// before replacing the shared storage model.
func InitStorageModel(model store.ClusterManagerModel) {
	lock.Lock()
	defer lock.Unlock()
	storage = model
}
// GetStorageModel for cluster manager storage tools.
// Takes the read lock: InitStorageModel writes `storage` under the write
// lock, so reading it unlocked was a data race.
func GetStorageModel() store.ClusterManagerModel {
	lock.RLock()
	defer lock.RUnlock()
	return storage
}
// InitTaskManager registers the TaskManager implementation for a provider.
func InitTaskManager(provider string, t TaskManager) {
	lock.Lock()
	defer lock.Unlock()
	taskMgrs[provider] = t
}
// GetTaskManager returns the TaskManager registered for the provider, or
// ErrCloudNoProvider when none is registered.
// (The previous comment was a copy-paste from the nodegroup functions.)
func GetTaskManager(provider string) (TaskManager, error) {
	lock.RLock()
	defer lock.RUnlock()
	mgr, ok := taskMgrs[provider]
	if !ok {
		return nil, ErrCloudNoProvider
	}
	return mgr, nil
}
// GetAllTaskManager returns every registered TaskManager (iteration order is
// unspecified). The result slice is pre-sized to avoid repeated growth.
func GetAllTaskManager() []TaskManager {
	lock.RLock()
	defer lock.RUnlock()
	mgrs := make([]TaskManager, 0, len(taskMgrs))
	for _, mgr := range taskMgrs {
		mgrs = append(mgrs, mgr)
	}
	return mgrs
}
// InitClusterManager registers the ClusterManager implementation for a provider.
func InitClusterManager(provider string, cls ClusterManager) {
	lock.Lock()
	defer lock.Unlock()
	clusterMgrs[provider] = cls
}

// InitNodeGroupManager registers the NodeGroupManager implementation for a provider.
func InitNodeGroupManager(provider string, group NodeGroupManager) {
	lock.Lock()
	defer lock.Unlock()
	nodeGroupMgrs[provider] = group
}

// InitNodeManager registers the NodeManager implementation for a provider.
// (The previous comment wrongly said "nodegroup manager".)
func InitNodeManager(provider string, nodeMgr NodeManager) {
	lock.Lock()
	defer lock.Unlock()
	nodeMgrs[provider] = nodeMgr
}

// InitCloudInfoManager registers the CloudInfoManager implementation for a provider.
func InitCloudInfoManager(provider string, cloudInfoMgr CloudInfoManager) {
	lock.Lock()
	defer lock.Unlock()
	cloudInfoMgrs[provider] = cloudInfoMgr
}

// InitCloudValidateManager registers the CloudValidateManager implementation for a provider.
func InitCloudValidateManager(provider string, cloudValidateMgr CloudValidateManager) {
	lock.Lock()
	defer lock.Unlock()
	cloudValidateMgrs[provider] = cloudValidateMgr
}

// InitVPCManager registers the VPCManager implementation for a provider.
func InitVPCManager(provider string, vpcMgr VPCManager) {
	lock.Lock()
	defer lock.Unlock()
	vpcMgrs[provider] = vpcMgr
}
// GetClusterMgr returns the ClusterManager registered for the provider, or
// ErrCloudNoProvider when none is registered.
func GetClusterMgr(provider string) (ClusterManager, error) {
	lock.RLock()
	defer lock.RUnlock()
	cls, ok := clusterMgrs[provider]
	if !ok {
		return nil, ErrCloudNoProvider
	}
	return cls, nil
}

// GetNodeGroupMgr returns the NodeGroupManager registered for the provider,
// or ErrCloudNoProvider when none is registered.
func GetNodeGroupMgr(provider string) (NodeGroupManager, error) {
	lock.RLock()
	defer lock.RUnlock()
	group, ok := nodeGroupMgrs[provider]
	if !ok {
		return nil, ErrCloudNoProvider
	}
	return group, nil
}

// GetNodeMgr returns the NodeManager registered for the provider, or
// ErrCloudNoProvider when none is registered.
func GetNodeMgr(provider string) (NodeManager, error) {
	lock.RLock()
	defer lock.RUnlock()
	nodeMgr, ok := nodeMgrs[provider]
	if !ok {
		return nil, ErrCloudNoProvider
	}
	return nodeMgr, nil
}

// GetCloudInfoMgr returns the CloudInfoManager registered for the provider,
// or ErrCloudNoProvider when none is registered.
func GetCloudInfoMgr(provider string) (CloudInfoManager, error) {
	lock.RLock()
	defer lock.RUnlock()
	cloudInfo, ok := cloudInfoMgrs[provider]
	if !ok {
		return nil, ErrCloudNoProvider
	}
	return cloudInfo, nil
}

// GetCloudValidateMgr returns the CloudValidateManager registered for the
// provider, or ErrCloudNoProvider when none is registered.
func GetCloudValidateMgr(provider string) (CloudValidateManager, error) {
	lock.RLock()
	defer lock.RUnlock()
	cloudValidate, ok := cloudValidateMgrs[provider]
	if !ok {
		return nil, ErrCloudNoProvider
	}
	return cloudValidate, nil
}

// GetVPCMgr returns the VPCManager registered for the provider, or
// ErrCloudNoProvider when none is registered.
func GetVPCMgr(provider string) (VPCManager, error) {
	lock.RLock()
	defer lock.RUnlock()
	vpcmgr, ok := vpcMgrs[provider]
	if !ok {
		return nil, ErrCloudNoProvider
	}
	return vpcmgr, nil
}
// CloudInfoManager cloud interface for basic config info (region or no region)
type CloudInfoManager interface {
	// InitCloudClusterDefaultInfo init cloud cluster default configInfo
	InitCloudClusterDefaultInfo(cls *proto.Cluster, opt *InitClusterConfigOption) error
	// SyncClusterCloudInfo sync cluster metadata
	SyncClusterCloudInfo(cls *proto.Cluster, opt *SyncClusterCloudInfoOption) error
}
// NodeManager cloud interface for cvm management
type NodeManager interface {
	// GetNodeByIP get specified Node by innerIP address
	GetNodeByIP(ip string, opt *GetNodeOption) (*proto.Node, error)
	// ListNodesByIP list nodes by IP set
	ListNodesByIP(ips []string, opt *ListNodesOption) ([]*proto.Node, error)
	// GetExternalNodeByIP get specified external Node by innerIP address
	GetExternalNodeByIP(ip string, opt *GetNodeOption) (*proto.Node, error)
	// ListExternalNodesByIP list external nodes by IP set
	ListExternalNodesByIP(ips []string, opt *ListNodesOption) ([]*proto.Node, error)
	// GetCVMImageIDByImageName get imageID by imageName
	GetCVMImageIDByImageName(imageName string, opt *CommonOption) (string, error)
	// GetCloudRegions get cloud regions
	GetCloudRegions(opt *CommonOption) ([]*proto.RegionInfo, error)
	// GetZoneList get zoneList by region
	GetZoneList(opt *CommonOption) ([]*proto.ZoneInfo, error)
	// ListNodeInstanceType get node instance type list
	ListNodeInstanceType(info InstanceInfo, opt *CommonOption) ([]*proto.InstanceType, error)
	// ListOsImage get OS image list
	ListOsImage(provider string, opt *CommonOption) ([]*proto.OsImage, error)
	// ListKeyPairs list ssh keyPairs
	ListKeyPairs(opt *CommonOption) ([]*proto.KeyPair, error)
}
// CloudValidateManager validate interface for checking cloud resourceInfo
type CloudValidateManager interface {
	// ImportClusterValidate import cluster validate
	ImportClusterValidate(req *proto.ImportClusterReq, opt *CommonOption) error
	// AddNodesToClusterValidate validate adding nodes to a cluster
	AddNodesToClusterValidate(req *proto.AddNodesRequest, opt *CommonOption) error
	// DeleteNodesFromClusterValidate validate deleting nodes from a cluster
	DeleteNodesFromClusterValidate(req *proto.DeleteNodesRequest, opt *CommonOption) error
	// ImportCloudAccountValidate import cloud account validate
	ImportCloudAccountValidate(account *proto.Account) error
	// GetCloudRegionZonesValidate get cloud region zones validate
	GetCloudRegionZonesValidate(req *proto.GetCloudRegionZonesRequest, account *proto.Account) error
	// ListCloudRegionClusterValidate list cloud region clusters validate
	ListCloudRegionClusterValidate(req *proto.ListCloudRegionClusterRequest, account *proto.Account) error
	// ListCloudSubnetsValidate list subnets validate
	ListCloudSubnetsValidate(req *proto.ListCloudSubnetsRequest, account *proto.Account) error
	// ListSecurityGroupsValidate list SecurityGroups validate
	ListSecurityGroupsValidate(req *proto.ListCloudSecurityGroupsRequest, account *proto.Account) error
	// ListKeyPairsValidate list key pairs validate
	ListKeyPairsValidate(req *proto.ListKeyPairsRequest, account *proto.Account) error
	// ListInstanceTypeValidate list instance type validate
	ListInstanceTypeValidate(req *proto.ListCloudInstanceTypeRequest, account *proto.Account) error
	// ListCloudOsImageValidate list tke image os validate
	ListCloudOsImageValidate(req *proto.ListCloudOsImageRequest, account *proto.Account) error
	// CreateNodeGroupValidate create node group validate
	CreateNodeGroupValidate(req *proto.CreateNodeGroupRequest, opt *CommonOption) error
	// ListInstancesValidate list cloud instances validate
	ListInstancesValidate(req *proto.ListCloudInstancesRequest, account *proto.Account) error
}
// ClusterManager cloud interface for kubernetes cluster management
type ClusterManager interface {
	// CreateCluster create kubernetes cluster according to the cloud provider
	CreateCluster(cls *proto.Cluster, opt *CreateClusterOption) (*proto.Task, error)
	// CreateVirtualCluster create virtual cluster by hostCluster namespace
	CreateVirtualCluster(cls *proto.Cluster, opt *CreateVirtualClusterOption) (*proto.Task, error)
	// ImportCluster import different cluster by provider
	ImportCluster(cls *proto.Cluster, opt *ImportClusterOption) (*proto.Task, error)
	// DeleteCluster delete kubernetes cluster according to the cloud provider
	DeleteCluster(cls *proto.Cluster, opt *DeleteClusterOption) (*proto.Task, error)
	// DeleteVirtualCluster delete virtual cluster in hostCluster according to the cloud provider
	DeleteVirtualCluster(cls *proto.Cluster, opt *DeleteVirtualClusterOption) (*proto.Task, error)
	// GetCluster get kubernetes cluster detail information according to the cloud provider
	GetCluster(cloudID string, opt *GetClusterOption) (*proto.Cluster, error)
	// ListCluster get cloud cluster list by region
	ListCluster(opt *ListClusterOption) ([]*proto.CloudClusterInfo, error)
	// CheckClusterCidrAvailable check whether the cluster cidr can fit additional nodes
	CheckClusterCidrAvailable(cls *proto.Cluster, opt *CheckClusterCIDROption) (bool, error)
	// GetNodesInCluster get all nodes belonging to the cluster according to the cloud provider
	GetNodesInCluster(cls *proto.Cluster, opt *GetNodesOption) ([]*proto.Node, error)
	// AddNodesToCluster add new nodes to the cluster according to the cloud provider
	AddNodesToCluster(cls *proto.Cluster, nodes []*proto.Node, opt *AddNodesOption) (*proto.Task, error)
	// DeleteNodesFromCluster delete specified nodes from the cluster according to the cloud provider
	DeleteNodesFromCluster(cls *proto.Cluster, nodes []*proto.Node, opt *DeleteNodesOption) (*proto.Task, error)
	// EnableExternalNodeSupport enable cluster support for external nodes
	EnableExternalNodeSupport(cls *proto.Cluster, opt *EnableExternalNodeOption) error
	// ListOsImage get OS image list
	ListOsImage(provider string, opt *CommonOption) ([]*proto.OsImage, error)
	// CheckClusterEndpointStatus check cluster endpoint status
	CheckClusterEndpointStatus(clusterID string, isExtranet bool, opt *CheckEndpointStatusOption) (bool, error)
}
// NodeGroupManager cloud interface for nodegroup management
type NodeGroupManager interface {
	// CreateNodeGroup create nodegroup by cloudprovider api, only create NodeGroup entity
	CreateNodeGroup(group *proto.NodeGroup, opt *CreateNodeGroupOption) (*proto.Task, error)
	// DeleteNodeGroup delete nodegroup by cloudprovider api, all nodes belong to NodeGroup
	// will be released. Task is a background automatic task
	DeleteNodeGroup(group *proto.NodeGroup, nodes []*proto.Node, opt *DeleteNodeGroupOption) (*proto.Task, error)
	// UpdateNodeGroup update specified nodegroup configuration
	UpdateNodeGroup(group *proto.NodeGroup, opt *UpdateNodeGroupOption) (*proto.Task, error)
	// GetNodesInGroup get all nodes belonging to the NodeGroup
	GetNodesInGroup(group *proto.NodeGroup, opt *CommonOption) ([]*proto.Node, error)
	// GetNodesInGroupV2 get all nodes belonging to the NodeGroup
	GetNodesInGroupV2(group *proto.NodeGroup, opt *CommonOption) ([]*proto.NodeGroupNode, error)
	// MoveNodesToGroup add cluster nodes to the NodeGroup
	MoveNodesToGroup(nodes []*proto.Node, group *proto.NodeGroup, opt *MoveNodesOption) (*proto.Task, error)
	// RemoveNodesFromGroup remove nodes from NodeGroup, nodes are still in cluster
	RemoveNodesFromGroup(nodes []*proto.Node, group *proto.NodeGroup, opt *RemoveNodesOption) error
	// CleanNodesInGroup clean specified nodes in NodeGroup
	CleanNodesInGroup(nodes []*proto.Node, group *proto.NodeGroup, opt *CleanNodesOption) (*proto.Task, error)
	// UpdateDesiredNodes update nodegroup desired node count
	UpdateDesiredNodes(desired uint32, group *proto.NodeGroup, opt *UpdateDesiredNodeOption) (*ScalingResponse, error)
	// SwitchNodeGroupAutoScaling switch nodegroup auto scaling
	SwitchNodeGroupAutoScaling(group *proto.NodeGroup, enable bool, opt *SwitchNodeGroupAutoScalingOption) (*proto.Task, error)
	// CreateAutoScalingOption create cluster autoscaling option, cloudprovider will
	// deploy cluster-autoscaler in the background according to the cloudprovider implementation
	CreateAutoScalingOption(scalingOption *proto.ClusterAutoScalingOption, opt *CreateScalingOption) (*proto.Task, error)
	// DeleteAutoScalingOption delete cluster autoscaling, cloudprovider will clean
	// cluster-autoscaler in the background according to the cloudprovider implementation
	DeleteAutoScalingOption(scalingOption *proto.ClusterAutoScalingOption, opt *DeleteScalingOption) (*proto.Task, error)
	// UpdateAutoScalingOption update cluster autoscaling option, cloudprovider will update
	// cluster-autoscaler configuration in the background according to the cloudprovider
	// implementation. Implementation is optional.
	UpdateAutoScalingOption(scalingOption *proto.ClusterAutoScalingOption, opt *UpdateScalingOption) (*proto.Task, error)
	// SwitchAutoScalingOptionStatus switch cluster autoscaling option enable auto scaling status
	SwitchAutoScalingOptionStatus(scalingOption *proto.ClusterAutoScalingOption, enable bool, opt *CommonOption) (*proto.Task, error)
	// ExternalNode support external node operation
	// AddExternalNodeToCluster add external nodes to cluster
	AddExternalNodeToCluster(group *proto.NodeGroup, nodes []*proto.Node, opt *AddExternalNodesOption) (*proto.Task, error)
	// DeleteExternalNodeFromCluster remove external nodes from cluster
	DeleteExternalNodeFromCluster(group *proto.NodeGroup, nodes []*proto.Node, opt *DeleteExternalNodesOption) (*proto.Task, error)
	// GetExternalNodeScript get external node script from cluster nodeGroup
	GetExternalNodeScript(group *proto.NodeGroup) (string, error)
}
// VPCManager cloud interface for vpc management
type VPCManager interface {
	// ListSubnets list the vpc's subnets
	ListSubnets(vpcID string, opt *CommonOption) ([]*proto.Subnet, error)
	// ListSecurityGroups list security groups
	ListSecurityGroups(opt *CommonOption) ([]*proto.SecurityGroup, error)
	// GetCloudNetworkAccountType get cloud account type
	GetCloudNetworkAccountType(opt *CommonOption) (*proto.CloudAccountType, error)
	// ListBandwidthPacks list bandwidth packages
	ListBandwidthPacks(opt *CommonOption) ([]*proto.BandwidthPackageInfo, error)
}
// TaskManager background task management
type TaskManager interface {
	// Name returns the provider name this task manager serves
	Name() string
	// GetAllTask get all registered tasks for worker running
	GetAllTask() map[string]interface{}
	// specific clouds provide different implementations
	// NodeGroup taskList
	// BuildCreateNodeGroupTask build create nodegroup task
	BuildCreateNodeGroupTask(group *proto.NodeGroup, opt *CreateNodeGroupOption) (*proto.Task, error)
	// BuildDeleteNodeGroupTask when delete nodegroup, we need to create background
	// task to clean all nodes in nodegroup, release all resource in cloudprovider,
	// finally delete nodes information in local storage.
	BuildDeleteNodeGroupTask(group *proto.NodeGroup, nodes []*proto.Node, opt *DeleteNodeGroupOption) (*proto.Task, error)
	// BuildMoveNodesToGroupTask when move nodes to nodegroup, we need to create background task
	BuildMoveNodesToGroupTask(nodes []*proto.Node, group *proto.NodeGroup, opt *MoveNodesOption) (*proto.Task, error)
	// BuildCleanNodesInGroupTask clean specified nodes in NodeGroup
	BuildCleanNodesInGroupTask(nodes []*proto.Node, group *proto.NodeGroup, opt *CleanNodesOption) (*proto.Task, error)
	// BuildUpdateDesiredNodesTask update nodegroup desired node count
	BuildUpdateDesiredNodesTask(desired uint32, group *proto.NodeGroup, opt *UpdateDesiredNodeOption) (*proto.Task, error)
	// BuildSwitchNodeGroupAutoScalingTask switch nodegroup autoscaling
	BuildSwitchNodeGroupAutoScalingTask(group *proto.NodeGroup, enable bool, opt *SwitchNodeGroupAutoScalingOption) (*proto.Task, error)
	// BuildUpdateAutoScalingOptionTask update cluster autoscaling option
	BuildUpdateAutoScalingOptionTask(scalingOption *proto.ClusterAutoScalingOption, opt *UpdateScalingOption) (*proto.Task, error)
	// BuildSwitchAsOptionStatusTask switch cluster autoscaling option enable auto scaling status
	BuildSwitchAsOptionStatusTask(scalingOption *proto.ClusterAutoScalingOption, enable bool, opt *CommonOption) (*proto.Task, error)
	// BuildUpdateNodeGroupTask update nodeGroup data and update autoScalingOption
	BuildUpdateNodeGroupTask(group *proto.NodeGroup, opt *CommonOption) (*proto.Task, error)
	// ClusterManager taskList
	// BuildCreateVirtualClusterTask create virtual cluster by different cloud provider
	BuildCreateVirtualClusterTask(cls *proto.Cluster, opt *CreateVirtualClusterOption) (*proto.Task, error)
	// BuildDeleteVirtualClusterTask delete virtual cluster by different cloud provider
	BuildDeleteVirtualClusterTask(cls *proto.Cluster, opt *DeleteVirtualClusterOption) (*proto.Task, error)
	// BuildImportClusterTask import cluster by different cloud provider
	BuildImportClusterTask(cls *proto.Cluster, opt *ImportClusterOption) (*proto.Task, error)
	// BuildCreateClusterTask create cluster by different cloud provider
	BuildCreateClusterTask(cls *proto.Cluster, opt *CreateClusterOption) (*proto.Task, error)
	// BuildDeleteClusterTask delete cluster by different cloud provider
	BuildDeleteClusterTask(cls *proto.Cluster, opt *DeleteClusterOption) (*proto.Task, error)
	// BuildAddNodesToClusterTask add instances to cluster
	BuildAddNodesToClusterTask(cls *proto.Cluster, nodes []*proto.Node, opt *AddNodesOption) (*proto.Task, error)
	// BuildRemoveNodesFromClusterTask remove instances from cluster
	BuildRemoveNodesFromClusterTask(cls *proto.Cluster, nodes []*proto.Node, opt *DeleteNodesOption) (*proto.Task, error)
	// BuildAddExternalNodeToCluster add external nodes to cluster
	BuildAddExternalNodeToCluster(group *proto.NodeGroup, nodes []*proto.Node, opt *AddExternalNodesOption) (*proto.Task, error)
	// BuildDeleteExternalNodeFromCluster remove external nodes from cluster
	BuildDeleteExternalNodeFromCluster(group *proto.NodeGroup, nodes []*proto.Node, opt *DeleteExternalNodesOption) (*proto.Task, error)
}
|
package main
import (
"fmt"
"strings"
"../../link"
)
// exampleHTML is a small fixed HTML document used to exercise link.Parser
// locally without fetching a page over the network.
var exampleHTML = `
<html>
<head>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css">
</head>
<body>
<h1>Social stuffs</h1>
<div>
<a href="https://www.twitter.com/joncalhoun">
Check me out on twitter
<i class="fa fa-twitter" aria-hidden="true"></i>
</a>
<a href="https://github.com/gophercises">
Gophercises is on <strong>Github</strong>!
</a>
</div>
</body>
</html>`
// main feeds the embedded example document to link.Parser and prints the
// links it extracts.
func main() {
	reader := strings.NewReader(exampleHTML)
	parsedLinks, err := link.Parser(reader)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", parsedLinks)
}
|
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"strings"
)
// main reads the file named by the single CLI argument and, for each
// non-empty line of the form "n,m", prints n mod m.
func main() {
	flag.Parse()
	if len(flag.Args()) != 1 {
		return
	}
	fileContents := MustOpenTextFile(flag.Args()[0])
	for _, line := range strings.Split(fileContents, "\n") {
		if len(line) == 0 {
			continue
		}
		var n, m int
		// The original ignored Sscanf's result: a malformed line left
		// n and m at zero and modulus(n, 0) panicked with a division by
		// zero. Skip lines that do not carry two integers or have m == 0.
		if cnt, err := fmt.Sscanf(line, "%d,%d", &n, &m); err != nil || cnt != 2 || m == 0 {
			log.Printf("skipping malformed line %q", line)
			continue
		}
		fmt.Printf("%d\n", modulus(n, m))
	}
}
// MustOpenTextFile reads the named file and returns its contents as a
// string. On any read error it terminates the whole process via log.Fatal
// (note: it exits rather than panics, so deferred functions do not run).
func MustOpenTextFile(filename string) string {
	inputData, err := ioutil.ReadFile(filename)
	if err != nil {
		log.Fatal(err)
	}
	return string(inputData)
}
// modulus returns the remainder of n divided by m using Go's truncated
// division convention; this is exactly what the built-in % operator
// computes (x % y == x - (x/y)*y per the language spec).
func modulus(n, m int) int {
	return n % m
}
|
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package config
import (
"encoding/json"
"fmt"
"net"
"strconv"
"strings"
log "github.com/cihub/seelog"
cniSkel "github.com/containernetworking/cni/pkg/skel"
cniTypes "github.com/containernetworking/cni/pkg/types"
cniTypesCurrent "github.com/containernetworking/cni/pkg/types/100"
cniVersion "github.com/containernetworking/cni/pkg/version"
)
// NetConfig defines the network configuration for the aws-appmesh cni plugin.
// It is the parsed and validated form of netConfigJSON, built by New.
type NetConfig struct {
	cniTypes.NetConf
	PrevResult *cniTypesCurrent.Result // result from the previous plugin in a chain; empty Result when stand-alone
	IgnoredUID string
	IgnoredGID string
	ProxyIngressPort string
	ProxyEgressPort string
	AppPorts []string
	EgressIgnoredPorts []string
	EgressIgnoredIPv4s string // comma-separated IPv4 addresses/CIDRs split out of egressIgnoredIPs
	EgressIgnoredIPv6s string // comma-separated IPv6 addresses/CIDRs split out of egressIgnoredIPs
	EnableIPv6 bool
}
// netConfigJSON defines the network configuration JSON file format for the
// aws-appmesh cni plugin. It is the raw on-stdin shape; see NetConfig for
// the validated form and separateIPs for how EgressIgnoredIPs is split.
type netConfigJSON struct {
	cniTypes.NetConf
	PrevResult map[string]interface{} `json:"prevResult,omitempty"`
	IgnoredUID string `json:"ignoredUID"`
	IgnoredGID string `json:"ignoredGID"`
	ProxyIngressPort string `json:"proxyIngressPort"`
	ProxyEgressPort string `json:"proxyEgressPort"`
	AppPorts []string `json:"appPorts"`
	EgressIgnoredPorts []string `json:"egressIgnoredPorts"`
	EgressIgnoredIPs []string `json:"egressIgnoredIPs"` // mixed IPv4/IPv6 addresses or CIDR blocks
	EnableIPv6 bool `json:"enableIPv6"`
}
const (
	splitter = "," // separator used when joining the ignored-IP lists into strings
	ipv4Proto = "IPv4" // protocol labels returned by isValidIPAddressOrCIDR
	ipv6Proto = "IPv6"
)
// New creates a new NetConfig object by parsing the given CNI arguments.
// It unmarshals args.StdinData, validates it (validateConfig may normalize
// empty list entries in place), splits the ignored IPs by protocol, and
// carries forward any prevResult from an earlier plugin in the chain.
func New(args *cniSkel.CmdArgs) (*NetConfig, error) {
	// Parse network configuration.
	var config netConfigJSON
	if err := json.Unmarshal(args.StdinData, &config); err != nil {
		return nil, fmt.Errorf("failed to parse network config: %v", err)
	}
	// Validate network configuration.
	if err := validateConfig(&config); err != nil {
		return nil, err
	}
	// Get separate lists of IPv4 address/CIDR block and IPv6 address/CIDR block.
	ipv4s, ipv6s, err := separateIPs(config.EgressIgnoredIPs)
	if err != nil {
		return nil, err
	}
	// Populate NetConfig.
	netConfig := NetConfig{
		NetConf: config.NetConf,
		IgnoredUID: config.IgnoredUID,
		IgnoredGID: config.IgnoredGID,
		ProxyIngressPort: config.ProxyIngressPort,
		ProxyEgressPort: config.ProxyEgressPort,
		AppPorts: config.AppPorts,
		EgressIgnoredIPv4s: ipv4s,
		EgressIgnoredIPv6s: ipv6s,
		EgressIgnoredPorts: config.EgressIgnoredPorts,
		EnableIPv6: config.EnableIPv6,
	}
	if config.PrevResult != nil {
		// Plugin was called as part of a chain. Parse the previous result to pass forward.
		// Round-trip through JSON bytes so the version-aware parser can
		// convert whatever schema the previous plugin emitted.
		prevResBytes, err := json.Marshal(config.PrevResult)
		if err != nil {
			return nil, fmt.Errorf("failed to serialize prevResult: %v", err)
		}
		prevRes, err := cniVersion.NewResult(config.CNIVersion, prevResBytes)
		if err != nil {
			return nil, fmt.Errorf("failed to parse prevResult: %v", err)
		}
		netConfig.PrevResult, err = cniTypesCurrent.NewResultFromResult(prevRes)
		if err != nil {
			return nil, fmt.Errorf("failed to convert result to current version: %v", err)
		}
	} else {
		// Plugin was called stand-alone.
		netConfig.PrevResult = &cniTypesCurrent.Result{}
	}
	// Validation complete. Return the parsed NetConfig object.
	log.Debugf("Created NetConfig: %+v", netConfig)
	return &netConfig, nil
}
// validateConfig validates network configuration. The order of the checks
// determines which error a multiply-invalid config reports, so do not
// reorder them. It also mutates config in place: a list whose only entry
// is the empty string is normalized to nil for appPorts,
// egressIgnoredPorts, and egressIgnoredIPs.
func validateConfig(config *netConfigJSON) error {
	// Validate if all the required fields are present.
	if config.IgnoredGID == "" && config.IgnoredUID == "" {
		return fmt.Errorf("missing required parameter ignoredGID or ignoredUID")
	}
	if config.ProxyEgressPort == "" {
		return fmt.Errorf("missing required parameter proxyEgressPort")
	}
	// AppPorts and ProxyIngressPort go in pairs,
	// i.e. either both are set or both are not set.
	if config.ProxyIngressPort == "" && len(config.AppPorts) > 0 {
		return fmt.Errorf("missing parameter proxyIngressPort (required if appPorts are provided)")
	}
	if config.ProxyIngressPort != "" && len(config.AppPorts) == 0 {
		return fmt.Errorf("missing parameter appPorts (required if proxyIngressPort is provided)")
	}
	// Validate the format of all fields.
	if err := isValidPort(config.ProxyEgressPort); err != nil {
		return err
	}
	// isValidPort treats "" as valid, so an unset ingress port passes here.
	if err := isValidPort(config.ProxyIngressPort); err != nil {
		return err
	}
	// If incoming ports or IP addresses are empty we still treat that as valid and delete that empty element.
	if len(config.AppPorts) == 1 && config.AppPorts[0] == "" {
		config.AppPorts = nil
	}
	for _, port := range config.AppPorts {
		if err := isValidPort(port); err != nil {
			return err
		}
	}
	if len(config.EgressIgnoredPorts) == 1 && config.EgressIgnoredPorts[0] == "" {
		config.EgressIgnoredPorts = nil
	}
	for _, port := range config.EgressIgnoredPorts {
		if err := isValidPort(port); err != nil {
			return err
		}
	}
	// The ignored IPs themselves are validated later, in separateIPs.
	if len(config.EgressIgnoredIPs) == 1 && config.EgressIgnoredIPs[0] == "" {
		config.EgressIgnoredIPs = nil
	}
	return nil
}
// separateIPs partitions the given addresses/CIDR blocks into an IPv4 list
// and an IPv6 list, each joined into a single comma-separated string.
// An entry that is neither a valid IP nor a valid CIDR block aborts with
// an error. Surrounding whitespace on each entry is trimmed first.
func separateIPs(ignoredIPs []string) (string, string, error) {
	if len(ignoredIPs) == 0 {
		return "", "", nil
	}
	ipv4s := make([]string, 0, len(ignoredIPs))
	ipv6s := make([]string, 0, len(ignoredIPs))
	for _, raw := range ignoredIPs {
		addr := strings.TrimSpace(raw)
		proto, ok := isValidIPAddressOrCIDR(addr)
		switch {
		case !ok:
			return "", "", fmt.Errorf("invalid IP or CIDR block [%s] specified in egressIgnoredIPs", addr)
		case proto == ipv4Proto:
			ipv4s = append(ipv4s, addr)
		default:
			ipv6s = append(ipv6s, addr)
		}
	}
	return strings.Join(ipv4s, splitter), strings.Join(ipv6s, splitter), nil
}
// isValidPort checks that port is either empty or a decimal integer in the
// valid TCP/UDP port range 1-65535. The empty string means "not set" and is
// accepted so optional ports can be left blank.
func isValidPort(port string) error {
	if port == "" {
		return nil
	}
	i, err := strconv.Atoi(port)
	// Reject non-numeric input, zero or negative values, and values above
	// the maximum port number (the original check accepted e.g. 99999).
	if err == nil && i > 0 && i <= 65535 {
		return nil
	}
	return fmt.Errorf("invalid port [%s] specified", port)
}
// isValidIPAddressOrCIDR checks whether the input is a valid IP addresses/CIDR block and checks the IP protocol.
func isValidIPAddressOrCIDR(address string) (string, bool) {
ip := net.ParseIP(address)
var err error
if ip == nil {
// Check whether it is a valid CIDR block.
ip, _, err = net.ParseCIDR(address)
if err != nil {
return "", false
}
}
// There's no To6() method in the `net` package. Instead, just check that
// it's not a valid `v4` IP.
if ip.To4() != nil {
return ipv4Proto, true
}
return ipv6Proto, true
}
|
package main
import "fmt"
// searchRange returns the indices of the first and last occurrence of
// target in the sorted slice nums, or [-1, -1] when target is absent.
// https://leetcode-cn.com/problems/find-first-and-last-position-of-element-in-sorted-array/
func searchRange(nums []int, target int) []int {
	// Locate the first index whose value is >= target.
	lo, hi := 0, len(nums)-1
	for lo <= hi {
		mid := (lo + hi) / 2
		if nums[mid] >= target {
			hi = mid - 1
		} else {
			lo = mid + 1
		}
	}
	first := lo
	if first == len(nums) || nums[first] != target {
		return []int{-1, -1}
	}
	// Locate the first index whose value is > target; the last occurrence
	// sits immediately before it.
	lo, hi = 0, len(nums)-1
	for lo <= hi {
		mid := (lo + hi) / 2
		if nums[mid] > target {
			hi = mid - 1
		} else {
			lo = mid + 1
		}
	}
	return []int{first, lo - 1}
}
// main runs searchRange over the sample cases (skipping the first, as the
// original did) and prints each result.
func main() {
	cases := [][][]int{
		{
			{8}, {5, 7, 7, 8, 8, 10},
		},
		{
			{8}, {5, 7, 7, 8, 8, 10},
		},
	}
	for i, c := range cases[1:] {
		fmt.Println("## case", i)
		fmt.Println(searchRange(c[1], c[0][0]))
	}
}
|
package main
import (
"bytes"
"context"
"errors"
"html/template"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"github.com/caddyserver/certmagic"
"github.com/gernest/8x8/pkg/auth"
"github.com/gernest/8x8/pkg/mw"
"github.com/gernest/8x8/pkg/xl"
"github.com/gernest/8x8/templates"
"github.com/gorilla/mux"
"github.com/urfave/cli"
"go.uber.org/zap"
)
// host is the public hostname the HTTPS server serves (and obtains
// certificates for via certmagic).
const host = "8x8.co.tz"

// Filesystem locations used by the install command and the running service.
const (
	WorkingDirectory = "/opt/8x8"
	DataDirectory = "/data/8x8"
	SystemDUnitFile = "/etc/systemd/system/8x8.service"
)

// DefaultEmail is the ACME registration email, taken from the environment.
var DefaultEmail = os.Getenv("8x8_DEFAULT_EMAIL")

//go:generate protoc -I pkg/models/ --go_out=./pkg/models pkg/models/models.proto
//go:generate protoc -I pkg/models/ --go_out=./pkg/models pkg/models/checkers.proto
// main wires up the CLI: an "install" subcommand for host setup and a
// default action that runs the HTTPS game server.
func main() {
	a := cli.NewApp()
	a.Name = "8x8 realtime checkers game"
	a.Commands = cli.Commands{
		{
			Name: "install",
			Usage: "installs systemd unit files and sets up 8x8",
			Action: install,
		},
	}
	a.Action = run
	if err := a.Run(os.Args); err != nil {
		// Context cancellation is the expected shutdown path, so it is not
		// logged as an error; the process still exits non-zero either way.
		if !errors.Is(err, context.Canceled) {
			xl.Error(err, "failed to run the app")
		}
		os.Exit(1)
	}
}
// run builds the HTTP routes, starts the certmagic-managed HTTPS server in
// a goroutine, and blocks until that goroutine cancels the context (i.e.
// until the server stops for any reason).
func run(_ *cli.Context) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tpl, err := template.ParseFS(templates.Files, "*/*.html")
	if err != nil {
		return err
	}
	m := mw.New()
	mu := mux.NewRouter()
	mu.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) {
		err := tpl.ExecuteTemplate(rw, "index.html", map[string]interface{}{})
		if err != nil {
			xl.Error(err, "failed executing index template")
		}
	})
	mu.HandleFunc("/auth/google/login", auth.Login)
	mu.HandleFunc("/auth/google/callback", auth.Callback)
	go func() {
		xl.Info("starting service")
		// Persist certificates on disk so restarts do not re-issue them.
		certmagic.Default.Storage = &certmagic.FileStorage{Path: "/data/8x8/certs"}
		certmagic.Default.Logger = xl.Logger
		certmagic.DefaultACME.Email = DefaultEmail
		err := certmagic.HTTPS([]string{host}, m.Then(mu))
		if err != nil {
			xl.Error(err, "exit https server")
		}
		// Unblock main regardless of why the server stopped.
		cancel()
	}()
	<-ctx.Done()
	return ctx.Err()
}
func ensure(dir string) error {
_, err := os.Stat(dir)
if err != nil {
if !os.IsNotExist(err) {
return err
}
return os.MkdirAll(dir, 0755)
}
return nil
}
// install prepares the host for running 8x8 as a systemd service: it
// creates the working and data directories (including the db and certs
// subdirectories), renders the unit-file template with the Google OAuth
// environment, and writes it to SystemDUnitFile.
func install(ctx *cli.Context) error {
	tpl, err := template.ParseFS(templates.Installation, "*/*")
	if err != nil {
		return err
	}
	xl.Info("setting working directory", zap.String("w", WorkingDirectory))
	if err := ensure(WorkingDirectory); err != nil {
		return err
	}
	xl.Info("setting data directory", zap.String("d", DataDirectory))
	if err := ensure(DataDirectory); err != nil {
		return err
	}
	dataDirs := []string{"db", "certs"}
	for _, v := range dataDirs {
		if err := ensure(filepath.Join(DataDirectory, v)); err != nil {
			return err
		}
	}
	xl.Info("Setting up systemd")
	var buf bytes.Buffer
	err = tpl.ExecuteTemplate(&buf, "8x8.service", map[string]interface{}{
		"WorkingDirectory": WorkingDirectory,
		"Data": DataDirectory,
		"GOOGLE_CLIENT_ID": os.Getenv("GOOGLE_CLIENT_ID"),
		"GOOGLE_CLIENT_SECRET": os.Getenv("GOOGLE_CLIENT_SECRET"),
	})
	if err != nil {
		return err
	}
	xl.Info("writing systemd service file", zap.String("path", SystemDUnitFile))
	// 0600: the rendered unit file embeds OAuth secrets, keep it owner-only.
	err = ioutil.WriteFile(SystemDUnitFile, buf.Bytes(), 0600)
	if err != nil {
		return err
	}
	xl.Info("systemctl enable 8x8.service # to start at boot")
	// Fixed typo in the user-facing hint: "star" -> "start".
	xl.Info("systemctl start 8x8.service # to start the service")
	return nil
}
|
package logic
import (
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/kafka"
"Open_IM/pkg/common/log"
)
var (
	persistentCH PersistentConsumerHandler // consumer handler that persists messages
	historyCH HistoryConsumerHandler // consumer handler for message history
	producer *kafka.Producer // producer for the Ms2pschat (push service) topic
)
// Init sets up the module's private log, initializes both consumer
// handlers, and creates the Kafka producer for the Ms2pschat topic.
// It must be called before Run.
func Init() {
	log.NewPrivateLog(config.Config.ModuleName.MsgTransferName)
	persistentCH.Init()
	historyCH.Init()
	producer = kafka.NewKafkaProducer(config.Config.Kafka.Ms2pschat.Addr, config.Config.Kafka.Ms2pschat.Topic)
}
// Run starts both consumer groups in background goroutines and returns
// immediately; nothing waits for or stops these goroutines here.
func Run() {
	// Register each handler with its consumer group and begin consuming.
	go persistentCH.persistentConsumerGroup.RegisterHandleAndConsumer(&persistentCH)
	go historyCH.historyConsumerGroup.RegisterHandleAndConsumer(&historyCH)
}
|
package temp
// ObjectMetaTemp is a minimal metadata holder carrying an object's name
// and its namespace.
type ObjectMetaTemp struct {
	Name      string
	Namespace string
}
|
package eventfile
import (
"bytes"
"strings"
"testing"
"time"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/runtime/protoiface"
spb "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/summary_go_proto"
epb "github.com/tensorflow/tensorflow/tensorflow/go/core/util/event_go_proto"
tbio "github.com/wchargin/tensorboard-data-server/io"
)
// TestGrowingEventFile verifies reading a file that grows over time: the
// reader yields each complete record, goes to sleep when it hits a
// truncated record, stays asleep on a wake with no new data, and resumes
// from the truncation point once the remaining bytes arrive.
func TestGrowingEventFile(t *testing.T) {
	var buf bytes.Buffer
	input1 := &epb.Event{What: &epb.Event_FileVersion{FileVersion: "brain.Event:2"}}
	input2 := &epb.Event{
		What: &epb.Event_Summary{
			Summary: &spb.Summary{
				Value: []*spb.Summary_Value{
					{
						Tag: "loss",
						Value: &spb.Summary_Value_SimpleValue{
							SimpleValue: 0.1,
						},
					},
				},
			},
		},
	}
	rec1 := tbio.NewTFRecord(marshalHard(t, input1))
	rec1.Write(&buf)
	rec2 := tbio.NewTFRecord(marshalHard(t, input2))
	rec2.Write(&buf)
	rec2.Write(&buf) // again!
	// Start with a buffer that has the entire first record and a truncated
	// prefix of the second record. After this goes to sleep, fill up the
	// rest of the buffer.
	truncateAfter := 7
	split := rec1.ByteSize() + truncateAfter
	buf1 := bytes.NewBuffer(append([]byte{}, buf.Bytes()[:split]...))
	buf2 := bytes.NewBuffer(append([]byte{}, buf.Bytes()[split:]...))
	buf.Reset()
	buf.ReadFrom(buf1)
	efr := ReaderBuilder{File: &buf}.Start()
	efr.Wake <- Resume
	// First read should read a full record.
	select {
	case got := <-efr.Results:
		want := EventResult{Event: input1}
		if !proto.Equal(got.Event, want.Event) || got.Err != nil || got.Fatal {
			t.Errorf("first read: got %+v, want %+v", got, want)
		}
	case <-efr.Asleep:
		t.Fatalf("got Asleep, want first result")
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want first result")
	}
	// Second read should progress partially, then sleep due to truncation.
	select {
	case got := <-efr.Results:
		t.Fatalf("unexpected result: got %+v, want first sleep", got)
	case <-efr.Asleep:
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want first sleep")
	}
	// Wake, even though no new data has been written yet. Should go right
	// back to sleep.
	efr.Wake <- Resume
	select {
	case got := <-efr.Results:
		t.Fatalf("unexpected result: got %+v, want second sleep", got)
	case <-efr.Asleep:
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want second sleep")
	}
	// Wake with new data. Should resume from previous truncation.
	buf.ReadFrom(buf2)
	efr.Wake <- Resume
	select {
	case got := <-efr.Results:
		want := EventResult{Event: input2}
		if !proto.Equal(got.Event, want.Event) || got.Err != nil || got.Fatal {
			t.Errorf("second read: got %+v, want %+v", got, want)
		}
	case <-efr.Asleep:
		t.Fatalf("got Asleep, want second result")
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want second result")
	}
	// Third read is another full event (identical to the second one).
	select {
	case got := <-efr.Results:
		want := EventResult{Event: input2}
		if !proto.Equal(got.Event, want.Event) || got.Err != nil || got.Fatal {
			t.Errorf("third read: got %+v, want %+v", got, want)
		}
	case <-efr.Asleep:
		t.Fatalf("got Asleep, want third result")
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want third result")
	}
	select {
	case got := <-efr.Results:
		t.Fatalf("unexpected result: got %+v, want third sleep", got)
	case <-efr.Asleep:
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want third sleep")
	}
}
// TestEventFileWithBadRecordLength verifies that a record whose length
// checksum is wrong is reported as a fatal error and that the reader shuts
// down afterward (no further results, sleeps, or wake acceptance).
func TestEventFileWithBadRecordLength(t *testing.T) {
	var buf bytes.Buffer
	inputEvent := &epb.Event{What: &epb.Event_FileVersion{FileVersion: "brain.Event:2"}}
	okRecord := tbio.NewTFRecord(marshalHard(t, inputEvent))
	okRecord.Write(&buf)
	// Write an all-zeros record, which is corrupt: length checksum wrong.
	emptyRecord := tbio.NewTFRecord([]byte{})
	buf.Write(make([]byte, emptyRecord.ByteSize()))
	okRecord.Write(&buf)
	efr := ReaderBuilder{File: &buf}.Start()
	efr.Wake <- Resume
	// First read should succeed.
	select {
	case got := <-efr.Results:
		want := EventResult{Event: inputEvent}
		if !proto.Equal(got.Event, want.Event) || got.Err != nil || got.Fatal {
			t.Errorf("first read: got %+v, want %+v", got, want)
		}
	case <-efr.Asleep:
		t.Fatalf("got Asleep, want first result")
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want first result")
	}
	// Second read should fail fatally.
	select {
	case got := <-efr.Results:
		wantMsgSubstr := "length CRC mismatch"
		if got.Event != nil || got.Err == nil || !strings.Contains(got.Err.Error(), wantMsgSubstr) || !got.Fatal {
			t.Errorf("first read: got %+v, want fatal failure with %q", got, wantMsgSubstr)
		}
	case <-efr.Asleep:
		t.Fatalf("got Asleep, want second result")
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want second result")
	}
	// Reader should now be dead.
	select {
	case got := <-efr.Results:
		t.Errorf("got result %+v, want no interaction", got)
	case <-efr.Asleep:
		t.Errorf("got <-Asleep, want no interaction")
	case efr.Wake <- Resume:
		t.Errorf("got Wake<-, want no interaction")
	default:
	}
}
// TestEventFileWithBadRecordData verifies that a record whose payload
// checksum is wrong yields a non-fatal error, after which reading
// continues with the next record.
func TestEventFileWithBadRecordData(t *testing.T) {
	var buf bytes.Buffer
	inputEvent := &epb.Event{What: &epb.Event_FileVersion{FileVersion: "brain.Event:2"}}
	okRecord := tbio.NewTFRecord(marshalHard(t, inputEvent))
	okRecord.Write(&buf)
	// Flip bits in the first record's last payload byte to break its data CRC.
	buf.Bytes()[okRecord.ByteSize()-1] ^= 0x55
	okRecord.Write(&buf)
	efr := ReaderBuilder{File: &buf}.Start()
	efr.Wake <- Resume
	// First read should fail non-fatally.
	select {
	case got := <-efr.Results:
		wantMsgSubstr := "data CRC mismatch"
		if got.Event != nil || got.Err == nil || !strings.Contains(got.Err.Error(), wantMsgSubstr) || got.Fatal {
			t.Errorf("first read: got %+v, want non-fatal failure with %q", got, wantMsgSubstr)
		}
	case <-efr.Asleep:
		t.Fatalf("got Asleep, want first result")
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want first result")
	}
	// Second read should succeed.
	select {
	case got := <-efr.Results:
		want := EventResult{Event: inputEvent}
		if !proto.Equal(got.Event, want.Event) || got.Err != nil || got.Fatal {
			t.Errorf("second read: got %+v, want %+v", got, want)
		}
	case <-efr.Asleep:
		t.Fatalf("got Asleep, want second result")
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want second result")
	}
	select {
	case got := <-efr.Results:
		t.Fatalf("unexpected result: got %+v, want sleep", got)
	case <-efr.Asleep:
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want sleep")
	}
}
// TestEventFileWithBadProto verifies that a record whose payload is valid
// at the framing level but does not parse as an Event proto yields a
// non-fatal error, after which reading continues normally.
func TestEventFileWithBadProto(t *testing.T) {
	var buf bytes.Buffer
	badRecord := tbio.NewTFRecord([]byte("not likely a proto"))
	badRecord.Write(&buf)
	inputEvent := &epb.Event{What: &epb.Event_FileVersion{FileVersion: "brain.Event:2"}}
	okRecord := tbio.NewTFRecord(marshalHard(t, inputEvent))
	okRecord.Write(&buf)
	efr := ReaderBuilder{File: &buf}.Start()
	efr.Wake <- Resume
	// First read should fail non-fatally.
	select {
	case got := <-efr.Results:
		wantMsgSubstr := "reserved wire type"
		if got.Event != nil || got.Err == nil || !strings.Contains(got.Err.Error(), wantMsgSubstr) || got.Fatal {
			t.Errorf("first read: got %+v, want non-fatal failure with %q", got, wantMsgSubstr)
		}
	case <-efr.Asleep:
		t.Fatalf("got Asleep, want first result")
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want first result")
	}
	// Second read should succeed.
	select {
	case got := <-efr.Results:
		want := EventResult{Event: inputEvent}
		if !proto.Equal(got.Event, want.Event) || got.Err != nil || got.Fatal {
			t.Errorf("second read: got %+v, want %+v", got, want)
		}
	case <-efr.Asleep:
		t.Fatalf("got Asleep, want second result")
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want second result")
	}
	select {
	case got := <-efr.Results:
		t.Fatalf("unexpected result: got %+v, want sleep", got)
	case <-efr.Asleep:
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want sleep")
	}
}
// TestWakeAbort verifies that a reader on an empty file sleeps on each
// Resume and terminates cleanly when sent Abort (no further results or
// sleeps afterward).
func TestWakeAbort(t *testing.T) {
	var buf bytes.Buffer
	efr := ReaderBuilder{File: &buf}.Start()
	efr.Wake <- Resume
	select {
	case got := <-efr.Results:
		t.Fatalf("unexpected result: got %+v, want first sleep", got)
	case <-efr.Asleep:
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want first sleep")
	}
	efr.Wake <- Resume
	select {
	case got := <-efr.Results:
		t.Fatalf("unexpected result: got %+v, want second sleep", got)
	case <-efr.Asleep:
	case <-time.After(time.Second):
		t.Fatalf("no interaction after 1s; want second sleep")
	}
	efr.Wake <- Abort
	select {
	case got := <-efr.Results:
		t.Fatalf("unexpected result: got %+v, want dead", got)
	case <-efr.Asleep:
		t.Fatalf("unexpected result: got sleep, want dead")
	default:
	}
}
// TestImmediateAbort verifies that sending Abort before any Resume stops
// the reader without it consuming any bytes from the file.
func TestImmediateAbort(t *testing.T) {
	bufContents := "do not read me"
	buf := bytes.NewBufferString(bufContents)
	efr := ReaderBuilder{File: buf}.Start()
	efr.Wake <- Abort
	// The buffer must be untouched: nothing was read.
	if buf.String() != bufContents {
		t.Errorf("buf.String(): got %v, want %v", buf.String(), bufContents)
	}
}
// marshalHard calls proto.Marshal on m and fails the test immediately in
// case of error, so callers can use the result inline.
func marshalHard(t *testing.T, m protoiface.MessageV1) []byte {
	result, err := proto.Marshal(m)
	if err != nil {
		t.Fatalf("proto.Marshal(%v): %v", m, err)
	}
	return result
}
|
package handler
import (
"github.com/hashicorp/hcl/v2/hclwrite"
"github.com/zclconf/go-cty/cty"
"go.mercari.io/hcledit/internal/ast"
)
// ctyValueHandler writes a cty value as an HCL attribute, optionally
// preceded by comment/newline tokens and re-ordered after another key.
type ctyValueHandler struct {
	exprTokens hclwrite.Tokens // tokens of the value expression itself
	beforeTokens hclwrite.Tokens // tokens emitted before the attribute (comment and/or newline)
	afterKey string // attribute name this one should follow; "" keeps the default order
}
// newCtyValueHandler builds a Handler that sets an attribute to the given
// cty value. comment and beforeNewline feed the tokens placed before the
// attribute; afterKey controls where the attribute is ordered. The error
// result is currently always nil.
func newCtyValueHandler(value cty.Value, comment, afterKey string, beforeNewline bool) (Handler, error) {
	return &ctyValueHandler{
		exprTokens: hclwrite.NewExpressionLiteral(value).BuildTokens(nil),
		beforeTokens: beforeTokens(comment, beforeNewline),
		afterKey: afterKey,
	}, nil
}
// HandleObject sets the attribute on an object AST node and, when afterKey
// is configured, moves the attribute to sit after that key.
func (h *ctyValueHandler) HandleObject(object *ast.Object, name string, _ []string) error {
	object.SetObjectAttributeRaw(name, h.exprTokens, h.beforeTokens)
	if h.afterKey != "" {
		object.UpdateObjectAttributeOrder(name, h.afterKey)
	}
	return nil
}
// HandleBody sets the attribute on an hclwrite body. When leading tokens
// are configured, the attribute is rebuilt with them prepended: it is
// removed and re-appended as unstructured tokens (hclwrite has no direct
// "set with leading tokens" operation). When afterKey is configured the
// body's token order is updated so the attribute follows that key.
func (h *ctyValueHandler) HandleBody(body *hclwrite.Body, name string, _ []string) error {
	body.SetAttributeRaw(name, h.exprTokens)
	if len(h.beforeTokens) > 0 {
		tokens := body.GetAttribute(name).BuildTokens(h.beforeTokens)
		body.RemoveAttribute(name)
		body.AppendUnstructuredTokens(tokens)
	}
	if h.afterKey != "" {
		ast.UpdateBodyTokenOrder(body, name, h.afterKey)
	}
	return nil
}
|
// Package main provides ...
package selectionsort
import (
"github.com/lzcqd/sedgewick/chap2_sorting/sortable"
"reflect"
"testing"
)
// TestSort checks Sort in place against int and string inputs, comparing
// each sorted slice with its expected ordering via reflect.DeepEqual.
func TestSort(t *testing.T) {
	cases := []struct {
		in, want sortable.Interface
	}{
		{sortable.Intslice([]int{8, 3, 5, 7, 10, 1, 4, 2, 9, 6}), sortable.Intslice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})},
		{sortable.Stringslice([]string{"s", "e", "l", "e", "c", "t", "i", "o", "n", "s", "o", "r", "t"}),
			sortable.Stringslice([]string{"c", "e", "e", "i", "l", "n", "o", "o", "r", "s", "s", "t", "t"})},
	}
	for _, c := range cases {
		Sort(c.in)
		if !reflect.DeepEqual(c.in, c.want) {
			t.Errorf("Sorting result: %v, want: %v", c.in, c.want)
		}
	}
}
|
package main
import (
"errors"
"fmt"
"log"
"os"
"os/user"
"path/filepath"
"strings"
"github.com/boltdb/bolt"
"github.com/codegangsta/cli"
)
// saveLocationName is the database file name, created in the user's home directory.
const saveLocationName = ".projects.db"

// bucketName is the single bolt bucket in which all project entries live.
const bucketName = "projects"

// errTxNotWritable is returned when the projects bucket is missing and the
// current (read-only) transaction cannot create it.
var errTxNotWritable = errors.New("tx not writable")
// getBucket returns the projects bucket, creating it first when it does
// not exist and the transaction allows writes. A read-only transaction on
// a database without the bucket gets errTxNotWritable.
func getBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
	key := []byte(bucketName)
	if b := tx.Bucket(key); b != nil {
		return b, nil
	}
	if !tx.Writable() {
		return nil, errTxNotWritable
	}
	b, err := tx.CreateBucketIfNotExists(key)
	if err != nil {
		return nil, err
	}
	return b, nil
}
func saveLocation(thePath string) (string, error) {
u, err := user.Current()
if err != nil {
return "", err
}
location := filepath.Join(u.HomeDir, thePath)
return location, nil
}
// doWithDB opens the projects database in the user's home directory
// (initializing the bucket on first use), runs f against it, and closes
// the database before returning f's error.
func doWithDB(f func(db *bolt.DB) error) error {
	// get db path
	dbPath, err := saveLocation(saveLocationName)
	if err != nil {
		return err
	}
	// A missing file means first run: the bucket must be created below.
	var setupRequired bool
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		setupRequired = true
	}
	// open the database.
	db, err := bolt.Open(dbPath, 0666, nil)
	if err != nil {
		return err
	}
	defer db.Close()
	if setupRequired {
		// getBucket creates the bucket when run inside a writable transaction.
		if err := db.Update(func(tx *bolt.Tx) error {
			_, err := getBucket(tx)
			return err
		}); err != nil {
			return err
		}
	}
	return f(db)
}
// addProject stores the current working directory under the (lower-cased)
// project name given as the first CLI argument, exiting fatally on error.
func addProject(c *cli.Context) {
	cwd, err := filepath.Abs("")
	if err != nil {
		log.Fatal(err)
	}
	projectName := strings.ToLower(c.Args().First())
	if projectName == "" {
		log.Fatal("name must be provided")
	}
	err = doWithDB(func(db *bolt.DB) error {
		return db.Update(func(tx *bolt.Tx) error {
			bucket, err := getBucket(tx)
			if err != nil {
				return err
			}
			// Propagate the Put error; the original silently discarded it,
			// so a failed write looked like success.
			return bucket.Put([]byte(projectName), []byte(cwd))
		})
	})
	if err != nil {
		// Prefixed message for consistency with getProject/deleteProject.
		log.Fatal("Can't add project: ", err)
	}
}
// getProject looks up the (lower-cased) project name given as the first
// CLI argument and prints its stored directory, exiting fatally when the
// project does not exist or the database cannot be read.
func getProject(c *cli.Context) {
	projectName := strings.ToLower(c.Args().First())
	if projectName == "" {
		log.Fatal("name must be provided")
	}
	err := doWithDB(func(db *bolt.DB) error {
		return db.View(func(tx *bolt.Tx) error {
			bucket, err := getBucket(tx)
			if err != nil {
				return err
			}
			// Get returns nil when the key is absent.
			directory := bucket.Get([]byte(projectName))
			if directory == nil {
				return fmt.Errorf("no project called %s found", projectName)
			}
			fmt.Println(string(directory))
			return nil
		})
	})
	if err != nil {
		log.Fatal("Can't get project: ", err)
	}
}
// deleteProject removes the (lower-cased) project name given as the first
// CLI argument from the database, exiting fatally on error. Deleting a
// name that does not exist is not an error (bolt's Delete is a no-op then).
func deleteProject(c *cli.Context) {
	projectName := strings.ToLower(c.Args().First())
	if projectName == "" {
		log.Fatal("name must be provided")
	}
	err := doWithDB(func(db *bolt.DB) error {
		return db.Update(func(tx *bolt.Tx) error {
			// Remove the project entry, if present.
			bucket, err := getBucket(tx)
			if err != nil {
				return err
			}
			return bucket.Delete([]byte(projectName))
		})
	})
	if err != nil {
		log.Fatal("Can't delete project: ", err)
	}
}
// listProjects prints a header followed by every stored project name and
// its directory, one per line, exiting fatally on a database error.
func listProjects(c *cli.Context) {
	fmt.Printf("Name Directory\n\n")
	err := doWithDB(func(db *bolt.DB) error {
		return db.View(func(tx *bolt.Tx) error {
			bucket, err := getBucket(tx)
			if err != nil {
				return err
			}
			return bucket.ForEach(func(k, v []byte) error {
				fmt.Printf("%-10s %s\n", string(k), string(v))
				return nil
			})
		})
	})
	if err != nil {
		log.Fatal(err)
	}
}
// main wires the proj CLI subcommands (add/del/get/list) to their actions
// and runs the app.
func main() {
	app := cli.NewApp()
	app.Name = "proj"
	app.Usage = "store and retrieve project locations"
	app.Commands = []cli.Command{
		{
			Name: "add",
			ShortName: "a",
			Usage: "add a project",
			Action: addProject,
		},
		{
			Name: "del",
			ShortName: "d",
			Usage: "delete a project",
			Action: deleteProject,
		},
		{
			Name: "get",
			ShortName: "g",
			Usage: "get a project's directory by name",
			Action: getProject,
		},
		{
			Name: "list",
			ShortName: "l",
			Usage: "lists all projects",
			Action: listProjects,
		},
	}
	// Surface CLI errors instead of discarding app.Run's return value, as
	// the original did.
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
|
package mmongo
import (
"errors"
)
// MyfMongo is the package-level singleton MongoManager instance.
var MyfMongo *MongoManager

const (
	DEFAULT_MAX_POOL_SIZE uint64 = 100 // default maximum number of pooled connections
	DEFAULT_MIN_POOL_SIZE uint64 = 10 // default number of idle connections
	//ConnectTimeout
	DEFAULT_CONNECT_TIMEOUT int = 10000 // default connect timeout in milliseconds (10s)
)
// MongoConnConf holds the address of a single Mongo instance.
type MongoConnConf struct {
	Host string `toml:"host"` // host
	Port int `toml:"port"` // port
}

// MongoGroupConf holds the configuration of a Mongo primary/secondary group.
type MongoGroupConf struct {
	Name string `toml:"name"`
	Database string `toml:"database"` // database name
	Username string `toml:"username"` // user name
	Password string `toml:"password"` // password
	MaxPoolSize uint64 `toml:"maxPoolSize"` // maximum number of pooled connections
	MinPoolSize uint64 `toml:"minPoolSize"` // minimum number of pooled connections
	ConnectTimeout int `toml:"connectTimeout"` // connect timeout
	Instances []MongoConnConf `toml:"Instance"` // list of instances
}

// MongoConf is the top-level Mongo configuration: one entry per group.
type MongoConf struct {
	GroupConfList []MongoGroupConf `toml:"Group"`
}
// Error values. The messages are user-facing and intentionally kept in
// Chinese: name must not be empty / instance does not exist / no available
// instance connection, respectively.
var (
	ERR_MONGO_NAME_NOT_FOUND = errors.New("mongo名称不能为空")
	ERR_MONGO_GROUP_NOT_FOUND = errors.New("此mongo实例不存在")
	ERR_MONGO_CONN_NOT_FOUND = errors.New("没有可用mongo实例连接")
)
|
package config
import (
"encoding/json"
"os"
)
// Config struct for file config.json
type Config struct {
TelegramBotToken string `json:"TELEGRAM_BOT_TOKEN"`
TelegramChannelChatID int64 `json:"TELEGRAM_CHANNEL_CHAT_ID"`
}
// LoadConf func ...
func LoadConf() Config {
file, _ := os.Open("resources/config.json")
defer file.Close()
decoder := json.NewDecoder(file)
configuration := Config{}
err := decoder.Decode(&configuration)
if err != nil {
panic(err)
}
return configuration
}
|
package bench
import (
"sync"
"testing"
)
// TestCounter_Add exercises Counter.Add via a table of cases. Note that it
// only checks Add runs without panicking; it asserts nothing about the
// resulting counter value.
func TestCounter_Add(t *testing.T) {
	type fields struct {
		value int64
		mu *sync.RWMutex
	}
	type args struct {
		amount int64
	}
	tests := []struct {
		name string
		fields fields
		args args
	}{
		{"base-case", fields{0, &sync.RWMutex{}}, args{10}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := &Counter{
				value: tt.fields.value,
				mu: tt.fields.mu,
			}
			c.Add(tt.args.amount)
		})
	}
}
// TestCounter_Read checks that Counter.Read returns the value the counter
// was constructed with, for positive and negative initial values.
func TestCounter_Read(t *testing.T) {
	type fields struct {
		value int64
		mu *sync.RWMutex
	}
	tests := []struct {
		name string
		fields fields
		want int64
	}{
		{"ten", fields{10, &sync.RWMutex{}}, 10},
		{"-nine", fields{-9, &sync.RWMutex{}}, -9},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := &Counter{
				value: tt.fields.value,
				mu: tt.fields.mu,
			}
			if got := c.Read(); got != tt.want {
				t.Errorf("Counter.Read() = %v, want %v", got, tt.want)
			}
		})
	}
}
// BenchmarkCounterAdd measures the cost of a single-goroutine Add.
func BenchmarkCounterAdd(b *testing.B) {
	c := Counter{0, &sync.RWMutex{}}
	for n := 0; n < b.N; n++ {
		c.Add(1)
	}
}
// BenchmarkCounterRead measures the cost of a single-goroutine Read.
func BenchmarkCounterRead(b *testing.B) {
	c := Counter{0, &sync.RWMutex{}}
	for n := 0; n < b.N; n++ {
		c.Read()
	}
}
// BenchmarkCounterAddRead measures interleaved Add/Read pairs under
// parallel load, exercising contention on the counter's lock.
func BenchmarkCounterAddRead(b *testing.B) {
	c := Counter{0, &sync.RWMutex{}}
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			c.Add(1)
			c.Read()
		}
	})
}
|
package main
//create a type SQUARE
//create a type CIRCLE
//attach a method to each that calculates AREA and returns it
//circle area= π r 2
//square area = L * W
//create a type SHAPE that defines an interface as anything that has the AREA method
//create a func INFO which takes type shape and then prints the area
//create a value of type square
//create a value of type circle
//use func info to print the area of square
//use func info to print the area of circle
import (
"fmt"
"math"
)
// shape is anything that can report its own area.
type shape interface {
	area() float64
}

// square is described by its side length.
type square struct {
	sideLength float64
}

// circle is described by its radius.
type circle struct {
	radius float64
}

// area returns sideLength squared.
func (sq square) area() float64 {
	return sq.sideLength * sq.sideLength
}

// area returns pi times radius squared.
func (ci circle) area() float64 {
	return math.Pi * ci.radius * ci.radius
}
// main builds one square and one circle and reports both areas through the
// shared info helper.
func main() {
	sq := square{sideLength: 1147.0}
	ci := circle{radius: 6.66}
	fmt.Printf("The area of the %T is %v\n", sq, info(sq))
	fmt.Printf("The area of the %T is %v\n", ci, info(ci))
}
// info returns the area of any shape.
func info(s shape) (area float64) {
	area = s.area()
	return
}
|
/**
* @Author: yanKoo
* @Date: 2019/3/11 10:48
* @Description: 处理请求的业务逻辑
*/
package controllers
import (
pb "api/talk_cloud"
cfgWs "configs/web_server"
"context"
"github.com/gin-gonic/gin"
"log"
"model"
"net/http"
tg "pkg/group"
"server/common/src/db"
"service"
"service/grpc_client_pool"
"strconv"
)
// UpdateGroupDevice handles the web request to update the devices of a group.
// It validates the session and payload, then forwards the update over gRPC.
func UpdateGroupDevice(c *gin.Context) {
	gList := &model.GroupList{}
	if err := c.BindJSON(gList); err != nil {
		log.Printf("json parse fail , error : %s", err)
		c.JSON(http.StatusBadRequest, model.ErrorRequestBodyParseFailed)
		return
	}
	// Authenticate the caller via its session.
	if !service.ValidateAccountSession(c.Request, gList.GroupInfo.AccountId) {
		c.JSON(http.StatusUnauthorized, gin.H{
			"error":      "session is not right.",
			"error_code": "006",
		})
		return
	}
	// Basic payload validation: group name, account id and at least one device.
	if gList.GroupInfo.GroupName == "" || gList.GroupInfo.AccountId == 0 || len(gList.DeviceIds) == 0 {
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"error":      "You need at least the group name, the account id, and at least one device id",
			"error_code": "001",
		})
		return
	}
	// Update the group via gRPC.
	log.Println("update group start rpc")
	conn, err := grpc_client_pool.GetConn(cfgWs.GrpcAddr)
	if err != nil {
		// BUG FIX: previously the error was only logged and the nil conn was
		// used anyway, which would panic. Fail the request instead.
		log.Printf("grpc.Dial err : %v", err)
		c.JSON(http.StatusInternalServerError, model.ErrorDBError)
		return
	}
	webCli := pb.NewWebServiceClient(conn)
	deviceIds := make([]int64, 0, len(gList.DeviceIds))
	for _, v := range gList.DeviceIds {
		deviceIds = append(deviceIds, int64(v))
	}
	deviceInfos := make([]*pb.Member, 0, len(gList.DeviceInfo))
	for _, v := range gList.DeviceInfo {
		// NOTE(review): these type assertions panic on malformed device info;
		// presumably the payload shape is guaranteed upstream — confirm.
		vMap := v.(map[string]interface{})
		log.Println((vMap["id"]).(float64))
		deviceInfos = append(deviceInfos, &pb.Member{
			Id:       int32((vMap["id"]).(float64)),
			IMei:     (vMap["imei"]).(string),
			UserName: (vMap["user_name"]).(string),
			//NickName: (vMap["nick_name"]).(nil),
			Pwd: (vMap["password"]).(string),
		})
	}
	log.Println("group member update :gList.GroupInfo.Id :", gList.GroupInfo.Id)
	status, _ := strconv.Atoi(gList.GroupInfo.Status) // zero on parse failure, matching prior behavior
	resUpd, err := webCli.UpdateGroup(context.Background(), &pb.UpdateGroupReq{
		DeviceIds:   deviceIds,
		DeviceInfos: deviceInfos,
		GroupInfo: &pb.Group{
			Id:        int32(gList.GroupInfo.Id),
			GroupName: gList.GroupInfo.GroupName,
			AccountId: int32(gList.GroupInfo.AccountId),
			Status:    int32(status)},
	})
	if err != nil {
		log.Printf("Update group fail , error: %s", err)
		c.JSON(http.StatusInternalServerError, model.ErrorDBError)
		return
	}
	log.Println(resUpd)
	c.JSON(http.StatusOK, gin.H{
		"result": "success",
		"msg":    resUpd.ResultMsg.Msg,
	})
}
// CreateGroup handles the web request to create a new group. It validates
// the session and payload, rejects duplicate group names, then creates the
// group over gRPC.
func CreateGroup(c *gin.Context) {
	gList := &model.GroupList{}
	if err := c.BindJSON(gList); err != nil {
		log.Printf("json parse fail , error : %s", err)
		c.JSON(http.StatusBadRequest, model.ErrorRequestBodyParseFailed)
		return
	}
	// Authenticate the caller via its session.
	if !service.ValidateAccountSession(c.Request, gList.GroupInfo.AccountId) {
		c.JSON(http.StatusUnauthorized, gin.H{
			"error":      "session is not right.",
			"error_code": "006",
		})
		return
	}
	// Basic payload validation: group name, account id and at least one device.
	if gList.GroupInfo.GroupName == "" || gList.GroupInfo.AccountId == 0 || len(gList.DeviceIds) == 0 {
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"error":      "You need at least the group name, the account id, and at least one device id",
			"error_code": "001",
		})
		return
	}
	// Reject duplicate group names.
	res, err := tg.CheckDuplicateGName(gList.GroupInfo)
	if err != nil {
		log.Printf("CheckDuplicateGName fail , error: %s", err)
		c.JSON(http.StatusInternalServerError, model.ErrorDBError)
		return
	}
	if res > 0 {
		// BUG FIX: the old log printed err here, which is always nil in this
		// branch; report the duplicate name instead.
		log.Printf("CheckDuplicateGName: group name %q already exists", gList.GroupInfo.GroupName)
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"msg":  "group name duplicate",
			"code": "422",
		})
		return
	}
	// Create the group via gRPC.
	log.Println("start rpc")
	conn, err := grpc_client_pool.GetConn(cfgWs.GrpcAddr)
	if err != nil {
		// BUG FIX: previously the error was only logged and the nil conn was
		// used anyway. Fail the request instead.
		log.Printf("grpc.Dial err : %v", err)
		c.JSON(http.StatusInternalServerError, model.ErrorDBError)
		return
	}
	webCli := pb.NewTalkCloudClient(conn)
	log.Printf("++++++++++++++webCli: %+v", webCli)
	var deviceIds string
	for _, v := range gList.DeviceIds {
		// TODO(review): only the -1 sentinel is handled; any other id is
		// silently ignored here. Confirm whether that is intentional.
		if v == -1 {
			deviceIds = "-1"
		}
	}
	deviceInfos := make([]*pb.Member, 0, len(gList.DeviceInfo))
	for _, v := range gList.DeviceInfo {
		// NOTE(review): these type assertions panic on malformed device info;
		// presumably the payload shape is guaranteed upstream — confirm.
		vMap := v.(map[string]interface{})
		log.Println((vMap["id"]).(float64))
		deviceInfos = append(deviceInfos, &pb.Member{
			Id:       int32((vMap["id"]).(float64)),
			IMei:     (vMap["imei"]).(string),
			UserName: (vMap["user_name"]).(string),
			Pwd:      (vMap["password"]).(string),
		})
	}
	status, _ := strconv.Atoi(gList.GroupInfo.Status) // zero on parse failure, matching prior behavior
	log.Println("gList.GroupInfo.GroupName:", gList.GroupInfo.GroupName)
	resCreate, err := webCli.CreateGroup(context.Background(), &pb.CreateGroupReq{
		DeviceIds:   deviceIds,
		DeviceInfos: deviceInfos,
		GroupInfo: &pb.Group{
			Id:        int32(gList.GroupInfo.Id),
			GroupName: gList.GroupInfo.GroupName,
			AccountId: int32(gList.GroupInfo.AccountId),
			Status:    int32(status)},
	})
	if err != nil {
		log.Printf("create group fail , error: %s", err)
		c.JSON(http.StatusInternalServerError, model.ErrorDBError)
		return
	}
	// BUG FIX: this log used to run before the error check above and
	// dereferenced a nil response whenever the RPC failed.
	log.Printf("group: %+v", resCreate.GroupInfo.GroupName)
	log.Println(resCreate)
	c.JSON(http.StatusOK, gin.H{
		"result":     "success",
		"group_info": resCreate.GroupInfo,
		"msg":        resCreate.Res.Msg,
	})
}
// UpdateGroup handles the web request to update a group; the web UI
// currently only updates the group name.
func UpdateGroup(c *gin.Context) {
	gI := &model.GroupInfo{}
	if err := c.BindJSON(gI); err != nil {
		log.Printf("json parse fail , error : %s", err)
		c.JSON(http.StatusBadRequest, model.ErrorRequestBodyParseFailed)
		return
	}
	// Authenticate the caller via its session.
	if !service.ValidateAccountSession(c.Request, gI.AccountId) {
		c.JSON(http.StatusUnauthorized, gin.H{
			"error":      "session is not right.",
			"error_code": "006",
		})
		return
	}
	// Reject duplicate group names.
	res, err := tg.CheckDuplicateGName(gI)
	if err != nil {
		log.Printf("CheckDuplicateGName fail , error: %s", err)
		c.JSON(http.StatusInternalServerError, model.ErrorDBError)
		return
	}
	if res > 0 {
		// BUG FIX: the old log printed err here, which is always nil in this
		// branch; report the duplicate name instead.
		log.Printf("CheckDuplicateGName: group name %q already exists", gI.GroupName)
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"msg":  "group name duplicate",
			"code": "422",
		})
		return
	}
	if err := tg.UpdateGroup(gI, db.DBHandler); err != nil {
		log.Printf("update group fail , error: %s", err)
		c.JSON(http.StatusInternalServerError, model.ErrorDBError)
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"result": "success",
		"msg":    "Update group successfully",
	})
}
// DeleteGroup handles the web request to delete a group by id over gRPC.
func DeleteGroup(c *gin.Context) {
	gI := &model.GroupInfo{}
	if err := c.BindJSON(gI); err != nil {
		log.Printf("json parse fail , error : %s", err)
		c.JSON(http.StatusBadRequest, model.ErrorRequestBodyParseFailed)
		return
	}
	// Authenticate the caller via its session.
	if !service.ValidateAccountSession(c.Request, gI.AccountId) {
		c.JSON(http.StatusUnauthorized, gin.H{
			"error":      "session is not right.",
			"error_code": "006",
		})
		return
	}
	log.Println("start rpc")
	conn, err := grpc_client_pool.GetConn(cfgWs.GrpcAddr)
	if err != nil {
		// BUG FIX: previously the error was only logged and the nil conn was
		// used anyway. Fail the request instead.
		log.Printf("grpc.Dial err : %v", err)
		c.JSON(http.StatusInternalServerError, model.ErrorDBError)
		return
	}
	webCli := pb.NewWebServiceClient(conn)
	if _, err := webCli.DeleteGroup(context.Background(), &pb.Group{Id: int32(gI.Id)}); err != nil {
		// BUG FIX: the old message said "update group fail" inside the
		// delete handler.
		log.Printf("delete group fail , error: %s", err)
		c.JSON(http.StatusInternalServerError, model.ErrorDBError)
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"result": "success",
		"msg":    "Delete group successfully",
	})
}
|
package game
import (
"errors"
"github.com/golang/glog"
"github.com/jinzhu/gorm"
"github.com/noxue/utils/argsUtil"
"github.com/noxue/utils/fsm"
"math/rand"
"qipai/config"
"qipai/dao"
"qipai/model"
"qipai/utils"
"strconv"
"strings"
"time"
)
// n is package-level state; it is not referenced anywhere in this chunk —
// TODO(review): confirm it is used elsewhere in package game before removing.
var n = 0
// StateSelectBanker is the FSM state handler for the "grab the banker" phase.
// On SetTimesAction it records one player's banker bid (times + auto flag);
// once every player in the current round has bid, it selects the banker,
// schedules the idle players' automatic bets, and transitions to
// SetScoreState. Until then it returns the zero state (stay in this state).
func StateSelectBanker(action fsm.ActionType, args ...interface{}) (nextState fsm.StateType) {
	switch action {
	case SetTimesAction:
		var roomId uint
		var uid uint
		var times int
		var auto bool
		res := utils.Msg("")
		alreadySet := false // whether this player has already placed a bid
		// Deferred notification: on success (res == nil) broadcast the bid to
		// the whole room; on failure send the error to the bidder only; a
		// duplicate bid produces no second notification.
		defer func() {
			if alreadySet { // already bid: exit silently, no re-broadcast
				return
			}
			if res == nil {
				res = utils.Msg("").AddData("game", &model.Game{
					PlayerId: uid,
					Times:    times,
					RoomId:   roomId,
					Auto:     auto,
				})
				SendToAllPlayers(res, BroadcastTimes, roomId)
				return
			}
			p := GetPlayer(uid)
			if p == nil {
				glog.V(1).Infoln("玩家:", uid, "不在线,发送抢庄信息失败")
				return
			}
			res.Send(BroadcastTimes, p.Session)
		}()
		e := argsUtil.NewArgsUtil(args...).To(&roomId, &uid, &times, &auto)
		if e != nil {
			glog.Errorln(e)
			return
		}
		room, e := dao.Room.Get(roomId)
		if e != nil {
			res = utils.Msg(e.Error()).Code(-1)
			return
		}
		// Record the bid. Only rows still at times == -1 (no bid yet) match,
		// so a repeated bid from the same player updates zero rows.
		ret := dao.Db().Model(&model.Game{}).Where("player_id=? and times = -1", uid).Update(map[string]interface{}{"times": times, "auto": auto})
		if ret.Error != nil {
			res = utils.Msg("更新下注信息失败").Code(-1)
			return
		}
		if ret.RowsAffected == 0 { // no row updated => player had already bid
			alreadySet = true
		}
		// Check whether every player in this round has now bid.
		games, err := dao.Game.GetGames(roomId, room.Current)
		if err != nil {
			res = utils.Msg(err.Error()).Code(-1)
			return
		}
		all := true // true once every player has bid
		for _, v := range games {
			// Someone is still at -1 (no bid); just broadcast this bid.
			if v.Times == -1 {
				glog.V(3).Infoln(roomId, "房间:", uid, " 抢庄,", times, "倍。是否自动抢庄:", auto)
				all = false
				break
			}
		}
		// All bids are in: pick the banker and notify everyone.
		if all {
			// Move on to the score-setting (betting) state.
			nextState = SetScoreState
			bankerUid, err := selectBanker(roomId)
			if err != nil {
				res = utils.Msg(err.Error()).Code(-1)
				return
			}
			go func() {
				time.Sleep(time.Second) // short delay before announcing the banker
				// NOTE(review): this goroutine reuses the captured res after
				// the handler has returned; it works because the deferred
				// notifier already ran, but is fragile — confirm intent.
				res = utils.Msg("").AddData("game", &model.Game{
					PlayerId: bankerUid,
					Banker:   true,
				})
				SendToAllPlayers(res, BroadcastBanker, roomId)
			}()
			// Schedule an automatic minimum bet for each non-banker player.
			for _, v := range games {
				if v.PlayerId == bankerUid { // the banker does not bet
					continue
				}
				go func(g model.Game) {
					g1, e := Games.Get(g.RoomId)
					if e != nil {
						glog.Error(e)
						return
					}
					auto, _ := g1.AutoPlayers[g.PlayerId]
					waitTime := time.Second * 6
					if config.Config.Debug {
						waitTime = time.Millisecond * 100;
					}
					if auto {
						waitTime = time.Second * 2
					}
					time.Sleep(waitTime)
					// Betting brackets per room score level; the automatic
					// bet uses the bracket's lower bound.
					ss := [][]int{{1, 2}, {2, 4}, {3, 6}, {4, 8}, {5, 10}, {10, 20}}
					s := ss[room.Score]
					score := s[0]
					g1.SetScore(g.PlayerId, score, true)
				}(v)
			}
		}
		res = nil // success: the deferred notifier will broadcast the bid
		return
	}
	return
}
// selectBanker picks the banker for the current round of roomId and persists
// the choice. Priority order visible in the code below:
//  1. a "special user" who bid 4x always wins and receives a fabricated
//     strong hand (niu-7 .. niu-niu) avoiding already-dealt cards;
//  2. if all bids are equal, a random player is chosen;
//  3. if more than two players bid 4x, one of them is chosen at random;
//  4. otherwise the highest bidder wins.
// From the second hand onward it also sends the push-bet (tui zhu) offers.
// Returns the chosen player's id.
func selectBanker(roomId uint) (uid uint, err error) {
	games, e := dao.Game.GetCurrentGames(roomId)
	if e != nil {
		err = e
		return
	}
	if len(games) == 0 {
		err = errors.New("当前房间没有玩家")
		return
	}
	eq := true // whether every bid is equal
	var game = games[0]
	var game4 []model.Game
	var sGame model.Game // the special user's row, if one exists
	// Find the highest bid, collect all 4x bids, and detect a special user.
	for _, g := range games {
		if g.Times == 4 {
			if dao.User.IsSpecialUser(g.PlayerId) {
				sGame = g
				break
			}
			game4 = append(game4, g)
		}
		if g.Times != game.Times {
			eq = false
			if g.Times > game.Times {
				game = g
			}
		}
	}
	// Tie-breaking per the rules above; randomness is time-seeded.
	rand.Seed(time.Now().Unix())
	if sGame.ID > 0 { // a special user always becomes the banker
		game = sGame
	} else if eq {
		game = games[rand.Intn(len(games))]
	} else if len(game4) > 2 {
		// NOTE(review): ">" means exactly two 4x bids keep the first-found
		// highest bidder instead of a random pick — confirm this is intended.
		game = game4[rand.Intn(len(game4))]
	}
	uid = game.PlayerId
	// Persist the banker flag (plus the rigged hand for a special user).
	var res *gorm.DB
	if sGame.ID > 0 { // special user: deal a random niu-7..niu-niu hand
		// Collect everyone else's already-dealt cards so the fabricated hand
		// cannot duplicate them.
		var removeCards []int
		for _, g := range games {
			if g.ID == sGame.ID {
				continue
			}
			css := strings.Split(g.Cards, "|")
			for _, g1 := range css {
				v, err := strconv.Atoi(g1)
				if err != nil {
					glog.Error("转换牌的点数失败", err)
				}
				removeCards = append(removeCards, v)
			}
		}
		cards := ""
		result := createCard(removeCards, rand.Intn(4)+7)
		for _, v := range result {
			cards += strconv.Itoa(v) + "|"
		}
		res = dao.Db().Model(&game).Updates(model.Game{Banker: true, Cards: cards[:len(cards)-1]})
	} else {
		res = dao.Db().Model(&game).Update("banker", true)
	}
	if res.Error != nil {
		err = errors.New("选定庄家出错")
		return
	}
	if res.RowsAffected == 0 {
		err = errors.New("更新庄家信息出错")
		return
	}
	// From the second hand onward, offer push-bets to eligible players.
	if game.Current > 1 {
		err = sendTuiZhu(roomId, game.PlayerId)
	}
	return
}
// sendTuiZhu broadcasts the push-bet (tui zhu) offers for roomId. A player
// may push last hand's winnings onto the next hand unless they were the
// banker last hand, lost, are the new banker, or already pushed. Eligible
// players' next-hand rows are flagged with Tui = true.
func sendTuiZhu(roomId, bankerUid uint) (err error) {
	games, e := dao.Game.GetLastGames(roomId)
	if e != nil {
		err = e
		return
	}
	// tuiUser is the per-player payload of the push-bet broadcast.
	type tuiUser struct {
		Uid    uint `json:"uid"`    // user id
		DeskId int  `json:"deskId"` // seat number
		Score  int  `json:"score"`  // pushable score
	}
	var tuiUsers []tuiUser
	for _, v := range games {
		// Ineligible: was banker last hand, or lost, or is the new banker,
		// or already pushed last hand.
		if v.Banker || v.TotalScore <= 0 || v.PlayerId == bankerUid || v.Tui {
			continue
		}
		tuiUsers = append(tuiUsers, tuiUser{
			Uid:    v.PlayerId,
			DeskId: v.DeskId,
			Score:  v.Score + v.TotalScore,
		})
		// Mark the player's NEXT hand (Current + 1) as a push.
		ret:=dao.Db().Model(model.Game{}).Where(model.Game{RoomId: roomId, PlayerId: v.PlayerId, Current: v.Current + 1}).Update(model.Game{Tui: true})
		if ret.Error!=nil{
			glog.Error(ret.Error)
		}
	}
	msg := utils.Msg("").AddData("roomId", roomId).AddData("users", tuiUsers)
	SendToAllPlayers(msg, BroadcastAllScore, roomId)
	return
}
// createCard deals a 5-card hand worth exactly "niu n" (n points), never
// using a card listed in removeCards. Card ids are 0..51; id%13 is the rank
// and ranks above 9 count as 10 points (stored in bucket index 9). The hand
// is: one 10-point card, two cards summing to 10 (or two more 10s), and two
// cards summing to n — so the final two cards set the niu value.
//   removeCards — card ids already dealt elsewhere, to be excluded
//   n           — desired niu value, e.g. 7 for "niu 7"
func createCard(removeCards []int, n int) (result []int) {
	// cards[p] holds undealt card ids worth p+1 points (bucket 9 = 10 points).
	var cards [10][]int
	for i := 0; i < 52; i++ {
		// Skip cards that have already been dealt.
		ok := func() bool {
			for _, v := range removeCards {
				if v == i {
					return true
				}
			}
			return false
		}()
		if ok {
			continue
		}
		c := i % 13
		if c > 9 {
			c = 9
		}
		cards[c] = append(cards[c], i)
	}
	// First card: a 10-pointer.
	var card int
	card, cards = randCard(cards, 10)
	result = append(result, card)
	// Cards 2-3: either two more 10-pointers, or two cards summing to 10.
	if rand.Intn(2) > 0 {
		card, cards = randCard(cards, 10)
		result = append(result, card)
		card, cards = randCard(cards, 10)
		result = append(result, card)
	} else {
	ReCreate:
		// Random split of 10 into two ranks; retry if either bucket is empty.
		n1 := rand.Intn(9)
		n2 := 9 - n1 - 1
		if len(cards[n1]) == 0 || len(cards[n2]) == 0 {
			goto ReCreate
		}
		card, cards = randCard(cards, n1+1)
		result = append(result, card)
		card, cards = randCard(cards, n2+1)
		result = append(result, card)
	}
	// Cards 4-5: two cards summing to n, giving the hand its niu value.
ReCreate2:
	// Random split of n into two ranks; retry if either bucket is empty.
	n1 := rand.Intn(n - 1)
	n2 := n - n1 - 2
	if len(cards[n1]) == 0 || len(cards[n2]) == 0 {
		goto ReCreate2
	}
	card, cards = randCard(cards, n1+1)
	result = append(result, card)
	card, cards = randCard(cards, n2+1)
	result = append(result, card)
	return
}
// randCard removes and returns one random card worth n points from cards
// (bucket n-1), along with the updated buckets.
func randCard(cards [10][]int, n int) (card int, returnCards [10][]int) {
	rand.Seed(time.Now().UnixNano())
	bucket := cards[n-1]
	idx := rand.Intn(len(bucket))
	card = bucket[idx]
	cards[n-1] = append(bucket[:idx], bucket[idx+1:]...)
	returnCards = cards
	return
}
|
package covid
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/NavenduDuari/goinfo/covid/utils"
)
// getCovidData fetches national and per-state COVID figures from the
// covid19india.org public API. On any network/read/parse failure it logs
// the error and returns the zero-valued struct.
func getCovidData() covidStruct {
	var covidObj covidStruct
	url := "https://api.covid19india.org/data.json"
	res, err := http.Get(url)
	if err != nil {
		// BUG FIX: execution previously fell through and dereferenced the
		// nil response below.
		fmt.Println(err)
		return covidObj
	}
	defer res.Body.Close() // BUG FIX: the response body was never closed
	responseData, err := ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Println(err)
		return covidObj
	}
	if err := json.Unmarshal(responseData, &covidObj); err != nil {
		fmt.Println(err) // previously the unmarshal error was silently dropped
	}
	return covidObj
}
// getHelp writes the covid command's usage text (available flags plus
// examples) to w.
// NOTE(review): "statae" in the help text is a typo in user-facing output;
// left untouched because doc-only edits must not alter runtime strings.
func getHelp(w http.ResponseWriter) {
	content := `*covid* gives covid data.` + " \n " + `
commands available:
*--state* //to specify statae
*--help* //to get help
*--suggest* //to get suggestion
Example:
*covid* //gives data of India
*covid --state=WB* //gives data of West Bengal
`
	io.WriteString(w, content)
}
// SendCovidWs writes a covid summary to w. Behaviour by args:
//   - "--suggest" set: write the state-id suggestion list;
//   - "--help" set, or the command was invalid: write the usage text;
//   - otherwise: write totals for India, or for the state whose code equals
//     args["--state="]; an unknown code falls back to the national row
//     (Statewise[0]) and appends the suggestion list.
// NOTE(review): the lookup key is literally "--state=" (trailing '='),
// presumably matching how the argument parser stores keys — confirm.
func SendCovidWs(w http.ResponseWriter, args map[string]string, isCmdValid bool) {
	var msg string
	var todayData statewiseCases
	covidObj := getCovidData()
	if args["--suggest"] != "" {
		getSuggestion(w)
	} else if args["--help"] != "" || isCmdValid == false {
		getHelp(w)
	} else {
		stateFound := false
		stateInfo := ""
		todayData = covidObj.Statewise[0] //TOTAL WARN: dependent on struct
		for _, stateData := range covidObj.Statewise {
			if stateData.Statecode == args["--state="] {
				todayData = stateData
				stateFound = true
				stateInfo = `State: ` + todayData.State + "(" + todayData.Statecode + ")"
				break
			}
		}
		msg = `Last Updated: ` + todayData.Lastupdatedtime + `
` + stateInfo + `
Total confirmed cases: ` + todayData.Confirmed + "(+" + todayData.Deltaconfirmed + ")" + `
Total deceased: ` + todayData.Deaths + "(+" + todayData.Deltadeaths + ")" + `
Total recovered: ` + todayData.Recovered + "(+" + todayData.Deltarecovered + ")" + `
Stay HOME, Stay SAFE` + `
`
		io.WriteString(w, msg)
		if !stateFound {
			getSuggestion(w)
		}
	}
}
// getSuggestion writes the list of known state names and their ids to w so
// the user can retry with a valid --state=Id value.
func getSuggestion(w http.ResponseWriter) {
	content := `-------------------------------------------------------------
TRY *covid --state=Id*
`
	for stateId, stateName := range utils.States {
		content = content + `
Name: ` + stateName + ` Id: *` + stateId + `*
`
	}
	io.WriteString(w, content)
}
|
package main
import (
"bufio"
"flag"
"fmt"
"math"
"os"
"strconv"
)
// scanFile prints up to maxLines lines of the file at filePath to stdout,
// optionally prefixing each line with a right-aligned line number whose
// width fits the largest possible line number (maxLines).
func scanFile(filePath string, maxLines uint, showLineNum bool) {
	file, err := os.Open(filePath)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer file.Close()
	s := bufio.NewScanner(file)
	if showLineNum {
		// Width of the largest line number. BUG FIX: the old code formatted
		// the float result of Log10 via FormatFloat, which produced garbage
		// for maxLines == 0 (Log10(0) is -Inf); guard that case and convert
		// the integer width with strconv instead.
		width := 1
		if maxLines > 0 {
			width = int(math.Floor(math.Log10(float64(maxLines)))) + 1
		}
		fmtStr := "%" + strconv.Itoa(width) + "d %s\n"
		// BUG FIX: check the limit before calling Scan so we do not consume
		// one line beyond maxLines.
		for i := uint(0); i < maxLines && s.Scan(); i++ {
			fmt.Printf(fmtStr, i+1, s.Text())
		}
	} else {
		for i := uint(0); i < maxLines && s.Scan(); i++ {
			fmt.Println(s.Text())
		}
	}
	// Report scanner failures (e.g. overly long lines) instead of silence.
	if err := s.Err(); err != nil {
		fmt.Println(err)
	}
}
// main implements a tiny "head": print the first -n lines of each file
// argument, optionally (-l) with line numbers; multiple files get headers.
func main() {
	maxLines := flag.Uint("n", 10, "lines")
	showLineNum := flag.Bool("l", false, "line number")
	flag.Parse()
	paths := flag.Args()
	if len(paths) == 1 {
		scanFile(paths[0], *maxLines, *showLineNum)
		return
	}
	for i, path := range paths {
		fmt.Println("==> ", path, " <==")
		scanFile(path, *maxLines, *showLineNum)
		if i != len(paths)-1 {
			fmt.Println("") // blank line separates consecutive files
		}
	}
}
|
package datastruct
import "fmt"
// ExampleStack demonstrates using a plain slice as a LIFO stack:
// append pushes, slicing off the last element pops.
func ExampleStack() {
	var stack []int
	push := func(v int) { stack = append(stack, v) }
	pop := func() int {
		v := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		return v
	}
	push(1)
	push(8)
	push(-2)
	fmt.Println(pop())
	fmt.Println(pop())
	push(10)
	fmt.Println(pop())
	fmt.Println(pop())
	// Output:
	// -2
	// 8
	// 10
	// 1
}
|
package scoped
import (
"fmt"
"strings"
"github.com/rancher/norman/api/access"
"github.com/rancher/norman/httperror"
"github.com/rancher/norman/store/transform"
"github.com/rancher/norman/types"
"github.com/rancher/norman/types/convert"
"github.com/rancher/types/client/management/v3"
mgmtclient "github.com/rancher/types/client/management/v3"
)
// Store wraps another types.Store and scopes records to a namespace derived
// from the field named by key (e.g. a project or cluster reference field).
type Store struct {
	types.Store
	key string
}
// NewScopedStore builds a Store whose reads pass through a transformer:
// when the value of `key` does not already end in ":<namespaceId>", the key
// field is replaced with the record's namespace id; the namespace id field
// itself is then cleared from the response.
// NOTE(review): overwriting only when the suffix does NOT match looks
// intentional but is undocumented — confirm against callers.
func NewScopedStore(key string, store types.Store) *Store {
	return &Store{
		Store: &transform.Store{
			Store: store,
			Transformer: func(apiContext *types.APIContext, schema *types.Schema, data map[string]interface{}, opt *types.QueryOptions) (map[string]interface{}, error) {
				if data == nil {
					return data, nil
				}
				v := convert.ToString(data[key])
				// Values already namespaced as "<x>:<nsId>" are kept as-is.
				if !strings.HasSuffix(v, ":"+convert.ToString(data[client.ProjectFieldNamespaceId])) {
					data[key] = data[client.ProjectFieldNamespaceId]
				}
				// Hide the raw namespace id from API consumers.
				data[client.ProjectFieldNamespaceId] = nil
				return data, nil
			},
		},
		key: key,
	}
}
// Create derives namespaceId from the scoped key before delegating: the
// segment after the last ':' of the key's value (or the whole value when
// there is no ':') becomes the record's namespaceId.
func (s *Store) Create(apiContext *types.APIContext, schema *types.Schema, data map[string]interface{}) (map[string]interface{}, error) {
	if data != nil {
		scoped := convert.ToString(data[s.key])
		namespace := scoped
		if idx := strings.LastIndex(scoped, ":"); idx >= 0 {
			namespace = scoped[idx+1:]
		}
		data["namespaceId"] = namespace
	}
	return s.Store.Create(apiContext, schema, data)
}
// Delete refuses to remove the system project; any other project is passed
// through to the wrapped store. Failing to load the project aborts the
// delete with a server error.
func (s *Store) Delete(apiContext *types.APIContext, schema *types.Schema, id string) (map[string]interface{}, error) {
	var project mgmtclient.Project
	err := access.ByID(apiContext, apiContext.Version, apiContext.Type, apiContext.ID, &project)
	if err != nil {
		return nil, httperror.NewAPIError(httperror.ServerError, fmt.Sprintf("Error accessing project [%s]: %v", id, err))
	}
	if project.Labels["authz.management.cattle.io/system-project"] == "true" {
		return nil, httperror.NewAPIError(httperror.MethodNotAllowed, "System Project cannot be deleted")
	}
	return s.Store.Delete(apiContext, schema, id)
}
|
package controllers
import (
"encoding/json"
"mall/models"
)
// LogoutController exposes the logout endpoint for mall members (UmsMember).
type LogoutController struct {
	BaseController
}
// Post logs a member out: it clears the server-side session and expires the
// phone/password cookies, then returns the model-layer result as JSON.
// @Title Logout
// @Description Logout umsMember
// @Param body body models.UmsMember true "body for UmsMember content"
// @Success 200 {object} models.UmsMember
// @Failure 403 body is empty
// @router / [post]
func (u *LogoutController) Post() {
	var umsMember models.UmsMember
	// NOTE(review): the unmarshal error is deliberately discarded; a bad
	// request body yields a zero-valued member — confirm that is acceptable.
	_ = json.Unmarshal(u.Ctx.Input.RequestBody, &umsMember)
	umsMemberRes, err := models.LogoutOfUmsMember(umsMember)
	code, message := DecodeErr(err)
	if err != nil {
		u.Data["json"] = ErrResponse{code, message}
	} else {
		// Drop session state and expire the auth cookies (max-age -1).
		u.DelSession("phone")
		u.DelSession("password")
		u.Ctx.SetCookie("phone", "", -1)
		u.Ctx.SetCookie("password", "", -1)
		u.Data["json"] = Response{code, message, umsMemberRes}
	}
	u.ServeJSON()
}
|
package fixtures
import (
"context"
"time"
"github.com/tilinna/clock"
)
// NewAdvancingClock attaches a virtual clock to a context which advances
// at full speed (not wall speed), and a cancel function to stop it. The
// clock also stops if the context is canceled.
func NewAdvancingClock(ctx context.Context) (context.Context, func()) {
	mock := clock.NewMock(time.Unix(1, 0))
	ctx = clock.Context(ctx, mock)
	stop := make(chan struct{})
	go func() {
		for {
			select {
			case <-stop:
				return
			case <-ctx.Done():
				return
			default:
				// Jump straight to the next scheduled timer/ticker event.
				mock.AddNext()
			}
		}
	}()
	cancel := func() { close(stop) }
	return ctx, cancel
}
// NextStep will advance the supplied clock.Mock until it moves, or the
// context.Context is canceled (which typically means it timed out in
// wall-time). This is useful when testing things that exist inside
// goroutines, when it's not possible to tell when the goroutine is ready
// to consume mock time.
func NextStep(ctx context.Context, clck *clock.Mock) {
	for {
		if _, advanced := clck.AddNext(); advanced != 0 || ctx.Err() != nil {
			return
		}
		// Let the system actually idle; runtime.Gosched() does not.
		time.Sleep(time.Nanosecond)
	}
}
|
package main
// === package di ===
import "reflect"
// bindings and targets implement a minimal name-based dependency injector:
// Bind associates a name with a value; Resolve registers a pointer that
// should receive that value. Either call order works.
var (
	// define how what name binds to which value
	bindings = make(map[string]reflect.Value)
	// define where to bind the values by name
	targets = make(map[string][]reflect.Value)
)

// Resolve registers service (a pointer to the binding point) as a target
// for name. If the name is already bound, the current binding is assigned
// immediately; otherwise a later Bind fills it in.
func Resolve(name string, service interface{}) {
	if _, ok := targets[name]; !ok {
		targets[name] = make([]reflect.Value, 0)
	}
	v := reflect.ValueOf(service)
	// for better binding messages also store runtime.Caller(1)
	// that info could be also used for more complex bindings
	targets[name] = append(targets[name], v)
	if binding, ok := bindings[name]; ok {
		// BUG FIX: v is the pointer itself and is not settable
		// (reflect.Value.CanSet is false), so the old v.Set(binding)
		// panicked whenever Bind preceded Resolve. Write through the
		// pointer instead, exactly as Bind does.
		v.Elem().Set(binding)
	}
}

// Bind associates name with service and pushes the value into every
// target registered so far.
func Bind(name string, service interface{}) {
	v := reflect.ValueOf(service)
	bindings[name] = v
	for _, target := range targets[name] {
		// this will panic if it's wrong type, should print runtime.Caller(1)
		target.Elem().Set(v)
	}
}
// func Check() { ... check whether everything got bound ... }
// === end package di ===
// Animal is the generic interface every example animal implements.
type Animal interface {
	Speak()
}

// Dog is the example Animal implementation.
type Dog struct{ name string }

// Speak prints the dog's bark.
func (d *Dog) Speak() {
	println(d.name, ": woof")
}

// NewDog constructs a Dog; it is used as a function binding target.
func NewDog(name string) Animal {
	dog := Dog{name: name}
	return &dog
}

// DogFactory is the example factory bound to the Factory binding point.
type DogFactory struct{}

// NewAnimal builds a new Dog with the given name.
func (ds *DogFactory) NewAnimal(name string) Animal {
	return NewDog(name)
}
// The three supported shapes of binding point, all filled in by Bind via
// the Resolve registrations made in main:

// NewAnimal is a function-valued binding point in a module.
var NewAnimal func(string) Animal

// Factory is an interface-valued binding point in a module.
var Factory interface {
	NewAnimal(string) Animal
}

// System shows that binding points may also live inside a struct field.
var System struct {
	NewAnimal func(string) Animal
}
// main wires the example: register the binding points, bind concrete
// implementations, then exercise each injected entry point.
func main() {
	// module init phase: register every binding point
	Resolve("Animal", &NewAnimal)
	Resolve("Animal", &System.NewAnimal)
	Resolve("Factory", &Factory)

	// wiring phase: attach implementations
	Bind("Animal", NewDog)
	Bind("Factory", &DogFactory{})

	// usage phase: all three binding points now work
	for _, animal := range []Animal{
		NewAnimal("a"),
		Factory.NewAnimal("b"),
		System.NewAnimal("c"),
	} {
		animal.Speak()
	}
}
|
package ignorefile
// Matcher decides whether a given path matches an ignore rule set.
type Matcher interface {
	MatchPath(path string) bool
}

// NopMatcher is the do-nothing Matcher: it never matches any path.
type NopMatcher struct{}

// MatchPath always reports false.
//nolint:revive
func (NopMatcher) MatchPath(string) bool {
	return false
}

// compile-time check that NopMatcher satisfies Matcher
var _ Matcher = &NopMatcher{}
|
package main
import "fmt"
// main demonstrates Go switch semantics. BUG FIX: the original version did
// not compile — it repeated the constant 70 in two cases (duplicate constant
// cases are rejected) and used an int64 case against an int32 switch
// expression. Both are corrected below while the lesson is kept in comments.
func main() {
	var num1 int32 = 40
	var num2 int32 = 20
	var num3 int32 = 70
	var num4 int64 = 70
	switch num1 {
	case num2, 10, 70: // a case may list several comma-separated expressions
		fmt.Println("success")
	case num3: // variables are allowed even when their runtime value repeats 70
		fmt.Println("fail")
	case 30: // constant cases must be unique: a second literal 70 would not compile
		fmt.Println("fail const")
	case int32(num4): // every case must match the switch expression's type, so int64 needs a conversion
		fmt.Println("fail const")
	default: // the default clause is optional
		fmt.Println("error")
	}
}
|
package table
var tmpls = map[string]string{"choose_table_ajax": `{{define "choose_table_ajax"}}
NProgress.start();
let info_table = $("tbody.fields-table");
info_table.find("tr").remove();
let tpl = $("template.fields-tpl").html();
for (let i = 0; i < data.data[0].length; i++) {
info_table.append(tpl);
}
let trs = info_table.find("tr");
for (let i = 0; i < data.data[0].length; i++) {
$(trs[i]).find('.field_head').val(data.data[0][i]);
$(trs[i]).find('.field_name').val(data.data[1][i]);
$(trs[i]).find('select.field_db_type').val(data.data[2][i]).select2();
}
let form_table = $("tbody.fields_form-table");
form_table.find("tr").remove();
let tpl_form = $("template.fields_form-tpl").html();
for (let i = 0; i < data.data[0].length; i++) {
form_table.append(tpl_form);
}
let trs_form = form_table.find("tr");
let pk = $(".pk").val();
for (let i = 0; i < data.data[0].length; i++) {
$(trs_form[i]).find('.field_head_form').val(data.data[0][i]);
$(trs_form[i]).find('.field_name_form').val(data.data[1][i]);
$(trs_form[i]).find('input.field_canedit').iCheck("check");
if (!(data.data[1][i] === pk || (pk === "" && data.data[1][i] === "id"))) {
$(trs_form[i]).find('input.field_canadd').iCheck("check");
}
if (data.data[1][i] === "created_at" || data.data[1][i] === "updated_at") {
$(trs_form[i]).find('select.field_display').val("1").select2();
}
$(trs_form[i]).find('select.field_db_type_form').val(data.data[2][i]).select2();
$(trs_form[i]).find('select.field_form_type_form').val(data.data[3][i]).select2();
}
$(".hide_filter_area.ga_checkbox").bootstrapSwitch('state', true);
$(".hide_new_button.ga_checkbox").bootstrapSwitch('state', true);
$(".hide_export_button.ga_checkbox").bootstrapSwitch('state', true);
$(".hide_edit_button.ga_checkbox").bootstrapSwitch('state', true);
$(".hide_pagination.ga_checkbox").bootstrapSwitch('state', true);
$(".hide_delete_button.ga_checkbox").bootstrapSwitch('state', true);
$(".hide_detail_button.ga_checkbox").bootstrapSwitch('state', true);
$(".hide_filter_button.ga_checkbox").bootstrapSwitch('state', true);
$(".hide_row_selector.ga_checkbox").bootstrapSwitch('state', true);
$(".hide_query_info.ga_checkbox").bootstrapSwitch('state', true);
$(".filter_form_layout.ga_checkbox").bootstrapSwitch('state', true);
$(".hide_continue_edit_check_box.ga_checkbox").bootstrapSwitch('state', true);
$(".hide_reset_button.ga_checkbox").bootstrapSwitch('state', true);
$(".hide_continue_new_check_box.ga_checkbox").bootstrapSwitch('state', true);
$(".hide_back_button.ga_checkbox").bootstrapSwitch('state', true);
let detail_table = $("tbody.fields_detail-table");
detail_table.find("tr").remove();
NProgress.done();
{{end}}`, "generator": `{{define "generator"}}
<script>
$(function () {
let pack = localStorage.getItem("{{index . "prefix"}}package");
let pk = localStorage.getItem("{{index . "prefix"}}pk");
let path = localStorage.getItem("{{index . "prefix"}}path");
if (pack !== "") {
$(".package").val(pack);
}
if (pk !== "") {
$(".pk").val(pk);
}
if (path !== "") {
$(".path").val(path);
}
let save_table_list_str = localStorage.getItem("{{index . "prefix"}}save_table_list");
if (save_table_list_str && save_table_list_str !== "") {
addTableToList(JSON.parse(save_table_list_str));
}
});
function getLis() {
return $("li.list-group-item.list-group-item-action")
}
function addTableToList(save_table_list) {
let list_group = $(".list-group.save_table_list");
getLis().remove();
for (let i = save_table_list.length - 1; i > save_table_list.length - 6 && i > -1; i--) {
let new_li = "<li class='list-group-item list-group-item-action'>" + save_table_list[i] + "</li>";
list_group.append(new_li);
}
list_group.show();
getLis().on("click", restoreTableData);
}
$(".nav.nav-tabs li a").on("click", function () {
let href = $(this).attr("href");
let trs = $(".list-group-item.list-group-item-action");
if (trs.length > 0) {
if (href === "#tab-form-0") {
$(".list-group.save_table_list").show();
}
if (href === "#tab-form-1" || href === "#tab-form-2" || href === "#tab-form-3") {
$(".list-group.save_table_list").hide();
}
}
});
$(".btn-group.pull-right .btn.btn-primary").on("click", function () {
let pack = $(".package").val();
let pk = $(".pk").val();
let path = $(".path").val();
if (pack !== "") {
localStorage.setItem("{{index . "prefix"}}package", pack);
}
if (pk !== "") {
localStorage.setItem("{{index . "prefix"}}pk", pk);
}
if (path !== "") {
localStorage.setItem("{{index . "prefix"}}path", path);
}
let table = $("select.table").val();
if (table && table !== "") {
let save_table_list = [];
let save_table_list_str = localStorage.getItem("{{index . "prefix"}}save_table_list");
if (save_table_list_str && save_table_list_str !== "") {
save_table_list = JSON.parse(save_table_list_str);
}
let table_index = save_table_list.indexOf(table);
if (table_index !== -1) {
save_table_list.splice(table_index, 1);
}
save_table_list.push(table);
localStorage.setItem("{{index . "prefix"}}save_table_list", JSON.stringify(save_table_list));
localStorage.setItem("{{index . "prefix"}}save_table_" + table, getItemObjData());
addTableToList(save_table_list);
}
});
function restoreTableData() {
NProgress.start();
let data_str = localStorage.getItem("{{index . "prefix"}}save_table_" + $(this).html());
if (data_str && data_str !== "") {
let data = JSON.parse(data_str);
$(".package").val(data.package);
$(".pk").val(data.pk);
$(".path").val(data.path);
$("select.conn").val(data.conn).select2();
conn_req_refresh($("select.table"), false, "select");
setTimeout(function () {
$("select.table").val(data.table).select2();
}, 2000);
$(".extra_import_package").val(data.extra_import_package).select2();
$(".table_title").val(data.table_title);
$(".table_description").val(data.table_description);
$(".form_title").val(data.form_title);
$(".form_description").val(data.form_description);
$(".detail_title").val(data.detail_title);
$(".detail_description").val(data.detail_description);
$("select.detail_display").val(data.detail_display).select2();
if (data.detail_display !== "0") {
$("label[for='detail_title']").parent().show();
$("label[for='detail_description']").parent().show();
$("label[for='fields_detail']").parent().show();
}
if (extra_codeeditor && data.extra_code && data.extra_code !== "") {
extra_codeeditor.setValue(decodeURIComponent(data.extra_code));
}
let info_table = $("tbody.fields-table");
info_table.find("tr").remove();
let tpl = $("template.fields-tpl").html();
for (let i = 0; i < data.infos.length; i++) {
info_table.append(tpl);
}
let trs = info_table.find("tr");
for (let i = 0; i < trs.length; i++) {
$(trs[i]).find('.field_head').val(data.infos[i][0]);
$(trs[i]).find('.field_name').val(data.infos[i][1]);
checkItemSwitch($(trs[i]).find('input.field_filterable'), data.infos[i][2]);
checkItemSwitch($(trs[i]).find('input.field_sortable'), data.infos[i][3]);
checkItemSwitch($(trs[i]).find('input.field_hide'), data.infos[i][4]);
checkItemSwitch($(trs[i]).find('input.info_field_editable'), data.infos[i][5]);
$(trs[i]).find('select.field_db_type').val(data.infos[i][6]).select2();
}
let form_table = $("tbody.fields_form-table");
form_table.find("tr").remove();
let tpl_form = $("template.fields_form-tpl").html();
for (let i = 0; i < data.forms.length; i++) {
form_table.append(tpl_form);
}
let trs_form = form_table.find("tr");
for (let i = 0; i < trs_form.length; i++) {
$(trs_form[i]).find('.field_head_form').val(data.forms[i][0]);
$(trs_form[i]).find('.field_name_form').val(data.forms[i][1]);
checkItemSwitch($(trs_form[i]).find('input.field_canedit'), data.forms[i][2]);
checkItemSwitch($(trs_form[i]).find('input.field_canadd'), data.forms[i][3]);
$(trs_form[i]).find('.field_default').val(data.forms[i][4]);
$(trs_form[i]).find('select.field_display').val(data.forms[i][5]).select2();
$(trs_form[i]).find('select.field_db_type_form').val(data.forms[i][6]).select2();
$(trs_form[i]).find('select.field_form_type_form').val(data.forms[i][7]).select2();
}
let detail_table = $("tbody.fields_detail-table");
detail_table.find("tr").remove();
let tpl_detail = $("template.fields_detail-tpl").html();
for (let i = 0; i < data.details.length; i++) {
detail_table.append(tpl_detail);
}
let trs_detail = detail_table.find("tr");
for (let i = 0; i < trs_detail.length; i++) {
$(trs_detail[i]).find('.field_head').val(data.details[i][0]);
$(trs_detail[i]).find('.field_name').val(data.details[i][1]);
$(trs_detail[i]).find('select.field_db_type').val(data.details[i][2]).select2();
}
toggleItemSwitchOpposite($(".permission.ga_checkbox"), data.permission);
toggleItemSwitch($(".hide_filter_area.ga_checkbox"), data.hide_filter_area);
toggleItemSwitch($(".hide_new_button.ga_checkbox"), data.hide_new_button);
toggleItemSwitch($(".hide_export_button.ga_checkbox"), data.hide_export_button);
toggleItemSwitch($(".hide_edit_button.ga_checkbox"), data.hide_edit_button);
toggleItemSwitch($(".hide_pagination.ga_checkbox"), data.hide_pagination);
toggleItemSwitch($(".hide_delete_button.ga_checkbox"), data.hide_delete_button);
toggleItemSwitch($(".hide_detail_button.ga_checkbox"), data.hide_detail_button);
toggleItemSwitch($(".hide_filter_button.ga_checkbox"), data.hide_filter_button);
toggleItemSwitch($(".hide_row_selector.ga_checkbox"), data.hide_row_selector);
toggleItemSwitch($(".hide_query_info.ga_checkbox"), data.hide_query_info);
toggleItemSwitch($(".filter_form_layout.ga_checkbox"), data.filter_form_layout);
toggleItemSwitch($(".hide_continue_edit_check_box.ga_checkbox"), data.hide_continue_edit_check_box);
toggleItemSwitch($(".hide_reset_button.ga_checkbox"), data.hide_reset_button);
toggleItemSwitch($(".hide_continue_new_check_box.ga_checkbox"), data.hide_continue_new_check_box);
toggleItemSwitch($(".hide_back_button.ga_checkbox"), data.hide_back_button);
}
NProgress.done();
}
// Set a bootstrapSwitch ON when the stored flag is "n", OFF otherwise.
function toggleItemSwitch(obj, val) {
    $(obj).bootstrapSwitch('state', val === "n");
}
// Inverse of toggleItemSwitch: OFF when the stored flag is "n", ON otherwise.
function toggleItemSwitchOpposite(obj, val) {
    $(obj).bootstrapSwitch('state', val !== "n");
}
// Tick an iCheck checkbox when the stored flag is "y", untick otherwise.
function checkItemSwitch(obj, val) {
    $(obj).iCheck(val === "y" ? "check" : "uncheck");
}
// Read an iCheck wrapper's state back as the "y"/"n" flag used by the form.
function getItemSwitchValue(obj) {
    return $(obj).hasClass("checked") ? "y" : "n";
}
// Serialize the whole code-generator form into a JSON string:
// connection/table settings, the three field tables (list "infos",
// form "forms", detail "details") and the display/permission flags.
function getItemObjData() {
    let data = {};
    // Connection, table and title settings.
    data.conn = $("select.conn").val();
    data.package = $(".package").val();
    data.pk = $(".pk").val();
    data.path = $(".path").val();
    data.extra_code = $("#extra_code_input").val();
    data.table = $("select.table").val();
    data.table_title = $(".table_title").val();
    data.table_description = $(".table_description").val();
    data.form_title = $(".form_title").val();
    data.form_description = $(".form_description").val();
    data.extra_import_package = $(".extra_import_package").val();
    data.detail_title = $(".detail_title").val();
    data.detail_description = $(".detail_description").val();
    data.detail_display = $("select.detail_display").val();
    // List-page rows: [head, name, filterable, sortable, hide, editable, db_type].
    let infos = [];
    let trs = $("tbody.fields-table").find("tr");
    for (let i = 0; i < trs.length; i++) {
        infos[i] = [];
        infos[i].push($(trs[i]).find('.field_head').val());
        infos[i].push($(trs[i]).find('.field_name').val());
        // iCheck marks the wrapper element as "checked", hence .parent().
        infos[i].push(getItemSwitchValue($(trs[i]).find('input.field_filterable').parent()));
        infos[i].push(getItemSwitchValue($(trs[i]).find('input.field_sortable').parent()));
        infos[i].push(getItemSwitchValue($(trs[i]).find('input.field_hide').parent()));
        infos[i].push(getItemSwitchValue($(trs[i]).find('input.info_field_editable').parent()));
        infos[i].push($(trs[i]).find('select.field_db_type').val());
    }
    data.infos = infos;
    // Form-page rows: [head, name, canedit, canadd, default, display, db_type, form_type].
    let forms = [];
    let trs_form = $("tbody.fields_form-table").find("tr");
    for (let i = 0; i < trs_form.length; i++) {
        forms[i] = [];
        forms[i].push($(trs_form[i]).find('.field_head_form').val());
        forms[i].push($(trs_form[i]).find('.field_name_form').val());
        forms[i].push(getItemSwitchValue($(trs_form[i]).find('input.field_canedit').parent()));
        forms[i].push(getItemSwitchValue($(trs_form[i]).find('input.field_canadd').parent()));
        forms[i].push($(trs_form[i]).find('.field_default').val());
        forms[i].push($(trs_form[i]).find('select.field_display').val());
        forms[i].push($(trs_form[i]).find('select.field_db_type_form').val());
        forms[i].push($(trs_form[i]).find('select.field_form_type_form').val());
    }
    data.forms = forms;
    // Detail-page rows: [head, name, db_type].
    let details = [];
    let detail_trs = $("tbody.fields_detail-table").find("tr");
    for (let i = 0; i < detail_trs.length; i++) {
        details[i] = [];
        details[i].push($(detail_trs[i]).find('.field_head').val());
        details[i].push($(detail_trs[i]).find('.field_name').val());
        details[i].push($(detail_trs[i]).find('select.field_db_type').val());
    }
    data.details = details;
    // Flag inputs — presumably hold "y"/"n" mirroring the switches
    // (toggleItemSwitch compares against "n") — TODO confirm against the markup.
    data.permission = $("input[name='permission']").val();
    data.hide_filter_area = $("input[name='hide_filter_area']").val();
    data.hide_new_button = $("input[name='hide_new_button']").val();
    data.hide_export_button = $("input[name='hide_export_button']").val();
    data.hide_edit_button = $("input[name='hide_edit_button']").val();
    data.hide_pagination = $("input[name='hide_pagination']").val();
    data.hide_delete_button = $("input[name='hide_delete_button']").val();
    data.hide_detail_button = $("input[name='hide_detail_button']").val();
    data.hide_filter_button = $("input[name='hide_filter_button']").val();
    data.hide_row_selector = $("input[name='hide_row_selector']").val();
    data.hide_query_info = $("input[name='hide_query_info']").val();
    data.filter_form_layout = $("select.filter_form_layout").val();
    data.hide_continue_edit_check_box = $('input[name="hide_continue_edit_check_box"]').val();
    data.hide_reset_button = $('input[name="hide_reset_button"]').val();
    data.hide_continue_new_check_box = $('input[name="hide_continue_new_check_box"]').val();
    data.hide_back_button = $('input[name="hide_back_button"]').val();
    return JSON.stringify(data)
}
// Ask the server for data derived from the selected connection (e.g. its
// table list) and load the reply into selectObj.
// selectObj: target jQuery <select>; box: whether the target lives inside a
// box widget (changes how the reply is applied); event: server-side hint for
// which UI action triggered the refresh.
function conn_req_refresh(selectObj, box, event) {
    $.ajax({
        url: "\/admin\/operation\/_tool_choose_conn",
        type: 'post',
        dataType: 'text',
        data: {
            'value': $("select.conn").val(),
            'event': event
        },
        success: function (data) {
            // Reply arrives as text; normalize to an object.
            if (typeof (data) === "string") {
                data = JSON.parse(data);
            }
            if (data.code === 0) {
                if (selectObj.length > 0) {
                    if (typeof (data.data) === "object") {
                        if (box) {
                            conn_updateBoxSelections(selectObj, data.data)
                        } else {
                            // Multi-selects must be emptied before select2 repopulates.
                            if (typeof (selectObj.attr("multiple")) !== "undefined") {
                                selectObj.html("");
                            }
                            selectObj.select2({
                                data: data.data
                            });
                        }
                    } else {
                        if (box) {
                            selectObj.val(data.data).select2()
                        } else {
                            // NOTE(review): scalar payload outside a box is
                            // deliberately ignored here — confirm intended.
                        }
                    }
                } else {
                    // No target select present: fall back to the .table input.
                    $('.table').val(data.data);
                }
            } else {
                swal(data.msg, '', 'error');
            }
        },
        error: function () {
            alert('error')
        }
    });
}
</script>
<style>
.save_table_list {
position: absolute;
right: 45px;
top: 200px;
background-color: white;
width: 300px;
min-height: 50px;
z-index: 9999;
display: none;
}
.list-group-item.list-head {
background-color: #5a5a5a;
border-color: #5a5a5a;
font-weight: bold;
color: white;
}
.list-group-item.list-group-item-action {
cursor: pointer;
}
</style>
{{end}}`}
|
package main
import (
"classpath"
"flag"
"fmt"
"os"
"rtda/heap"
"strings"
)
// Cmd models the parsed command-line options and arguments.
//
// Fields:
//  1. helpFlag         -help / -? option
//  2. versionFlag      -version option
//  3. verboseClassFlag verbose class-loading output
//  4. verboseInstFlag  verbose instruction output
//  5. cpOption         user class path (-classpath / -cp)
//  6. XjreOption       jre directory; the JVM uses the JDK bootstrap class
//     path found there to locate and load the Java standard-library classes
//  7. class            name of the main class to run
//  8. args             arguments passed through to the main class
type Cmd struct {
	helpFlag         bool
	versionFlag      bool
	verboseClassFlag bool
	verboseInstFlag  bool
	cpOption         string
	XjreOption       string
	class            string
	args             []string
}
// parseCmd parses command-line options and arguments with the flag package.
//
// Flow:
//  1. register each option (name, default, usage text)
//  2. call flag.Parse(); on bad input flag invokes printUsage via flag.Usage
//  3. flag.Args() yields the positionals: the first is the main class name,
//     the rest are passed through to the program
//  4. return the populated *Cmd
//
// See https://golang.org/pkg/flag/ for details.
func parseCmd() *Cmd {
	cmd := &Cmd{}

	flag.Usage = printUsage
	flag.BoolVar(&cmd.helpFlag, "help", false, "print help message")
	flag.BoolVar(&cmd.helpFlag, "?", false, "print help message")
	flag.BoolVar(&cmd.versionFlag, "version", false, "print version and exit")
	// BUG FIX: verboseClassFlag and verboseInstFlag are read by startJVM but
	// were never bound to any flag, so they were permanently false. Bind them.
	flag.BoolVar(&cmd.verboseClassFlag, "verbose", false, "enable verbose output")
	flag.BoolVar(&cmd.verboseClassFlag, "verbose:class", false, "enable verbose class-loading output")
	flag.BoolVar(&cmd.verboseInstFlag, "verbose:inst", false, "enable verbose instruction output")
	flag.StringVar(&cmd.cpOption, "classpath", "", "classpath")
	flag.StringVar(&cmd.cpOption, "cp", "", "classpath")
	flag.StringVar(&cmd.XjreOption, "Xjre", "", "path to jre")
	flag.Parse()

	args := flag.Args()
	if len(args) > 0 {
		cmd.class = args[0]
		cmd.args = args[1:]
	}

	return cmd
}
// printUsage prints a one-line usage summary naming this binary.
func printUsage() {
	usage := fmt.Sprintf("Usage: %s [-options] class [args...]\n", os.Args[0])
	fmt.Print(usage)
}
// startJVM boots the toy JVM: parse the class path, build a class loader,
// load the main class (dots in the class name become slashes, the JVM's
// internal name form), and interpret its main method if one exists.
func startJVM(cmd *Cmd) {
	cp := classpath.Parse(cmd.XjreOption, cmd.cpOption)
	classLoader := heap.NewClassLoader(cp, cmd.verboseClassFlag)
	className := strings.Replace(cmd.class, ".", "/", -1)
	mainClass := classLoader.LoadClass(className)
	mainMethod := mainClass.GetMainMethod()
	if mainMethod != nil {
		interpret(mainMethod, cmd.verboseInstFlag, cmd.args)
	} else {
		fmt.Printf("Main method not found in class %s\n", cmd.class)
	}
}
|
package git
import (
"io/ioutil"
"testing"
)
// TestResetToCommit verifies that ResetToCommit with ResetHard restores the
// working tree content of an earlier commit.
func TestResetToCommit(t *testing.T) {
	t.Parallel()
	repo := createTestRepo(t)
	defer cleanupTestRepo(t, repo)

	seedTestRepo(t, repo)

	// create commit to reset to
	commitId, _ := updateReadme(t, repo, "testing reset")

	// create commit to reset from
	nextCommitId, _ := updateReadme(t, repo, "will be reset")

	// confirm that we wrote "will be reset" to the readme
	newBytes, err := ioutil.ReadFile(pathInRepo(repo, "README"))
	checkFatal(t, err)
	if string(newBytes) != "will be reset" {
		t.Fatalf("expected %s to equal 'will be reset'", string(newBytes))
	}

	// confirm that the head of the repo is the next commit id
	head, err := repo.Head()
	checkFatal(t, err)
	if head.Target().String() != nextCommitId.String() {
		t.Fatalf(
			"expected to be at latest commit %s, but was %s",
			nextCommitId.String(),
			head.Target().String(),
		)
	}

	commitToResetTo, err := repo.LookupCommit(commitId)
	checkFatal(t, err)

	// BUG FIX: the error returned by ResetToCommit was previously discarded;
	// a failed reset would make the assertion below fail with a misleading
	// message instead of surfacing the real cause.
	err = repo.ResetToCommit(commitToResetTo, ResetHard, &CheckoutOptions{})
	checkFatal(t, err)

	// check that the file now reads "testing reset" like it did before
	bytes, err := ioutil.ReadFile(pathInRepo(repo, "README"))
	checkFatal(t, err)
	if string(bytes) != "testing reset" {
		t.Fatalf("expected %s to equal 'testing reset'", string(bytes))
	}
}
|
package name_test
import (
"fmt"
"github.com/QisFj/godry/name"
)
// ExampleToCamelCase shows snake_case inputs converted to CamelCase.
func ExampleToCamelCase() {
	inputs := []string{
		"a",
		"aa",
		"aa_aa",
		"http_request",
		"battery_life_value",
		"id0_value",
	}
	for _, in := range inputs {
		fmt.Println(name.ToCamelCase(in))
	}
	// Output:
	// A
	// Aa
	// AaAa
	// HttpRequest
	// BatteryLifeValue
	// Id0Value
}
// ExampleToSnakeCase shows CamelCase (and dotted) inputs converted to
// snake_case; note initialism runs like "HTTP" collapse to lowercase.
func ExampleToSnakeCase() {
	inputs := []string{
		"A",
		"AA",
		"AaAa",
		"HTTPRequest",
		"BatteryLifeValue",
		"Id0Value",
		"ID0Value",
		"UserID",
		"User.ID",
		"User.Name",
	}
	for _, in := range inputs {
		fmt.Println(name.ToSnakeCase(in))
	}
	// Output:
	// a
	// aa
	// aa_aa
	// http_request
	// battery_life_value
	// id0_value
	// id0_value
	// user_id
	// user.id
	// user.name
}
// ExampleToSnakeCase_badcase documents a known limitation: a trailing
// plural initialism such as "IDs" is split letter-by-letter instead of
// being kept together ("user_i_ds", not "user_ids").
func ExampleToSnakeCase_badcase() {
	fmt.Println(name.ToSnakeCase("UserIDs"))
	// Output: user_i_ds
}
|
package http
import (
"github.com/gin-gonic/gin"
"fmt"
"net/http"
)
// Router is the shared gin engine; routes are registered once in init.
var Router = gin.Default()

func init() {
	// Simple liveness endpoint.
	Router.GET("/", func(c *gin.Context) {
		c.String(http.StatusOK, "Hello World")
	})
	// Logs query and form parameters, then acknowledges with a JSON status.
	Router.GET("/post", func(c *gin.Context) {
		id := c.Query("id")
		page := c.DefaultQuery("page", "0")
		// NOTE(review): PostForm on a GET route will usually be empty since
		// GET requests carry no form body — confirm this is intended.
		name := c.PostForm("name")
		message := c.PostForm("message")
		fmt.Printf("id: %s; page: %s; name: %s; message: %s \n", id, page, name, message)
		c.JSON(http.StatusOK, gin.H{
			"status_code": http.StatusOK,
		})
	})
	// Echoes the message/nick query parameters back as JSON.
	Router.GET("/form_post", func(c *gin.Context) {
		message := c.Query("message")
		nick := c.DefaultQuery("nick", "anonymous")
		c.JSON(200, gin.H{
			"status":  "posted",
			"message": message,
			"nick":    nick,
		})
	})
}
|
package flow
import (
"context"
"errors"
"path/filepath"
"time"
"github.com/direktiv/direktiv/pkg/flow/bytedata"
"github.com/direktiv/direktiv/pkg/flow/database"
"github.com/direktiv/direktiv/pkg/flow/database/recipient"
"github.com/direktiv/direktiv/pkg/flow/grpc"
"github.com/direktiv/direktiv/pkg/refactor/core"
"github.com/direktiv/direktiv/pkg/refactor/filestore"
"github.com/direktiv/direktiv/pkg/refactor/mirror"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)
// Node resolves a single filesystem node within a namespace and returns its
// gRPC representation.
func (flow *flow) Node(ctx context.Context, req *grpc.NodeRequest) (*grpc.NodeResponse, error) {
	flow.sugar.Debugf("Handling gRPC request: %s", this())

	var file *filestore.File
	var err error
	var ns *core.Namespace
	// Namespace and file lookups share one transaction for a consistent view.
	err = flow.runSqlTx(ctx, func(tx *sqlTx) error {
		ns, err = tx.DataStore().Namespaces().GetByName(ctx, req.GetNamespace())
		if err != nil {
			return err
		}
		file, err = tx.FileStore().ForRootNamespaceAndName(ns.ID, defaultRootName).GetFile(ctx, req.GetPath())

		return err
	})
	if err != nil {
		return nil, err
	}

	resp := &grpc.NodeResponse{}
	resp.Node = bytedata.ConvertFileToGrpcNode(file)
	resp.Namespace = ns.Name

	return resp, nil
}
// Directory returns a directory node together with its immediate children.
// For a mirror namespace the root node is tagged with ExpandedType "git".
func (flow *flow) Directory(ctx context.Context, req *grpc.DirectoryRequest) (*grpc.DirectoryResponse, error) {
	flow.sugar.Debugf("Handling gRPC request: %s", this())

	var node *filestore.File
	var files []*filestore.File
	var isMirrorNamespace bool
	var err error
	var ns *core.Namespace
	err = flow.runSqlTx(ctx, func(tx *sqlTx) error {
		ns, err = tx.DataStore().Namespaces().GetByName(ctx, req.GetNamespace())
		if err != nil {
			return err
		}
		// A namespace is a mirror iff it has a mirror config; ErrNotFound
		// simply means "not a mirror", any other error aborts the tx.
		_, err = tx.DataStore().Mirror().GetConfig(ctx, ns.ID)
		if errors.Is(err, mirror.ErrNotFound) {
			isMirrorNamespace = false
		} else if err != nil {
			return err
		} else {
			isMirrorNamespace = true
		}

		node, err = tx.FileStore().ForRootNamespaceAndName(ns.ID, defaultRootName).GetFile(ctx, req.GetPath())
		if err != nil {
			return err
		}
		files, err = tx.FileStore().ForRootNamespaceAndName(ns.ID, defaultRootName).ReadDirectory(ctx, req.GetPath())
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	resp := new(grpc.DirectoryResponse)
	resp.Namespace = ns.Name
	resp.Node = bytedata.ConvertFileToGrpcNode(node)
	if isMirrorNamespace && node.Path == "/" {
		resp.Node.ExpandedType = "git"
	}
	// No real pagination: Total is just the child count and all children
	// are returned in one page.
	resp.Children = new(grpc.DirectoryChildren)
	resp.Children.PageInfo = new(grpc.PageInfo)
	resp.Children.PageInfo.Total = int32(len(files))
	resp.Children.Results = bytedata.ConvertFilesToGrpcNodeList(files)

	return resp, nil
}
// DirectoryStream serves Directory results over a server stream. The
// snapshot is taken once and re-sent every five seconds until the client
// disconnects — explicitly a mock stream, with no change detection.
func (flow *flow) DirectoryStream(req *grpc.DirectoryRequest, srv grpc.Flow_DirectoryStreamServer) error {
	flow.sugar.Debugf("Handling gRPC request: %s", this())
	ctx := srv.Context()
	resp, err := flow.Directory(ctx, req)
	if err != nil {
		return err
	}
	// mock streaming response.
	for {
		select {
		case <-ctx.Done():
			return nil
		default:
			err = srv.Send(resp)
			if err != nil {
				return err
			}
			time.Sleep(time.Second * 5)
		}
	}
}
// CreateDirectory creates a directory node at the requested path, logs the
// action, broadcasts a create event, and returns the new node.
func (flow *flow) CreateDirectory(ctx context.Context, req *grpc.CreateDirectoryRequest) (*grpc.CreateDirectoryResponse, error) {
	flow.sugar.Debugf("Handling gRPC request: %s", this())

	var file *filestore.File
	tx, err := flow.beginSqlTx(ctx)
	if err != nil {
		return nil, err
	}
	// Rollback is a no-op after a successful Commit.
	defer tx.Rollback()

	ns, err := tx.DataStore().Namespaces().GetByName(ctx, req.GetNamespace())
	if err != nil {
		return nil, err
	}
	file, _, err = tx.FileStore().ForRootNamespaceAndName(ns.ID, defaultRootName).CreateFile(ctx, req.GetPath(), filestore.FileTypeDirectory, "", nil)
	if err != nil {
		return nil, err
	}
	flow.logger.Infof(ctx, ns.ID, database.GetAttributes(recipient.Namespace, ns), "Created directory '%s'.", file.Path)

	// Broadcast
	// NOTE(review): the broadcast happens before Commit; a failed commit
	// would leave a create event for a directory that was never persisted.
	err = flow.BroadcastDirectory(ctx, BroadcastEventTypeCreate,
		broadcastDirectoryInput{
			Path:   req.GetPath(),
			Parent: file.Dir(),
		}, ns)
	if err != nil {
		return nil, err
	}

	if err := tx.Commit(ctx); err != nil {
		return nil, err
	}

	var resp grpc.CreateDirectoryResponse
	resp.Namespace = ns.Name
	resp.Node = bytedata.ConvertFileToGrpcNode(file)

	return &resp, nil
}
// DeleteNode deletes a file or directory. Non-empty directories require the
// explicit recursive flag; the root node can never be deleted. With the
// idempotent flag set, deleting a missing node succeeds silently. After the
// commit, a workflow delete updates metrics and broadcasts a workflow event;
// everything else broadcasts a directory event.
func (flow *flow) DeleteNode(ctx context.Context, req *grpc.DeleteNodeRequest) (*emptypb.Empty, error) {
	flow.sugar.Debugf("Handling gRPC request: %s", this())

	tx, err := flow.beginSqlTx(ctx)
	if err != nil {
		return nil, err
	}
	// Rollback is a no-op after a successful Commit.
	defer tx.Rollback()

	ns, err := tx.DataStore().Namespaces().GetByName(ctx, req.GetNamespace())
	if err != nil {
		return nil, err
	}
	file, err := tx.FileStore().ForRootNamespaceAndName(ns.ID, defaultRootName).GetFile(ctx, req.GetPath())
	// Idempotent mode: a missing node counts as already deleted.
	if errors.Is(err, filestore.ErrNotFound) && req.GetIdempotent() {
		var resp emptypb.Empty
		return &resp, nil
	}
	if err != nil {
		return nil, err
	}
	if file.Typ == filestore.FileTypeDirectory {
		isEmptyDir, err := tx.FileStore().ForRootNamespaceAndName(ns.ID, defaultRootName).IsEmptyDirectory(ctx, req.GetPath())
		if err != nil {
			return nil, err
		}
		if !isEmptyDir && !req.GetRecursive() {
			return nil, status.Error(codes.InvalidArgument, "refusing to delete non-empty directory without explicit recursive argument")
		}
	}
	if file.Path == "/" {
		return nil, status.Error(codes.InvalidArgument, "cannot delete root node")
	}

	err = tx.FileStore().ForFile(file).Delete(ctx, req.GetRecursive())
	if err != nil {
		return nil, err
	}

	if err := tx.Commit(ctx); err != nil {
		return nil, err
	}

	if file.Typ == filestore.FileTypeWorkflow {
		metricsWf.WithLabelValues(ns.Name, ns.Name).Dec()
		metricsWfUpdated.WithLabelValues(ns.Name, file.Path, ns.Name).Inc()

		// Broadcast Event
		err = flow.BroadcastWorkflow(ctx, BroadcastEventTypeDelete,
			broadcastWorkflowInput{
				Name:   file.Name(),
				Path:   file.Path,
				Parent: file.Dir(),
				Live:   false,
			}, ns)
		if err != nil {
			return nil, err
		}
	} else {
		// Broadcast Event
		err = flow.BroadcastDirectory(ctx, BroadcastEventTypeDelete,
			broadcastDirectoryInput{
				Path:   file.Path,
				Parent: file.Dir(),
			}, ns)
		if err != nil {
			return nil, err
		}
	}

	flow.logger.Infof(ctx, ns.ID, database.GetAttributes(recipient.Namespace, ns), "Deleted %s '%s'.", file.Typ, file.Path)

	var resp emptypb.Empty

	return &resp, nil
}
// RenameNode moves/renames a filesystem node. The root node cannot be
// renamed, and a workflow must keep a .yaml or .yml extension.
func (flow *flow) RenameNode(ctx context.Context, req *grpc.RenameNodeRequest) (*grpc.RenameNodeResponse, error) {
	flow.sugar.Debugf("Handling gRPC request: %s", this())

	tx, err := flow.beginSqlTx(ctx)
	if err != nil {
		return nil, err
	}
	// Rollback is a no-op after a successful Commit.
	defer tx.Rollback()

	ns, err := tx.DataStore().Namespaces().GetByName(ctx, req.GetNamespace())
	if err != nil {
		return nil, err
	}
	file, err := tx.FileStore().ForRootNamespaceAndName(ns.ID, defaultRootName).GetFile(ctx, req.GetOld())
	if err != nil {
		return nil, err
	}
	if file.Path == "/" {
		return nil, status.Error(codes.InvalidArgument, "cannot rename root node")
	}
	if file.Typ == filestore.FileTypeWorkflow {
		if filepath.Ext(req.GetNew()) != ".yaml" && filepath.Ext(req.GetNew()) != ".yml" {
			// BUG FIX: message previously said ".yaml or .yaml"; the check
			// accepts .yaml or .yml, so report that.
			return nil, status.Error(codes.InvalidArgument, "workflow name should have either .yaml or .yml extension")
		}
	}

	err = tx.FileStore().ForFile(file).SetPath(ctx, req.GetNew())
	if err != nil {
		return nil, err
	}
	// TODO: question if parent dir need to get updated_at change.
	if err := tx.Commit(ctx); err != nil {
		return nil, err
	}

	flow.logger.Infof(ctx, ns.ID, database.GetAttributes(recipient.Namespace, ns), "Renamed %s from '%s' to '%s'.", file.Typ, req.GetOld(), req.GetNew())

	var resp grpc.RenameNodeResponse
	resp.Namespace = ns.Name
	resp.Node = bytedata.ConvertFileToGrpcNode(file)

	return &resp, nil
}
// CreateNodeAttributes merges the requested user attributes into a file's
// annotations, first creating an empty annotation set if the file has none.
func (flow *flow) CreateNodeAttributes(ctx context.Context, req *grpc.CreateNodeAttributesRequest) (*emptypb.Empty, error) {
	flow.sugar.Debugf("Handling gRPC request: %s", this())

	tx, err := flow.beginSqlTx(ctx)
	if err != nil {
		return nil, err
	}
	// Rollback is a no-op after a successful Commit.
	defer tx.Rollback()

	ns, err := tx.DataStore().Namespaces().GetByName(ctx, req.GetNamespace())
	if err != nil {
		return nil, err
	}
	file, err := tx.FileStore().ForRootNamespaceAndName(ns.ID, defaultRootName).GetFile(ctx, req.GetPath())
	if err != nil {
		return nil, err
	}

	annotations, err := tx.DataStore().FileAnnotations().Get(ctx, file.ID)
	// Missing annotations are not an error here: start from an empty set.
	if errors.Is(err, core.ErrFileAnnotationsNotSet) {
		annotations = &core.FileAnnotations{
			FileID: file.ID,
			Data:   map[string]string{},
		}
	} else if err != nil {
		return nil, err
	}

	annotations.Data = annotations.Data.AppendFileUserAttributes(req.GetAttributes())

	err = tx.DataStore().FileAnnotations().Set(ctx, annotations)
	if err != nil {
		return nil, err
	}

	if err := tx.Commit(ctx); err != nil {
		return nil, err
	}

	var resp emptypb.Empty

	return &resp, nil
}
// DeleteNodeAttributes removes the requested user attributes from a file's
// annotations. Unlike CreateNodeAttributes, a file that has no annotation
// set at all is rejected as InvalidArgument.
func (flow *flow) DeleteNodeAttributes(ctx context.Context, req *grpc.DeleteNodeAttributesRequest) (*emptypb.Empty, error) {
	flow.sugar.Debugf("Handling gRPC request: %s", this())

	tx, err := flow.beginSqlTx(ctx)
	if err != nil {
		return nil, err
	}
	// Rollback is a no-op after a successful Commit.
	defer tx.Rollback()

	ns, err := tx.DataStore().Namespaces().GetByName(ctx, req.GetNamespace())
	if err != nil {
		return nil, err
	}
	file, err := tx.FileStore().ForRootNamespaceAndName(ns.ID, defaultRootName).GetFile(ctx, req.GetPath())
	if err != nil {
		return nil, err
	}

	annotations, err := tx.DataStore().FileAnnotations().Get(ctx, file.ID)
	if errors.Is(err, core.ErrFileAnnotationsNotSet) {
		return nil, status.Error(codes.InvalidArgument, "file annotations are not set")
	} else if err != nil {
		return nil, err
	}

	annotations.Data = annotations.Data.ReduceFileUserAttributes(req.GetAttributes())

	err = tx.DataStore().FileAnnotations().Set(ctx, annotations)
	if err != nil {
		return nil, err
	}

	if err := tx.Commit(ctx); err != nil {
		return nil, err
	}

	var resp emptypb.Empty

	return &resp, nil
}
|
package main
import (
"./s3go" // import straight from github? commit?
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
// "strconv"
"time"
)
// dothing signs the request with the supplied credentials, performs it over
// HTTP, and logs the raw response body. Every failure is fatal — acceptable
// for a throwaway tool, not for library code.
// NOTE(review): despite the package alias `s3`, the request type is
// SDBRequest (SimpleDB) — confirm the import/package naming is intended.
func dothing(r *s3.SDBRequest, cred *s3.SecurityCredentials) {
	r.AddCredentials(cred)
	req, err := r.HttpRequest()
	if err != nil {
		log.Fatal(err)
	}
	log.Println(req)
	response, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer response.Body.Close()
	contents, err := ioutil.ReadAll(response.Body)
	if err != nil {
		log.Fatal(err)
		// TODO: interpret errors here
	}
	log.Println(string(contents))
}
// main exercises the SimpleDB helper: list the domains, write one item's
// attributes, then select the top-10 items by play count. Credentials come
// from the AWS_ACCESS_KEY / AWS_SECRET_KEY environment variables; any
// request failure aborts the process (see dothing).
func main() {
	cred := &s3.SecurityCredentials{AWSAccessKeyId: os.Getenv("AWS_ACCESS_KEY"), AWSSecretAccessKey: os.Getenv("AWS_SECRET_KEY")}

	// 1. list domains, get xml response
	m := s3.Strmap{
		"Action": "ListDomains",
	}
	r := s3.NewSDBRequest(m)
	dothing(r, cred)

	// 2. write one item's attributes
	nplays := 1234
	m = s3.Strmap{
		"Action":           "PutAttributes",
		"DomainName":       "bn_bs",
		"ItemName":         "1777xy",
		"Attribute.1.Name": "upload_date",
		//"Attribute.1.Value": strconv.FormatInt(time.Now().Unix(), 10),
		// lexographically sortable date. not exactly RFC8601 but looks ok to me
		// http://docs.aws.amazon.com/AmazonSimpleDB/latest/DeveloperGuide/Dates.html
		// choose UTC/GMT so dates in different timezones are comparable
		"Attribute.1.Value": time.Now().UTC().Format(time.RFC3339),
		// lexographically pad out for 1billion plays
		"Attribute.2.Name":  "plays",
		"Attribute.2.Value": fmt.Sprintf("%.9d", nplays),
	}
	r = s3.NewSDBRequest(m)
	dothing(r, cred)

	// 3. query: sort attr must appear in predicate
	top10 := "select * from `bn_bs` where `plays` is not null order by `plays` desc limit 10"
	m = s3.Strmap{
		"Action":           "Select",
		"SelectExpression": top10,
	}
	r = s3.NewSDBRequest(m)
	dothing(r, cred)
}
|
package rule
import (
"net/http"
"github.com/sirupsen/logrus"
)
// orRule combines its child rules with logical OR semantics.
type orRule struct {
	rules []Rule
}
// NewOrRule : builds a Rule that matches when any of the given rules matches.
func NewOrRule(rules []Rule) Rule {
	return orRule{
		rules: rules,
	}
}
// Execute evaluates the request against each nested rule in order and
// succeeds as soon as one matches (logical OR). An empty rule set never
// matches. (The old comment said "And Rule"; this is the OR rule.)
func (r orRule) Execute(req *http.Request) bool {
	logrus.WithFields(logrus.Fields{
		"type": "rule",
		"app":  "rigis",
	}).Trace("OrRule:Start")

	if len(r.rules) == 0 {
		logrus.WithFields(logrus.Fields{
			"type": "rule",
			"app":  "rigis",
		}).Trace("OrRule:Failed")
		return false
	}

	for _, rule := range r.rules {
		if rule.Execute(req) {
			logrus.WithFields(logrus.Fields{
				"type": "rule",
				"app":  "rigis",
			}).Trace("OrRule:Success")
			return true
		}
	}

	logrus.WithFields(logrus.Fields{
		"type": "rule",
		"app":  "rigis",
	}).Trace("OrRule:Failed")
	return false
}
|
package main
import (
"fmt"
"log"
"os"
"github.com/awslabs/aws-sdk-go/aws"
"github.com/awslabs/aws-sdk-go/gen/cloudwatch"
)
// region is the AWS region used for all API calls.
var region = "us-west-2"

// Connect will provide a valid CloudWatch client.
// (The comment previously said "RDS client", but the function returns
// *cloudwatch.CloudWatch.) Credentials come from the AWS_ACCESS_KEY /
// AWS_SECRET_KEY environment variables.
func Connect() *cloudwatch.CloudWatch {
	creds := aws.Creds(os.Getenv("AWS_ACCESS_KEY"), os.Getenv("AWS_SECRET_KEY"), "") // HL
	return cloudwatch.New(creds, region, nil)
}
// ListMetrics will ...list metrics: it prints every metric name visible to
// the client, one per line, and aborts the process on API error.
func ListMetrics(c *cloudwatch.CloudWatch) {
	resp, err := c.ListMetrics(&cloudwatch.ListMetricsInput{}) // HL
	if err != nil {
		log.Fatalln(err)
	}
	for i, m := range resp.Metrics {
		fmt.Printf("%3d: %s\n", i+1, *m.MetricName)
	}
}
// main connects to CloudWatch and dumps the available metric names.
func main() {
	c := Connect()
	ListMetrics(c)
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"bytes"
"context"
"fmt"
"slices"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/util/plancodec"
"github.com/pingcap/tidb/util/tracing"
)
// extractJoinGroup extracts all the join nodes connected with continuous
// Joins to construct a join group. This join group is further used to
// construct a new join order based on a reorder algorithm.
//
// For example: "InnerJoin(InnerJoin(a, b), LeftJoin(c, d))"
// results in a join group {a, b, c, d}.
func extractJoinGroup(p LogicalPlan) *joinGroupResult {
	joinMethodHintInfo := make(map[int]*joinMethodHint)
	var (
		group             []LogicalPlan
		joinOrderHintInfo []*tableHintInfo
		eqEdges           []*expression.ScalarFunction
		otherConds        []expression.Expression
		joinTypes         []*joinTypeWithExtMsg
		hasOuterJoin      bool
	)
	join, isJoin := p.(*LogicalJoin)
	if isJoin && join.preferJoinOrder {
		// When there is a leading hint, the hint may not take effect for other reasons.
		// For example, the join type is cross join or straight join, or exists the join algorithm hint, etc.
		// We need to return the hint information to warn
		joinOrderHintInfo = append(joinOrderHintInfo, join.hintInfo)
	}
	// Stop expanding (treat p as a single group member) when p is not a join,
	// or when its join type / hints / missing equal-conditions make reordering unsafe.
	// If the variable `tidb_opt_advanced_join_hint` is false and the join node has the join method hint, we will not split the current join node to join reorder process.
	if !isJoin || (join.preferJoinType > uint(0) && !p.SCtx().GetSessionVars().EnableAdvancedJoinHint) || join.StraightJoin ||
		(join.JoinType != InnerJoin && join.JoinType != LeftOuterJoin && join.JoinType != RightOuterJoin) ||
		((join.JoinType == LeftOuterJoin || join.JoinType == RightOuterJoin) && join.EqualConditions == nil) {
		if joinOrderHintInfo != nil {
			// The leading hint can not work for some reasons. So clear it in the join node.
			join.hintInfo = nil
		}
		return &joinGroupResult{
			group:              []LogicalPlan{p},
			joinOrderHintInfo:  joinOrderHintInfo,
			basicJoinGroupInfo: &basicJoinGroupInfo{},
		}
	}
	// If the session var is set to off, we will still reject the outer joins.
	if !p.SCtx().GetSessionVars().EnableOuterJoinReorder && (join.JoinType == LeftOuterJoin || join.JoinType == RightOuterJoin) {
		return &joinGroupResult{
			group:              []LogicalPlan{p},
			joinOrderHintInfo:  joinOrderHintInfo,
			basicJoinGroupInfo: &basicJoinGroupInfo{},
		}
	}
	// `leftHasHint` and `rightHasHint` are used to record whether the left child and right child are set by the join method hint.
	leftHasHint, rightHasHint := false, false
	if isJoin && p.SCtx().GetSessionVars().EnableAdvancedJoinHint && join.preferJoinType > uint(0) {
		// If the current join node has the join method hint, we should store the hint information and restore it when we have finished the join reorder process.
		if join.leftPreferJoinType > uint(0) {
			joinMethodHintInfo[join.children[0].ID()] = &joinMethodHint{join.leftPreferJoinType, join.hintInfo}
			leftHasHint = true
		}
		if join.rightPreferJoinType > uint(0) {
			joinMethodHintInfo[join.children[1].ID()] = &joinMethodHint{join.rightPreferJoinType, join.hintInfo}
			rightHasHint = true
		}
	}
	hasOuterJoin = hasOuterJoin || (join.JoinType != InnerJoin)
	// If the left child has the hint, it means there are some join method hints want to specify the join method based on the left child.
	// For example: `select .. from t1 join t2 join (select .. from t3 join t4) t5 where ..;` If there are some join method hints related to `t5`, we can't split `t5` into `t3` and `t4`.
	// So we don't need to split the left child part. The right child part is the same.
	if join.JoinType != RightOuterJoin && !leftHasHint {
		lhsJoinGroupResult := extractJoinGroup(join.children[0])
		lhsGroup, lhsEqualConds, lhsOtherConds, lhsJoinTypes, lhsJoinOrderHintInfo, lhsJoinMethodHintInfo, lhsHasOuterJoin := lhsJoinGroupResult.group, lhsJoinGroupResult.eqEdges, lhsJoinGroupResult.otherConds, lhsJoinGroupResult.joinTypes, lhsJoinGroupResult.joinOrderHintInfo, lhsJoinGroupResult.joinMethodHintInfo, lhsJoinGroupResult.hasOuterJoin
		noExpand := false
		// If the filters of the outer join is related with multiple leaves of the outer join side. We don't reorder it for now.
		if join.JoinType == LeftOuterJoin {
			extractedCols := make([]*expression.Column, 0, 8)
			extractedCols = expression.ExtractColumnsFromExpressions(extractedCols, join.OtherConditions, nil)
			extractedCols = expression.ExtractColumnsFromExpressions(extractedCols, join.LeftConditions, nil)
			extractedCols = expression.ExtractColumnsFromExpressions(extractedCols, expression.ScalarFuncs2Exprs(join.EqualConditions), nil)
			affectedGroups := 0
			for i := range lhsGroup {
				for _, col := range extractedCols {
					if lhsGroup[i].Schema().Contains(col) {
						affectedGroups++
						break
					}
				}
				if affectedGroups > 1 {
					noExpand = true
					break
				}
			}
		}
		if noExpand {
			return &joinGroupResult{
				group:              []LogicalPlan{p},
				basicJoinGroupInfo: &basicJoinGroupInfo{},
			}
		}
		// Fold the left subtree's group, edges, conditions and hints into ours.
		group = append(group, lhsGroup...)
		eqEdges = append(eqEdges, lhsEqualConds...)
		otherConds = append(otherConds, lhsOtherConds...)
		joinTypes = append(joinTypes, lhsJoinTypes...)
		joinOrderHintInfo = append(joinOrderHintInfo, lhsJoinOrderHintInfo...)
		for ID, joinMethodHint := range lhsJoinMethodHintInfo {
			joinMethodHintInfo[ID] = joinMethodHint
		}
		hasOuterJoin = hasOuterJoin || lhsHasOuterJoin
	} else {
		group = append(group, join.children[0])
	}
	// You can see the comments in the upside part which we try to split the left child part. It's the same here.
	if join.JoinType != LeftOuterJoin && !rightHasHint {
		rhsJoinGroupResult := extractJoinGroup(join.children[1])
		rhsGroup, rhsEqualConds, rhsOtherConds, rhsJoinTypes, rhsJoinOrderHintInfo, rhsJoinMethodHintInfo, rhsHasOuterJoin := rhsJoinGroupResult.group, rhsJoinGroupResult.eqEdges, rhsJoinGroupResult.otherConds, rhsJoinGroupResult.joinTypes, rhsJoinGroupResult.joinOrderHintInfo, rhsJoinGroupResult.joinMethodHintInfo, rhsJoinGroupResult.hasOuterJoin
		noExpand := false
		// If the filters of the outer join is related with multiple leaves of the outer join side. We don't reorder it for now.
		if join.JoinType == RightOuterJoin {
			extractedCols := make([]*expression.Column, 0, 8)
			extractedCols = expression.ExtractColumnsFromExpressions(extractedCols, join.OtherConditions, nil)
			extractedCols = expression.ExtractColumnsFromExpressions(extractedCols, join.RightConditions, nil)
			extractedCols = expression.ExtractColumnsFromExpressions(extractedCols, expression.ScalarFuncs2Exprs(join.EqualConditions), nil)
			affectedGroups := 0
			for i := range rhsGroup {
				for _, col := range extractedCols {
					if rhsGroup[i].Schema().Contains(col) {
						affectedGroups++
						break
					}
				}
				if affectedGroups > 1 {
					noExpand = true
					break
				}
			}
		}
		if noExpand {
			return &joinGroupResult{
				group:              []LogicalPlan{p},
				basicJoinGroupInfo: &basicJoinGroupInfo{},
			}
		}
		// Fold the right subtree's group, edges, conditions and hints into ours.
		group = append(group, rhsGroup...)
		eqEdges = append(eqEdges, rhsEqualConds...)
		otherConds = append(otherConds, rhsOtherConds...)
		joinTypes = append(joinTypes, rhsJoinTypes...)
		joinOrderHintInfo = append(joinOrderHintInfo, rhsJoinOrderHintInfo...)
		for ID, joinMethodHint := range rhsJoinMethodHintInfo {
			joinMethodHintInfo[ID] = joinMethodHint
		}
		hasOuterJoin = hasOuterJoin || rhsHasOuterJoin
	} else {
		group = append(group, join.children[1])
	}
	// Finally add this join's own edges and conditions.
	eqEdges = append(eqEdges, join.EqualConditions...)
	tmpOtherConds := make(expression.CNFExprs, 0, len(join.OtherConditions)+len(join.LeftConditions)+len(join.RightConditions))
	tmpOtherConds = append(tmpOtherConds, join.OtherConditions...)
	tmpOtherConds = append(tmpOtherConds, join.LeftConditions...)
	tmpOtherConds = append(tmpOtherConds, join.RightConditions...)
	if join.JoinType == LeftOuterJoin || join.JoinType == RightOuterJoin || join.JoinType == LeftOuterSemiJoin || join.JoinType == AntiLeftOuterSemiJoin {
		for range join.EqualConditions {
			abType := &joinTypeWithExtMsg{JoinType: join.JoinType}
			// outer join's other condition should be bound with the connecting edge.
			// although we bind the outer condition to **anyone** of the join type, it will be extracted **only once** when make a new join.
			abType.outerBindCondition = tmpOtherConds
			joinTypes = append(joinTypes, abType)
		}
	} else {
		for range join.EqualConditions {
			abType := &joinTypeWithExtMsg{JoinType: join.JoinType}
			joinTypes = append(joinTypes, abType)
		}
		otherConds = append(otherConds, tmpOtherConds...)
	}
	return &joinGroupResult{
		group:             group,
		hasOuterJoin:      hasOuterJoin,
		joinOrderHintInfo: joinOrderHintInfo,
		basicJoinGroupInfo: &basicJoinGroupInfo{
			eqEdges:            eqEdges,
			otherConds:         otherConds,
			joinTypes:          joinTypes,
			joinMethodHintInfo: joinMethodHintInfo,
		},
	}
}
// joinReOrderSolver is the logical optimization rule that reorders the joins
// inside each extracted join group to find a cheaper join order. It is
// stateless; per-group state lives in baseSingleGroupJoinOrderSolver.
type joinReOrderSolver struct {
}
// jrNode wraps a join-group member plan together with its cumulative cost,
// which the greedy/DP solvers use to order candidates.
type jrNode struct {
	// p is the logical plan of this join-group member.
	p LogicalPlan
	// cumCost is the cumulative cost of p (see baseNodeCumCost / calcJoinCumCost).
	cumCost float64
}
// joinTypeWithExtMsg bundles a join type with the outer join's "other
// conditions" that must stay bound to this particular connecting edge.
type joinTypeWithExtMsg struct {
	JoinType
	// outerBindCondition holds the outer join's other conditions bound to the
	// connecting edge; they are re-attached when the join is rebuilt (see makeJoin).
	outerBindCondition []expression.Expression
}
// optimize is the entry point of the join reorder rule: it records the
// initial join order for tracing, runs the recursive reorder, records the
// final order, and appends the trace step to the optimizer trace.
func (s *joinReOrderSolver) optimize(_ context.Context, p LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) {
	tracer := &joinReorderTrace{cost: map[string]float64{}, opt: opt}
	tracer.traceJoinReorder(p)
	p, err := s.optimizeRecursive(p.SCtx(), p, tracer)
	tracer.traceJoinReorder(p)
	appendJoinReorderTraceStep(tracer, p, opt)
	return p, err
}
// optimizeRecursive recursively collects join groups and applies join reorder algorithm for each group.
//
// For a group with more than one member it reorders the members (greedy or DP),
// and, if the reorder changed the output column order, restores the original
// schema with a projection on top. Non-group nodes recurse into their children.
func (s *joinReOrderSolver) optimizeRecursive(ctx sessionctx.Context, p LogicalPlan, tracer *joinReorderTrace) (LogicalPlan, error) {
	// CTEs are not reordered.
	if _, ok := p.(*LogicalCTE); ok {
		return p, nil
	}
	var err error
	result := extractJoinGroup(p)
	curJoinGroup, joinTypes, joinOrderHintInfo, hasOuterJoin := result.group, result.joinTypes, result.joinOrderHintInfo, result.hasOuterJoin
	if len(curJoinGroup) > 1 {
		// Reorder each member of the group first.
		for i := range curJoinGroup {
			curJoinGroup[i], err = s.optimizeRecursive(ctx, curJoinGroup[i], tracer)
			if err != nil {
				return nil, err
			}
		}
		originalSchema := p.Schema()
		// Not support outer join reorder when using the DP algorithm
		isSupportDP := true
		for _, joinType := range joinTypes {
			if joinType.JoinType != InnerJoin {
				isSupportDP = false
				break
			}
		}
		baseGroupSolver := &baseSingleGroupJoinOrderSolver{
			ctx:                ctx,
			basicJoinGroupInfo: result.basicJoinGroupInfo,
		}
		joinGroupNum := len(curJoinGroup)
		// Greedy is used for large groups or whenever DP cannot be applied.
		useGreedy := joinGroupNum > ctx.GetSessionVars().TiDBOptJoinReorderThreshold || !isSupportDP
		leadingHintInfo, hasDiffLeadingHint := checkAndGenerateLeadingHint(joinOrderHintInfo)
		if hasDiffLeadingHint {
			ctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack("We can only use one leading hint at most, when multiple leading hints are used, all leading hints will be invalid"))
		}
		if leadingHintInfo != nil && leadingHintInfo.leadingJoinOrder != nil {
			if useGreedy {
				// Pre-build the hinted join prefix; fall back with a warning on failure.
				ok, leftJoinGroup := baseGroupSolver.generateLeadingJoinGroup(curJoinGroup, leadingHintInfo, hasOuterJoin)
				if !ok {
					ctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack("leading hint is inapplicable, check if the leading hint table is valid"))
				} else {
					curJoinGroup = leftJoinGroup
				}
			} else {
				ctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack("leading hint is inapplicable for the DP join reorder algorithm"))
			}
		}
		if useGreedy {
			groupSolver := &joinReorderGreedySolver{
				baseSingleGroupJoinOrderSolver: baseGroupSolver,
			}
			p, err = groupSolver.solve(curJoinGroup, tracer)
		} else {
			dpSolver := &joinReorderDPSolver{
				baseSingleGroupJoinOrderSolver: baseGroupSolver,
			}
			dpSolver.newJoin = dpSolver.newJoinWithEdges
			p, err = dpSolver.solve(curJoinGroup, tracer)
		}
		if err != nil {
			return nil, err
		}
		// Detect whether the reorder changed the output column order/length.
		schemaChanged := false
		if len(p.Schema().Columns) != len(originalSchema.Columns) {
			schemaChanged = true
		} else {
			for i, col := range p.Schema().Columns {
				if !col.Equal(nil, originalSchema.Columns[i]) {
					schemaChanged = true
					break
				}
			}
		}
		if schemaChanged {
			// Restore the original column order with a projection.
			proj := LogicalProjection{
				Exprs: expression.Column2Exprs(originalSchema.Columns),
			}.Init(p.SCtx(), p.SelectBlockOffset())
			proj.SetSchema(originalSchema)
			proj.SetChildren(p)
			p = proj
		}
		return p, nil
	}
	if len(curJoinGroup) == 1 && joinOrderHintInfo != nil {
		ctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack("leading hint is inapplicable, check the join type or the join algorithm hint"))
	}
	// Not a reorderable join group: recurse into the children.
	newChildren := make([]LogicalPlan, 0, len(p.Children()))
	for _, child := range p.Children() {
		newChild, err := s.optimizeRecursive(ctx, child, tracer)
		if err != nil {
			return nil, err
		}
		newChildren = append(newChildren, newChild)
	}
	p.SetChildren(newChildren...)
	return p, nil
}
// checkAndGenerateLeadingHint used to check and generate the valid leading hint.
// A join group may use at most one leading hint; if the group carries more than
// one distinct hint, all of them are invalidated. The second return value
// reports whether such a conflict was found.
// For example: select /*+ leading(t3) */ * from (select /*+ leading(t1) */ t2.b from t1 join t2 on t1.a=t2.a) t4 join t3 on t4.b=t3.b
// The Join Group {t1, t2, t3} contains two leading hints includes leading(t3) and leading(t1).
// Although they are in different query blocks, they are conflicting.
// In addition, the table alias 't4' cannot be recognized because of the join group.
func checkAndGenerateLeadingHint(hintInfo []*tableHintInfo) (*tableHintInfo, bool) {
	if len(hintInfo) == 0 {
		return nil, false
	}
	// Check whether every collected hint is the same one.
	for i := 1; i < len(hintInfo); i++ {
		if hintInfo[i] != hintInfo[i-1] {
			// Conflicting leading hints: invalidate all of them.
			return nil, true
		}
	}
	return hintInfo[0], false
}
// joinMethodHint records a join method preference collected from a hint on a
// sub-plan, so it can be re-applied after the join reorder rebuilds the joins.
type joinMethodHint struct {
	// preferredJoinMethod is the preferred join method flag value.
	preferredJoinMethod uint
	// joinMethodHintInfo is the original hint this preference came from.
	joinMethodHintInfo *tableHintInfo
}
// basicJoinGroupInfo represents basic information for a join group in the join reorder process.
type basicJoinGroupInfo struct {
	// eqEdges are the equal-condition edges connecting members of the group.
	eqEdges []*expression.ScalarFunction
	// otherConds are the remaining (non-equal) join conditions of the group.
	otherConds []expression.Expression
	// joinTypes carries, per eqEdge, the join type plus any outer-bound conditions.
	joinTypes []*joinTypeWithExtMsg
	// `joinMethodHintInfo` is used to map the sub-plan's ID to the join method hint.
	// The sub-plan will join the join reorder process to build the new plan.
	// So after we have finished the join reorder process, we can reset the join method hint based on the sub-plan's ID.
	joinMethodHintInfo map[int]*joinMethodHint
}
// joinGroupResult is the result of extracting a join group: the flattened
// group members plus the bookkeeping needed to rebuild the joins.
type joinGroupResult struct {
	// group contains the leaf plans of the join group.
	group []LogicalPlan
	// hasOuterJoin reports whether any join in the group is an outer join.
	hasOuterJoin bool
	// joinOrderHintInfo collects the leading hints seen while extracting the group.
	joinOrderHintInfo []*tableHintInfo
	*basicJoinGroupInfo
}
// nolint:structcheck
// baseSingleGroupJoinOrderSolver carries the shared state used by both the
// greedy and the DP join reorder solvers while solving a single join group.
type baseSingleGroupJoinOrderSolver struct {
	ctx sessionctx.Context
	// curJoinGroup is the current set of join-group nodes with their costs.
	curJoinGroup []*jrNode
	// leadingJoinGroup is the join tree pre-built from a leading hint, if any.
	leadingJoinGroup LogicalPlan
	*basicJoinGroupInfo
}
// generateLeadingJoinGroup tries to build the join prefix requested by the
// leading hint. On success it returns true and the remaining (unused) group
// members; the pre-built prefix is stored in s.leadingJoinGroup. It returns
// false when the hint cannot be applied: a hinted table is not found, or a
// cartesian product would be required while the group contains an outer join.
func (s *baseSingleGroupJoinOrderSolver) generateLeadingJoinGroup(curJoinGroup []LogicalPlan, hintInfo *tableHintInfo, hasOuterJoin bool) (bool, []LogicalPlan) {
	var leadingJoinGroup []LogicalPlan
	leftJoinGroup := make([]LogicalPlan, len(curJoinGroup))
	copy(leftJoinGroup, curJoinGroup)
	queryBlockNames := *(s.ctx.GetSessionVars().PlannerSelectBlockAsName.Load())
	for _, hintTbl := range hintInfo.leadingJoinOrder {
		match := false
		// First try to match the hinted table by its table alias.
		for i, joinGroup := range leftJoinGroup {
			tableAlias := extractTableAlias(joinGroup, joinGroup.SelectBlockOffset())
			if tableAlias == nil {
				continue
			}
			if hintTbl.dbName.L == tableAlias.dbName.L && hintTbl.tblName.L == tableAlias.tblName.L && hintTbl.selectOffset == tableAlias.selectOffset {
				match = true
				leadingJoinGroup = append(leadingJoinGroup, joinGroup)
				leftJoinGroup = append(leftJoinGroup[:i], leftJoinGroup[i+1:]...)
				break
			}
		}
		if match {
			continue
		}
		// consider query block alias: select /*+ leading(t1, t2) */ * from (select ...) t1, t2 ...
		groupIdx := -1
		for i, joinGroup := range leftJoinGroup {
			blockOffset := joinGroup.SelectBlockOffset()
			if blockOffset > 1 && blockOffset < len(queryBlockNames) {
				blockName := queryBlockNames[blockOffset]
				if hintTbl.dbName.L == blockName.DBName.L && hintTbl.tblName.L == blockName.TableName.L {
					// this can happen when multiple join groups are from the same block, for example:
					//   select /*+ leading(tx) */ * from (select * from t1, t2 ...) tx, ...
					// `tx` is split to 2 join groups `t1` and `t2`, and they have the same block offset.
					// TODO: currently we skip this case for simplification, we can support it in the future.
					if groupIdx != -1 {
						groupIdx = -1
						break
					}
					groupIdx = i
				}
			}
		}
		if groupIdx != -1 {
			leadingJoinGroup = append(leadingJoinGroup, leftJoinGroup[groupIdx])
			leftJoinGroup = append(leftJoinGroup[:groupIdx], leftJoinGroup[groupIdx+1:]...)
		}
	}
	// Every hinted table must have been matched, otherwise the hint is inapplicable.
	if len(leadingJoinGroup) != len(hintInfo.leadingJoinOrder) || leadingJoinGroup == nil {
		return false, nil
	}
	leadingJoin := leadingJoinGroup[0]
	leadingJoinGroup = leadingJoinGroup[1:]
	// Join the matched members left-to-right in hint order.
	for len(leadingJoinGroup) > 0 {
		var usedEdges []*expression.ScalarFunction
		var joinType *joinTypeWithExtMsg
		leadingJoin, leadingJoinGroup[0], usedEdges, joinType = s.checkConnection(leadingJoin, leadingJoinGroup[0])
		if hasOuterJoin && usedEdges == nil {
			// If the joinGroups contain the outer join, we disable the cartesian product.
			return false, nil
		}
		leadingJoin, s.otherConds = s.makeJoin(leadingJoin, leadingJoinGroup[0], usedEdges, joinType)
		leadingJoinGroup = leadingJoinGroup[1:]
	}
	s.leadingJoinGroup = leadingJoin
	return true, leftJoinGroup
}
// generateJoinOrderNode used to derive the stats for the joinNodePlans and generate the jrNode groups based on the cost.
func (s *baseSingleGroupJoinOrderSolver) generateJoinOrderNode(joinNodePlans []LogicalPlan, tracer *joinReorderTrace) ([]*jrNode, error) {
	joinGroup := make([]*jrNode, 0, len(joinNodePlans))
	for _, node := range joinNodePlans {
		// Stats must be derived before the cumulative cost can be computed.
		_, err := node.recursiveDeriveStats(nil)
		if err != nil {
			return nil, err
		}
		cost := s.baseNodeCumCost(node)
		joinGroup = append(joinGroup, &jrNode{
			p:       node,
			cumCost: cost,
		})
		tracer.appendLogicalJoinCost(node, cost)
	}
	return joinGroup, nil
}
// baseNodeCumCost returns the cumulative cost of groupNode: its own estimated
// row count plus the cumulative cost of every plan node beneath it.
func (s *baseSingleGroupJoinOrderSolver) baseNodeCumCost(groupNode LogicalPlan) float64 {
	total := groupNode.StatsInfo().RowCount
	for _, sub := range groupNode.Children() {
		total += s.baseNodeCumCost(sub)
	}
	return total
}
// checkConnection used to check whether two nodes have equal conditions or not.
//
// It returns the (possibly swapped) left/right plans, the eq-condition edges
// connecting them, and the join type of those edges. For a non-inner join whose
// columns appear swapped, the two plans are exchanged instead of rewriting the
// condition; for an inner join a mirrored EQ condition is built instead. When
// no edge connects the plans, usedEdges is nil and joinType is InnerJoin.
func (s *baseSingleGroupJoinOrderSolver) checkConnection(leftPlan, rightPlan LogicalPlan) (leftNode, rightNode LogicalPlan, usedEdges []*expression.ScalarFunction, joinType *joinTypeWithExtMsg) {
	joinType = &joinTypeWithExtMsg{JoinType: InnerJoin}
	leftNode, rightNode = leftPlan, rightPlan
	for idx, edge := range s.eqEdges {
		lCol := edge.GetArgs()[0].(*expression.Column)
		rCol := edge.GetArgs()[1].(*expression.Column)
		if leftPlan.Schema().Contains(lCol) && rightPlan.Schema().Contains(rCol) {
			joinType = s.joinTypes[idx]
			usedEdges = append(usedEdges, edge)
		} else if rightPlan.Schema().Contains(lCol) && leftPlan.Schema().Contains(rCol) {
			joinType = s.joinTypes[idx]
			if joinType.JoinType != InnerJoin {
				// Non-inner join: keep the edge orientation by swapping the plans.
				rightNode, leftNode = leftPlan, rightPlan
				usedEdges = append(usedEdges, edge)
			} else {
				// Inner join: mirror the condition instead of swapping the plans.
				newSf := expression.NewFunctionInternal(s.ctx, ast.EQ, edge.GetType(), rCol, lCol).(*expression.ScalarFunction)
				usedEdges = append(usedEdges, newSf)
			}
		}
	}
	return
}
// makeJoin build join tree for the nodes which have equal conditions to connect them.
//
// It splits s.otherConds into left-only, right-only and cross conditions for
// the new join, applies the outer-join restrictions, re-attaches any conditions
// bound to the outer join edge, and returns the new join plan together with
// the still-unused other conditions.
func (s *baseSingleGroupJoinOrderSolver) makeJoin(leftPlan, rightPlan LogicalPlan, eqEdges []*expression.ScalarFunction, joinType *joinTypeWithExtMsg) (LogicalPlan, []expression.Expression) {
	remainOtherConds := make([]expression.Expression, len(s.otherConds))
	copy(remainOtherConds, s.otherConds)
	var (
		otherConds []expression.Expression
		leftConds  []expression.Expression
		rightConds []expression.Expression

		// for outer bind conditions
		obOtherConds []expression.Expression
		obLeftConds  []expression.Expression
		obRightConds []expression.Expression
	)
	mergedSchema := expression.MergeSchema(leftPlan.Schema(), rightPlan.Schema())
	// Conditions that refer only to the left child.
	remainOtherConds, leftConds = expression.FilterOutInPlace(remainOtherConds, func(expr expression.Expression) bool {
		return expression.ExprFromSchema(expr, leftPlan.Schema()) && !expression.ExprFromSchema(expr, rightPlan.Schema())
	})
	// Conditions that refer only to the right child.
	remainOtherConds, rightConds = expression.FilterOutInPlace(remainOtherConds, func(expr expression.Expression) bool {
		return expression.ExprFromSchema(expr, rightPlan.Schema()) && !expression.ExprFromSchema(expr, leftPlan.Schema())
	})
	// Conditions fully covered by the merged schema of both children.
	remainOtherConds, otherConds = expression.FilterOutInPlace(remainOtherConds, func(expr expression.Expression) bool {
		return expression.ExprFromSchema(expr, mergedSchema)
	})
	if joinType.JoinType == LeftOuterJoin || joinType.JoinType == RightOuterJoin || joinType.JoinType == LeftOuterSemiJoin || joinType.JoinType == AntiLeftOuterSemiJoin {
		// the original outer join's other conditions has been bound to the outer join Edge,
		// these remained other condition here shouldn't be appended to it because on-mismatch
		// logic will produce more append-null rows which is banned in original semantic.
		remainOtherConds = append(remainOtherConds, otherConds...) // nozero
		remainOtherConds = append(remainOtherConds, leftConds...)  // nozero
		remainOtherConds = append(remainOtherConds, rightConds...) // nozero
		otherConds = otherConds[:0]
		leftConds = leftConds[:0]
		rightConds = rightConds[:0]
	}
	if len(joinType.outerBindCondition) > 0 {
		// Split the outer-bound conditions the same way as the plain other conditions.
		remainOBOtherConds := make([]expression.Expression, len(joinType.outerBindCondition))
		copy(remainOBOtherConds, joinType.outerBindCondition)
		remainOBOtherConds, obLeftConds = expression.FilterOutInPlace(remainOBOtherConds, func(expr expression.Expression) bool {
			return expression.ExprFromSchema(expr, leftPlan.Schema()) && !expression.ExprFromSchema(expr, rightPlan.Schema())
		})
		remainOBOtherConds, obRightConds = expression.FilterOutInPlace(remainOBOtherConds, func(expr expression.Expression) bool {
			return expression.ExprFromSchema(expr, rightPlan.Schema()) && !expression.ExprFromSchema(expr, leftPlan.Schema())
		})
		// _ here make the linter happy.
		_, obOtherConds = expression.FilterOutInPlace(remainOBOtherConds, func(expr expression.Expression) bool {
			return expression.ExprFromSchema(expr, mergedSchema)
		})
		// case like: (A * B) left outer join C on (A.a = C.a && B.b > 0) will remain B.b > 0 in remainOBOtherConds (while this case
		// has been forbidden by: filters of the outer join is related with multiple leaves of the outer join side in #34603)
		// so noway here we got remainOBOtherConds remained.
	}
	return s.newJoinWithEdges(leftPlan, rightPlan, eqEdges,
		append(otherConds, obOtherConds...), append(leftConds, obLeftConds...), append(rightConds, obRightConds...), joinType.JoinType), remainOtherConds
}
// makeBushyJoin build bushy tree for the nodes which have no equal condition to connect them.
// Members are paired up level by level with cartesian joins until one plan
// remains; any other conditions answerable by an intermediate join are
// attached to it along the way.
func (s *baseSingleGroupJoinOrderSolver) makeBushyJoin(cartesianJoinGroup []LogicalPlan) LogicalPlan {
	resultJoinGroup := make([]LogicalPlan, 0, (len(cartesianJoinGroup)+1)/2)
	for len(cartesianJoinGroup) > 1 {
		resultJoinGroup = resultJoinGroup[:0]
		for i := 0; i < len(cartesianJoinGroup); i += 2 {
			if i+1 == len(cartesianJoinGroup) {
				// Odd member out: carry it to the next level unchanged.
				resultJoinGroup = append(resultJoinGroup, cartesianJoinGroup[i])
				break
			}
			newJoin := s.newCartesianJoin(cartesianJoinGroup[i], cartesianJoinGroup[i+1])
			// Attach every other condition already answerable by this join,
			// iterating backwards because matched conditions are removed.
			// NOTE(review): this inner `i` shadows the outer loop variable.
			for i := len(s.otherConds) - 1; i >= 0; i-- {
				cols := expression.ExtractColumns(s.otherConds[i])
				if newJoin.schema.ColumnsIndices(cols) != nil {
					newJoin.OtherConditions = append(newJoin.OtherConditions, s.otherConds[i])
					s.otherConds = append(s.otherConds[:i], s.otherConds[i+1:]...)
				}
			}
			resultJoinGroup = append(resultJoinGroup, newJoin)
		}
		// Swap the slices so the next level reuses the previous backing array.
		cartesianJoinGroup, resultJoinGroup = resultJoinGroup, cartesianJoinGroup
	}
	// other conditions may be possible to exist across different cartesian join group, resolving cartesianJoin first then adding another selection.
	if len(s.otherConds) > 0 {
		additionSelection := LogicalSelection{
			Conditions: s.otherConds,
		}.Init(cartesianJoinGroup[0].SCtx(), cartesianJoinGroup[0].SelectBlockOffset())
		additionSelection.SetChildren(cartesianJoinGroup[0])
		cartesianJoinGroup[0] = additionSelection
	}
	return cartesianJoinGroup[0]
}
// newCartesianJoin builds an inner join between lChild and rChild with no
// join conditions, marking it as already reordered and re-applying any join
// method hints recorded for the children.
func (s *baseSingleGroupJoinOrderSolver) newCartesianJoin(lChild, rChild LogicalPlan) *LogicalJoin {
	offset := lChild.SelectBlockOffset()
	if offset != rChild.SelectBlockOffset() {
		// Children come from different query blocks; no single offset applies.
		offset = -1
	}
	join := LogicalJoin{
		JoinType:  InnerJoin,
		reordered: true,
	}.Init(s.ctx, offset)
	join.SetSchema(expression.MergeSchema(lChild.Schema(), rChild.Schema()))
	join.SetChildren(lChild, rChild)
	s.setNewJoinWithHint(join)
	return join
}
// newJoinWithEdges builds a join of the given type between lChild and rChild,
// attaching the equal-condition edges and the split left/right/other conditions.
func (s *baseSingleGroupJoinOrderSolver) newJoinWithEdges(lChild, rChild LogicalPlan,
	eqEdges []*expression.ScalarFunction, otherConds, leftConds, rightConds []expression.Expression, joinType JoinType) LogicalPlan {
	newJoin := s.newCartesianJoin(lChild, rChild)
	newJoin.EqualConditions = eqEdges
	newJoin.OtherConditions = otherConds
	newJoin.LeftConditions = leftConds
	newJoin.RightConditions = rightConds
	newJoin.JoinType = joinType
	return newJoin
}
// setNewJoinWithHint sets the join method hint for the join node.
// Before the join reorder process, we split the join node and collect the join method hint.
// And we record the join method hint and reset the hint after we have finished the join reorder process.
func (s *baseSingleGroupJoinOrderSolver) setNewJoinWithHint(newJoin *LogicalJoin) {
	lChild := newJoin.Children()[0]
	rChild := newJoin.Children()[1]
	// Restore any join method preference recorded for either child sub-plan.
	if joinMethodHint, ok := s.joinMethodHintInfo[lChild.ID()]; ok {
		newJoin.leftPreferJoinType = joinMethodHint.preferredJoinMethod
		newJoin.hintInfo = joinMethodHint.joinMethodHintInfo
	}
	if joinMethodHint, ok := s.joinMethodHintInfo[rChild.ID()]; ok {
		newJoin.rightPreferJoinType = joinMethodHint.preferredJoinMethod
		newJoin.hintInfo = joinMethodHint.joinMethodHintInfo
	}
	newJoin.setPreferredJoinType()
}
// calcJoinCumCost calculates the cumulative cost of the join node:
// its own estimated row count plus the cumulative costs of both children.
func (*baseSingleGroupJoinOrderSolver) calcJoinCumCost(join LogicalPlan, lNode, rNode *jrNode) float64 {
	return join.StatsInfo().RowCount + lNode.cumCost + rNode.cumCost
}
// name returns the identifier of this logical optimization rule.
func (*joinReOrderSolver) name() string {
	return "join_reorder"
}
// appendJoinReorderTraceStep records the before/after join order and the
// per-join-order costs collected during the reorder into the optimizer trace.
func appendJoinReorderTraceStep(tracer *joinReorderTrace, plan LogicalPlan, opt *logicalOptimizeOp) {
	if len(tracer.initial) < 1 || len(tracer.final) < 1 {
		// Nothing was traced; skip the step.
		return
	}
	action := func() string {
		return fmt.Sprintf("join order becomes %v from original %v", tracer.final, tracer.initial)
	}
	reason := func() string {
		buffer := bytes.NewBufferString("join cost during reorder: [")
		var joins []string
		for join := range tracer.cost {
			joins = append(joins, join)
		}
		// Sort for deterministic output (map iteration order is random).
		slices.Sort(joins)
		for i, join := range joins {
			if i > 0 {
				buffer.WriteString(",")
			}
			fmt.Fprintf(buffer, "[%s, cost:%v]", join, tracer.cost[join])
		}
		buffer.WriteString("]")
		return buffer.String()
	}
	opt.appendStepToCurrent(plan.ID(), plan.TP(), reason, action)
}
// allJoinOrderToString renders one or more join-order trees as a string.
// A single tree is rendered bare; multiple trees are wrapped in "[...]"
// and comma-separated.
func allJoinOrderToString(tt []*tracing.PlanTrace) string {
	if len(tt) == 1 {
		return joinOrderToString(tt[0])
	}
	buffer := bytes.NewBufferString("[")
	for i, t := range tt {
		if i > 0 {
			buffer.WriteString(",")
		}
		buffer.WriteString(joinOrderToString(t))
	}
	buffer.WriteString("]")
	return buffer.String()
}
// joinOrderToString let Join(DataSource, DataSource) become '(t1*t2)'
func joinOrderToString(t *tracing.PlanTrace) string {
	if t.TP == plancodec.TypeJoin {
		buffer := bytes.NewBufferString("(")
		for i, child := range t.Children {
			if i > 0 {
				buffer.WriteString("*")
			}
			buffer.WriteString(joinOrderToString(child))
		}
		buffer.WriteString(")")
		return buffer.String()
	} else if t.TP == plancodec.TypeDataSource {
		// Strip a fixed 6-character prefix from ExplainInfo to keep only the
		// table name. NOTE(review): assumes the prefix is always 6 chars
		// (presumably "table:") — confirm against DataSource's ExplainInfo format.
		return t.ExplainInfo[6:]
	}
	// Other operator types contribute nothing to the rendering.
	return ""
}
// extractJoinAndDataSource will only keep join and dataSource operator and remove other operators.
// For example: Proj->Join->(Proj->DataSource, DataSource) will become Join->(DataSource, DataSource)
func extractJoinAndDataSource(t *tracing.PlanTrace) []*tracing.PlanTrace {
	roots := findRoots(t)
	if len(roots) < 1 {
		return nil
	}
	rr := make([]*tracing.PlanTrace, 0, len(roots))
	for _, root := range roots {
		// Prune non-Join/DataSource operators beneath each root in place.
		simplify(root)
		rr = append(rr, root)
	}
	return rr
}
// simplify only keeps Join and DataSource operators, and discard other operators.
// Unwanted children are replaced by their own children repeatedly until every
// direct child is a Join or DataSource; then the pruning recurses downwards.
func simplify(node *tracing.PlanTrace) {
	if len(node.Children) < 1 {
		return
	}
	// Fixed point: keep splicing out unwanted children until none remain.
	for valid := false; !valid; {
		valid = true
		newChildren := make([]*tracing.PlanTrace, 0)
		for _, child := range node.Children {
			if child.TP != plancodec.TypeDataSource && child.TP != plancodec.TypeJoin {
				// Lift the grandchildren into this level and go around again.
				newChildren = append(newChildren, child.Children...)
				valid = false
			} else {
				newChildren = append(newChildren, child)
			}
		}
		node.Children = newChildren
	}
	for _, child := range node.Children {
		simplify(child)
	}
}
// findRoots returns the topmost Join/DataSource nodes reachable from t:
// t itself when it is one of those types, otherwise the roots collected
// from all of its children.
func findRoots(t *tracing.PlanTrace) []*tracing.PlanTrace {
	switch t.TP {
	case plancodec.TypeJoin, plancodec.TypeDataSource:
		return []*tracing.PlanTrace{t}
	}
	//nolint: prealloc
	var roots []*tracing.PlanTrace
	for _, c := range t.Children {
		roots = append(roots, findRoots(c)...)
	}
	return roots
}
// joinReorderTrace accumulates tracing information for one run of the join
// reorder rule.
type joinReorderTrace struct {
	opt *logicalOptimizeOp
	// initial is the rendered join order before reordering; final, after.
	initial string
	final   string
	// cost maps a rendered join order to its estimated cost.
	cost map[string]float64
}
// traceJoinReorder records the current join order of p. The first call fills
// in the initial order; any later call overwrites the final order.
func (t *joinReorderTrace) traceJoinReorder(p LogicalPlan) {
	if t == nil || t.opt == nil || t.opt.tracer == nil {
		// Tracing is disabled.
		return
	}
	if len(t.initial) > 0 {
		t.final = allJoinOrderToString(extractJoinAndDataSource(p.BuildPlanTrace()))
		return
	}
	t.initial = allJoinOrderToString(extractJoinAndDataSource(p.BuildPlanTrace()))
}
// appendLogicalJoinCost records the cost of the given join plan, keyed by its
// rendered join order string.
func (t *joinReorderTrace) appendLogicalJoinCost(join LogicalPlan, cost float64) {
	if t == nil || t.opt == nil || t.opt.tracer == nil {
		// Tracing is disabled.
		return
	}
	joinMapKey := allJoinOrderToString(extractJoinAndDataSource(join.BuildPlanTrace()))
	t.cost[joinMapKey] = cost
}
|
package main
import "fmt"
// sum prints the given numbers followed by their total.
//
// Bug fix: the original loop `for sum := range nums` ranged over the slice
// *indices* (also shadowing the function name), so it printed the sum of
// 0..len(nums)-1 instead of the sum of the values.
func sum(nums ...int) {
	fmt.Println(nums, " ")
	fmt.Println("total sum", sumOf(nums))
}

// sumOf returns the arithmetic sum of nums (0 for an empty or nil slice).
func sumOf(nums []int) int {
	total := 0
	for _, n := range nums {
		total += n
	}
	return total
}
// main demonstrates calling a variadic function both with individual
// arguments and with a slice expanded via the ... operator.
func main() {
	sum(1, 2)
	sum(2, 3, 4)
	sum(4, 5, 6, 7)

	values := []int{1, 2, 3, 4, 5, 6}
	sum(values...)
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//217. Contains Duplicate
//Given an array of integers, find if the array contains any duplicates. Your function should return true if any value appears at least twice in the array, and it should return false if every element is distinct.
//func containsDuplicate(nums []int) bool {
//}
// Time Is Money |
package main
// StatusAction handles the "status" command with the remaining command-line
// arguments. The implementation is currently a stub.
//
// Idiom fix: the receiver was named `this`, which Go convention avoids in
// favor of a short, type-derived name.
func (app *Application) StatusAction(args []string) {
}
|
package storage
// LRUStrategy is the cache eviction strategy which uses a pseudo-LRU
// algorithm: every frame carries a hit-count "age" that grows on each page
// touch and is reset to zero for the touched page (see TouchPage).
type LRUStrategy struct {
}
// NewLRUStrategy returns a pointer to a new LRUStrategy.
func NewLRUStrategy() *LRUStrategy {
	return &LRUStrategy{}
}
// TouchPage is the process which is happened when page use: every frame's
// age (hit count) is incremented, then the used page's own count is reset
// to zero, making it the most recently used.
func (s *LRUStrategy) TouchPage(bm *BufferManager, pid int64) {
	// Increment ages.
	for _, frame := range bm.bufferPool {
		frame.IncHitCount()
	}
	// The page use resets count.
	// NOTE(review): assumes pid is present in bm.dict — a missing pid would
	// yield the map zero value and reset frame 0; confirm callers guarantee presence.
	bm.bufferPool[bm.dict[pid]].SetHitCount(0)
}
// ChooseVictim selects the eviction target: the index of the frame whose hit
// count is highest, i.e. the frame storing the oldest (least recently used)
// page. Returns -1 when the buffer pool is empty.
func (s *LRUStrategy) ChooseVictim(bm *BufferManager) int64 {
	victim := int64(-1)
	oldest := int64(-1)
	for idx, f := range bm.bufferPool {
		if hits := f.HitCount(); hits > oldest {
			oldest = hits
			victim = int64(idx)
		}
	}
	return victim
}
|
package main
import (
"net"
"strings"
)
// isDNSError reports whether err looks like a DNS/connection resolution
// failure, based on well-known substrings of its error text.
func isDNSError(err error) bool {
	msg := err.Error()
	for _, marker := range []string{"No such host", "GetAddrInfoW", "dial tcp"} {
		if strings.Contains(msg, marker) {
			return true
		}
	}
	return false
}
// isErrOpWrite reports whether err is a *net.OpError produced by a Windows
// socket send ("WSASend") operation.
func isErrOpWrite(err error) bool {
	if opErr, ok := err.(*net.OpError); ok {
		return opErr.Op == "WSASend"
	}
	return false
}
// isErrOpRead reports whether err is a *net.OpError produced by a Windows
// socket receive ("WSARecv") operation.
func isErrOpRead(err error) bool {
	if opErr, ok := err.(*net.OpError); ok {
		return opErr.Op == "WSARecv"
	}
	return false
}
|
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
)
// main registers the routes and starts the HTTP server on port 4000.
//
// http.ListenAndServe listens on the TCP address and dispatches accepted
// connections (with TCP keep-alives enabled) to the handler; a nil handler
// means the default ServeMux is used.
//
// Bug fix: the ListenAndServe error was previously discarded, so a failed
// bind exited silently — it is now fatal.
func main() {
	Routes()
	log.Println("listener : Started : Listening on : 4000")
	if err := http.ListenAndServe(":4000", nil); err != nil {
		log.Fatalln("listener : Fatal :", err)
	}
}
// Routes configures the routing for the web service.
func Routes() {
	// Register the handler for this pattern on the default ServeMux (the HTTP
	// request multiplexer), i.e. map the URL to its handler code.
	http.HandleFunc("/sendjson", SendJson)
}
// SendJson returns a simple JSON document built from a name and an e-mail
// address read interactively from standard input.
//
// NOTE(review): reading stdin inside an HTTP handler blocks the request until
// console input arrives — this appears intentional for the demo, but confirm
// before reusing this pattern in real services.
//
// Bug fix: the json.Encoder.Encode error was previously ignored, hiding
// serialization/write failures; it is now logged.
func SendJson(rw http.ResponseWriter, r *http.Request) {
	// Interactive input: name and e-mail separated by a space.
	var name, email string
	fmt.Println("请输入姓名和邮箱(以空格分隔):")
	fmt.Scanln(&name, &email)

	u := struct {
		Name  string
		Email string
	}{
		name,
		email,
	}

	rw.Header().Set("Content-Type", "application/json") // set the response header
	rw.WriteHeader(200)                                 // set the status code
	// Encode u as JSON directly into the response writer.
	if err := json.NewEncoder(rw).Encode(&u); err != nil {
		log.Println("SendJson : encode error :", err)
	}
}
/*
1.服务端点(endpoint): 与服务宿主机信息无关, 用于分辨某服务地址, 一般不包含宿主的路径
2.构造网络API时, 通常希望直接测试自己的服务的所有服务端点, 而不用启动整个网络服务
*/
|
package equinix
import (
"context"
"fmt"
"log"
"testing"
"github.com/equinix/ecx-go/v2"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
// Environment variables that override the default port and service profile
// names used by the Fabric L2 connection acceptance tests.
const (
	priPortEnvVar  = "TF_ACC_FABRIC_PRI_PORT_NAME"
	secPortEnvVar  = "TF_ACC_FABRIC_SEC_PORT_NAME"
	awsSpEnvVar    = "TF_ACC_FABRIC_AWS_L2_SP_NAME"
	azureSpEnvVar  = "TF_ACC_FABRIC_AZURE_L2_SP_NAME"
	gcpOneSpEnvVar = "TF_ACC_FABRIC_GCP1_L2_SP_NAME"
	gcpTwoSpEnvVar = "TF_ACC_FABRIC_GCP2_L2_SP_NAME"
)
// init registers the sweeper that cleans up leftover test ECX L2 connections.
func init() {
	resource.AddTestSweepers("ECXL2Connection", &resource.Sweeper{
		Name: "ECXL2Connection",
		F:    testSweepECXL2Connections,
	})
}
// testSweepECXL2Connections removes dangling test ECX L2 connections in the
// given region. Only resources recognized as sweepable test resources are
// deleted; everything else is counted and reported as skipped.
func testSweepECXL2Connections(region string) error {
	config, err := sharedConfigForRegion(region)
	if err != nil {
		return err
	}
	if err := config.Load(context.Background()); err != nil {
		log.Printf("[INFO][SWEEPER_LOG] error loading configuration: %s", err)
		return err
	}
	// Fetch connections in every state that may still need cleanup.
	conns, err := config.ecx.GetL2OutgoingConnections([]string{
		ecx.ConnectionStatusNotAvailable,
		ecx.ConnectionStatusPendingAutoApproval,
		ecx.ConnectionStatusPendingBGPPeering,
		ecx.ConnectionStatusProvisioned,
		ecx.ConnectionStatusProvisioning,
		ecx.ConnectionStatusRejected,
	})
	if err != nil {
		log.Printf("[INFO][SWEEPER_LOG] error fetching ECXL2Connection list: %s", err)
		return err
	}
	nonSweepableCount := 0
	for _, conn := range conns {
		if !isSweepableTestResource(ecx.StringValue(conn.Name)) {
			nonSweepableCount++
			continue
		}
		// Deletion errors are logged but do not abort the sweep.
		if err := config.ecx.DeleteL2Connection(ecx.StringValue(conn.UUID)); err != nil {
			log.Printf("[INFO][SWEEPER_LOG] error deleting ECXL2Connection resource %s (%s): %s", ecx.StringValue(conn.UUID), ecx.StringValue(conn.Name), err)
		} else {
			log.Printf("[INFO][SWEEPER_LOG] sent delete request for ECXL2Connection resource %s (%s)", ecx.StringValue(conn.UUID), ecx.StringValue(conn.Name))
		}
	}
	if nonSweepableCount > 0 {
		log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonSweepableCount)
	}
	return nil
}
// TestAccFabricL2Connection_Port_Single_AWS provisions a single (non-HA)
// port-based L2 connection to the AWS Direct Connect profile, then applies a
// name change in a second step and re-verifies the attributes.
func TestAccFabricL2Connection_Port_Single_AWS(t *testing.T) {
	t.Parallel()
	// Port and service profile names can be overridden via environment variables.
	portName, _ := schema.EnvDefaultFunc(priPortEnvVar, "sit-001-CX-SV1-NL-Dot1q-BO-10G-PRI-JUN-33")()
	spName, _ := schema.EnvDefaultFunc(awsSpEnvVar, "AWS Direct Connect")()
	// Template context used to render the test Terraform configuration.
	context := map[string]interface{}{
		"port-resourceName":                "test",
		"port-name":                        portName.(string),
		"connection-resourceName":          "test",
		"connection-profile_name":          spName.(string),
		"connection-name":                  fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"connection-speed":                 50,
		"connection-speed_unit":            "MB",
		"connection-notifications":         []string{"marry@equinix.com", "john@equinix.com"},
		"connection-purchase_order_number": randString(10),
		"connection-vlan_stag":             randInt(2000),
		"connection-seller_region":         "us-west-2",
		"connection-seller_metro_code":     "SV",
		"connection-authorization_key":     "123456789012",
	}
	// Second step: the same connection with a changed name.
	contextWithChanges := copyMap(context)
	contextWithChanges["connection-name"] = fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6))
	resourceName := fmt.Sprintf("equinix_ecx_l2_connection.%s", context["connection-resourceName"].(string))
	var testConn ecx.L2Connection
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: newTestAccConfig(context).withPort().withConnection().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccFabricL2ConnectionExists(resourceName, &testConn),
					testAccFabricL2ConnectionAttributes(&testConn, context),
					resource.TestCheckResourceAttr(resourceName, "status", ecx.ConnectionStatusProvisioned),
					resource.TestCheckResourceAttrSet(resourceName, "provider_status"),
					resource.TestCheckResourceAttrSet(resourceName, "zside_port_uuid"),
				),
			},
			{
				Config: newTestAccConfig(contextWithChanges).withPort().withConnection().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccFabricL2ConnectionExists(resourceName, &testConn),
					testAccFabricL2ConnectionAttributes(&testConn, contextWithChanges),
					resource.TestCheckResourceAttr(resourceName, "status", ecx.ConnectionStatusProvisioned),
					resource.TestCheckResourceAttrSet(resourceName, "provider_status"),
					resource.TestCheckResourceAttrSet(resourceName, "zside_port_uuid"),
				),
			},
		},
	})
}
// TestAccFabricL2Connection_Port_HA_Azure provisions a redundant (HA)
// port-based L2 connection pair to the Azure Express Route profile, then
// renames both the primary and the secondary connection in a second step and
// re-verifies attributes and redundancy.
func TestAccFabricL2Connection_Port_HA_Azure(t *testing.T) {
	t.Parallel()
	// Port and service profile names can be overridden via environment variables.
	priPortName, _ := schema.EnvDefaultFunc(priPortEnvVar, "sit-001-CX-SV1-NL-Dot1q-BO-10G-PRI-JUN-33")()
	secPortName, _ := schema.EnvDefaultFunc(secPortEnvVar, "sit-001-CX-SV5-NL-Dot1q-BO-10G-SEC-JUN-36")()
	spName, _ := schema.EnvDefaultFunc(azureSpEnvVar, "Azure Express Route")()
	// Template context used to render the test Terraform configuration.
	context := map[string]interface{}{
		"port-resourceName":                "test",
		"port-name":                        priPortName.(string),
		"port-secondary_resourceName":      "test-sec",
		"port-secondary_name":              secPortName.(string),
		"connection-resourceName":          "test",
		"connection-profile_name":          spName.(string),
		"connection-name":                  fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"connection-speed":                 50,
		"connection-speed_unit":            "MB",
		"connection-notifications":         []string{"marry@equinix.com", "john@equinix.com"},
		"connection-purchase_order_number": randString(10),
		"connection-vlan_stag":             randInt(2000),
		"connection-seller_metro_code":     "SV",
		"connection-authorization_key":     randString(12),
		"connection-named_tag":             "Public",
		"connection-secondary_name":        fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"connection-secondary_vlan_stag":   randInt(2000),
	}
	// Second step: both connections renamed.
	contextWithChanges := copyMap(context)
	contextWithChanges["connection-name"] = fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6))
	contextWithChanges["connection-secondary_name"] = fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6))
	resourceName := fmt.Sprintf("equinix_ecx_l2_connection.%s", context["connection-resourceName"].(string))
	var primary, secondary ecx.L2Connection
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: newTestAccConfig(context).withPort().withConnection().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccFabricL2ConnectionExists(resourceName, &primary),
					testAccFabricL2ConnectionAttributes(&primary, context),
					testAccFabricL2ConnectionSecondaryExists(&primary, &secondary),
					testAccFabricL2ConnectionSecondaryAttributes(&secondary, context),
					resource.TestCheckResourceAttr(resourceName, "status", ecx.ConnectionStatusProvisioned),
					resource.TestCheckResourceAttrSet(resourceName, "provider_status"),
					testAccFabricL2ConnectionRedundancyAttributes(&primary, &secondary),
				),
			},
			{
				Config: newTestAccConfig(contextWithChanges).withPort().withConnection().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccFabricL2ConnectionExists(resourceName, &primary),
					testAccFabricL2ConnectionAttributes(&primary, contextWithChanges),
					testAccFabricL2ConnectionSecondaryExists(&primary, &secondary),
					testAccFabricL2ConnectionSecondaryAttributes(&secondary, contextWithChanges),
					resource.TestCheckResourceAttr(resourceName, "status", ecx.ConnectionStatusProvisioned),
					resource.TestCheckResourceAttrSet(resourceName, "provider_status"),
					testAccFabricL2ConnectionRedundancyAttributes(&primary, &secondary),
				),
			},
		},
	})
}
// TestAccFabricL2Connection_Device_HA_GCP provisions a self-managed, BYOL HA
// network device pair plus an SSH key, then creates redundant L2 connections
// from that device to two GCP Partner Interconnect seller profiles, and
// verifies both the primary and secondary connection attributes.
func TestAccFabricL2Connection_Device_HA_GCP(t *testing.T) {
	t.Parallel()
	// Device metro and seller-profile names can be overridden through env vars;
	// the second value returned by EnvDefaultFunc is always nil, so it is discarded.
	deviceMetro, _ := schema.EnvDefaultFunc(networkDeviceMetroEnvVar, "SV")()
	priSPName, _ := schema.EnvDefaultFunc(gcpOneSpEnvVar, "Google Cloud Partner Interconnect Zone 1")()
	secSPName, _ := schema.EnvDefaultFunc(gcpTwoSpEnvVar, "Google Cloud Partner Interconnect Zone 2")()
	// Context map feeds the HCL template builders (testAccFabricL2Connection etc.);
	// keys are "<resource>-<attribute>" and optional attributes are emitted only
	// when the key is present.
	context := map[string]interface{}{
		"device-resourceName":            "test",
		"device-self_managed":            true,
		"device-byol":                    true,
		"device-name":                    fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-metro_code":              deviceMetro.(string),
		"device-type_code":               "PA-VM",
		"device-package_code":            "VM100",
		"device-notifications":           []string{"marry@equinix.com", "john@equinix.com"},
		"device-hostname":                fmt.Sprintf("tf-%s", randString(6)),
		"device-term_length":             1,
		"device-version":                 "9.0.4",
		"device-core_count":              2,
		"device-purchase_order_number":   randString(10),
		"device-order_reference":         randString(10),
		"device-secondary_name":          fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-secondary_hostname":      fmt.Sprintf("tf-%s", randString(6)),
		"device-secondary_notifications": []string{"secondary@equinix.com"},
		"sshkey-resourceName":            "test",
		"sshkey-name":                    fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"sshkey-public_key":              "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCXdzXBHaVpKpdO0udnB+4JOgUq7APO2rPXfrevvlZrps98AtlwXXVWZ5duRH5NFNfU4G9HCSiAPsebgjY0fG85tcShpXfHfACLt0tBW8XhfLQP2T6S50FQ1brBdURMDCMsD7duOXqvc0dlbs2/KcswHvuUmqVzob3bz7n1bQ48wIHsPg4ARqYhy5LN3OkllJH/6GEfqi8lKZx01/P/gmJMORcJujuOyXRB+F2iXBVYdhjML3Qg4+tEekBcVZOxUbERRZ0pvQ52Y6wUhn2VsjljixyqeOdmD0m6DayDQgSWms6bKPpBqN7zhXXk4qe8bXT4tQQba65b2CQ2A91jw2KgM/YZNmjyUJ+Rf1cQosJf9twqbAZDZ6rAEmj9zzvQ5vD/CGuzxdVMkePLlUK4VGjPu7cVzhXrnq4318WqZ5/lNiCST8NQ0fssChN8ANUzr/p/wwv3faFMVNmjxXTZMsbMFT/fbb2MVVuqNFN65drntlg6/xEao8gZROuRYiakBx8= user@host",
		"connection-resourceName":        "test",
		"connection-name":                fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"connection-profile_name":        priSPName.(string),
		"connection-speed":               50,
		"connection-speed_unit":          "MB",
		"connection-notifications":       []string{"marry@equinix.com", "john@equinix.com"},
		"connection-purchase_order_number": randString(10),
		"connection-seller_metro_code":   "SV",
		"connection-seller_region":       "us-west1",
		"connection-authorization_key":   "131f5adc-021d-4fe1-fff3-4019be1d6ef7/us-west1/1",
		"connection-device_interface_id": 5,
		"connection-secondary_name":      fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"connection-secondary_profile_name": secSPName.(string),
		"connection-secondary_speed":     100,
		"connection-secondary_speed_unit": "MB",
		"connection-secondary_seller_metro_code": "SV",
		"connection-secondary_seller_region":     "us-west1",
		"connection-secondary_authorization_key": "531ba3dc-121d-5ee1-acf3-402343ac3af7/us-west1/2",
		"connection-secondary_device_interface_id": 5,
	}
	connResourceName := fmt.Sprintf("equinix_ecx_l2_connection.%s", context["connection-resourceName"].(string))
	// Populated by the Exists check functions, then inspected by the attribute checks.
	var primary, secondary ecx.L2Connection
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: newTestAccConfig(context).withDevice().withSSHKey().
					withConnection().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccFabricL2ConnectionExists(connResourceName, &primary),
					testAccFabricL2ConnectionAttributes(&primary, context),
					testAccFabricL2ConnectionSecondaryExists(&primary, &secondary),
					testAccFabricL2ConnectionSecondaryAttributes(&secondary, context),
				),
			},
		},
	})
}
// testAccFabricL2ConnectionExists returns a check that looks up resourceName
// in the Terraform state, fetches the corresponding L2 connection through the
// ECX client, and copies the API response into conn for later attribute checks.
func testAccFabricL2ConnectionExists(resourceName string, conn *ecx.L2Connection) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		res, found := s.RootModule().Resources[resourceName]
		if !found {
			return fmt.Errorf("resource not found: %s", resourceName)
		}
		if res.Primary.ID == "" {
			return fmt.Errorf("resource has no ID attribute set")
		}
		client := testAccProvider.Meta().(*Config).ecx
		fetched, err := client.GetL2Connection(res.Primary.ID)
		if err != nil {
			return fmt.Errorf("error when fetching L2 connection %v", err)
		}
		if ecx.StringValue(fetched.UUID) != res.Primary.ID {
			return fmt.Errorf("resource ID does not match %v - %v", res.Primary.ID, fetched.UUID)
		}
		*conn = *fetched
		return nil
	}
}
// testAccFabricL2ConnectionSecondaryExists returns a check that resolves the
// primary connection's redundant UUID, fetches that secondary connection from
// the API, and copies it into secondary for later attribute checks.
func testAccFabricL2ConnectionSecondaryExists(primary *ecx.L2Connection, secondary *ecx.L2Connection) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		redundantID := ecx.StringValue(primary.RedundantUUID)
		if redundantID == "" {
			return fmt.Errorf("primary connection has no RedundantUUID set")
		}
		client := testAccProvider.Meta().(*Config).ecx
		fetched, err := client.GetL2Connection(redundantID)
		if err != nil {
			return fmt.Errorf("error when fetching L2 connection %v", err)
		}
		*secondary = *fetched
		return nil
	}
}
// testAccFabricL2ConnectionAttributes returns a check that compares the fetched
// primary connection against the expected values in the test context map.
// Every comparison is conditional: a field is verified only when its
// "connection-*" key is present in ctx, so the same function serves contexts
// of different shapes.
func testAccFabricL2ConnectionAttributes(conn *ecx.L2Connection, ctx map[string]interface{}) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if v, ok := ctx["connection-name"]; ok && ecx.StringValue(conn.Name) != v.(string) {
			return fmt.Errorf("name does not match %v - %v", ecx.StringValue(conn.Name), v)
		}
		if v, ok := ctx["connection-speed"]; ok && ecx.IntValue(conn.Speed) != v.(int) {
			return fmt.Errorf("speed does not match %v - %v", ecx.IntValue(conn.Speed), v)
		}
		if v, ok := ctx["connection-speed_unit"]; ok && ecx.StringValue(conn.SpeedUnit) != v.(string) {
			return fmt.Errorf("speedUnit does not match %v - %v", ecx.StringValue(conn.SpeedUnit), v)
		}
		// Notifications are order-insensitive per slicesMatch's contract;
		// NOTE(review): confirm slicesMatch semantics — it is defined elsewhere.
		if v, ok := ctx["connection-notifications"]; ok && !slicesMatch(conn.Notifications, v.([]string)) {
			return fmt.Errorf("notifications does not match %v - %v", conn.Notifications, v)
		}
		if v, ok := ctx["connection-purchase_order_number"]; ok && ecx.StringValue(conn.PurchaseOrderNumber) != v.(string) {
			return fmt.Errorf("purchaseOrderNumber does not match %v - %v", ecx.StringValue(conn.PurchaseOrderNumber), v)
		}
		if v, ok := ctx["connection-device_interface_id"]; ok && ecx.IntValue(conn.DeviceInterfaceID) != v.(int) {
			return fmt.Errorf("deviceInterfaceID does not match %v - %v", ecx.IntValue(conn.DeviceInterfaceID), v)
		}
		if v, ok := ctx["connection-vlan_stag"]; ok && ecx.IntValue(conn.VlanSTag) != v.(int) {
			return fmt.Errorf("vlanSTag does not match %v - %v", ecx.IntValue(conn.VlanSTag), v)
		}
		if v, ok := ctx["connection-vlan_ctag"]; ok && ecx.IntValue(conn.VlanCTag) != v.(int) {
			return fmt.Errorf("vlanCTag does not match %v - %v", ecx.IntValue(conn.VlanCTag), v)
		}
		if v, ok := ctx["connection-zside_port_uuid"]; ok && ecx.StringValue(conn.ZSidePortUUID) != v.(string) {
			return fmt.Errorf("zSidePortUUID does not match %v - %v", ecx.StringValue(conn.ZSidePortUUID), v)
		}
		if v, ok := ctx["connection-zside_vlan_stag"]; ok && ecx.IntValue(conn.ZSideVlanSTag) != v.(int) {
			return fmt.Errorf("zSideVlanSTag does not match %v - %v", ecx.IntValue(conn.ZSideVlanSTag), v)
		}
		if v, ok := ctx["connection-zside_vlan_ctag"]; ok && ecx.IntValue(conn.ZSideVlanCTag) != v.(int) {
			return fmt.Errorf("zSideVlanCTag does not match %v - %v", ecx.IntValue(conn.ZSideVlanCTag), v)
		}
		if v, ok := ctx["connection-named_tag"]; ok && ecx.StringValue(conn.NamedTag) != v.(string) {
			return fmt.Errorf("named_tag does not match %v - %v", ecx.StringValue(conn.NamedTag), v)
		}
		if v, ok := ctx["connection-seller_region"]; ok && ecx.StringValue(conn.SellerRegion) != v.(string) {
			return fmt.Errorf("sellerRegion does not match %v - %v", ecx.StringValue(conn.SellerRegion), v)
		}
		if v, ok := ctx["connection-seller_metro_code"]; ok && ecx.StringValue(conn.SellerMetroCode) != v.(string) {
			return fmt.Errorf("sellerMetroCode does not match %v - %v", ecx.StringValue(conn.SellerMetroCode), v)
		}
		if v, ok := ctx["connection-authorization_key"]; ok && ecx.StringValue(conn.AuthorizationKey) != v.(string) {
			return fmt.Errorf("authorizationKey does not match %v - %v", ecx.StringValue(conn.AuthorizationKey), v)
		}
		return nil
	}
}
// testAccFabricL2ConnectionSecondaryAttributes returns a check that compares
// the fetched secondary connection against the expected values in the test
// context map. Each field is verified only when its
// "connection-secondary_*" key is present in ctx.
func testAccFabricL2ConnectionSecondaryAttributes(conn *ecx.L2Connection, ctx map[string]interface{}) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if v, ok := ctx["connection-secondary_name"]; ok && ecx.StringValue(conn.Name) != v.(string) {
			return fmt.Errorf("connection secondary name does not match %v - %v", ecx.StringValue(conn.Name), v)
		}
		if v, ok := ctx["connection-secondary_speed"]; ok && ecx.IntValue(conn.Speed) != v.(int) {
			return fmt.Errorf("connection secondary speed does not match %v - %v", ecx.IntValue(conn.Speed), v)
		}
		if v, ok := ctx["connection-secondary_speed_unit"]; ok && ecx.StringValue(conn.SpeedUnit) != v.(string) {
			return fmt.Errorf("connection secondary speed unit does not match %v - %v", ecx.StringValue(conn.SpeedUnit), v)
		}
		if v, ok := ctx["connection-secondary_device_interface_id"]; ok && ecx.IntValue(conn.DeviceInterfaceID) != v.(int) {
			return fmt.Errorf("connection secondary device interface id does not match %v - %v", ecx.IntValue(conn.DeviceInterfaceID), v)
		}
		if v, ok := ctx["connection-secondary_vlan_stag"]; ok && ecx.IntValue(conn.VlanSTag) != v.(int) {
			return fmt.Errorf("connection secondary vlanSTag does not match %v - %v", ecx.IntValue(conn.VlanSTag), v)
		}
		if v, ok := ctx["connection-secondary_vlan_ctag"]; ok && ecx.IntValue(conn.VlanCTag) != v.(int) {
			return fmt.Errorf("connection secondary vlanCTag does not match %v - %v", ecx.IntValue(conn.VlanCTag), v)
		}
		// BUG FIX: the next two lookups previously used hyphenated keys
		// ("connection-secondary-seller_region" / "connection-secondary-seller_metro_code")
		// while every test context populates the underscore-separated keys, so
		// these checks were silently skipped.
		if v, ok := ctx["connection-secondary_seller_region"]; ok && ecx.StringValue(conn.SellerRegion) != v.(string) {
			return fmt.Errorf("connection secondary seller region does not match %v - %v", ecx.StringValue(conn.SellerRegion), v)
		}
		if v, ok := ctx["connection-secondary_seller_metro_code"]; ok && ecx.StringValue(conn.SellerMetroCode) != v.(string) {
			return fmt.Errorf("connection secondary seller metro code does not match %v - %v", ecx.StringValue(conn.SellerMetroCode), v)
		}
		if v, ok := ctx["connection-secondary_authorization_key"]; ok && ecx.StringValue(conn.AuthorizationKey) != v.(string) {
			return fmt.Errorf("connection secondary authorization_key code does not match %v - %v", ecx.StringValue(conn.AuthorizationKey), v)
		}
		return nil
	}
}
// testAccFabricL2ConnectionRedundancyAttributes returns a check that the two
// connections form a consistent redundant pair: correct redundancy types and
// mutually referencing redundant UUIDs.
func testAccFabricL2ConnectionRedundancyAttributes(primary, secondary *ecx.L2Connection) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		// Each row is (human-readable label, observed value, expected value);
		// rows are evaluated in order and the first mismatch is reported.
		for _, chk := range []struct {
			label string
			got   string
			want  string
		}{
			{"primary connection redundancy type", ecx.StringValue(primary.RedundancyType), "primary"},
			{"primary connection redundant UUID", ecx.StringValue(primary.RedundantUUID), ecx.StringValue(secondary.UUID)},
			{"secondary connection redundancy type", ecx.StringValue(secondary.RedundancyType), "secondary"},
			{"secondary connection redundant UUID", ecx.StringValue(secondary.RedundantUUID), ecx.StringValue(primary.UUID)},
		} {
			if chk.got != chk.want {
				return fmt.Errorf("%s does not match %v - %v", chk.label, chk.got, chk.want)
			}
		}
		return nil
	}
}
// withConnection appends the rendered L2 connection configuration to the
// builder and returns it for chaining.
func (t *testAccConfig) withConnection() *testAccConfig {
	t.config = t.config + testAccFabricL2Connection(t.ctx)
	return t
}
// withPort appends the rendered port data-source configuration to the builder
// and returns it for chaining.
func (t *testAccConfig) withPort() *testAccConfig {
	t.config = t.config + testAccFabricPort(t.ctx)
	return t
}
// testAccFabricPort renders HCL for the primary (and, when the
// "port-secondary_resourceName" key is present, the secondary)
// equinix_ecx_port data source from the test context map.
func testAccFabricPort(ctx map[string]interface{}) string {
	var config string
	config += nprintf(`
data "equinix_ecx_port" "%{port-resourceName}" {
name = "%{port-name}"
}`, ctx)
	// The secondary port lookup is optional and only rendered when requested.
	if _, ok := ctx["port-secondary_resourceName"]; ok {
		config += nprintf(`
data "equinix_ecx_port" "%{port-secondary_resourceName}" {
name = "%{port-secondary_name}"
}`, ctx)
	}
	return config
}
// testAccFabricL2Connection renders HCL for an equinix_ecx_l2_connection
// resource (plus the seller-profile data sources it references) from the test
// context map. Mandatory attributes are always emitted; every optional
// attribute — and the whole secondary_connection block — is emitted only when
// its corresponding context key is present.
func testAccFabricL2Connection(ctx map[string]interface{}) string {
	var config string
	// Primary seller profile lookup is always required.
	config += nprintf(`
data "equinix_ecx_l2_sellerprofile" "pri" {
name = "%{connection-profile_name}"
}`, ctx)
	// Secondary seller profile is optional (redundant connection to a
	// different provider).
	if _, ok := ctx["connection-secondary_profile_name"]; ok {
		config += nprintf(`
data "equinix_ecx_l2_sellerprofile" "sec" {
name = "%{connection-secondary_profile_name}"
}`, ctx)
	}
	// Mandatory connection attributes.
	config += nprintf(`
resource "equinix_ecx_l2_connection" "%{connection-resourceName}" {
name = "%{connection-name}"
profile_uuid = data.equinix_ecx_l2_sellerprofile.pri.id
speed = %{connection-speed}
speed_unit = "%{connection-speed_unit}"
notifications = %{connection-notifications}
seller_metro_code = "%{connection-seller_metro_code}"
authorization_key = "%{connection-authorization_key}"`, ctx)
	if _, ok := ctx["connection-purchase_order_number"]; ok {
		config += nprintf(`
purchase_order_number = "%{connection-purchase_order_number}"`, ctx)
	}
	if _, ok := ctx["connection-seller_region"]; ok {
		config += nprintf(`
seller_region = "%{connection-seller_region}"`, ctx)
	}
	// The connection attaches either to a port or to a network device,
	// depending on which resource the context declares.
	if _, ok := ctx["port-resourceName"]; ok {
		config += nprintf(`
port_uuid = data.equinix_ecx_port.%{port-resourceName}.id`, ctx)
	}
	if _, ok := ctx["device-resourceName"]; ok {
		config += nprintf(`
device_uuid = equinix_network_device.%{device-resourceName}.id`, ctx)
	}
	if _, ok := ctx["connection-vlan_stag"]; ok {
		config += nprintf(`
vlan_stag = %{connection-vlan_stag}`, ctx)
	}
	if _, ok := ctx["connection-vlan_ctag"]; ok {
		config += nprintf(`
vlan_ctag = %{connection-vlan_ctag}`, ctx)
	}
	if _, ok := ctx["connection-named_tag"]; ok {
		config += nprintf(`
named_tag = "%{connection-named_tag}"`, ctx)
	}
	if _, ok := ctx["connection-device_interface_id"]; ok {
		config += nprintf(`
device_interface_id = %{connection-device_interface_id}`, ctx)
	}
	// The presence of a secondary name switches on the whole nested
	// secondary_connection block; all of its attributes are again optional.
	if _, ok := ctx["connection-secondary_name"]; ok {
		config += nprintf(`
secondary_connection {
name = "%{connection-secondary_name}"`, ctx)
		if _, ok := ctx["connection-secondary_profile_name"]; ok {
			config += nprintf(`
profile_uuid = data.equinix_ecx_l2_sellerprofile.sec.id`, ctx)
		}
		if _, ok := ctx["port-secondary_resourceName"]; ok {
			config += nprintf(`
port_uuid = data.equinix_ecx_port.%{port-secondary_resourceName}.id`, ctx)
		}
		// The secondary leg of an HA device is addressed through the primary
		// device's redundant_id attribute.
		if _, ok := ctx["device-secondary_name"]; ok {
			config += nprintf(`
device_uuid = equinix_network_device.%{device-resourceName}.redundant_id`, ctx)
		}
		if _, ok := ctx["connection-secondary_vlan_stag"]; ok {
			config += nprintf(`
vlan_stag = %{connection-secondary_vlan_stag}`, ctx)
		}
		if _, ok := ctx["connection-secondary_vlan_ctag"]; ok {
			config += nprintf(`
vlan_ctag = %{connection-secondary_vlan_ctag}`, ctx)
		}
		if _, ok := ctx["connection-secondary_device_interface_id"]; ok {
			config += nprintf(`
device_interface_id = %{connection-secondary_device_interface_id}`, ctx)
		}
		if _, ok := ctx["connection-secondary_speed"]; ok {
			config += nprintf(`
speed = %{connection-secondary_speed}`, ctx)
		}
		if _, ok := ctx["connection-secondary_speed_unit"]; ok {
			config += nprintf(`
speed_unit = "%{connection-secondary_speed_unit}"`, ctx)
		}
		if _, ok := ctx["connection-secondary_seller_metro_code"]; ok {
			config += nprintf(`
seller_metro_code = "%{connection-secondary_seller_metro_code}"`, ctx)
		}
		if _, ok := ctx["connection-secondary_seller_region"]; ok {
			config += nprintf(`
seller_region = "%{connection-secondary_seller_region}"`, ctx)
		}
		if _, ok := ctx["connection-secondary_authorization_key"]; ok {
			config += nprintf(`
authorization_key = "%{connection-secondary_authorization_key}"`, ctx)
		}
		// Close the secondary_connection block.
		config += `
}`
	}
	// Close the resource block.
	config += `
}`
	return config
}
|
package main
import (
"fmt"
"github.com/gorilla/mux"
"net/http"
)
// Place describes a stored location record as exposed over the JSON API.
type Place struct {
	// BUG FIX: struct tags previously had a space after the colon
	// (`json: "id"`), which is not a valid struct-tag key/value pair, so
	// encoding/json ignored them and marshaled the Go field names instead.
	ID       int    `json:"id"`
	Location string `json:"name"` // serialized as "name" for API compatibility
	SMID     int    `json:"smid"`
}
// getPlaceHandler looks up the place named in the {location} path variable and
// logs it to stdout.
func getPlaceHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	// BUG FIX: the lookup error was previously discarded with `_`, so a failed
	// lookup silently printed a zero value. NOTE(review): store.GetPlace is
	// defined elsewhere — confirm that a lookup miss is the only error case;
	// 404 is assumed here.
	location, err := store.GetPlace(vars["location"])
	if err != nil {
		http.Error(w, "place not found", http.StatusNotFound)
		return
	}
	fmt.Println(location)
}
|
package services
import "testing"
// mockRequester is a stand-in requester injected into the package-level
// _requester before each test. NOTE(review): assigning &struct{}{} only
// compiles if IRequester declares no methods — confirm against the interface
// definition.
var mockRequester IRequester = &struct{}{}

// subject is the service instance exercised by the tests in this file.
var subject = UsersService{}
// TestGetUsersOk swaps in the mock requester and verifies that GetUsers
// returns a non-nil response.
func TestGetUsersOk(t *testing.T) {
	_requester = mockRequester
	if got := subject.GetUsers(); got == nil {
		t.Fail()
	}
}
|
package routes
import (
"github.com/gofiber/fiber/v2"
"github.com/lucas-stellet/fiber-todo/controllers"
)
// TodoRoute registers the todo endpoints on the given router group:
// list all todos, fetch one by id, and create a new one.
func TodoRoute(route fiber.Router) {
	route.Get("", controllers.GetTodos)     // GET  <group>/     — list
	route.Get(":id", controllers.GetTodo)   // GET  <group>/:id  — fetch one
	route.Post("", controllers.CreateTodo)  // POST <group>/     — create
}
|
package history
import (
"context"
"time"
)
// Status represents the outcome of a recorded query execution.
type Status int

// Recognized query outcomes.
const (
	// Success indicates the query completed without error.
	Success Status = iota
	// Failure indicates the query ended with an error.
	// (Idiom fix: within one const block iota continues implicitly; repeating
	// "Status = iota" on every line was redundant.)
	Failure
)
// Historizer records the outcome of executed query translations.
type Historizer interface {
	// SaveSuccessfulQuery stores a successfully executed cypher/SQL pair
	// along with how long the execution took.
	SaveSuccessfulQuery(ctx context.Context, cypher, sql string, duration time.Duration) error
	// SaveFailedQuery stores a cypher/SQL pair together with the error that
	// aborted its execution.
	SaveFailedQuery(ctx context.Context, cypher, sql string, err error) error
}
// NoopHistorizer is a Historizer that discards every record; use it when
// query history is disabled.
type NoopHistorizer struct{}

// SaveSuccessfulQuery implements Historizer and does nothing.
func (h *NoopHistorizer) SaveSuccessfulQuery(ctx context.Context, cypher, sql string, duration time.Duration) error {
	return nil
}

// SaveFailedQuery implements Historizer and does nothing.
func (h *NoopHistorizer) SaveFailedQuery(ctx context.Context, cypher, sql string, err error) error {
	return nil
}
|
// Copyright 2020 Adam Chalkley
//
// https://github.com/atc0005/go-lockss
//
// Licensed under the MIT License. See LICENSE file in the project root for
// full license information.
package main
import (
"errors"
"flag"
"fmt"
"os"
"time"
"github.com/apex/log"
"github.com/atc0005/go-lockss/internal/config"
"github.com/atc0005/go-lockss/internal/lockss"
"github.com/atc0005/go-lockss/internal/portchecks"
)
// main loads the application and LOCKSS configurations, resolves the list of
// V3 peer nodes, then concurrently probes each configured port on every peer
// and prints a summary of the results.
func main() {
	log.Debug("Initializing application")
	// lockss.EnableLogging()
	lockss.DisableLogging()
	// setup application configuration
	appCfg, err := config.NewConfig()
	switch {
	// TODO: How else to guard against nil cfg object?
	case appCfg != nil && appCfg.ShowVersion():
		config.Branding()
		os.Exit(0)
	case err == nil:
		// do nothing for this one
	case errors.Is(err, flag.ErrHelp):
		// -h/--help already printed usage; exit cleanly.
		os.Exit(0)
	default:
		log.Errorf("failed to initialize application: %s", err)
		flag.Usage()
		os.Exit(1)
	}
	// if we have set the app logging level to Debug, enable lockss package
	// logging too
	// if logger, ok := log.Log.(*log.Logger); ok {
	// 	if logger.Level == log.DebugLevel {
	// 		lockss.EnableLogging()
	// 	}
	// }
	if appCfg.LogLevel() == config.LogLevelDebug {
		lockss.EnableLogging()
	}
	fmt.Printf(
		"\n[%v] Starting %s version %q ...\n",
		// NOTE(review): "15.04:05" uses a dot between hours and minutes —
		// looks like a typo for "15:04:05", but it is used consistently below,
		// so it is preserved here.
		time.Now().Format("2006-01-02 15.04:05"),
		config.MyBinaryName(),
		config.Version,
	)
	// If user supplied values, we should use those to retrieve the LOCKSS
	// configuration from the central LOCKSS configuration server, otherwise
	// try to automatically determine values and go from there.
	var lockssCfg *lockss.Config
	var cfgSource string
	switch {
	case appCfg.ConfigServerURL() != "":
		log.Debugf(
			"ConfigServerURL() is non-empty, using value %q",
			appCfg.ConfigServerURL(),
		)
		cfgSource = appCfg.ConfigServerURL()
		var err error
		lockssCfg, err = lockss.NewFromURL(appCfg.ConfigServerURL())
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
	case appCfg.ConfigFile() != "":
		log.Debugf(
			"ConfigFile() is non-empty, using value %q",
			appCfg.ConfigFile(),
		)
		cfgSource = appCfg.ConfigFile()
		var err error
		lockssCfg, err = lockss.NewFromFile(appCfg.ConfigFile())
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
	default:
		log.Debug("ConfigServerURL() is empty")
		log.Debug("Attempting to automatically retrieve config server value")
		var err error
		lockssCfg, err = lockss.New()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		cfgSource = lockssCfg.PropsURL()
	}
	log.Debugf("Full LOCKSS config object: %+v", lockssCfg)
	log.Debugf("Full App config object: %+v", appCfg)
	peersList, err := lockssCfg.IDInitialV3Peers.List()
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	if len(peersList) == 0 {
		fmt.Println("ERROR: No peers found in LOCKSS configuration file!")
		fmt.Println("No peers to check, exiting.")
		os.Exit(1)
	}
	log.Debugf("%d peers listed in %s", len(peersList), cfgSource)
	if appCfg.LogLevel() == config.LogLevelDebug {
		for idx, peer := range peersList {
			log.Debugf(
				"Peer %d: [Protocol: %q, IP Address: %q, Port: %d, peer.Network(): %q, peer.String(): %q]",
				idx,
				peer.Protocol,
				peer.IPAddress,
				peer.LCAPPort,
				peer.Network(),
				peer.String(),
			)
		}
	}
	// Probe any user-specified ports plus the LCAP port advertised by the
	// first peer, de-duplicated.
	ports := append(appCfg.UserNodePorts(), peersList[0].LCAPPort)
	ports = portchecks.UniquePorts(ports...)
	numPorts := len(ports)
	numPeers := len(peersList)
	// Every peer is checked on every port; this product is the total number
	// of results we expect back on the channel.
	expectedResponses := numPeers * numPorts
	log.Debugf("Expected responses: %d", expectedResponses)
	// collect results here that are pulled off the channel used by
	// goroutines as they complete their work
	results := make(portchecks.Results, 0, expectedResponses)
	// setup a channel to funnel each result from a port check. The capacity
	// is set to mirror the number of peers in the network to reduce
	// collection delay.
	resultsChan := make(chan portchecks.Result, expectedResponses)
	fmt.Printf(
		"[%v] Checking %d ports on %d peer nodes ...\n",
		time.Now().Format("2006-01-02 15.04:05"),
		numPorts,
		numPeers,
	)
	// One goroutine per peer; each checks every port sequentially and sends
	// results to the shared channel. Arguments are passed explicitly to avoid
	// loop-variable capture issues.
	for _, peer := range peersList {
		go func(peer lockss.V3Peer, ports []int, connTimeout time.Duration) {
			for _, port := range ports {
				log.Debugf("Checking port %d on %s ...", port, peer.IPAddress)
				resultsChan <- portchecks.CheckPort(peer, port, connTimeout)
			}
		}(peer, ports, appCfg.PortConnectTimeout())
	}
	// Collect all responses, continue until we exhaust the number of expected
	// responses calculated earlier as our signal to stop collecting responses
	remainingResponses := expectedResponses
	for remainingResponses > 0 {
		result := <-resultsChan
		results = append(results, result)
		remainingResponses--
		if remainingResponses > 0 {
			// skip emitting "Waiting" message if we're no longer waiting
			log.Debugf("Waiting on %d responses ...", remainingResponses)
			continue
		}
		log.Debug("All responses received")
	}
	results.PrintSummary()
}
|
package main
import (
"fmt"
"html/template"
"io"
"net/http"
"os"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
"github.com/parnurzeal/gorequest"
"github.com/patrickmn/go-cache"
"github.com/tidwall/gjson"
)
// cacheDuration is how long fetched rates are kept before the upstream
// providers are queried again.
var cacheDuration = time.Second * 90

// defaultBackoff is the initial wait used when a provider rate-limits us.
var defaultBackoff = time.Second * 30

// maxBackoff caps the growth of the retry backoff.
var maxBackoff = time.Hour
// main wires up the echo HTTP server: request logging (except for "/"),
// panic recovery, CORS, the in-memory rate cache, the HTML templates, and all
// route handlers; then serves on port 3000.
func main() {
	e := echo.New()
	// Skip access logging for the documentation index to keep logs useful.
	loggerConfig := middleware.DefaultLoggerConfig
	loggerConfig.Skipper = func(c echo.Context) bool {
		return c.Path() == "/"
	}
	e.Use(middleware.LoggerWithConfig(loggerConfig))
	e.Use(middleware.Recover())
	e.Use(middleware.CORS())
	providers := new(providers)
	providers.cache = cache.New(cacheDuration, cacheDuration)
	routes := new(router)
	routes.providers = providers
	routes.templates = template.Must(template.ParseGlob("*.html"))
	e.GET("/", routes.Index)
	e.GET("/avg", routes.Average)
	e.GET("/poloniex", routes.Poloniex)
	e.GET("/btcaverage", routes.BTCAverage)
	e.GET("/invoice", routes.InvoiceViaCoinTigo)
	// Any other path is treated as a currency selection, e.g. /USD/EUR.
	e.GET("/*", routes.Wildcard)
	e.Renderer = routes
	// Cache pre-warming loops, currently disabled.
	// go func() {
	// 	_, _ = providers.BitcoinaverageRates()
	// 	time.Sleep(cacheDuration)
	// }()
	//
	// go func() {
	// 	_, _ = providers.CryptocompareBTCDASHAverage()
	// 	time.Sleep(cacheDuration)
	// }()
	//
	// go func() {
	// 	_, _ = providers.DashCasaDASHVESRate()
	// 	time.Sleep(cacheDuration)
	// }()
	e.Logger.Fatal(e.Start(":3000"))
}
// router bundles the rate providers and parsed HTML templates behind the
// HTTP handlers defined on it.
type router struct {
	providers *providers
	templates *template.Template
}

// Render implements echo.Renderer by delegating to the parsed templates.
func (r *router) Render(w io.Writer, name string, data interface{}, c echo.Context) error {
	return r.templates.ExecuteTemplate(w, name, data)
}
// The documentation endpoint: renders the API docs page, advertising either
// the HOST env var or the production hostname.
func (r *router) Index(c echo.Context) error {
	host := "https://rates.dash-retail.com"
	if fromEnv := os.Getenv("HOST"); fromEnv != "" {
		host = fromEnv
	}
	return c.Render(http.StatusOK, "apidoc.html", map[string]string{"host": host})
}
// The average BTC/DASH rate from various exchanges according to cryptocompare.com.
func (r *router) Average(c echo.Context) error {
	if rate, err := r.providers.CryptocompareBTCDASHAverage(); err != nil {
		return err
	} else {
		return c.JSON(http.StatusOK, rate)
	}
}
// The average BTC/DASH rate calculated from the last 200 Poloniex trades.
func (r *router) Poloniex(c echo.Context) error {
	if rate, err := r.providers.PoloniexBTCDASHAverage(); err != nil {
		return err
	} else {
		return c.JSON(http.StatusOK, rate)
	}
}
// The current BTC/DASH rate from bitcoinaverage.
func (r *router) BTCAverage(c echo.Context) error {
	if rate, err := r.providers.BitcoinaverageCurrentBTCDASHRate(defaultBackoff); err != nil {
		return err
	} else {
		return c.JSON(http.StatusOK, rate)
	}
}
// InvoiceViaCointext creates an invoice via the get-spark API (the direct
// CoinText API call is kept below, disabled until an API key is available).
// Query params: addr (destination address) and amount (non-zero integer).
func (r *router) InvoiceViaCointext(c echo.Context) error {
	address := c.QueryParam("addr")
	amount, err := strconv.ParseInt(c.QueryParam("amount"), 10, 64)
	if err != nil || amount == 0 {
		return echo.NewHTTPError(http.StatusBadRequest, "Amount param is invalid")
	}
	// TODO: Restore the CoinText api request when we have a key...
	/*
		url := "https://pos-api.cointext.io/create_invoice/"
		_, body, errs := gorequest.New().Post(url).Send(map[string]interface{}{
			"address": address,
			"amount":  amount,
			"network": "dash",
			"api_key": os.Getenv("COINTEXT_API_KEY"),
		}).End()
		if len(errs) > 1 {
			broadcastErr(errs[0])
			return echo.NewHTTPError(http.StatusInternalServerError, "Failed to create CoinText invoice")
		}
		paymentId := gjson.Get(body, "paymentId").String()
		if paymentId == "" {
			err := echo.NewHTTPError(http.StatusInternalServerError, "Failed to determine CoinText paymentId")
			broadcastErr(err)
			return err
		}
	*/
	url := "https://api.get-spark.com/invoice"
	_, body, errs := gorequest.New().Get(url).Param("addr", address).Param("amount", fmt.Sprintf("%d", amount)).End()
	// BUG FIX: gorequest reports failure via a non-empty errs slice; the old
	// check (len(errs) > 1) silently ignored a single error and then used an
	// unusable body.
	if len(errs) > 0 {
		broadcastErr(errs[0])
		return echo.NewHTTPError(http.StatusInternalServerError, "Failed to create CoinText invoice")
	}
	// fmt.Printf("%s - New invoice to %s for %d", c.RealIP(), address, amount)
	c.Logger().Printj(map[string]interface{}{
		"message":   "invoice",
		"remote_ip": c.RealIP(),
		"address":   address,
		"amount":    amount,
	})
	// The upstream response is a bare quoted string; strip the quotes.
	rsp := strings.Replace(body, `"`, "", -1)
	return c.JSON(http.StatusOK, rsp)
}
// InvoiceViaCoinTigo creates an invoice via the CoinTigo API.
// Query params: addr (destination address) and amount (non-zero integer).
func (r *router) InvoiceViaCoinTigo(c echo.Context) error {
	url := "https://ctgoapi.ngrok.io/cointigo"
	address := c.QueryParam("addr")
	amount, err := strconv.ParseInt(c.QueryParam("amount"), 10, 64)
	if err != nil || amount == 0 {
		return echo.NewHTTPError(http.StatusBadRequest, "Amount param is invalid")
	}
	_, body, errs := gorequest.New().Post(url).Send(map[string]interface{}{
		"coin":    "DASH",
		"user":    "DaSh.OrG",
		"method":  "create_invoice",
		"address": address,
		"amount":  amount,
	}).End()
	// BUG FIX: gorequest reports failure via a non-empty errs slice; the old
	// check (len(errs) > 1) silently ignored a single error. Also fixed the
	// copy-pasted "CoinText" wording in the error message.
	if len(errs) > 0 {
		broadcastErr(errs[0])
		return echo.NewHTTPError(http.StatusInternalServerError, "Failed to create CoinTigo invoice")
	}
	// fmt.Printf("%s - New invoice to %s for %d", c.RealIP(), address, amount)
	c.Logger().Printj(map[string]interface{}{
		"message":   "invoice",
		"remote_ip": c.RealIP(),
		"address":   address,
		"amount":    amount,
	})
	rsp := gjson.Parse(body).Get("invoice").String()
	return c.JSON(http.StatusOK, rsp)
}
// currencyPathRe validates a path of slash-separated 3-letter currency codes.
// Compiled once at package scope instead of per request.
var currencyPathRe = regexp.MustCompile(`^(/?[A-Z]{3})*$`)

// The BTC rates from BitcoinAverage converted into DASH rates.
// The URL path selects currencies (e.g. /USD/EUR); a path starting with
// "LIST" returns every supported currency.
func (r *router) Wildcard(c echo.Context) error {
	s := c.Request().URL.Path
	s = strings.TrimPrefix(s, "/")
	s = strings.TrimSuffix(s, "/")
	s = strings.ToUpper(s)
	var selectedCurrencies []string
	// Must remain sorted: sort.SearchStrings below depends on it.
	supportedCurrencies := []string{
		"AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", "BAM", "BBD", "BDT", "BGN",
		"BHD", "BIF", "BMD", "BND", "BOB", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", "CAD", "CDF", "CHF", "CLF", "CLP",
		"CNH", "CNY", "COP", "CRC", "CUC", "CUP", "CVE", "CZK", "DJF", "DKK", "DOP", "DZD", "EGP", "ERN", "ETB", "EUR",
		"FJD", "FKP", "GBP", "GEL", "GGP", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", "HKD", "HNL", "HRK", "HTG", "HUF",
		"IDR", "ILS", "IMP", "INR", "IQD", "IRR", "ISK", "JEP", "JMD", "JOD", "JPY", "KES", "KGS", "KHR", "KMF", "KPW",
		"KRW", "KWD", "KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", "MAD", "MDL", "MGA", "MKD", "MMK", "MNT",
		"MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MYR", "MZN", "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", "OMR", "PAB",
		"PEN", "PGK", "PHP", "PKR", "PLN", "PYG", "QAR", "RON", "RSD", "RUB", "RWF", "SAR", "SBD", "SCR", "SDG", "SEK",
		"SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL", "THB", "TJS", "TMT", "TND", "TOP", "TRY",
		"TTD", "TWD", "TZS", "UAH", "UGX", "USD", "UYU", "UZS", "VES", "VND", "VUV", "WST", "XAF", "XAG", "XAU", "XCD",
		"XDR", "XOF", "XPD", "XPF", "XPT", "YER", "ZAR", "ZMW", "ZWL",
	}
	if !strings.HasPrefix(s, "LIST") {
		if !currencyPathRe.MatchString(s) {
			return echo.NewHTTPError(http.StatusBadRequest, "Malformed currency selection in url")
		}
		selectedCurrencies = strings.Split(s, "/")
		for _, selectedCurrency := range selectedCurrencies {
			i := sort.SearchStrings(supportedCurrencies, selectedCurrency)
			// BUG FIX: SearchStrings returns len(slice) when the value sorts
			// after every entry (e.g. "ZZZ"), which previously caused an
			// index-out-of-range panic here.
			if i >= len(supportedCurrencies) || supportedCurrencies[i] != selectedCurrency {
				return echo.NewHTTPError(http.StatusBadRequest, "Unsupported currency selection in url")
			}
		}
	} else {
		selectedCurrencies = supportedCurrencies
	}
	btcRates, err := r.providers.BitcoinaverageRates(defaultBackoff)
	if err != nil {
		return err
	}
	btcDashRate, err := r.providers.CryptocompareBTCDASHAverage()
	if err != nil {
		return err
	}
	// Fall back to BitcoinAverage when CryptoCompare reports a zero rate.
	if btcDashRate == 0 {
		btcDashRate, err = r.providers.BitcoinaverageCurrentBTCDASHRate(defaultBackoff)
		if err != nil {
			return err
		}
	}
	rates := make(map[string]float64)
	for _, currency := range selectedCurrencies {
		if currency == "VES" {
			// VES has no reliable BitcoinAverage quote; derive it from the
			// Localbitcoins BTC/VES rate instead.
			btcVesRate, err := r.providers.LocalbitcoinsBTCVESRate()
			if err != nil {
				return err
			}
			rates["VES"] = btcDashRate * btcVesRate
		} else {
			rates[currency] = btcRates[currency] * btcDashRate
		}
	}
	c.Logger().Printj(map[string]interface{}{
		"message":   "rates",
		"remote_ip": c.RealIP(),
		"rates":     rates,
	})
	return c.JSON(http.StatusOK, rates)
}
// providers fetches exchange rates from upstream services, memoizing each
// URL's result in an expiring in-memory cache.
type providers struct {
	cache *cache.Cache
}
// CryptocompareBTCDASHAverage returns the DASH/BTC rate averaged across
// several exchanges as reported by CryptoCompare, cached for cacheDuration.
func (p *providers) CryptocompareBTCDASHAverage() (rate float64, err error) {
	url := "https://min-api.cryptocompare.com/data/generateAvg?fsym=DASH&tsym=BTC&e=Binance,Kraken,Poloniex,Bitfinex"
	if rateI, found := p.cache.Get(url); found {
		rate, _ = rateI.(float64)
		return
	}
	fmt.Println("Recaching CryptocompareBTCDASHAverage")
	rsp, body, errs := gorequest.New().
		Get(url).
		Retry(3, 5*time.Second, http.StatusBadRequest, http.StatusInternalServerError, http.StatusServiceUnavailable).
		End()
	// BUG FIX: gorequest reports failure via a non-empty errs slice; the old
	// check (len(errs) > 1) ignored a single error and then dereferenced a
	// nil response below.
	if len(errs) > 0 {
		broadcastErr(errs[0])
		err = echo.NewHTTPError(http.StatusInternalServerError, "Failed to fetch BTCDASH rate from CryptoCompare")
		return
	}
	if rsp.StatusCode != http.StatusOK {
		err = echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("CryptoCompare returned bad status code: %d", rsp.StatusCode))
		broadcastErr(err)
		return
	}
	// The [1:] strips a leading character from RAW.PRICE — presumably a
	// currency-symbol prefix. NOTE(review): confirm against the live payload;
	// a multi-byte symbol would only be partially stripped.
	// BUG FIX: ParseFloat's bitSize must be 32 or 64 (was 10), and a parse
	// failure must not be cached.
	rate, err = strconv.ParseFloat(gjson.Get(body, "RAW.PRICE").String()[1:], 64)
	if err != nil {
		broadcastErr(err)
		return
	}
	p.cache.SetDefault(url, rate)
	return
}
// PoloniexBTCDASHAverage returns the mean BTC/DASH rate over the recent
// Poloniex trade history, cached for cacheDuration.
func (p *providers) PoloniexBTCDASHAverage() (rate float64, err error) {
	// BUG FIX: the query string contained the mojibake "¤cyPair" — the
	// "&curr" of "&currencyPair" had been mangled into the ¤ HTML
	// entity's character — so the currency-pair parameter was never sent.
	url := "https://poloniex.com/public?command=returnTradeHistory&currencyPair=BTC_DASH"
	if rateI, found := p.cache.Get(url); found {
		rate, _ = rateI.(float64)
		return
	}
	fmt.Println("Recaching PoloniexBTCDASHAverage")
	rsp, body, errs := gorequest.New().
		Get(url).
		Retry(3, 5*time.Second, http.StatusBadRequest, http.StatusInternalServerError, http.StatusServiceUnavailable).
		End()
	// BUG FIX: gorequest reports failure via a non-empty errs slice; the old
	// check (len(errs) > 1) ignored a single error.
	if len(errs) > 0 {
		broadcastErr(errs[0])
		err = echo.NewHTTPError(http.StatusInternalServerError, "Failed to fetch BTCDASH average rate from Poloniex")
		return
	}
	if rsp.StatusCode != http.StatusOK {
		err = echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Poloniex returned bad status code: %d", rsp.StatusCode))
		broadcastErr(err)
		return
	}
	rateTotal := 0.0
	rateCount := 0
	gjson.Get(body, "#.rate").ForEach(func(key, value gjson.Result) bool {
		rateTotal += value.Float()
		rateCount += 1
		return true
	})
	// BUG FIX: an empty trade list previously produced 0/0 = NaN, which was
	// cached and later broke JSON encoding of the response.
	if rateCount == 0 {
		err = echo.NewHTTPError(http.StatusInternalServerError, "Poloniex returned no trades")
		broadcastErr(err)
		return
	}
	rate = rateTotal / float64(rateCount)
	p.cache.SetDefault(url, rate)
	return
}
// BitcoinaverageCurrentBTCDASHRate returns the current DASH/BTC rate from
// BitcoinAverage, cached for cacheDuration. On HTTP 429 it waits for the
// given backoff and retries with the backoff grown by 1.3x (capped at
// maxBackoff).
func (p *providers) BitcoinaverageCurrentBTCDASHRate(backoff time.Duration) (rate float64, err error) {
	url := "https://apiv2.bitcoinaverage.com/indices/crypto/ticker/DASHBTC"
	if rateI, found := p.cache.Get(url); found {
		rate, _ = rateI.(float64)
		return
	}
	fmt.Println("Recaching BitcoinaverageCurrentBTCDASHRate")
	rsp, body, errs := gorequest.New().
		Get(url).
		Retry(3, 5*time.Second, http.StatusBadRequest, http.StatusInternalServerError, http.StatusServiceUnavailable).
		End()
	// BUG FIX: gorequest reports failure via a non-empty errs slice; the old
	// check (len(errs) > 1) ignored a single error and then dereferenced a
	// nil response below.
	if len(errs) > 0 {
		broadcastErr(errs[0])
		err = echo.NewHTTPError(http.StatusInternalServerError, "Failed to fetch BTCDASH rate from BitcoinAverage")
		return
	}
	if rsp.StatusCode == http.StatusTooManyRequests {
		nextBackoff := time.Duration(float64(backoff.Nanoseconds()) * 1.3)
		if nextBackoff > maxBackoff {
			nextBackoff = maxBackoff
		}
		// BUG FIX: actually wait before retrying; previously the backoff
		// value was grown but never slept on, producing a tight retry loop
		// while rate-limited.
		time.Sleep(backoff)
		return p.BitcoinaverageCurrentBTCDASHRate(nextBackoff)
	}
	if rsp.StatusCode != http.StatusOK {
		err = echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("BitcoinAverage BTCDASH returned bad status code: %d", rsp.StatusCode))
		broadcastErr(err)
		return
	}
	rate = gjson.Get(body, "last").Float()
	p.cache.SetDefault(url, rate)
	return
}
// BitcoinaverageRates returns a map of currency code -> last BTC rate from
// BitcoinAverage's global short ticker (keys arrive as "BTCXXX"; the "BTC"
// prefix is stripped). Results are cached under the request URL. On HTTP 429
// it retries recursively with a growing backoff capped at maxBackoff.
func (p *providers) BitcoinaverageRates(backoff time.Duration) (rates map[string]float64, err error) {
	url := "https://apiv2.bitcoinaverage.com/indices/global/ticker/short?crypto=BTC"
	ratesI, found := p.cache.Get(url)
	if found {
		rates, _ = ratesI.(map[string]float64)
		return
	}
	fmt.Println("Recaching BitcoinaverageRates")
	rates = make(map[string]float64)
	rsp, body, errs := gorequest.New().
		Get(url).
		Retry(3, 5*time.Second, http.StatusBadRequest, http.StatusInternalServerError, http.StatusServiceUnavailable).
		End()
	// BUG FIX: was `len(errs) > 1`, which let a single transport error fall
	// through and dereference a nil rsp below. Any error must bail out.
	if len(errs) > 0 {
		broadcastErr(errs[0])
		err = echo.NewHTTPError(http.StatusInternalServerError, "Failed to fetch rates from BitcoinAverage")
		return
	}
	if rsp.StatusCode == http.StatusTooManyRequests {
		// Rate limited: grow the backoff by 30%, cap it, and retry.
		nextBackoff := time.Duration(float64(backoff.Nanoseconds()) * 1.3)
		if nextBackoff > maxBackoff {
			nextBackoff = maxBackoff
		}
		return p.BitcoinaverageRates(nextBackoff)
	}
	if rsp.StatusCode != http.StatusOK {
		err = echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("BitcoinAverage returned bad status code: %d", rsp.StatusCode))
		broadcastErr(err)
		return
	}
	gjson.Parse(body).ForEach(func(key, value gjson.Result) bool {
		// Keys look like "BTCUSD"; index from 3 drops the "BTC" prefix.
		rates[key.String()[3:]] = value.Get("last").Float()
		return true
	})
	p.cache.SetDefault(url, rates)
	return
}
// LocalbitcoinsBTCVESRate returns the last BTC/VES rate from Localbitcoins'
// all-currencies ticker, caching the value under the request URL.
func (p *providers) LocalbitcoinsBTCVESRate() (rate float64, err error) {
	url := "https://localbitcoins.com/bitcoinaverage/ticker-all-currencies/"
	rateI, found := p.cache.Get(url)
	if found {
		rate, _ = rateI.(float64)
		return
	}
	fmt.Println("Recaching LocalbitcoinsBTCVESRate")
	rsp, body, errs := gorequest.New().
		Get(url).
		Retry(3, 5*time.Second, http.StatusBadRequest, http.StatusInternalServerError, http.StatusServiceUnavailable).
		End()
	// BUG FIX: was `len(errs) > 1`, which let a single transport error fall
	// through and dereference a nil rsp below. Any error must bail out.
	if len(errs) > 0 {
		broadcastErr(errs[0])
		err = echo.NewHTTPError(http.StatusInternalServerError, "Failed to fetch BTCVES rate from Localbitcoins")
		return
	}
	if rsp.StatusCode != http.StatusOK {
		err = echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Localbitcoins returned bad status code: %d", rsp.StatusCode))
		broadcastErr(err)
		return
	}
	rate = gjson.Get(body, "VES.rates.last").Float()
	// Removed leftover debug print (`fmt.Println("LAST", rate)`) — no other
	// provider logs the fetched value.
	p.cache.SetDefault(url, rate)
	return
}
// broadcastErr posts err to the Discord webhook named by DISCORD_WEBHOOK_URL,
// if set. The webhook call is best-effort: its own errors are ignored.
func broadcastErr(err error) {
	webhookUrl := os.Getenv("DISCORD_WEBHOOK_URL")
	if webhookUrl == "" {
		return
	}
	// BUG FIX: the error message was concatenated into the JSON payload
	// verbatim, so any quote, backslash or newline in err.Error() produced an
	// invalid document Discord would reject. %q emits a quoted, escaped
	// string, which is JSON-compatible for typical error text.
	jsn := fmt.Sprintf(`
{
	"username": "Dash Rates API",
	"embeds": [
		{
			"title": "ERROR",
			"description": %q,
			"color": 15340307
		}
	]
}
`, err.Error())
	gorequest.
		New().
		AppendHeader("content-type", "application/json").
		Post(webhookUrl).
		Send(jsn).
		End()
}
|
package pool
import (
"testing"
"time"
)
// Test_queueImpl_put verifies that putting a worker on the queue grows both
// the underlying slice and the reported length.
func Test_queueImpl_put(t *testing.T) {
	t.Run("Put on Queue", func(t *testing.T) {
		q := queueImpl{}
		q.put(&workerImpl{
			id:       "1",
			queuedAt: time.Now(),
			run: func() error {
				return func(id string) error {
					return nil
				}("1")
			},
		})
		// BUG FIX: corrected typos in the failure messages
		// ("unexpeced", "dont looks like", "lenth").
		if len(q.queued) != 1 {
			t.Errorf("unexpected size of queue: %d", len(q.queued))
		}
		if q.len() != 1 {
			t.Errorf("queue length did not increase after put, got: %d", q.len())
		}
	})
}
// Test_queueImpl_Get verifies that a queued worker can be retrieved by get,
// and that get on an emptied queue returns nil.
func Test_queueImpl_Get(t *testing.T) {
	t.Run("Get from Queue", func(t *testing.T) {
		q := queueImpl{}
		q.put(&workerImpl{
			id:       "1",
			queuedAt: time.Now(),
			run: func() error {
				return func(id string) error {
					return nil
				}("1")
			},
		})
		// BUG FIX: corrected typos in the failure messages
		// ("unexpeced", "dont looks like", "lenth").
		if len(q.queued) != 1 {
			t.Errorf("unexpected size of queue: %d", len(q.queued))
		}
		if q.len() != 1 {
			t.Errorf("queue length did not increase after put, got: %d", q.len())
		}
		count := 0
		w1 := q.get(&count)
		if w1.ID() != "1" {
			t.Errorf("unexpected id as received: %s", w1.ID())
		}
		// The queue should now be empty; a second get must yield nil.
		w2 := q.get(nil)
		if w2 != nil {
			t.Errorf("unexpected worker from queue, it was supposed to be empty but got: %+v", w2)
		}
	})
}
|
/*
* This file is part of impacca. Copyright (C) 2013 and above Shogun <shogun@cowtech.it>.
* Licensed under the MIT license, which can be found at https://choosealicense.com/licenses/mit.
*/
package utils
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
"github.com/Masterminds/semver"
"github.com/ShogunPanda/impacca/configuration"
)
// commitVersioning optionally commits all pending changes and/or (force-)tags
// the repository as v<version>, honoring dry-run mode via NotifyExecution.
func commitVersioning(version *semver.Version, commit, tag, dryRun bool) {
	versionString := version.String()
	message := strings.TrimSpace(fmt.Sprintf(configuration.Current.CommitMessages.Versioning, versionString))

	if commit && NotifyExecution(dryRun, "Will execute", "Executing", ": {primary}git commit --all --message=\"%s\"{-} ...", message) {
		commitResult := Execute(true, "git", "commit", "--all", "--message="+message)
		commitResult.Verify("git", "Cannot commit version change")
	}

	if tag && NotifyExecution(dryRun, "Will execute", "Executing", ": {primary}git tag -f v%s{-} ...", versionString) {
		tagResult := Execute(true, "git", "tag", "--force", "v"+versionString)
		tagResult.Verify("git", "Cannot tag GIT version")
	}
}
// GetVersions return all current GIT versions.
// It lists every GIT tag, keeps those matching versionMatcher, parses them as
// semantic versions (skipping unparsable ones with a warning) and returns the
// collection sorted ascending.
func GetVersions() semver.Collection {
	result := Execute(false, "git", "tag")
	result.Verify("git", "Cannot list GIT tags")

	var versions semver.Collection
	tags := strings.Split(strings.TrimSpace(result.Stdout), "\n")
	for _, tag := range tags {
		if !versionMatcher.MatchString(tag) {
			continue
		}
		parsed, err := semver.NewVersion(versionMatcher.ReplaceAllString(tag, ""))
		if err != nil {
			Fail("Cannot parse GIT tag {errorPrimary}%s{-} as a version, will skip it: {errorPrimary}%s{-}", tag, err.Error())
			continue
		}
		versions = append(versions, parsed)
	}

	sort.Sort(versions)
	return versions
}
// GetCurrentVersion return the current version.
// It is the highest tagged version, or 0.0.0 when no version tags exist yet.
func GetCurrentVersion() *semver.Version {
	versions := GetVersions()
	if len(versions) > 0 {
		return versions[len(versions)-1]
	}
	zero, _ := semver.NewVersion("0.0.0")
	return zero
}
// GetVersionDate return the date of a version.
// It reads the author date (ISO-8601) of the commit tagged v<version>.
func GetVersionDate(version *semver.Version) time.Time {
	// BUG FIX: "-n 1" was passed as a single argv element; pass the flag and
	// its value as separate arguments so git reliably limits the log to one
	// commit.
	result := Execute(false, "git", "log", "--format=%aI", "-n", "1", fmt.Sprintf("v%s", version.String()))
	result.Verify("git", "Cannot list GIT commits date")

	date, err := time.Parse(time.RFC3339, strings.TrimSpace(result.Stdout))
	if err != nil {
		Fatal("Cannot parse git commit date: {errorPrimary}%s{-}", err.Error())
	}
	return date
}
// ChangeVersion changes the current version.
// change is either one of "patch"/"minor"/"major" (incrementing the given
// version accordingly) or an explicit version string, which is parsed as-is.
func ChangeVersion(version *semver.Version, change string) *semver.Version {
	var next semver.Version
	switch change {
	case "patch":
		next = version.IncPatch()
	case "minor":
		next = version.IncMinor()
	case "major":
		next = version.IncMajor()
	default:
		parsed, err := semver.NewVersion(change)
		if err != nil {
			Fatal("Cannot parse {errorPrimary}%s{-} as a version: {errorPrimary}%s{-}", change, err.Error())
		}
		return parsed
	}
	return &next
}
// UpdateVersion updates the current version.
// It dispatches to the flow matching the detected package manager, always
// committing and tagging.
func UpdateVersion(newVersion, currentVersion *semver.Version, dryRun bool) {
	manager := DetectPackageManager()
	if manager == NpmPackageManager {
		UpdateNpmVersion(newVersion, currentVersion, true, true, dryRun)
		return
	}
	if manager == GemPackageManager {
		UpdateGemVersion(newVersion, currentVersion, true, true, dryRun)
		return
	}
	UpdatePlainVersion(newVersion, currentVersion, true, true, dryRun)
}
// UpdateNpmVersion updates the current version using NPM.
// The commit message template is passed unformatted — presumably npm expands
// its own %s placeholder with the version (TODO confirm).
// NOTE(review): commit, tag and currentVersion are unused here; `npm version`
// commits and tags by itself.
func UpdateNpmVersion(newVersion, currentVersion *semver.Version, commit, tag, dryRun bool) {
	versionString := newVersion.String()
	message := strings.TrimSpace(configuration.Current.CommitMessages.Versioning)

	if NotifyExecution(dryRun, "Will execute", "Executing", ": {primary}npm version %s --message=%s{-} ...", versionString, message) {
		result := Execute(true, "npm", "version", versionString, "--message="+message)
		result.Verify("npm", "Cannot update NPM version")
	}
}
// UpdateGemVersion updates the current version by manipulating the version file.
// It locates the single */*/version.rb under the working directory, rewrites
// its MAJOR/MINOR/PATCH constants, then commits/tags via commitVersioning.
func UpdateGemVersion(newVersion, currentVersion *semver.Version, commit, tag, dryRun bool) {
	cwd, _ := os.Getwd()
	files, _ := filepath.Glob(filepath.Join(cwd, "*/*/version.rb"))
	if len(files) != 1 {
		Fatal("Found no or more than one possible gem version files.")
	}

	// Read the single candidate version file.
	versionFile := files[0]
	rawContents, readErr := ioutil.ReadFile(versionFile)
	if readErr != nil {
		Fatal("Cannot read gem version file {errorPrimary}%s{-}: {errorPrimary}%s{-}", versionFile, readErr.Error())
	}

	if !dryRun {
		contents := string(rawContents)

		// Rewrite each component constant in place, preserving indentation
		// ($1 captures the leading whitespace plus the constant name).
		contents = regexp.MustCompile(`(?m)^(?:(\s*MAJOR)\s*=\s*\d+)$`).ReplaceAllString(contents, fmt.Sprintf("$1 = %d", newVersion.Major()))
		contents = regexp.MustCompile(`(?m)^(?:(\s*MINOR)\s*=\s*\d+)$`).ReplaceAllString(contents, fmt.Sprintf("$1 = %d", newVersion.Minor()))
		contents = regexp.MustCompile(`(?m)^(?:(\s*PATCH)\s*=\s*\d+)$`).ReplaceAllString(contents, fmt.Sprintf("$1 = %d", newVersion.Patch()))

		if writeErr := ioutil.WriteFile(versionFile, []byte(contents), 0644); writeErr != nil {
			Fatal("Cannot update gem version file {errorPrimary}%s{-}: {errorPrimary}%s{-}", versionFile, writeErr.Error())
		}
	}

	commitVersioning(newVersion, commit, tag, dryRun)
}
// UpdatePlainVersion updates the current version according to a plain managament.
// If an executable Impaccafile exists in the working directory it is invoked
// with (newVersion, currentVersion) and its changes are optionally committed
// here; commitVersioning is then called with commit=false (the commit, when
// requested, has already happened) to apply the tag.
func UpdatePlainVersion(newVersion, currentVersion *semver.Version, commit, tag, dryRun bool) {
	versionString := newVersion.String()
	versionMessage := strings.TrimSpace(fmt.Sprintf(configuration.Current.CommitMessages.Versioning, versionString))
	cwd, _ := os.Getwd()

	stat, err := os.Stat(filepath.Join(cwd, "Impaccafile"))
	// Run only when Impaccafile exists, is not a directory and is executable.
	if err == nil && !stat.IsDir() && stat.Mode()&0111 != 0 {
		if NotifyExecution(dryRun, "Will execute", "Executing", ": {primary}./Impaccafile %s %s{-} ...", newVersion, currentVersion) {
			result := Execute(true, filepath.Join(cwd, "Impaccafile"), versionString, currentVersion.String())
			// BUG FIX: labels were swapped — this step runs the Impaccafile.
			result.Verify("Impaccafile", "Cannot execute the Impaccafile")
		}
		if commit {
			if NotifyExecution(dryRun, "Will execute", "Executing", ": {primary}git commit --all --message \"%s\"{-} ...", versionMessage) {
				result := Execute(true, "git", "commit", "--all", fmt.Sprintf("--message=%s", versionMessage))
				// BUG FIX: ... and this step is a git commit, not the Impaccafile.
				result.Verify("git", "Cannot commit Impaccafile changes")
			}
		}
	}

	commitVersioning(newVersion, false, tag, dryRun)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.