text
stringlengths 11
4.05M
|
|---|
/*
Given a pattern and a string s, find if s follows the same pattern.
Here follow means a full match, such that there is a bijection between a letter in pattern and a non-empty word in s.
Example 1:
Input: pattern = "abba", s = "dog cat cat dog"
Output: true
Example 2:
Input: pattern = "abba", s = "dog cat cat fish"
Output: false
Example 3:
Input: pattern = "aaaa", s = "dog cat cat dog"
Output: false
Constraints:
1 <= pattern.length <= 300
pattern contains only lower-case English letters.
1 <= s.length <= 3000
s contains only lowercase English letters and spaces ' '.
s does not contain any leading or trailing spaces.
All the words in s are separated by a single space.
*/
package main
import (
"reflect"
"strings"
)
// main exercises wordpattern with the sample cases from the problem
// statement plus a repeated-word case.
func main() {
	cases := []struct {
		pattern string
		s       string
		want    bool
	}{
		{"abba", "dog cat cat dog", true},
		{"abba", "dog cat cat fish", false},
		{"aaaa", "dog cat cat dog", false},
		{"aaaa", "dog dog dog dog", true},
	}
	for _, c := range cases {
		assert(wordpattern(c.pattern, c.s) == c.want)
	}
}
// assert panics when x is false; minimal test helper for main.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// wordpattern reports whether s follows pattern p, i.e. whether there is a
// bijection between pattern letters and the space-separated words of s.
//
// Each sequence is reduced to a canonical form (element -> index of first
// appearance) and the two forms are compared. The previous implementation
// compared first-seen *frequency counts*, which loses ordering: "abba" vs
// "dog dog cat cat" share counts {2, 2} but do not match.
func wordpattern(p, s string) bool {
	letters := []rune(p)
	words := strings.Split(s, " ")
	// Unequal lengths yield canonical slices of different lengths -> false.
	return reflect.DeepEqual(canonicalForm(letters), canonicalForm(words))
}

// canonicalForm maps every element to the order in which its value first
// appeared, e.g. "abba" -> [0 1 1 0] and {dog cat cat dog} -> [0 1 1 0].
func canonicalForm[T rune | string](a []T) []int {
	firstSeen := make(map[T]int)
	out := make([]int, 0, len(a))
	for _, v := range a {
		if _, ok := firstSeen[v]; !ok {
			firstSeen[v] = len(firstSeen)
		}
		out = append(out, firstSeen[v])
	}
	return out
}
func count[T rune | string](a []T) map[int]int {
h := make(map[int]int)
m := make(map[T]int)
i := 0
for _, v := range a {
if _, ok := m[v]; !ok {
m[v], i = i, i+1
}
h[m[v]]++
}
return h
}
|
package api
// RobotMessage is a single robot log entry, mapped both to JSON (API
// payloads) and to a sqlite table via the custom `sqlite` struct tags.
type RobotMessage struct {
	ID      int    `json:"id" sqlite:"id,INTEGER PRIMARY KEY autoincrement"` // message ID, auto-incremented
	RobotID string `json:"robotId" sqlite:"robotId,VARCHAR(64)"`             // robot ID
	Process string `json:"process" sqlite:"process,VARCHAR(64)"`             // robot process name
	Level   string `json:"level" sqlite:"level,VARCHAR(16)"`                 // log level
	Ct      int64  `json:"ct" sqlite:"ct,INTEGER"`                           // log timestamp
	Content string `json:"content" sqlite:"content,TEXT"`                    // log message body
	Pulled  bool   `json:"-" sqlite:"pulled,BOOLEAN"`                        // whether the server has already pulled this msg (hidden from JSON)
}
|
// hIndex returns the h-index of the given citation counts: the largest h
// such that at least h papers have at least h citations each.
// Note: sorts citations in place (ascending).
func hIndex(citations []int) int {
	sort.Ints(citations)
	total := len(citations)
	for rank, cites := range citations {
		// every paper from this position onward has >= cites citations
		if papersLeft := total - rank; papersLeft <= cites {
			return papersLeft
		}
	}
	return 0
}
|
package base
import (
"errors"
"fmt"
"gengine/context"
"reflect"
)
// RuleEntity is one parsed rule: its metadata (name, priority, description),
// the executable rule body, and a per-execution variable scope.
type RuleEntity struct {
	RuleName        string
	Salience        int64 // rule priority set via AcceptInteger
	RuleDescription string
	RuleContent     *RuleContent // executable body; run by Execute
	Vars            map[string]reflect.Value //belongs to current rule,rule execute finish, it will be clear
}
// AcceptString fills the rule's string slots in parse order: the first
// string received becomes the rule name, the second the description.
// A third string is rejected because both slots are already set.
func (r *RuleEntity) AcceptString(s string) error {
	if r.RuleName == "" {
		r.RuleName = s
		return nil
	}
	if r.RuleDescription == "" {
		r.RuleDescription = s
		return nil
	}
	// NOTE(review): errors.New(fmt.Sprintf(...)) is the staticcheck S1028
	// smell; fmt.Errorf would be idiomatic, but both imports are used only
	// on this line, so swapping would orphan one of them.
	return errors.New(fmt.Sprintf("value = %s set twice!", s))
}
// AcceptInteger sets the rule's salience (priority) from a parsed integer.
// Unlike AcceptString, repeated calls overwrite the previous value.
func (r *RuleEntity) AcceptInteger(val int64) error {
	r.Salience = val
	return nil
}
/*func (r *RuleEntity) Initialize(dc *context.DataContext) {
r.dataCtx = dc
if r.RuleContent != nil {
r.RuleContent.Initialize(dc)
}
}
*/
// Execute runs the rule body against the supplied data context.
// It returns the rule's result value (nil when the body yields no value),
// any execution error, and the bool flag propagated unchanged from
// RuleContent.Execute (semantics owned by RuleContent — confirm there).
// Vars is reset before and cleared after every run so entities are reusable.
func (r *RuleEntity) Execute(dc *context.DataContext) (interface{}, error, bool) {
	r.Vars = make(map[string]reflect.Value)
	defer r.clearMap()
	v, e, b := r.RuleContent.Execute(dc, r.Vars)
	// A zero reflect.Value means "no result". IsValid is the idiomatic test;
	// the old `v == reflect.ValueOf(nil)` relied on struct comparison of
	// reflect.Value, which is discouraged.
	if !v.IsValid() {
		return nil, e, b
	}
	return v.Interface(), e, b
}
// clearMap drops the per-execution variable scope so values from one run
// cannot leak into the next; deferred by Execute.
func (r *RuleEntity) clearMap() {
	r.Vars = make(map[string]reflect.Value)
}
|
package mail_relay
import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
	"time"

	log "github.com/sirupsen/logrus"

	apis "texas_real_foods/pkg/utils/api_accessors"
)
var (
	// generate new event channel to process requests
	// NOTE(review): not referenced in this file — presumably consumed by a
	// worker elsewhere in the package; confirm before removing.
	eventChannel = make(chan MailRelayRequest)
	// custom sentinel errors returned by the relay and zipcode lookups
	ErrInvalidZipCodeResponse = errors.New("Received invalid response from zipcode API")
	ErrInvalidAPIResponse = errors.New("Received invalid API response")
	ErrZipCodeNotFound = errors.New("Cannot find zipcode entry in API")
	ErrUnauthorized = errors.New("Received unauthorized response from API")
	ErrInvalidJSONResponse = errors.New("Received invalid JSON response from zipcode API")
	ErrRequestLimitReached = errors.New("Reached request limit on API")
)
// TriggerMailChimpAsync runs TriggerMailChimp and records the outcome
// ("completed"/"failed") against the originating mail entry.
// NOTE(review): `persistence` is not in this file's visible import block —
// confirm it resolves in the full file.
func TriggerMailChimpAsync(request MailRelayRequest, data apis.ZipCodeData) {
	if err := TriggerMailChimp(request, data); err != nil {
		// insert failed message into event log
		persistence.UpdateMailEntry(request.EntryId, "failed", false)
	} else {
		// insert success message into event log
		persistence.UpdateMailEntry(request.EntryId, "completed", true)
	}
}
// TriggerMailChimp relays a sign-up request to the MailChimp API by adding
// the requester as a subscribed member of the configured list.
// Returns nil on a 200 response; transport errors are returned as-is and any
// non-200 response is logged and reported as ErrInvalidAPIResponse.
func TriggerMailChimp(request MailRelayRequest, data apis.ZipCodeData) error {
	log.Info(fmt.Sprintf("relaying request %+v", request))
	// "add list member" payload; merge fields carry the user profile plus
	// region data resolved from the zip code
	mailChimpRequest := map[string]interface{}{
		"email_address": request.Email,
		"status":        "subscribed",
		"email_type":    "html",
		"merge_fields": map[string]string{
			"FNAME": request.FirstName,
			"LNAME": request.LastName,
			"ZIP":   data.ZipCode,
			"CB":    "",
			"DATEB": request.DateOfBirth,
			// stored without the " County" suffix
			"COUNTY": strings.Replace(data.County, " County", "", -1),
			"METRO":  "",
			"ECON":   data.EconomicRegion,
		},
		"vip":      false,
		"language": "en",
		"tags":     []string{},
		"source":   "API - Mail Relay",
	}
	log.Debug(fmt.Sprintf("making request to mail chimp server with body %+v", mailChimpRequest))
	// convert request to JSON format and add to request
	jsonRequest, err := json.Marshal(mailChimpRequest)
	if err != nil {
		log.Error(fmt.Errorf("unable to convert mail chimp request to JSON format: %+v", err))
		return err
	}
	// generate url based on list ID and base API url
	url := fmt.Sprintf("%s/list/%s/members", mailChimpConfig.APIUrl, mailChimpConfig.ListID)
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonRequest))
	if err != nil {
		log.Error(fmt.Errorf("unable to trigger mail chimp request: %+v", err))
		return err
	}
	// basic auth on request; the username value appears unused by the API —
	// TODO confirm
	req.SetBasicAuth("psauerborn", mailChimpConfig.APIKey)
	// bound the request so a stalled MailChimp call cannot hang the relay
	// (the previous zero-value client had no timeout at all)
	client := http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		log.Error(fmt.Errorf("unable to trigger mail chimp request: %+v", err))
		return err
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusOK:
		log.Info("successfully triggered mail chimp request")
		return nil
	default:
		body, _ := ioutil.ReadAll(resp.Body)
		log.Error(fmt.Errorf("unable to trigger mail chimp request: received response %s", string(body)))
		return ErrInvalidAPIResponse
	}
}
// GetZipCodeData fetches zip code metadata (county, economic region, etc.)
// from the utils API. On error the zero-value ZipCodeData is returned along
// with the accessor error.
func GetZipCodeData(zipcode string) (apis.ZipCodeData, error) {
	log.Info(fmt.Sprintf("requesting zip code data for code '%s'", zipcode))
	accessor := apis.NewUtilsAPIAccessorFromConfig(utilsAPIConfig)
	// single declaration (the old var + := pair was redundant)
	data, err := accessor.GetZipCodeData(zipcode)
	if err != nil {
		log.Error(fmt.Errorf("unable to retrieve zip code data for code %s: %+v", zipcode, err))
		return data, err
	}
	log.Debug(fmt.Sprintf("successfully retrieved zip code data %+v", data))
	return data, nil
}
|
package perfect
import (
"errors"
)
// Classification labels a natural number by comparing it with its aliquot
// sum (the sum of its proper divisors).
type Classification string

const (
	// ClassificationDeficient: aliquot sum < n.
	ClassificationDeficient Classification = "Deficient"
	// ClassificationAbundant: aliquot sum > n.
	ClassificationAbundant Classification = "Abundant"
	// ClassificationPerfect: aliquot sum == n.
	ClassificationPerfect Classification = "Perfect"
	// ClassificationUndefined is returned for non-positive input.
	ClassificationUndefined Classification = "Undefined"
)

// ErrOnlyPositive is returned when the input is zero or negative.
// (The old message read "are unsupported", contradicting itself.)
var ErrOnlyPositive = errors.New("Only positive numbers are supported")

// Classify reports whether i is perfect, abundant or deficient by comparing
// i with its aliquot sum. Non-positive input yields ClassificationUndefined
// and ErrOnlyPositive.
func Classify(i int64) (Classification, error) {
	if i <= 0 {
		return ClassificationUndefined, ErrOnlyPositive
	}
	// Sum proper divisors in O(sqrt(i)) by walking factor pairs (f, i/f),
	// excluding i itself and counting square roots only once.
	var aliquotSum int64
	for f := int64(1); f*f <= i; f++ {
		if i%f != 0 {
			continue
		}
		if f < i {
			aliquotSum += f
		}
		if pair := i / f; pair != f && pair < i {
			aliquotSum += pair
		}
	}
	switch {
	case aliquotSum < i:
		return ClassificationDeficient, nil
	case aliquotSum > i:
		return ClassificationAbundant, nil
	default: // aliquotSum == i
		return ClassificationPerfect, nil
	}
}
|
package cmd
import (
"fmt"
"github.com/alewgbl/fdwctl/internal/config"
"github.com/alewgbl/fdwctl/internal/logger"
"github.com/alewgbl/fdwctl/internal/util"
"github.com/spf13/cobra"
)
var (
	// rootCmd is the top-level cobra command; subcommands and persistent
	// flags are attached in init.
	rootCmd = &cobra.Command{
		Use: "fdwctl",
		Short: "A management CLI for PostgreSQL postgres_fdw Foreign Data Wrapper",
	}
	logFormat string // --logformat: text, json or elastic
	logLevel string // --loglevel: trace .. panic
	noLogo bool // --nologo: suppress the version banner
	AppVersion string // presumably injected at build time (-ldflags) — confirm; "dev" banner when empty
	connectionString string // --connection: database connection string
	configFile string // --config: path to the program configuration file
)
// Execute runs the root command; it is the CLI entry point called from main.
func Execute() error {
	return rootCmd.Execute()
}
// init wires the persistent flags and subcommands into the root command and
// registers the pre-run initializers (logging/banner first, then config).
func init() {
	cobra.OnInitialize(initCommand, initConfig)
	rootCmd.PersistentFlags().StringVar(&configFile, "config", "", "location of program configuration file")
	rootCmd.PersistentFlags().StringVar(&logFormat, "logformat", logger.TextFormat, "log output format [text, json, elastic]")
	rootCmd.PersistentFlags().StringVar(&logLevel, "loglevel", logger.TraceLevel, "log message level [trace, debug, info, warn, error, fatal, panic]")
	rootCmd.PersistentFlags().StringVar(&connectionString, "connection", "", "database connection string")
	rootCmd.PersistentFlags().BoolVar(&noLogo, "nologo", false, "suppress program name and version message")
	rootCmd.AddCommand(listCmd)
	rootCmd.AddCommand(createCmd)
	rootCmd.AddCommand(dropCmd)
	rootCmd.AddCommand(editCmd)
	rootCmd.AddCommand(desiredStateCmd)
}
// initCommand applies the global logging flags and, unless suppressed with
// --nologo, prints the program banner with its version ("dev" when no
// build-time version was injected).
func initCommand() {
	logger.SetFormat(logFormat)
	logger.SetLevel(logLevel)
	if noLogo {
		return
	}
	version := "dev"
	if AppVersion != "" {
		version = AppVersion
	}
	fmt.Printf("fdwctl v%s\n", version)
}
// initConfig loads the configuration file (falling back to the per-user
// default path) and resolves the effective database connection string from
// the --connection flag or the config file, in that order.
// When no connection string is available it calls log.Fatal, which
// presumably terminates the process — confirm the logger's semantics.
func initConfig() {
	var err error
	log := logger.Root().
		WithField("function", "initConfig")
	if configFile == "" {
		configFile = config.UserConfigFile()
	}
	log.Debugf("configFile: %s", configFile)
	err = config.Load(config.Instance(), configFile)
	if err != nil {
		log.Errorf("error initializing config: %s", err)
		return
	}
	// first non-empty value wins: CLI flag, then configured FDWConnection
	connString := util.StringCoalesce(connectionString, config.Instance().FDWConnection)
	log.Tracef("connString: %s", connString)
	if connString == "" {
		log.Fatal("database connection string is required")
	}
	// write the resolved value back so the rest of the program reads one place
	config.Instance().FDWConnection = connString
}
|
package lintcode
/**
* @param nums: The integer array.
* @param target: Target to find.
* @return: The first position of target. Position starts from 0.
*/
// binarySearch returns the index of the FIRST occurrence of target in the
// sorted slice nums, or -1 when target is absent.
//
// Invariant: if target occurs at all, its first occurrence stays within
// [start, end]. The loop narrows until the two candidates are adjacent, then
// checks start before end so the leftmost match wins.
func binarySearch(nums []int, target int) int {
	if len(nums) == 0 {
		return -1
	}
	start := 0
	end := len(nums) - 1
	for end-start > 1 {
		mid := (start + end) / 2
		if nums[mid] < target {
			start = mid
		} else {
			// nums[mid] >= target: first occurrence is at mid or to its left.
			// (The old code did end-- on an exact match, which degraded to
			// O(n) over long runs of target; halving keeps it O(log n).)
			end = mid
		}
	}
	if nums[start] == target {
		return start
	}
	if nums[end] == target {
		return end
	}
	return -1
}
|
package 动态规划
// waysToStep returns the number of ways to climb n steps taking 1, 2 or 3
// steps at a time, modulo 1e9+7 (LeetCode LCCI 08.01).
// Three rolling counters replace the previous O(n) slice, giving O(1) space
// (the state compression the original author noted as a possible follow-up).
func waysToStep(n int) int {
	const mod = 1000000007
	// base cases mirror the old seed values ways[0]=ways[1]=1, ways[2]=2
	if n <= 1 {
		return 1
	}
	if n == 2 {
		return 2
	}
	// a, b, c track ways[i-3], ways[i-2], ways[i-1]
	a, b, c := 1, 1, 2
	for i := 3; i <= n; i++ {
		a, b, c = b, c, (a+b+c)%mod
	}
	return c
}
/*
题目链接: https://leetcode-cn.com/problems/three-steps-problem-lcci/submissions/
总结
1. 这题和青蛙跳楼梯的思路是一样的
2. 可以通过状态压缩,将空间复杂度优化为O(1)
*/
|
package main
import (
"github.com/gin-gonic/gin"
"fmt"
"net/http"
"time"
)
// main starts a gin server demonstrating safe per-request goroutine use.
//
// /async/:id — spawns a goroutine for background work. Only the context
// *copy* may be touched inside the goroutine: the old code wrote the JSON
// response through the ORIGINAL context from the goroutine, which races with
// gin recycling that context once the handler returns. The response is now
// written synchronously and the goroutine only reads from the copy.
//
// /sync/:id — handles the request inline, sleeping 5s to simulate work.
func main() {
	// gin based on net/http; each request is handled by its own goroutine
	router := gin.Default()
	router.GET("/async/:id", func(c *gin.Context) {
		// copy before handing off; the original context must not be used
		// after this handler returns
		cCopy := c.Copy()
		go func() {
			fmt.Println(cCopy.Param("id"))
		}()
		c.JSON(http.StatusOK, gin.H{"id": c.Param("id")})
	})
	router.GET("/sync/:id", func(c *gin.Context) {
		id := c.Param("id")
		fmt.Println(id)
		time.Sleep(5 * time.Second)
		c.JSON(http.StatusOK, gin.H{"id": id})
	})
	router.Run()
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//633. Sum of Square Numbers
//Given a non-negative integer c, your task is to decide whether there're two integers a and b such that a2 + b2 = c.
//Example 1:
//Input: 5
//Output: True
//Explanation: 1 * 1 + 2 * 2 = 5
//Example 2:
//Input: 3
//Output: False
//func judgeSquareSum(c int) bool {
//}
// Time Is Money
|
package main
import (
"os"
"reflect"
"testing"
)
// TestNewDeck verifies the size and the expected first/last cards of a
// freshly created deck.
func TestNewDeck(t *testing.T) {
	deck := newDeck()
	if got := len(deck); got != 52 {
		t.Errorf("Expected size of deck to be 52, but got [%v]", got)
	}
	if first := deck[0]; first != "Ace of Diamonds" {
		t.Errorf("Expected first card to be Ace of Diamonds, but got [%v]", first)
	}
	if last := deck[len(deck)-1]; last != "King of Clubs" {
		t.Errorf("Expected last card to be King of Clubs, but got [%v]", last)
	}
}
// TestSaveToFileAndNewDeckFromFile round-trips a deck through a scratch file
// and verifies the loaded deck equals the saved one. Cleanup is deferred so
// the scratch file is removed even when an assertion fails part-way (the old
// trailing os.Remove was skipped on early panics).
func TestSaveToFileAndNewDeckFromFile(t *testing.T) {
	testFile := "_decktesting"
	os.Remove(testFile) // drop leftovers from an earlier aborted run
	defer os.Remove(testFile)
	d := newDeck()
	d.saveToFile(testFile)
	loadedDeck := newDeckFromFile(testFile)
	if !reflect.DeepEqual(d, loadedDeck) {
		t.Errorf("Expected new and loaded decks to be same")
	}
}
|
package utils
func ErrToString(err error) string {
if err != nil {
return err.Error()
}
return "<clean>"
}
|
package main
import (
	"container/heap"
	"errors"
	"fmt"
	"sort"
)
type Project struct {
profit int
capital int
}
type Projects []Project
func (h Projects) Len() int { return len(h) }
func (h Projects) Less(i, j int) bool {
if h[i].profit == h[j].profit {
return h[i].capital < h[j].capital
}
return h[i].profit > h[j].profit
}
func (h Projects) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h *Projects) Push(x interface{}) {
*h = append(*h, x.(Project))
}
func (h *Projects) Pop() interface{} {
old := *h
n := len(old)
x := old[n-1]
*h = old[0 : n-1]
return x
}
const (
	// defaultLength is the capacity used by NewStack.
	defaultLength = 10000
)

// Stack is a fixed-capacity LIFO stack. Storage fills from the high end
// downward: top == size means empty, top == 0 means full, and the current
// top element (when present) lives at element[top].
type Stack struct {
	top     int
	size    int
	element []interface{}
}

// NewStackBySize creates an empty stack with the given capacity.
func NewStackBySize(size int) *Stack {
	return &Stack{size, size, make([]interface{}, size)}
}

// NewStack creates an empty stack with the default capacity.
func NewStack() *Stack {
	return NewStackBySize(defaultLength)
}

// IsEmpty reports whether the stack holds no elements.
func (stack *Stack) IsEmpty() bool {
	return stack.top == stack.size
}

// IsFull reports whether the stack is at capacity.
func (stack *Stack) IsFull() bool {
	return stack.top == 0
}

// Clear discards all elements (the backing storage is reused).
func (stack *Stack) Clear() {
	stack.top = stack.size
}

// Pop removes and returns the top element, or an error when empty.
func (stack *Stack) Pop() (interface{}, error) {
	if stack.IsEmpty() {
		return nil, errors.New("The Stack is empty")
	}
	value := stack.element[stack.top]
	stack.top++
	return value, nil
}

// Push places e on top of the stack, or returns an error when full.
func (stack *Stack) Push(e interface{}) error {
	if stack.IsFull() {
		return errors.New("The Stack is full")
	}
	stack.top--
	stack.element[stack.top] = e
	return nil
}

// PrintStack prints the elements from top to bottom, one per line.
func (stack *Stack) PrintStack() {
	for i := stack.top; i < stack.size; i++ {
		fmt.Print(stack.element[i], "\n")
	}
}
// findMaximizedCapital returns the final capital after choosing at most k
// projects to maximize profit (LeetCode 502 "IPO"). Starting from capital w,
// a project may be started only when its capital requirement is <= current
// w; completing it adds its profit to w.
//
// Strategy: sort project indices by required capital, then k times move
// every newly-affordable project into the profit max-heap (Projects) and
// take the most profitable one. This replaces the previous stack/heap
// juggling, which — once the globally most profitable project became
// affordable — banked the top `surplus` heap entries unconditionally,
// wrongly assuming every later heap top would also be affordable.
func findMaximizedCapital(k int, w int, profits []int, capital []int) int {
	n := len(profits)
	// project indices ordered by ascending capital requirement
	order := make([]int, n)
	for i := range order {
		order[i] = i
	}
	sort.Slice(order, func(a, b int) bool {
		return capital[order[a]] < capital[order[b]]
	})
	affordable := &Projects{} // max-heap on profit (see Less)
	next := 0
	for picked := 0; picked < k; picked++ {
		// admit every project whose capital requirement is now covered
		for next < n && capital[order[next]] <= w {
			heap.Push(affordable, Project{profit: profits[order[next]], capital: capital[order[next]]})
			next++
		}
		if affordable.Len() == 0 {
			break // nothing affordable now, and w can no longer grow
		}
		w += heap.Pop(affordable).(Project).profit
	}
	return w
}
// main runs a small findMaximizedCapital example; expected output: 5
// (w=2 affords every project; the best single profit is 3).
func main() {
	//fmt.Println(findMaximizedCapital(2, 0, []int{1, 2, 3}, []int{0, 1, 1}))
	fmt.Println(findMaximizedCapital(1, 2, []int{1, 2, 3}, []int{1, 1, 2}))
}
|
package main
import (
"fmt"
"os"
"github.com/codegangsta/cli"
"github.com/EverythingMe/meduza/mdzctl/codegen"
)
// langs lists the supported code-generation targets.
// NOTE(review): not referenced in this file — presumably intended for
// validating the --lang flag; confirm before removing.
var langs = []string{"py", "go"}

// genCommand defines the "gen" CLI subcommand: generate model code from a
// schema file for one of the supported languages.
var genCommand = cli.Command{
	Name: "gen",
	Usage: "Generate models from a schema file",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name: "lang, l",
			Usage: "language to generate either py or go",
		},
		cli.StringFlag{
			Name: "file, f",
			Usage: "schema file to read. pass - to read from stdin",
		},
		cli.StringFlag{
			Name: "output, o",
			Usage: "(optional) write output to this file. otherwise to stdout",
		},
	},
	Action: gen,
}
// gen implements the "gen" CLI action: it reads a schema (from a file, or
// stdin when "-" is given), generates model code for the requested language,
// and writes the result to the output file or stdout. All failures are
// reported on stderr and abort the action.
func gen(c *cli.Context) {
	lang := c.String("lang")
	file := c.String("file")
	output := c.String("output")
	if file == "" {
		fmt.Fprintln(os.Stderr, "No schema file given")
		return
	}
	var (
		generated []byte
		err       error
	)
	switch file {
	case "-":
		generated, err = codegen.Generate(lang, os.Stdin)
	default:
		generated, err = codegen.GenerateFile(lang, file)
	}
	if err != nil {
		fmt.Fprintln(os.Stderr, "Error generating:", err)
		return
	}
	if output == "" {
		fmt.Println(string(generated))
		return
	}
	fp, err := os.Create(output)
	if err != nil {
		fmt.Fprintln(os.Stderr, "Error creating out file:", err)
		return
	}
	defer fp.Close()
	if _, err = fp.Write(generated); err != nil {
		fmt.Fprintln(os.Stderr, "Error writing out file:", err)
		return
	}
	fmt.Println("Generated code written to ", output)
}
|
package im
import (
"encoding/json"
"log"
"strings"
"github.com/astaxie/beego/context"
"github.com/wst-libs/wst-sdk/errors"
"github.com/wst-libs/wst-sdk/im/sdk"
"github.com/wst-libs/wst-sdk/utils"
)
// SetOutPutHeader applies the standard response headers for the IM
// endpoints: close the connection, mark the body as JSON, and advertise the
// SDK version as the server identity.
func SetOutPutHeader(ctx *context.Context) {
	headers := [][2]string{
		{"Connection", "close"},
		{"Content-Type", "application/json"},
		{"Server", utils.Version},
	}
	for _, kv := range headers {
		ctx.Output.Header(kv[0], kv[1])
	}
}
// ParseInputHander checks that the request declares a JSON content type.
// NOTE(review): currently log-only — both error returns are commented out,
// so the function always returns nil and every request passes.
func ParseInputHander(ctx *context.Context) error {
	if !strings.Contains(ctx.Input.Header("Content-Type"), "application/json") {
		log.Println("not application/json.")
		// return error.Error()("Content-Type not application/json")
		// return errors.New("Content-Type is application/json")
	}
	return nil
}
// checkRegisterCommon validates the registration fields: id, name and
// portrait must each be non-empty and at most 32 bytes. Returns (false, err)
// for the first field that fails, (true, nil) otherwise.
func checkRegisterCommon(info RegisteredUsers) (bool, error) {
	fields := []struct {
		value string
		msg   string
	}{
		{info.Id, "user id too long or short"},
		{info.Name, "user name too long or short"},
		{info.Portrait, "user portrait too long or short"},
	}
	for _, f := range fields {
		if len(f.value) > 32 || len(f.value) <= 0 {
			return false, errors.New(f.msg)
		}
	}
	return true, nil
}
// RegisterUsersHandler registers a user with the IM backend and returns a
// JSON response carrying the user's access token.
// Flow: decode body -> validate common params -> validate user info ->
// request a token from the RC server -> assemble the response.
func RegisterUsersHandler(ctx *context.Context) []byte {
	// Parse request body to json
	var request RequestRegisteredUsers
	err := json.Unmarshal(ctx.Input.RequestBody, &request)
	if err != nil {
		log.Println("Error: ", err.Error())
		return errors.ParseJsonFailed()
	}
	// Check request common param
	com := errors.IsCommonErr(request.RequestCommon)
	if com.Code != 0 {
		outerr, _ := json.Marshal(com)
		return outerr
	}
	// Validate the user fields (id/name/portrait length limits)
	_, err = checkRegisterCommon(request.RegisteredUsers)
	if err != nil {
		return errors.ImplementErr(errors.UserInfoErr, request.RequestCommon, err.Error())
	}
	s := sdk.NewRCServer()
	token, err := s.GetTokenFromUser(request.RegisteredUsers.Id, request.RegisteredUsers.Name, request.RegisteredUsers.Portrait)
	if err != nil {
		return errors.ImplementErr(errors.UserTokenErr, request.RequestCommon, err.Error())
	}
	// Echo the routing fields back alongside the token
	v := ResponseRegisteredUsers{
		utils.ResponseCommon{
			Version: utils.Version,
			SeqNum:  request.RequestCommon.SeqNum,
			From:    request.RequestCommon.From,
			To:      request.RequestCommon.To,
			Type:    request.RequestCommon.Type,
			Number:  request.RequestCommon.Number,
			Code:    0,
			Message: "success",
		},
		utils.TOKEN{
			Token: token,
		},
	}
	out, err := json.Marshal(v)
	if err != nil {
		// previously swallowed silently; at least record the failure
		log.Println("RegisterUsersHandler marshal error: ", err.Error())
	}
	return out
}
// Processing create session
func CreateSessionHandler(ctx *context.Context) []byte {
// Parse request body to json
var request RequestCreateSession
err := json.Unmarshal(ctx.Input.RequestBody, &request)
if err != nil {
log.Println("Error: ", err.Error())
return errors.ParseJsonFailed()
}
// Check request common param
com := errors.IsCommonErr(request.RequestCommon)
if com.Code != 0 {
outerr, _ := json.Marshal(com)
return outerr
}
// Get session id by uri
sid := ctx.Input.Param(":sid")
err = errors.VerifyUid(sid)
if err != nil {
ret := utils.ResponseCommon{
Code: errors.UidErr,
Message: err.Error(),
Version: request.RequestCommon.Version,
SeqNum: request.RequestCommon.SeqNum,
From: request.RequestCommon.From,
To: request.RequestCommon.To,
Type: request.RequestCommon.Type,
Number: request.RequestCommon.Number,
}
out, _ := json.Marshal(ret)
return out
}
s := sdk.NewRCServer()
err = s.CreateChatRoom(sid, request.Name)
if err != nil {
return errors.ImplementErr(errors.CreateSessionErr, request.RequestCommon, err.Error())
}
v := ResponseCreateSession{
utils.ResponseCommon{
Version: utils.Version,
SeqNum: request.RequestCommon.SeqNum,
From: request.RequestCommon.From,
To: request.RequestCommon.To,
Type: request.RequestCommon.Type,
Number: request.RequestCommon.Number,
Message: "success",
Code: 0,
},
utils.ID{
Id: sid,
},
}
out, err := json.Marshal(v)
if err != nil {
}
return out
}
// Processing delete session
func DeleteSessionHandler(ctx *context.Context) []byte {
// Parse request body to json
var request RequestDelSession
err := json.Unmarshal(ctx.Input.RequestBody, &request)
if err != nil {
log.Println("Error: ", err.Error())
return errors.ParseJsonFailed()
}
// Check request common params
com := errors.IsCommonErr(request.RequestCommon)
if com.Code != 0 {
outerr, _ := json.Marshal(com)
return outerr
}
// Get session id by request params
ids := make([]string, request.Size)
for i, v := range request.List {
ids[i] = v.Id
}
s := sdk.NewRCServer()
err = s.DeleteChatRoom(ids)
if err != nil {
return errors.ImplementErr(errors.DeleteSessionErr, request.RequestCommon, err.Error())
}
v := ResponseDelSession{
utils.ResponseCommon{
Version: utils.Version,
SeqNum: request.RequestCommon.SeqNum,
From: request.RequestCommon.From,
To: request.RequestCommon.To,
Type: request.RequestCommon.Type,
Number: request.RequestCommon.Number,
Message: "success",
Code: 0,
},
}
out, err := json.Marshal(v)
if err != nil {
}
return out
}
// Processing delete session by session id
func DeleteSessionByIDHandler(ctx *context.Context) []byte {
// Parse request body to json
var request RequestDelSession
err := json.Unmarshal(ctx.Input.RequestBody, &request)
if err != nil {
log.Println("Error: ", err.Error())
return errors.ParseJsonFailed()
}
// Check request common
com := errors.IsCommonErr(request.RequestCommon)
if com.Code != 0 {
outerr, _ := json.Marshal(com)
return outerr
}
sid := ctx.Input.Param(":sid")
s := sdk.NewRCServer()
err = s.DeleteChatRoom([]string{sid})
if err != nil {
return errors.ImplementErr(errors.DeleteSessionErr, request.RequestCommon, err.Error())
}
v := utils.ResponseCommon{
Version: utils.Version,
SeqNum: request.RequestCommon.SeqNum,
From: request.RequestCommon.From,
To: request.RequestCommon.To,
Type: request.RequestCommon.Type,
Number: request.RequestCommon.Number,
Message: "success",
Code: 0,
}
out, err := json.Marshal(v)
if err != nil {
}
return out
}
// Processing delete session by user id (deprecated)
func DeleteSessionByUIDHandler(ctx *context.Context) []byte {
var request RequestDelSession
err := json.Unmarshal(ctx.Input.RequestBody, &request)
if err != nil {
log.Println("Error: ", err.Error())
return errors.ParseJsonFailed()
}
com := errors.IsCommonErr(request.RequestCommon)
if com.Code != 0 {
outerr, _ := json.Marshal(com)
return outerr
}
// s := sdk.NewRCServer()
v := ResponseDelSession{
utils.ResponseCommon{
Version: "V1.0",
},
}
out, err := json.Marshal(v)
if err != nil {
}
return out
}
// Processing get session (deprecated)
func GetSessionHandler(ctx *context.Context) []byte {
v := ResponseGetSession{
utils.ResponseCommon{
Version: "V1.0",
},
GetSession{},
}
// s := sdk.NewRCServer()
// s.GetAllChatRoom()
out, err := json.Marshal(v)
if err != nil {
}
return out
}
// Processing get session infomation by session id
func GetSessionByIDHandler(ctx *context.Context) []byte {
sid := ctx.Input.Param(":sid")
s := sdk.NewRCServer()
info, err := s.GetChatRoomById(sid)
if err != nil {
return errors.ImplementErr(errors.DeleteSessionErr, utils.RequestCommon{}, err.Error())
}
v := ResponseGetSession{
utils.ResponseCommon{
Version: utils.Version,
SeqNum: 1,
From: "",
To: "",
Type: "",
Number: "",
Code: 0,
},
GetSession{
List: []utils.RoomInfo{
utils.RoomInfo{
Id: info.Id,
Name: info.Name,
CreateTime: info.Time,
},
},
Size: 1,
},
}
out, err := json.Marshal(v)
if err != nil {
}
return out
}
// Processing get session by user id
func GetSessionByUIDHandler(ctx *context.Context) []byte {
v := ResponseGetSession{
utils.ResponseCommon{
Version: utils.Version,
SeqNum: 1,
From: "",
To: "",
Type: "",
Number: "",
Code: 0,
},
GetSession{},
}
out, err := json.Marshal(v)
if err != nil {
}
return out
}
// Processing get users by session id
// GetUsersBySessionIDHandler returns the member ids of the chat room named
// by the ":sid" URL parameter.
func GetUsersBySessionIDHandler(ctx *context.Context) []byte {
	sid := ctx.Input.Param(":sid")
	s := sdk.NewRCServer()
	users, err := s.GetUsersByRoomId(sid)
	if err != nil {
		// NOTE(review): DeleteSessionErr reused as the error code — confirm a
		// dedicated code isn't expected here.
		return errors.ImplementErr(errors.DeleteSessionErr, utils.RequestCommon{}, err.Error())
	}
	// Allocate the id slice up front: the old code indexed into a nil slice,
	// which panicked as soon as the room had any member.
	userIds := make([]utils.ID, len(users))
	for i, u := range users {
		userIds[i].Id = u
	}
	v := ResponseGetSessionUsers{
		utils.ResponseCommon{
			Version: utils.Version,
			SeqNum:  1,
			From:    "",
			To:      "",
			Type:    "",
			Number:  "",
			Code:    0,
		},
		GetSessionUsers{
			List: userIds,
			Size: len(users),
		},
	}
	out, err := json.Marshal(v)
	if err != nil {
		log.Println("GetUsersBySessionId Error: ", err.Error())
	}
	return out
}
// PutSessionByUIDHandler adds user ":uid" to chat room ":sid".
// NOTE(review): the JoinRoomByUserId error is ignored and a stub "V1.0"
// response is always returned — confirm this is intentional.
func PutSessionByUIDHandler(ctx *context.Context) []byte {
	var request RequestJoinSession
	err := json.Unmarshal(ctx.Input.RequestBody, &request)
	if err != nil {
		log.Println("Error: ", err.Error())
		return errors.ParseJsonFailed()
	}
	com := errors.IsCommonErr(request.RequestCommon)
	if com.Code != 0 {
		outerr, _ := json.Marshal(com)
		return outerr
	}
	uid := ctx.Input.Param(":uid")
	sid := ctx.Input.Param(":sid")
	s := sdk.NewRCServer()
	s.JoinRoomByUserId(uid, sid)
	v := ResponseJoinSession{
		utils.ResponseCommon{
			Version: "V1.0",
		},
	}
	out, err := json.Marshal(v)
	if err != nil {
	}
	return out
}

// PostMessageToUserByIDHandler relays a one-to-one message: the body carries
// the sender (Uid) and content, the ":uid" URL parameter the recipient.
func PostMessageToUserByIDHandler(ctx *context.Context) []byte {
	var request RequestSendMessage
	err := json.Unmarshal(ctx.Input.RequestBody, &request)
	if err != nil {
		log.Println("Error: ", err.Error())
		return errors.ParseJsonFailed()
	}
	com := errors.IsCommonErr(request.RequestCommon)
	if com.Code != 0 {
		outerr, _ := json.Marshal(com)
		return outerr
	}
	sourceId := request.Uid
	targetId := ctx.Input.Param(":uid")
	content := request.Content
	log.Println("source id: ", sourceId)
	log.Println("target id: ", targetId)
	log.Println("content is: ", content)
	s := sdk.NewRCServer()
	// NOTE(review): send errors are ignored; the response is always success.
	s.SendMsgUserToUsers(sourceId, targetId, content)
	v := ResponseSendMessage{
		utils.ResponseCommon{
			Version: utils.Version,
		},
	}
	out, err := json.Marshal(v)
	if err != nil {
	}
	return out
}

// PostMessageToSessionByIDHandler relays a message from the sender (Uid in
// the body) to the chat room named by the ":sid" URL parameter.
func PostMessageToSessionByIDHandler(ctx *context.Context) []byte {
	var request RequestSendMessage
	err := json.Unmarshal(ctx.Input.RequestBody, &request)
	if err != nil {
		log.Println("Error: ", err.Error())
		return errors.ParseJsonFailed()
	}
	com := errors.IsCommonErr(request.RequestCommon)
	if com.Code != 0 {
		outerr, _ := json.Marshal(com)
		return outerr
	}
	sourceId := request.Uid
	targetId := ctx.Input.Param(":sid")
	content := request.Content
	s := sdk.NewRCServer()
	s.SendMsgUserToSession(sourceId, targetId, content)
	v := ResponseSendMessage{
		utils.ResponseCommon{
			Version: utils.Version,
		},
	}
	out, err := json.Marshal(v)
	if err != nil {
	}
	return out
}

// ReqMsgHandler dispatches a generic message request to the private, group
// or chatroom send API based on req.Data.Type; unknown types yield a
// ReqTypeErr response. The success response echoes the request routing
// fields with From/To swapped (reply direction).
func ReqMsgHandler(ctx *context.Context) []byte {
	var req ReqMsg
	err := json.Unmarshal(ctx.Input.RequestBody, &req)
	if err != nil {
		log.Println("Error: ", err.Error())
		return errors.ParseJsonFailed()
	}
	com := errors.IsCommonErr(req.RequestCommon)
	if com.Code != 0 {
		outerr, _ := json.Marshal(com)
		return outerr
	}
	s := sdk.NewRCServer()
	switch req.Data.Type {
	case "private":
		s.SendMsgPrivate(req.Data.SourceId, req.Data.TargetId, req.Data.Content)
	case "group":
		s.SendMsgGroup(req.Data.SourceId, req.Data.TargetId, req.Data.Content)
	case "chatroom":
		s.SendMsgRoom(req.Data.SourceId, req.Data.TargetId, req.Data.Content)
	default:
		return errors.ReqTypeErr(req.RequestCommon)
	}
	v := utils.ResponseCommon{
		Version: utils.Version,
		SeqNum: req.SeqNum,
		From: req.To, // swap: reply travels back toward the requester
		To: req.From,
		Type: req.Type,
		Number: req.Number,
		Message: "success",
		Code: 0,
	}
	out, _ := json.Marshal(v)
	return out
}
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"encoding/json"
"fmt"
"io"
"strings"
"unsafe"
"github.com/cznic/mathutil"
"github.com/pingcap/tidb/parser/charset"
"github.com/pingcap/tidb/parser/format"
"github.com/pingcap/tidb/parser/mysql"
)
// UnspecifiedLength is unspecified length.
const (
	// UnspecifiedLength marks a flen/decimal that was not given explicitly
	// (see NewFieldType, which initializes both fields to it).
	UnspecifiedLength = -1
)

// TiDBStrictIntegerDisplayWidth represent whether return warnings when integerType with (length) was parsed.
// The default is `false`, it will be parsed as warning, and the result in show-create-table will ignore the
// display length when it set to `true`. This is for compatibility with MySQL 8.0 in which integer max display
// length is deprecated, referring this issue #6688 for more details.
var (
	// NOTE(review): package-level mutable flag — confirm it is only written
	// during startup/config load; concurrent writes would race.
	TiDBStrictIntegerDisplayWidth bool
)
// FieldType records field type information.
type FieldType struct {
	// tp is type of the field
	tp byte
	// flag represent NotNull, Unsigned, PriKey flags etc.
	flag uint
	// flen represent size of bytes of the field
	flen int
	// decimal represent decimal length of the field
	decimal int
	// charset represent character set
	charset string
	// collate represent collate rules of the charset
	collate string
	// elems is the element list for enum and set type.
	elems []string
	// elemsIsBinaryLit marks, per element of elems, whether it came from a
	// binary literal; allocated lazily (see SetElemWithIsBinaryLit).
	elemsIsBinaryLit []bool
	// array reports whether this field is an array; when set, GetType
	// returns mysql.TypeJSON regardless of tp.
	array bool
	// Please keep in mind that jsonFieldType should be updated if you add a new field here.
}
// NewFieldType returns a FieldType,
// with a type and other information about field type.
func NewFieldType(tp byte) *FieldType {
return &FieldType{
tp: tp,
flen: UnspecifiedLength,
decimal: UnspecifiedLength,
}
}
// IsDecimalValid checks whether the decimal is valid.
func (ft *FieldType) IsDecimalValid() bool {
if ft.GetType() == mysql.TypeNewDecimal && (ft.decimal < 0 || ft.decimal > mysql.MaxDecimalScale || ft.flen <= 0 || ft.flen > mysql.MaxDecimalWidth || ft.flen < ft.decimal) {
return false
}
return true
}
// IsVarLengthType Determine whether the column type is a variable-length type
func (ft *FieldType) IsVarLengthType() bool {
switch ft.GetType() {
case mysql.TypeVarchar, mysql.TypeVarString, mysql.TypeJSON, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
return true
default:
return false
}
}
// GetType returns the type of the FieldType.
func (ft *FieldType) GetType() byte {
if ft.array {
return mysql.TypeJSON
}
return ft.tp
}
// GetFlag returns the flag of the FieldType.
func (ft *FieldType) GetFlag() uint {
return ft.flag
}
// GetFlen returns the length of the field.
func (ft *FieldType) GetFlen() int {
return ft.flen
}
// GetDecimal returns the decimal of the FieldType.
func (ft *FieldType) GetDecimal() int {
return ft.decimal
}
// GetCharset returns the field's charset
func (ft *FieldType) GetCharset() string {
return ft.charset
}
// GetCollate returns the collation of the field.
func (ft *FieldType) GetCollate() string {
return ft.collate
}
// GetElems returns the elements of the FieldType.
func (ft *FieldType) GetElems() []string {
return ft.elems
}
// SetType sets the type of the FieldType.
func (ft *FieldType) SetType(tp byte) {
ft.tp = tp
ft.array = false
}
// SetFlag sets the flag of the FieldType.
func (ft *FieldType) SetFlag(flag uint) {
ft.flag = flag
}
// AddFlag adds a flag to the FieldType.
func (ft *FieldType) AddFlag(flag uint) {
ft.flag |= flag
}
// AndFlag and the flag of the FieldType.
func (ft *FieldType) AndFlag(flag uint) {
ft.flag &= flag
}
// ToggleFlag toggle the flag of the FieldType.
func (ft *FieldType) ToggleFlag(flag uint) {
ft.flag ^= flag
}
// DelFlag delete the flag of the FieldType.
func (ft *FieldType) DelFlag(flag uint) {
ft.flag &= ^flag
}
// SetFlen sets the display length of the field.
func (ft *FieldType) SetFlen(flen int) {
	ft.flen = flen
}
// SetFlenUnderLimit sets the length of the field, capping it at
// mysql.MaxDecimalWidth for DECIMAL columns.
func (ft *FieldType) SetFlenUnderLimit(flen int) {
	if ft.GetType() == mysql.TypeNewDecimal {
		ft.flen = mathutil.Min(flen, mysql.MaxDecimalWidth)
	} else {
		ft.flen = flen
	}
}
// SetDecimal sets the fractional-digit count of the FieldType.
func (ft *FieldType) SetDecimal(decimal int) {
	ft.decimal = decimal
}
// SetDecimalUnderLimit sets the decimal of the field, capping it at
// mysql.MaxDecimalScale for DECIMAL columns.
func (ft *FieldType) SetDecimalUnderLimit(decimal int) {
	if ft.GetType() == mysql.TypeNewDecimal {
		ft.decimal = mathutil.Min(decimal, mysql.MaxDecimalScale)
	} else {
		ft.decimal = decimal
	}
}
// UpdateFlenAndDecimalUnderLimit derives this DECIMAL field's flen/decimal
// from an old FieldType plus deltas, clamping to the MySQL maxima.
// It is a no-op for non-DECIMAL types. Negative old values mean "unspecified"
// and are replaced with the corresponding maximum.
func (ft *FieldType) UpdateFlenAndDecimalUnderLimit(old *FieldType, deltaDecimal int, deltaFlen int) {
	if ft.GetType() != mysql.TypeNewDecimal {
		return
	}
	if old.decimal < 0 {
		// Old scale unknown: assume the maximum scale and widen flen accordingly.
		deltaFlen += mysql.MaxDecimalScale
		ft.decimal = mysql.MaxDecimalScale
	} else {
		ft.SetDecimal(old.decimal + deltaDecimal)
	}
	if old.flen < 0 {
		ft.flen = mysql.MaxDecimalWidth
	} else {
		ft.SetFlenUnderLimit(old.flen + deltaFlen)
	}
}
// SetCharset sets the charset name of the FieldType.
func (ft *FieldType) SetCharset(charset string) {
	ft.charset = charset
}
// SetCollate sets the collation name of the FieldType.
func (ft *FieldType) SetCollate(collate string) {
	ft.collate = collate
}
// SetElems replaces the enumeration elements of the FieldType (ENUM/SET).
func (ft *FieldType) SetElems(elems []string) {
	ft.elems = elems
}
// SetElem sets the element at index idx; idx must be in range of the existing elems.
func (ft *FieldType) SetElem(idx int, element string) {
	ft.elems[idx] = element
}
// SetArray marks (or unmarks) the FieldType as an array column.
func (ft *FieldType) SetArray(array bool) {
	ft.array = array
}
// IsArray returns true if the field type is an array.
func (ft *FieldType) IsArray() bool {
	return ft.array
}
// ArrayType returns the element type of an array column.
// For non-array fields the receiver itself is returned; for array fields a
// clone with the array marker cleared is returned, leaving ft untouched.
func (ft *FieldType) ArrayType() *FieldType {
	if !ft.array {
		return ft
	}
	elem := ft.Clone()
	elem.SetArray(false)
	return elem
}
// SetElemWithIsBinaryLit sets the element at index idx and, when the element
// originated from a binary literal, records that fact so later processing can
// treat the bytes verbatim.
func (ft *FieldType) SetElemWithIsBinaryLit(idx int, element string, isBinaryLit bool) {
	ft.elems[idx] = element
	if isBinaryLit {
		// Create the binary literal flags lazily.
		if ft.elemsIsBinaryLit == nil {
			ft.elemsIsBinaryLit = make([]bool, len(ft.elems))
		}
		ft.elemsIsBinaryLit[idx] = true
	}
}
// GetElem returns the element at index idx; idx must be in range.
func (ft *FieldType) GetElem(idx int) string {
	return ft.elems[idx]
}
// GetElemIsBinaryLit returns whether the element at index idx came from a
// binary literal. Returns false when the flag slice was never allocated.
func (ft *FieldType) GetElemIsBinaryLit(idx int) bool {
	if len(ft.elemsIsBinaryLit) == 0 {
		return false
	}
	return ft.elemsIsBinaryLit[idx]
}
// CleanElemIsBinaryLit drops all binary-literal flags, freeing the slice.
func (ft *FieldType) CleanElemIsBinaryLit() {
	if ft != nil && ft.elemsIsBinaryLit != nil {
		ft.elemsIsBinaryLit = nil
	}
}
// Clone returns a copy of itself.
// NOTE(review): this is a shallow copy — slice fields (elems,
// elemsIsBinaryLit) still share backing arrays with the original; confirm
// callers never mutate elements of a clone in place.
func (ft *FieldType) Clone() *FieldType {
	ret := *ft
	return &ret
}
// Equal checks whether two FieldType objects are equal.
// VARCHAR and VAR_STRING are treated as the same type; only the unsigned bit
// of the flags is compared; flen is ignored for real types with unspecified
// decimal; decimal is ignored for int and string types.
func (ft *FieldType) Equal(other *FieldType) bool {
	// We do not need to compare whole `ft.flag == other.flag` when wrapping cast upon an Expression.
	// but need compare unsigned_flag of ft.flag.
	// When tp is float or double with decimal unspecified, do not check whether flen is equal,
	// because flen for them is useless.
	// The decimal field can be ignored if the type is int or string.
	tpEqual := (ft.GetType() == other.GetType()) || (ft.GetType() == mysql.TypeVarchar && other.GetType() == mysql.TypeVarString) || (ft.GetType() == mysql.TypeVarString && other.GetType() == mysql.TypeVarchar)
	flenEqual := ft.flen == other.flen || (ft.EvalType() == ETReal && ft.decimal == UnspecifiedLength)
	ignoreDecimal := ft.EvalType() == ETInt || ft.EvalType() == ETString
	partialEqual := tpEqual &&
		(ignoreDecimal || ft.decimal == other.decimal) &&
		ft.charset == other.charset &&
		ft.collate == other.collate &&
		flenEqual &&
		mysql.HasUnsignedFlag(ft.flag) == mysql.HasUnsignedFlag(other.flag)
	if !partialEqual || len(ft.elems) != len(other.elems) {
		return false
	}
	// ENUM/SET element lists must match exactly, in order.
	for i := range ft.elems {
		if ft.elems[i] != other.elems[i] {
			return false
		}
	}
	return true
}
// PartialEqual checks whether two FieldType objects are equal.
// If unsafe is true and the objects is string type, PartialEqual will ignore flen.
// See https://github.com/pingcap/tidb/issues/35490#issuecomment-1211658886 for more detail.
func (ft *FieldType) PartialEqual(other *FieldType, unsafe bool) bool {
	// Fall back to full equality unless the relaxed string comparison applies.
	if !unsafe || ft.EvalType() != ETString || other.EvalType() != ETString {
		return ft.Equal(other)
	}
	// Relaxed path: compare only charset, collation and the unsigned bit.
	partialEqual := ft.charset == other.charset && ft.collate == other.collate && mysql.HasUnsignedFlag(ft.flag) == mysql.HasUnsignedFlag(other.flag)
	if !partialEqual || len(ft.elems) != len(other.elems) {
		return false
	}
	for i := range ft.elems {
		if ft.elems[i] != other.elems[i] {
			return false
		}
	}
	return true
}
// EvalType gets the type used during expression evaluation, collapsing the
// many storage types into the small set of evaluation categories.
// Any type not matched below (strings, blobs, ENUM/SET without the
// EnumSetAsInt flag, ...) evaluates as ETString.
func (ft *FieldType) EvalType() EvalType {
	switch ft.GetType() {
	case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong,
		mysql.TypeBit, mysql.TypeYear:
		return ETInt
	case mysql.TypeFloat, mysql.TypeDouble:
		return ETReal
	case mysql.TypeNewDecimal:
		return ETDecimal
	case mysql.TypeDate, mysql.TypeDatetime:
		return ETDatetime
	case mysql.TypeTimestamp:
		return ETTimestamp
	case mysql.TypeDuration:
		return ETDuration
	case mysql.TypeJSON:
		return ETJson
	case mysql.TypeEnum, mysql.TypeSet:
		// ENUM/SET may be forced to evaluate as their integer value.
		if ft.flag&mysql.EnumSetAsIntFlag > 0 {
			return ETInt
		}
	}
	return ETString
}
// Hybrid checks whether a type is a hybrid type, which can represent
// different kinds of value depending on context (ENUM, BIT and SET).
func (ft *FieldType) Hybrid() bool {
	switch ft.GetType() {
	case mysql.TypeEnum, mysql.TypeBit, mysql.TypeSet:
		return true
	}
	return false
}
// Init initializes the FieldType with the given type and unspecified
// length/decimal, leaving charset, collation and flags at their zero values.
func (ft *FieldType) Init(tp byte) {
	ft.tp = tp
	ft.flen = UnspecifiedLength
	ft.decimal = UnspecifiedLength
}
// CompactStr only considers tp/CharsetBin/flen/Deimal.
// This is used for showing column type in infoschema.
// The result looks like e.g. "varchar(20)", "decimal(10,2)" or "enum('a','b')".
func (ft *FieldType) CompactStr() string {
	ts := TypeToStr(ft.GetType(), ft.charset)
	suffix := ""
	defaultFlen, defaultDecimal := mysql.GetDefaultFieldLengthAndDecimal(ft.GetType())
	isDecimalNotDefault := ft.decimal != defaultDecimal && ft.decimal != 0 && ft.decimal != UnspecifiedLength
	// displayFlen and displayDecimal are flen and decimal values with `-1` substituted with default value.
	displayFlen, displayDecimal := ft.flen, ft.decimal
	if displayFlen == UnspecifiedLength {
		displayFlen = defaultFlen
	}
	if displayDecimal == UnspecifiedLength {
		displayDecimal = defaultDecimal
	}
	switch ft.GetType() {
	case mysql.TypeEnum, mysql.TypeSet:
		// Format is ENUM ('e1', 'e2') or SET ('e1', 'e2')
		es := make([]string, 0, len(ft.elems))
		for _, e := range ft.elems {
			e = format.OutputFormat(e)
			es = append(es, e)
		}
		suffix = fmt.Sprintf("('%s')", strings.Join(es, "','"))
	case mysql.TypeTimestamp, mysql.TypeDatetime, mysql.TypeDuration:
		// Temporal types only show their fractional-second precision.
		if isDecimalNotDefault {
			suffix = fmt.Sprintf("(%d)", displayDecimal)
		}
	case mysql.TypeDouble, mysql.TypeFloat:
		// 1. flen Not Default, decimal Not Default -> Valid
		// 2. flen Not Default, decimal Default (-1) -> Invalid
		// 3. flen Default, decimal Not Default -> Valid
		// 4. flen Default, decimal Default -> Valid (hide)
		if isDecimalNotDefault {
			suffix = fmt.Sprintf("(%d,%d)", displayFlen, displayDecimal)
		}
	case mysql.TypeNewDecimal:
		suffix = fmt.Sprintf("(%d,%d)", displayFlen, displayDecimal)
	case mysql.TypeBit, mysql.TypeVarchar, mysql.TypeString, mysql.TypeVarString:
		suffix = fmt.Sprintf("(%d)", displayFlen)
	case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong:
		// Referring this issue #6688, the integer max display length is deprecated in MySQL 8.0.
		// Since the length doesn't take any effect in TiDB storage or showing result, we remove it here.
		if !TiDBStrictIntegerDisplayWidth {
			suffix = fmt.Sprintf("(%d)", displayFlen)
		}
	case mysql.TypeYear:
		// NOTE(review): YEAR uses the raw ft.flen rather than displayFlen, so
		// an unspecified length prints as (-1) — confirm this is intended.
		suffix = fmt.Sprintf("(%d)", ft.flen)
	case mysql.TypeNull:
		suffix = "(0)"
	}
	return ts + suffix
}
// InfoSchemaStr joins the CompactStr with unsigned flag and
// returns a string.
// BIT and YEAR never display " unsigned" even when the flag is set.
func (ft *FieldType) InfoSchemaStr() string {
	suffix := ""
	if mysql.HasUnsignedFlag(ft.flag) &&
		ft.GetType() != mysql.TypeBit &&
		ft.GetType() != mysql.TypeYear {
		suffix = " unsigned"
	}
	return ft.CompactStr() + suffix
}
// String joins the information of FieldType and returns a string.
// It appends UNSIGNED/ZEROFILL/BINARY modifiers and, for char/blob types,
// the CHARACTER SET and COLLATE clauses.
// Note: when flen or decimal is unspecified, this function will use the default value instead of -1.
func (ft *FieldType) String() string {
	strs := []string{ft.CompactStr()}
	if mysql.HasUnsignedFlag(ft.flag) {
		strs = append(strs, "UNSIGNED")
	}
	if mysql.HasZerofillFlag(ft.flag) {
		strs = append(strs, "ZEROFILL")
	}
	// CHAR columns omit BINARY; the binary-ness is conveyed via the charset.
	if mysql.HasBinaryFlag(ft.flag) && ft.GetType() != mysql.TypeString {
		strs = append(strs, "BINARY")
	}
	if IsTypeChar(ft.GetType()) || IsTypeBlob(ft.GetType()) {
		if ft.charset != "" && ft.charset != charset.CharsetBin {
			strs = append(strs, fmt.Sprintf("CHARACTER SET %s", ft.charset))
		}
		if ft.collate != "" && ft.collate != charset.CharsetBin {
			strs = append(strs, fmt.Sprintf("COLLATE %s", ft.collate))
		}
	}
	return strings.Join(strs, " ")
}
// Restore implements Node interface.
// It writes the SQL text of the type definition (name, precision/scale,
// modifiers and charset clauses) to the restore context.
func (ft *FieldType) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord(TypeToStr(ft.GetType(), ft.charset))
	precision := UnspecifiedLength
	scale := UnspecifiedLength
	switch ft.GetType() {
	case mysql.TypeEnum, mysql.TypeSet:
		// ENUM/SET restore their quoted element list instead of a precision.
		ctx.WritePlain("(")
		for i, e := range ft.elems {
			if i != 0 {
				ctx.WritePlain(",")
			}
			ctx.WriteString(e)
		}
		ctx.WritePlain(")")
	case mysql.TypeTimestamp, mysql.TypeDatetime, mysql.TypeDuration:
		// Temporal types only carry a fractional-second precision.
		precision = ft.decimal
	case mysql.TypeUnspecified, mysql.TypeFloat, mysql.TypeDouble, mysql.TypeNewDecimal:
		precision = ft.flen
		scale = ft.decimal
	default:
		precision = ft.flen
	}
	if precision != UnspecifiedLength {
		ctx.WritePlainf("(%d", precision)
		if scale != UnspecifiedLength {
			ctx.WritePlainf(",%d", scale)
		}
		ctx.WritePlain(")")
	}
	if mysql.HasUnsignedFlag(ft.flag) {
		ctx.WriteKeyWord(" UNSIGNED")
	}
	if mysql.HasZerofillFlag(ft.flag) {
		ctx.WriteKeyWord(" ZEROFILL")
	}
	if mysql.HasBinaryFlag(ft.flag) && ft.charset != charset.CharsetBin {
		ctx.WriteKeyWord(" BINARY")
	}
	if IsTypeChar(ft.GetType()) || IsTypeBlob(ft.GetType()) {
		if ft.charset != "" && ft.charset != charset.CharsetBin {
			ctx.WriteKeyWord(" CHARACTER SET " + ft.charset)
		}
		if ft.collate != "" && ft.collate != charset.CharsetBin {
			ctx.WriteKeyWord(" COLLATE ")
			ctx.WritePlain(ft.collate)
		}
	}
	return nil
}
// RestoreAsCastType is used for write AST back to string.
// It emits the type as it would appear inside CAST(... AS <type>), which uses
// a different (smaller) grammar than a column definition: e.g. CHAR/BINARY
// instead of VARCHAR, SIGNED/UNSIGNED instead of BIGINT.
func (ft *FieldType) RestoreAsCastType(ctx *format.RestoreCtx, explicitCharset bool) {
	switch ft.tp {
	case mysql.TypeVarString, mysql.TypeString:
		skipWriteBinary := false
		// A binary charset/collation pair restores as BINARY rather than CHAR.
		if ft.charset == charset.CharsetBin && ft.collate == charset.CollationBin {
			ctx.WriteKeyWord("BINARY")
			skipWriteBinary = true
		} else {
			ctx.WriteKeyWord("CHAR")
		}
		if ft.flen != UnspecifiedLength {
			ctx.WritePlainf("(%d)", ft.flen)
		}
		// The BINARY attribute and CHARSET clause are only written when the
		// cast had an explicit charset.
		if !explicitCharset {
			break
		}
		if !skipWriteBinary && ft.flag&mysql.BinaryFlag != 0 {
			ctx.WriteKeyWord(" BINARY")
		}
		if ft.charset != charset.CharsetBin && ft.charset != mysql.DefaultCharset {
			ctx.WriteKeyWord(" CHARSET ")
			ctx.WriteKeyWord(ft.charset)
		}
	case mysql.TypeDate:
		ctx.WriteKeyWord("DATE")
	case mysql.TypeDatetime:
		ctx.WriteKeyWord("DATETIME")
		if ft.decimal > 0 {
			ctx.WritePlainf("(%d)", ft.decimal)
		}
	case mysql.TypeNewDecimal:
		ctx.WriteKeyWord("DECIMAL")
		if ft.flen > 0 && ft.decimal > 0 {
			ctx.WritePlainf("(%d, %d)", ft.flen, ft.decimal)
		} else if ft.flen > 0 {
			ctx.WritePlainf("(%d)", ft.flen)
		}
	case mysql.TypeDuration:
		ctx.WriteKeyWord("TIME")
		if ft.decimal > 0 {
			ctx.WritePlainf("(%d)", ft.decimal)
		}
	case mysql.TypeLonglong:
		// Integer casts are spelled SIGNED/UNSIGNED, not BIGINT.
		if ft.flag&mysql.UnsignedFlag != 0 {
			ctx.WriteKeyWord("UNSIGNED")
		} else {
			ctx.WriteKeyWord("SIGNED")
		}
	case mysql.TypeJSON:
		ctx.WriteKeyWord("JSON")
	case mysql.TypeDouble:
		ctx.WriteKeyWord("DOUBLE")
	case mysql.TypeFloat:
		ctx.WriteKeyWord("FLOAT")
	case mysql.TypeYear:
		ctx.WriteKeyWord("YEAR")
	}
	// Array columns carry a trailing ARRAY keyword.
	if ft.array {
		ctx.WritePlain(" ")
		ctx.WriteKeyWord("ARRAY")
	}
}
// FormatAsCastType is used for write AST back to string.
// It renders the cast-type text via RestoreAsCastType with the default
// restore flags and writes the result to w.
func (ft *FieldType) FormatAsCastType(w io.Writer, explicitCharset bool) {
	var sb strings.Builder
	restoreCtx := format.NewRestoreCtx(format.DefaultRestoreFlags, &sb)
	ft.RestoreAsCastType(restoreCtx, explicitCharset)
	fmt.Fprint(w, sb.String())
}
// VarStorageLen indicates this column is a variable length column.
const VarStorageLen = -1
// StorageLength is the length of stored value for the type.
// Fixed-width types return 8 bytes; DECIMAL uses the MySQL packed-decimal
// layout; everything else is variable-length (VarStorageLen).
func (ft *FieldType) StorageLength() int {
	switch ft.GetType() {
	case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong,
		mysql.TypeLonglong, mysql.TypeDouble, mysql.TypeFloat, mysql.TypeYear, mysql.TypeDuration,
		mysql.TypeDate, mysql.TypeDatetime, mysql.TypeTimestamp, mysql.TypeEnum, mysql.TypeSet,
		mysql.TypeBit:
		// This may not be the accurate length, because we may encode them as varint.
		return 8
	case mysql.TypeNewDecimal:
		// Packed decimal: whole words of digitsPerWord digits cost wordSize
		// bytes; leftover digits cost dig2bytes[...] bytes, for both the
		// integer and fractional parts.
		precision, frac := ft.flen-ft.decimal, ft.decimal
		return precision/digitsPerWord*wordSize + dig2bytes[precision%digitsPerWord] + frac/digitsPerWord*wordSize + dig2bytes[frac%digitsPerWord]
	default:
		return VarStorageLen
	}
}
// HasCharset indicates if a COLUMN has an associated charset. Returning false here prevents some information
// statements(like `SHOW CREATE TABLE`) from attaching a CHARACTER SET clause to the column.
// String types with the binary flag set (e.g. VARBINARY/BLOB) have no charset;
// ENUM and SET always do.
func HasCharset(ft *FieldType) bool {
	switch ft.GetType() {
	case mysql.TypeVarchar, mysql.TypeString, mysql.TypeVarString, mysql.TypeBlob,
		mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
		return !mysql.HasBinaryFlag(ft.flag)
	case mysql.TypeEnum, mysql.TypeSet:
		return true
	}
	return false
}
// jsonFieldType mirrors FieldType with exported fields so the default
// encoding/json machinery can (de)serialize it; FieldType's own fields are
// unexported and would otherwise be skipped.
type jsonFieldType struct {
	Tp               byte
	Flag             uint
	Flen             int
	Decimal          int
	Charset          string
	Collate          string
	Elems            []string
	ElemsIsBinaryLit []bool
	Array            bool
}
// UnmarshalJSON implements the json.Unmarshaler interface.
// It decodes into the exported jsonFieldType mirror and, only on success,
// copies every field back into the receiver.
func (ft *FieldType) UnmarshalJSON(data []byte) error {
	var r jsonFieldType
	err := json.Unmarshal(data, &r)
	if err == nil {
		ft.tp = r.Tp
		ft.flag = r.Flag
		ft.flen = r.Flen
		ft.decimal = r.Decimal
		ft.charset = r.Charset
		ft.collate = r.Collate
		ft.elems = r.Elems
		ft.elemsIsBinaryLit = r.ElemsIsBinaryLit
		ft.array = r.Array
	}
	return err
}
// MarshalJSON marshals the FieldType to JSON by copying its unexported
// fields into the exported jsonFieldType mirror.
func (ft *FieldType) MarshalJSON() ([]byte, error) {
	var r jsonFieldType
	r.Tp = ft.tp
	r.Flag = ft.flag
	r.Flen = ft.flen
	r.Decimal = ft.decimal
	r.Charset = ft.charset
	r.Collate = ft.collate
	r.Elems = ft.elems
	r.ElemsIsBinaryLit = ft.elemsIsBinaryLit
	r.Array = ft.array
	return json.Marshal(r)
}
// emptyFieldTypeSize is the shallow size of a FieldType struct itself.
const emptyFieldTypeSize = int64(unsafe.Sizeof(FieldType{}))
// MemoryUsage return the memory usage of FieldType
// (struct size plus string contents, slice headers/capacity and element bytes).
func (ft *FieldType) MemoryUsage() (sum int64) {
	if ft == nil {
		return
	}
	sum = emptyFieldTypeSize + int64(len(ft.charset)+len(ft.collate)) + int64(cap(ft.elems))*int64(unsafe.Sizeof(*new(string))) +
		int64(cap(ft.elemsIsBinaryLit))*int64(unsafe.Sizeof(*new(bool)))
	// Add the byte length of each element's string data.
	for _, s := range ft.elems {
		sum += int64(len(s))
	}
	return
}
|
//go:build integration
// +build integration
package msgbuzz
import (
"os"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestRabbitMqClient_Publish is an integration test (see the "integration"
// build tag) exercising publish/consume round trips against a live RabbitMQ
// instance addressed by the RABBITMQ_URL environment variable.
func TestRabbitMqClient_Publish(t *testing.T) {
	t.Run("ShouldPublishMessageToTopic", func(t *testing.T) {
		// Init
		rabbitClient := NewRabbitMqClient(os.Getenv("RABBITMQ_URL"), 1)
		testTopicName := "msgbuzz.pubtest"
		actualMsgReceivedChan := make(chan []byte)
		// -- listen topic to check published message
		rabbitClient.On(testTopicName, "msgbuzz", func(confirm MessageConfirm, bytes []byte) error {
			actualMsgReceivedChan <- bytes
			return confirm.Ack()
		})
		go rabbitClient.StartConsuming()
		defer rabbitClient.Close()
		// -- wait for exchange and queue to be created
		// NOTE(review): sleep-based synchronization is inherently flaky;
		// consider a readiness signal from the client if one is available.
		time.Sleep(1 * time.Second)
		// Code under test
		sentMessage := []byte("some msg from msgbuzz")
		err := rabbitClient.Publish(testTopicName, sentMessage)
		// Expectations
		// -- ShouldPublishMessageToTopic
		require.NoError(t, err)
		// -- Should receive correct msg
		waitSec := 20
		select {
		case <-time.After(time.Duration(waitSec) * time.Second):
			t.Fatalf("Not receiving msg after %d seconds", waitSec)
		case actualMessageReceived := <-actualMsgReceivedChan:
			require.Equal(t, sentMessage, actualMessageReceived)
		}
	})
	t.Run("ShouldPublishMessageToTopicWithRoutingKeys", func(t *testing.T) {
		// Init
		rabbitClient := NewRabbitMqClient(os.Getenv("RABBITMQ_URL"), 1)
		testTopicName := "msgbuzz.pubtest.routing"
		actualMsgReceivedChan := make(chan []byte)
		routingKey := "routing_key"
		// Set up the topology manually so we can bind with an explicit
		// routing key instead of relying on the client's defaults.
		ch, err := rabbitClient.conn.Channel()
		require.NoError(t, err)
		defer ch.Close()
		// Declare a direct exchange
		err = ch.ExchangeDeclare(
			testTopicName,
			"direct",
			true,
			false,
			false,
			false,
			nil,
		)
		require.NoError(t, err)
		// Declare queue to the exchange
		q, err := ch.QueueDeclare(
			testTopicName,
			false,
			false,
			false,
			false,
			nil,
		)
		require.NoError(t, err)
		// Bind a queue to the exchange with the routing key
		err = ch.QueueBind(
			q.Name,
			routingKey,
			testTopicName,
			false,
			nil,
		)
		require.NoError(t, err)
		// Consume messages from the queue
		msgs, err := ch.Consume(
			q.Name,
			"",
			true,
			false,
			false,
			false,
			nil,
		)
		require.NoError(t, err)
		// -- listen topic to check published message
		go func() {
			for msg := range msgs {
				actualMsgReceivedChan <- msg.Body
			}
		}()
		defer rabbitClient.Close()
		// -- wait for exchange and queue to be created
		time.Sleep(3 * time.Second)
		// Code under test
		sentMessage := []byte("some msg from msgbuzz with routing keys")
		err = rabbitClient.Publish(testTopicName, sentMessage, WithRabbitMqRoutingKey(routingKey))
		// Expectations
		// -- ShouldPublishMessageToTopic
		require.NoError(t, err)
		// -- Should receive correct msg
		waitSec := 20
		select {
		case <-time.After(time.Duration(waitSec) * time.Second):
			t.Fatalf("Not receiving msg after %d seconds", waitSec)
		case actualMessageReceived := <-actualMsgReceivedChan:
			require.Equal(t, sentMessage, actualMessageReceived)
		}
	})
	t.Run("ShouldReconnectAndPublishToTopic_WhenDisconnectedFromRabbitMqServer", func(t *testing.T) {
		// Init
		err := StartRabbitMqServer()
		require.NoError(t, err)
		rabbitClient := NewRabbitMqClient(os.Getenv("RABBITMQ_URL"), 1)
		rabbitClient.SetRcStepTime(1)
		topicName := "msgbuzz.reconnect.test"
		consumerName := "msgbuzz"
		actualMsgSent := make(chan bool)
		// Code under test
		rabbitClient.On(topicName, consumerName, func(confirm MessageConfirm, bytes []byte) error {
			t.Logf("Receive message from topic %s", topicName)
			actualMsgSent <- true
			return confirm.Ack()
		})
		go rabbitClient.StartConsuming()
		defer rabbitClient.Close()
		// wait for exchange and queue to be created
		time.Sleep(500 * time.Millisecond)
		// restart RabbitMQ dummy server to force the client through its
		// reconnect path before the publish below.
		err = RestartRabbitMqServer()
		require.NoError(t, err)
		err = rabbitClient.Publish(topicName, []byte("Hi from msgbuzz"))
		// Expectations
		// -- Should publish message
		require.NoError(t, err)
		// -- Should receive message
		waitSec := 20
		select {
		case <-time.After(time.Duration(waitSec) * time.Second):
			t.Fatalf("Not receiving message after %d seconds", waitSec)
		case msgSent := <-actualMsgSent:
			require.True(t, msgSent)
		}
	})
}
|
package handlers
import (
"github.com/olivetree123/coco"
"github.com/olivetree123/river/pocket"
)
// AckHandler acknowledges a consumed message: it removes the message node
// from the consuming pool by its ID and hands it to the garbage list for
// cleanup, returning an empty API response.
// NOTE(review): Load presumably returns nil for an unknown msgID — confirm
// GarbageList.Push tolerates a nil node.
func AckHandler(c *coco.Coco) coco.Result {
	msgID := c.Params.ByName("msgID")
	node := pocket.ConsumingPool.Load(msgID)
	pocket.GarbageList.Push(node)
	return coco.APIResponse(nil)
}
|
package app
import (
"errors"
"github.com/go-chi/chi"
"github.com/go-chi/render"
"net/http"
)
// The list of error types returned from the user resource.
var (
	// ErrUserValidation signals that a submitted user payload failed validation.
	ErrUserValidation = errors.New("user validation error")
)
// UserStore defines the database operations required by UserResource.
type UserStore interface {
	// Get retrieves a user by its numeric ID.
	// NOTE(review): the method currently returns nothing; it presumably
	// should return the user and an error — confirm with implementers.
	Get(id int)
}
// UserResource implements the user management handler.
type UserResource struct {
	// Store provides persistence for user records.
	Store UserStore
}
// NewUserResource creates and returns a user resource backed by the given store.
func NewUserResource(store UserStore) *UserResource {
	return &UserResource{
		Store: store,
	}
}
// router builds the chi sub-router for the user resource, mounting the
// read endpoint at the resource root.
func (rs *UserResource) router() *chi.Mux {
	r := chi.NewRouter()
	r.Get("/", rs.get)
	return r
}
// get handles GET / and currently responds with a static greeting;
// it does not yet consult rs.Store.
func (rs *UserResource) get(w http.ResponseWriter, r *http.Request) {
	render.Respond(w, r, "welcome")
}
|
package _12_Integer_to_Roman
// intToRoman converts num to a Roman numeral string; it delegates to the
// greedy implementation.
func intToRoman(num int) string {
	return intToRomanGreedy(num)
}
// intToRomanGreedy converts num (expected range 1..3999) to its Roman numeral
// representation with the standard greedy algorithm: repeatedly subtract the
// largest value whose symbol still fits and emit that symbol.
func intToRomanGreedy(num int) string {
	var (
		values  = []int{1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1}
		symbols = []string{"M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"}
	)
	// Accumulate into a byte slice to avoid the quadratic cost of repeated
	// string concatenation; 16 bytes covers the longest result ("MMMDCCCLXXXVIII").
	out := make([]byte, 0, 16)
	for idx, value := range values {
		for value <= num {
			num -= value
			out = append(out, symbols[idx]...)
		}
	}
	return string(out)
}
|
package commands
import (
"fmt"
"github.com/luckywinds/rshell/options"
"github.com/luckywinds/rshell/pkg/checkers"
"github.com/luckywinds/rshell/pkg/crypt"
"github.com/luckywinds/rshell/types"
"strconv"
"strings"
)
// getGroupAuthbyHostinfo parses an ad-hoc host specification of the form
// "authname@host[:port]" and returns a temporary single-host group
// ("TEMPHOST") plus the resolved auth method. The port defaults to 22.
// It errors when the spec is malformed, the auth name is unknown, the host is
// not a valid IPv4 address, or the port is out of range.
func getGroupAuthbyHostinfo(hostinfo string) (types.Hostgroup, types.Auth, error) {
	if hostinfo == "" {
		return types.Hostgroup{}, types.Auth{}, fmt.Errorf("hostinfo[%s] empty", hostinfo)
	}
	// Split "authname@hostport" — exactly one '@' separator is required.
	authhost := strings.SplitN(hostinfo, "@", 2)
	if len(authhost) != 2 {
		return types.Hostgroup{}, types.Auth{}, fmt.Errorf("hostinfo[%s] auth wrong", hostinfo)
	}
	authname := authhost[0]
	hostport := strings.SplitN(authhost[1], ":", 2)
	host := hostport[0]
	port := 22
	if len(hostport) == 2 {
		var err error
		port, err = strconv.Atoi(hostport[1])
		if err != nil {
			return types.Hostgroup{}, types.Auth{}, fmt.Errorf("hostinfo[%s] port wrong", hostinfo)
		}
	}
	au, err := options.GetAuthByname(authname)
	if err != nil {
		return types.Hostgroup{}, types.Auth{}, fmt.Errorf("auth[%s] not found", authname)
	}
	// NOTE(review): IsIpv4 rejects hostnames, so only literal IPv4 targets
	// are accepted here — confirm that is intended.
	if !checkers.IsIpv4(host) || port < 0 || port > 65535 {
		return types.Hostgroup{}, types.Auth{}, fmt.Errorf("hostinfo[%s] host or port wrong", hostinfo)
	}
	// Wrap the single host in a synthetic group so callers can treat ad-hoc
	// hosts and configured groups uniformly.
	hg := types.Hostgroup{
		Groupname:  "TEMPHOST",
		Authmethod: authname,
		Sshport:    port,
		Hosts:      nil,
		Groups:     nil,
		Hostranges: nil,
		Ips:        []string{host},
	}
	return hg, au, nil
}
// getGroupAuthbyGroupname resolves a configured host group by name and the
// auth method it references. It errors when the group or auth method is
// unknown, or when the group resolves to an empty host list.
func getGroupAuthbyGroupname(groupname string) (types.Hostgroup, types.Auth, error) {
	if groupname == "" {
		return types.Hostgroup{}, types.Auth{}, fmt.Errorf("groupname[%s] empty", groupname)
	}
	hg, err := options.GetHostgroupByname(groupname)
	if err != nil {
		return types.Hostgroup{}, types.Auth{}, fmt.Errorf("group[%s] not found", groupname)
	}
	au, err := options.GetAuthByname(hg.Authmethod)
	if err != nil {
		return types.Hostgroup{}, types.Auth{}, fmt.Errorf("auth[%s] not found", hg.Authmethod)
	}
	if len(hg.Ips) == 0 {
		return types.Hostgroup{}, types.Auth{}, fmt.Errorf("hostgroup[%s] hosts empty", groupname)
	}
	return hg, au, nil
}
// getPlainPass decrypts an AES-encrypted password using the configuration's
// key material and returns the plaintext, or an error (with an empty string)
// when decryption fails.
func getPlainPass(pass string, cfg types.Cfg) (string, error) {
	plain, err := crypt.AesDecrypt(pass, cfg)
	if err == nil {
		return plain, nil
	}
	return "", err
}
|
package gaodeMap
// Response structures for the Gaode (AMap) driving-route API.
// StructRoute is the top-level response envelope.
type StructRoute struct {
	Status  string        `json:"status"`
	Message string        `json:"info"`
	Result  *StructResult `json:"route"`
}
// StructResult holds the origin/destination pair and candidate paths.
type StructResult struct {
	// NOTE(review): the json tag "orign" looks like a typo for "origin", but
	// it must match the upstream API field name — verify against a real
	// response before changing it.
	Origin      string          `json:"orign"`
	Destination string          `json:"destination"`
	Routes      []*StructRoutes `json:"paths"`
}
// StructRoutes describes one candidate path with its cost metrics and steps.
type StructRoutes struct {
	Distance string         `json:"distance"`
	Duration string         `json:"duration"`
	Toll     string         `json:"tolls"`
	Steps    []*StructSteps `json:"steps"`
}
// StructSteps carries the encoded polyline for one step of a path.
type StructSteps struct {
	Path string `json:"polyline"`
}
|
package main
import (
"fmt"
"sort"
"strings"
"github.com/Nerzal/gocloak/v13"
)
// Username identifies a user; in practice it holds the user's email address.
type Username string
// User is the definition of an user in excel state
type User struct {
	niveau            string   // habilitation level ("a", "b", ...)
	email             Username // doubles as the Keycloak username
	prenom            string   // first name
	nom               string   // last name
	segment           string
	fonction          string
	employeur         string
	goup              string // group path attribute
	scope             []string
	accesGeographique string // geographic access role, added for levels a/b
	boards            []string
	taskforces        []string
}
// Users is the collection of wanted users
type Users map[Username]User
// habilitations maps a habilitation level to its base set of roles.
var habilitations = CompositeRoles{
	"a": []string{"bdf", "detection", "dgefp", "pge", "score", "urssaf"},
	"b": []string{"detection", "dgefp", "pge", "score"},
}
// getRoles computes the full role set for a user from its habilitation level,
// geographic access and explicit scope entries.
// NOTE(review): habilitations is keyed with lowercase levels while the checks
// below use EqualFold — an uppercase niveau would miss the base lookup yet
// still enter the EqualFold branches; confirm input is normalized upstream.
func (user User) getRoles() Roles {
	var roles Roles
	if user.niveau == "" {
		// TODO should return MisconfiguredUserError
	}
	roles = habilitations[user.niveau]
	if strings.EqualFold("a", user.niveau) {
		roles.add("urssaf", "dgefp", "bdf")
	}
	if strings.EqualFold("b", user.niveau) {
		roles.add("dgefp")
	}
	if strings.EqualFold("a", user.niveau) || strings.EqualFold("b", user.niveau) {
		roles.add("score", "detection", "pge")
		if user.accesGeographique != "" {
			roles.add(user.accesGeographique)
		}
	}
	// A scope of exactly one empty string means "no scope"; skip it.
	if !(len(user.scope) == 1 && user.scope[0] == "") {
		roles.add(user.scope...)
	}
	return roles
}
// GetUser resolves an existing user from its username using a
// case-insensitive comparison, or returns an error when the username is
// absent from the Keycloak context.
func (kc KeycloakContext) GetUser(username Username) (gocloak.User, error) {
	for _, u := range kc.Users {
		if u != nil && u.Username != nil && strings.EqualFold(*u.Username, string(username)) {
			return *u, nil
		}
	}
	return gocloak.User{},
		fmt.Errorf(
			"l'utilisateur '%s' n'existe pas dans le contexte Keycloak",
			username,
		)
}
// Compare reconciles the wanted users against the Keycloak context and
// returns, in order: users missing from Keycloak, obsolete enabled users to
// disable, existing-but-disabled users to re-enable, and current users.
func (users Users) Compare(kc KeycloakContext) ([]gocloak.User, []gocloak.User, []gocloak.User, []gocloak.User) {
	var missing []User
	var enable []gocloak.User
	var obsolete []gocloak.User
	var current []gocloak.User
	for _, u := range users {
		kcu, err := kc.GetUser(u.email)
		if err != nil {
			// Not found in Keycloak: must be created.
			missing = append(missing, u)
		}
		if err == nil && !*kcu.Enabled {
			// Exists but disabled: must be re-enabled.
			enable = append(enable, kcu)
		}
	}
	for _, kcu := range kc.Users {
		// NOTE(review): this lookup lowercases the Keycloak username but map
		// keys are used as-is — confirm Users keys are always lowercase.
		if _, ok := users[Username(strings.ToLower(*kcu.Username))]; !ok {
			if *kcu.Enabled {
				obsolete = append(obsolete, *kcu)
			}
		} else {
			current = append(current, *kcu)
		}
	}
	return toGocloakUsers(missing), obsolete, enable, current
}
// toGocloakUsers maps every User to its gocloak.User representation.
// It returns nil for an empty input, matching the previous behavior.
func toGocloakUsers(users []User) []gocloak.User {
	if len(users) == 0 {
		return nil
	}
	// Pre-size the result to avoid repeated append growth.
	u := make([]gocloak.User, 0, len(users))
	for _, user := range users {
		u = append(u, user.ToGocloakUser())
	}
	return u
}
// ToGocloakUser creates a new gocloak.User object from User specification.
// The email is used as both username and email, the account is created
// enabled with a verified email, and optional attributes (goup_path, segment)
// are only set when non-empty.
func (user User) ToGocloakUser() gocloak.User {
	t := true
	attributes := make(map[string][]string)
	if user.goup != "" {
		attributes["goup_path"] = []string{user.goup}
	}
	attributes["fonction"] = []string{user.fonction}
	attributes["employeur"] = []string{user.employeur}
	if user.segment != "" {
		attributes["segment"] = []string{user.segment}
	}
	// Take the address of a local so the returned struct does not point into
	// the receiver.
	email := string(user.email)
	return gocloak.User{
		Username:      &email,
		Email:         &email,
		EmailVerified: &t,
		FirstName:     &user.prenom,
		LastName:      &user.nom,
		Enabled:       &t,
		Attributes:    &attributes,
	}
}
// compareAttributes reports whether the two attribute maps hold exactly the
// same keys with the same multisets of values (value order is ignored).
// A nil map only equals another nil map.
// Unlike the previous version, values are sorted on copies so the caller's
// slices are never mutated.
func compareAttributes(a *map[string][]string, b *map[string][]string) bool {
	if a == nil && b == nil {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	// Every key of b must exist in a; combined with the loop below this
	// guarantees the two key sets are identical.
	for k := range *b {
		if _, ok := (*a)[k]; !ok {
			return false
		}
	}
	for k, attribA := range *a {
		attribB, ok := (*b)[k]
		if !ok {
			return false
		}
		// Sort copies, not the originals, to keep this function side-effect free.
		sortedA := append([]string(nil), attribA...)
		sortedB := append([]string(nil), attribB...)
		sort.Strings(sortedA)
		sort.Strings(sortedB)
		if strings.Join(sortedA, "\t") != strings.Join(sortedB, "\t") {
			return false
		}
	}
	return true
}
|
package app
import (
	"github.com/RichardKnop/machinery/v1"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/casbin/casbin/v2"
	gormadapter "github.com/casbin/gorm-adapter/v2"
	"github.com/jinzhu/gorm"
	"github.com/qor/media"
	"github.com/qor/oss"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/viper" // json unmarshall primarily
	"gitlab.com/reenue/graphql-server/jobs"
)
// App aggregates every shared dependency of the application: persistence,
// authorization, background queue, object storage and domain services.
type App struct {
	AWS         *session.Session
	DB          *gorm.DB // DB Connection
	E           *casbin.Enforcer // Enforcer
	Key         string // App Key
	Mailservice string
	Queue       *machinery.Server
	Storage     oss.StorageInterface
	Services    *Services
}
// New wires up and returns the application container: database, AWS session,
// casbin enforcer (backed by a gorm adapter), media callbacks, logging,
// domain services, job queue and object storage.
// NOTE(review): adapter/enforcer errors are only logged, so a nil enforcer
// may propagate into the App — confirm downstream code tolerates that.
func New() *App {
	db := constructDB()
	aws := NewAWS()
	a, err := gormadapter.NewAdapterByDB(db)
	if err != nil {
		log.Error(err)
	}
	e, err := casbin.NewEnforcer("config/enforcer.conf", a)
	if err != nil {
		log.Error(err)
	}
	// Register qor/media lifecycle hooks on the DB connection.
	media.RegisterCallbacks(db)
	logger()
	services := getServices(db, e)
	app := &App{
		AWS:         aws,
		DB:          db,
		Key:         viper.GetString("app_key"),
		E:           e,
		Mailservice: viper.GetString("mail_service"),
		Queue:       jobs.NewQueue(db, services.Forecast),
		Storage:     NewStorage(aws),
		Services:    services,
	}
	return app
}
|
package handlers
import (
"net/http"
"github.com/cloudfoundry-incubator/notifications/metrics"
"github.com/cloudfoundry-incubator/notifications/models"
"github.com/cloudfoundry-incubator/notifications/web/params"
"github.com/cloudfoundry-incubator/notifications/web/services"
"github.com/dgrijalva/jwt-go"
"github.com/ryanmoran/stack"
)
// RegisterNotifications handles client registration requests, delegating
// persistence to a registrar and error rendering to an error writer.
type RegisterNotifications struct {
	registrar   services.RegistrarInterface
	errorWriter ErrorWriterInterface
}
// NewRegisterNotifications builds the handler from its two collaborators.
func NewRegisterNotifications(registrar services.RegistrarInterface, errorWriter ErrorWriterInterface) RegisterNotifications {
	return RegisterNotifications{
		registrar:   registrar,
		errorWriter: errorWriter,
	}
}
// ServeHTTP runs the registration flow on a fresh database connection and
// emits a counter metric for every request, regardless of outcome.
func (handler RegisterNotifications) ServeHTTP(w http.ResponseWriter, req *http.Request, context stack.Context) {
	handler.Execute(w, req, models.Database().Connection(), context)
	metrics.NewMetric("counter", map[string]interface{}{
		"name": "notifications.web.registration",
	}).Log()
}
// Execute parses and validates the registration payload, then registers the
// client and its notification kinds inside a single transaction. When the
// payload explicitly lists kinds, previously-registered kinds not in the list
// are pruned. Any failure rolls the transaction back and is written to the
// response via the error writer.
func (handler RegisterNotifications) Execute(w http.ResponseWriter, req *http.Request, connection models.ConnectionInterface, context stack.Context) {
	parameters, err := params.NewRegistration(req.Body)
	if err != nil {
		handler.errorWriter.Write(w, err)
		return
	}
	err = parameters.Validate()
	if err != nil {
		handler.errorWriter.Write(w, err)
		return
	}
	// NOTE(review): Begin's result is ignored; a failed begin would surface
	// later as odd transaction errors — confirm this is acceptable.
	transaction := connection.Transaction()
	transaction.Begin()
	// The middleware stack is expected to have stored a parsed JWT under
	// "token"; these type assertions panic if that contract is broken.
	token := context.Get("token").(*jwt.Token)
	clientID := token.Claims["client_id"].(string)
	client := models.Client{
		ID:          clientID,
		Description: parameters.SourceDescription,
	}
	// Stamp every kind with the owning client before persisting.
	kinds := []models.Kind{}
	for _, kind := range parameters.Kinds {
		kind.ClientID = client.ID
		kinds = append(kinds, kind)
	}
	err = handler.registrar.Register(transaction, client, kinds)
	if err != nil {
		transaction.Rollback()
		handler.errorWriter.Write(w, err)
		return
	}
	if parameters.IncludesKinds {
		err = handler.registrar.Prune(transaction, client, kinds)
		if err != nil {
			transaction.Rollback()
			handler.errorWriter.Write(w, err)
			return
		}
	}
	transaction.Commit()
}
|
package main
import (
"fmt"
"godistributed-rabbitmq/coordinator"
_ "godistributed-rabbitmq/storage"
"log"
)
// Package-level consumers kept alive for the lifetime of the process.
var dbConsumer *coordinator.StorageConsumer
var webConsumer *coordinator.WebappConsumer
// main wires the aggregator to its storage and webapp consumers, starts the
// sensor listener in the background, and blocks on stdin so the process
// stays alive until the user presses Enter.
func main() {
	log.Println("Starting sensor listener...")
	aggregator := coordinator.NewAggregator()
	dbConsumer = coordinator.NewStorageConsumer(aggregator)
	webConsumer = coordinator.NewWebappConsumer(aggregator)
	listener := coordinator.NewListener(aggregator)
	defer listener.Stop()
	go listener.Start()
	// Block until any input arrives; acts as a crude shutdown gate.
	var input string
	fmt.Scanln(&input)
}
|
package db
import "gorm.io/gorm"
// ApiLog stores a single request log entry.
type ApiLog struct {
	gorm.Model
	Api     string // requested API path
	Status  string // response status
	Latency string // request latency
	Method  string // HTTP method
}
// ApiCount stores the number of calls per service endpoint.
type ApiCount struct {
	gorm.Model
	Api    string // API path being counted
	Count  uint   // accumulated call count
	Method string // HTTP method
}
|
package galaxy
import (
"../basic"
"math"
"github.com/lucasb-eyer/go-colorful"
)
// star is a point mass in the simulation. Current/prev positions implicitly
// encode velocity (see accelerate); force accumulates per step.
type star struct {
	Current *basic.Point // current position
	prev    *basic.Point // previous position; Current-prev is the velocity step
	mass    float64
	force   *basic.Point // accumulated force, consumed each accelerate call
	number  int
}
// VertexCount is the number of triangle-fan segments used to render a star.
const VertexCount = 10
// newStar builds a star at position p with initial velocity v: the previous
// position is back-projected one step (p - v) so the first integration step
// reproduces v.
func newStar(p *basic.Point, v *basic.Point, mass float64, n int) *star {
	return &star{
		Current: p,
		prev:    p.Add(v.Mult(-1)),
		mass:    mass,
		force:   basic.Zero(),
		number:  n,
	}
}
// accelerate advances the star one time step using position-Verlet style
// integration: next = 2*Current - prev + force*delta^2/mass. The Z
// coordinate is clamped to 0 (2D simulation) and the accumulated force is
// reset for the next step.
func (s *star) accelerate(delta float64) {
	next := s.Current.Add(s.Current.Sub(s.prev)).Add(s.force.Mult(delta*delta/s.mass))
	next.Z = 0
	s.prev = s.Current
	s.Current = next
	s.force = basic.Zero()
}
// color derives an RGBA color from the star's mass: the hue varies inversely
// with sqrt(mass) at full saturation and value; alpha is always 1.
func (s *star) color() (float32, float32, float32, float32) {
	c := colorful.Hsv(240.0/0.50001*1.0/math.Sqrt(float64(s.mass)), 1.0, 1.0)
	return float32(c.R), float32(c.G), float32(c.B), 1.0
	//return 1.0, 1.0, float32(1.0/s.mass), 1.0
}
// array flattens the star into VertexCount triangle-fan triangles of
// 7 floats per vertex (x, y, z, r, g, b, a), each triangle spanning the
// center and two successive points on a circle of radius r around it.
func (s *star) array() []float32 {
	// The disc radius and the star's color are loop-invariant; computing the
	// color once avoids three HSV conversions per iteration.
	const r = 0.01
	cr, cg, cb, ca := s.color()
	array := make([]float32, (VertexCount*3)*7)
	for i := 0; i < VertexCount; i++ {
		theta := math.Pi * 2.0 * float64(i) / VertexCount
		// Vertex 0: the star's center.
		array[(i*3)*7+0], array[(i*3)*7+1], array[(i*3)*7+2] = s.Current.Elements()
		array[(i*3)*7+3], array[(i*3)*7+4], array[(i*3)*7+5], array[(i*3)*7+6] = cr, cg, cb, ca
		// Vertex 1: point on the circle at angle theta.
		array[(i*3+1)*7+0], array[(i*3+1)*7+1], array[(i*3+1)*7+2] = s.Current.Add(
			&basic.Point{
				X: r * math.Cos(theta),
				Y: r * math.Sin(theta),
			}).Elements()
		array[(i*3+1)*7+3], array[(i*3+1)*7+4], array[(i*3+1)*7+5], array[(i*3+1)*7+6] = cr, cg, cb, ca
		// Vertex 2: point on the circle at the next angle, closing the wedge.
		theta2 := math.Pi * 2.0 * float64(i+1) / VertexCount
		array[(i*3+2)*7+0], array[(i*3+2)*7+1], array[(i*3+2)*7+2] = s.Current.Add(
			&basic.Point{
				X: r * math.Cos(theta2),
				Y: r * math.Sin(theta2),
			}).Elements()
		array[(i*3+2)*7+3], array[(i*3+2)*7+4], array[(i*3+2)*7+5], array[(i*3+2)*7+6] = cr, cg, cb, ca
	}
	return array
}
|
package main
import (
"fmt"
"net/http"
)
func index1_handler(w http.ResponseWriter, r *http.Request) {
//we can write anything like html in this
fmt.Fprintf(w, "<h1 align=\"center\" style=\"color:red\">This is web</h1>")
fmt.Fprintf(w, "<p align=\"center\">Making different text appear</p>"+
"<p align=\"center\">Making different text appear</p>"+
"<p align=\"center\">Making different text appear</p>"+
"moving up %s", "<strong>Best</strong>")
}
// main registers the root handler and serves HTTP on :8000.
func main() {
	http.HandleFunc("/", index1_handler)
	// ListenAndServe always returns a non-nil error once the server stops;
	// it was previously discarded silently.
	if err := http.ListenAndServe(":8000", nil); err != nil {
		fmt.Println(err)
	}
}
|
package auth0
import (
"bytes"
"context"
"errors"
"github.com/jpurdie/authapi"
"github.com/jpurdie/authapi/pkg/utl/redis"
"github.com/segmentio/encoding/json"
"log"
"net/http"
"os"
"strings"
"time"
)
// ctx is the background context used for all Redis operations in this package.
var ctx = context.Background()

// Sentinel errors returned by the Auth0 management helpers below.
var (
	ErrUnableToReachAuth0 = errors.New("unable to reach authentication service")
	ErrUserAlreadyExists  = errors.New("user already exists")
	ErrUnableToCreateUser = errors.New("unable to create user")
)

// accessTokenResp mirrors the JSON body of Auth0's /oauth/token response.
type accessTokenResp struct {
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`
	IDToken      string `json:"id_token"`
	TokenType    string `json:"token_type"`
	Expires      string `json:"expires_in"` // lifetime in seconds, as sent by Auth0
}
// FetchAccessToken returns an Auth0 management-API access token.
//
// It first consults the Redis cache (key "auth0_access_token"); on a miss it
// performs a client-credentials grant against Auth0's /oauth/token endpoint,
// caches the returned tokens for 30 seconds, and returns the access token.
func FetchAccessToken() (string, error) {
	rdb := redis.BuildRedisClient()
	accessToken, _ := rdb.Get(ctx, "auth0_access_token").Result()
	if accessToken != "" {
		log.Println("Access Token is present.")
		return accessToken, nil
	}
	log.Println("Access Token is not present. Going out to Auth0")
	domain := os.Getenv("AUTH0_DOMAIN")
	clientId := os.Getenv("AUTH0_CLIENT_ID")
	clientSecret := os.Getenv("AUTH0_CLIENT_SECRET")
	url := "https://" + domain + "/oauth/token"
	audience := "https://" + domain + "/api/v2/"
	// NOTE(review): the body is built by string concatenation, so env values
	// are not JSON-escaped; acceptable for Auth0 IDs/secrets, but a struct +
	// json encoding would be safer.
	payload := strings.NewReader("{\"client_id\":\"" + clientId + "\",\"client_secret\": \"" + clientSecret + "\",\"audience\":\"" + audience + "\",\"grant_type\":\"client_credentials\"}")
	req, err := http.NewRequest("POST", url, payload)
	if err != nil {
		return "", err
	}
	req.Header.Add("content-type", "application/json")
	// Was: res, _ := ... — a transport error left res nil and the StatusCode
	// access below panicked.
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", ErrUnableToReachAuth0
	}
	defer res.Body.Close()
	log.Println("HTTP Response Status:", res.StatusCode, http.StatusText(res.StatusCode))
	if res.StatusCode != 201 && res.StatusCode != 200 {
		return "", errors.New("Unable to get access token")
	}
	var atr accessTokenResp
	if err := json.NewDecoder(res.Body).Decode(&atr); err != nil {
		return "", err
	}
	// Was: string(len(atr.AccessToken)), which converts the int to a rune
	// (e.g. length 65 logged as "A"), not a decimal string.
	log.Println("Access token length", len(atr.AccessToken))
	// Cache every token field for 30s (Auth0's expires_in is in seconds and
	// much longer; 30s keeps the cache conservative). Each Set error is now
	// checked — previously earlier errors were overwritten and lost.
	const ttl = 30 * time.Second
	for key, val := range map[string]interface{}{
		"auth0_access_token":            atr.AccessToken,
		"auth0_refresh_token":           atr.RefreshToken,
		"auth0_id_token":                atr.IDToken,
		"auth0_access_token_expires_in": atr.Expires,
	} {
		if err := rdb.Set(ctx, key, val, ttl).Err(); err != nil {
			return "", err
		}
	}
	return atr.AccessToken, nil
}
// appMetaData is the (currently empty) app_metadata object sent to Auth0.
type appMetaData struct {
}

// createUserReq mirrors the JSON body of Auth0's POST /api/v2/users request.
type createUserReq struct {
	Email         string      `json:"email"`
	Blocked       bool        `json:"blocked"`
	EmailVerified bool        `json:"email_verified"`
	AppMetaData   appMetaData `json:"app_metadata"`
	GivenName     string      `json:"given_name"`
	FamilyName    string      `json:"family_name"`
	Name          string      `json:"name"`
	Nickname      string      `json:"nickname"`
	Connection    string      `json:"connection"` // Auth0 connection (user store) name
	Password      string      `json:"password"`
	VerifyEmail   bool        `json:"verify_email"`
}

// createUserResp holds the only field of Auth0's create-user response we use.
type createUserResp struct {
	UserId string `json:"user_id"`
}
// CreateUser provisions a new Auth0 user via the management API and returns
// the Auth0 user_id. It returns a *authapi.Error with code ECONFLICT
// (wrapping ErrUserAlreadyExists) on HTTP 409, and EINTERNAL (wrapping
// ErrUnableToCreateUser) for any other non-201 status.
func CreateUser(u authapi.User) (string, error) {
	log.Println("Inside CreateUser()")
	const op = "Auth0.CreateUser"
	accessToken, err := FetchAccessToken()
	if err != nil {
		return "", ErrUnableToReachAuth0
	}
	a := appMetaData{}
	userReq := createUserReq{
		Email:         u.Email,
		Blocked:       false,
		EmailVerified: false,
		AppMetaData:   a,
		GivenName:     u.FirstName,
		FamilyName:    u.LastName,
		Name:          u.FirstName + " " + u.LastName,
		Nickname:      u.FirstName,
		Connection:    os.Getenv("AUTH0_CONNECTION"),
		Password:      u.Password,
		VerifyEmail:   false,
	}
	// Was: time.Duration(10 * time.Second) — a redundant conversion.
	client := http.Client{
		Timeout: 10 * time.Second,
	}
	url := "https://" + os.Getenv("AUTH0_DOMAIN") + "/api/v2/users"
	b := new(bytes.Buffer)
	if err := json.NewEncoder(b).Encode(userReq); err != nil {
		return "", err
	}
	req, err := http.NewRequest("POST", url, b)
	// Check the error before touching req: on failure req is nil and the
	// Header.Add calls below would panic. Also return errors instead of
	// log.Fatal — library code must not kill the whole process.
	if err != nil {
		return "", err
	}
	req.Header.Add("content-type", "application/json")
	req.Header.Add("Authorization", "Bearer "+accessToken)
	res, err := client.Do(req)
	if err != nil {
		return "", ErrUnableToReachAuth0
	}
	defer res.Body.Close()
	if res.StatusCode == 409 {
		return "", &authapi.Error{
			Op:   op,
			Code: authapi.ECONFLICT,
			Err:  ErrUserAlreadyExists,
		}
	} else if res.StatusCode != 201 {
		return "", &authapi.Error{
			Op:   op,
			Code: authapi.EINTERNAL,
			Err:  ErrUnableToCreateUser,
		}
	}
	var cur createUserResp
	if err := json.NewDecoder(res.Body).Decode(&cur); err != nil {
		return "", err
	}
	return cur.UserId, nil
}
// verEmailReq mirrors the JSON body of Auth0's POST
// /api/v2/jobs/verification-email request.
type verEmailReq struct {
	ExternalID string `json:"user_id"`   // Auth0 user_id of the recipient
	ClientID   string `json:"client_id"` // application client ID
}

// verEmailResp mirrors the verification-email job object Auth0 returns.
type verEmailResp struct {
	Status  string `json:"status"`
	Type    string `json:"type"`
	Created string `json:"created_at"`
	ID      string `json:"id"`
}
// SendVerificationEmail asks Auth0 to enqueue a verification email for the
// user identified by u.ExternalID.
func SendVerificationEmail(u authapi.User) error {
	log.Println("Inside Send Verification")
	accessToken, err := FetchAccessToken()
	if err != nil {
		log.Println(err)
		return ErrUnableToReachAuth0
	}
	// Renamed from verEmailReq, which shadowed the type of the same name.
	reqBody := verEmailReq{
		ExternalID: u.ExternalID,
		ClientID:   os.Getenv("AUTH0_CLIENT_ID"),
	}
	url := "https://" + os.Getenv("AUTH0_DOMAIN") + "/api/v2/jobs/verification-email"
	b := new(bytes.Buffer)
	if err := json.NewEncoder(b).Encode(reqBody); err != nil {
		return err
	}
	req, err := http.NewRequest("POST", url, b)
	if err != nil {
		return err
	}
	req.Header.Add("content-type", "application/json")
	req.Header.Add("Authorization", "Bearer "+accessToken)
	// Was: res, _ := ... — a transport error left res nil and res.Body below
	// panicked.
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return ErrUnableToReachAuth0
	}
	defer res.Body.Close()
	if res.StatusCode != 201 {
		log.Println(res.StatusCode)
		return errors.New("Unable to send verification email")
	}
	// The job response is decoded but its fields are not used yet.
	var vResp verEmailResp
	if err := json.NewDecoder(res.Body).Decode(&vResp); err != nil {
		return err
	}
	log.Println("Inside Send Verification")
	return nil
}
// DeleteUser removes the user identified by u.ExternalID from Auth0.
// Auth0 answers a successful delete with HTTP 204.
func DeleteUser(u authapi.User) error {
	log.Println("Deleting Auth0 user " + u.ExternalID)
	accessToken, err := FetchAccessToken()
	if err != nil {
		return ErrUnableToReachAuth0
	}
	url := "https://" + os.Getenv("AUTH0_DOMAIN") + "/api/v2/users/" + u.ExternalID
	// A DELETE request needs no body; the empty buffer was superfluous.
	req, err := http.NewRequest("DELETE", url, nil)
	if err != nil {
		return err
	}
	req.Header.Add("content-type", "application/json")
	req.Header.Add("Authorization", "Bearer "+accessToken)
	// Was: res, _ := ... — a transport error left res nil and res.Body below
	// panicked.
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return ErrUnableToReachAuth0
	}
	defer res.Body.Close()
	if res.StatusCode != 204 {
		return errors.New("Unable to delete user from auth0 " + u.ExternalID)
	}
	// A 204 response has no body, so the verEmailResp decode that used to be
	// here was dead code and has been removed.
	return nil
}
|
package main
import "fmt"
// main demonstrates minNumberInRotateArray on a sample rotated array.
func main() {
	sample := []int{3, 4, 5, 1, 2}
	fmt.Println(minNumberInRotateArray(sample))
}
// minNumberInRotateArray returns the smallest element of rotateArray, which
// is expected to be a rotation of a non-decreasing array. An empty input
// yields 0 by convention.
//
// @param rotateArray int array
// @return int minimum element
func minNumberInRotateArray(rotateArray []int) int {
	if len(rotateArray) == 0 {
		return 0
	}
	// Plain linear scan: correct for any input, rotated or not.
	smallest := rotateArray[0]
	for _, v := range rotateArray[1:] {
		if v < smallest {
			smallest = v
		}
	}
	return smallest
}
|
package p
import (
"fmt"
"go/ast"
"go/parser"
"go/token"
"regexp"
"runtime"
"strings"
_ "unsafe"
"github.com/Kretech/xgo/astutil"
)
// VarName returns the source-level names of its arguments, e.g.
// VarName(a, b) => []string{"a", "b"}.
func VarName(args ...interface{}) []string {
	return varNameDepth(1, args...)
}
// varNameDepth resolves the source-level argument names of a call made `skip`
// frames above, by locating the caller's file and line via runtime, re-parsing
// that file, and finding the matching call expression on that line. Results
// are cached per "file:line@callee".
func varNameDepth(skip int, args ...interface{}) (c []string) {
	pc, _, _, _ := runtime.Caller(skip)
	userCalledFunc := runtime.FuncForPC(pc) // the function from which the user called varName
	// The user fetches variable names through this entry point. A direct
	// package call may be spelled several ways — p.F(), alias.F(), .F() — so
	// the file's imports must be parsed to decide which form to match.
	shouldCalledSel := userCalledFunc.Name()[:strings.LastIndex(userCalledFunc.Name(), `.`)]
	splitName := strings.Split(userCalledFunc.Name(), "/")
	shouldCalledExpr := splitName[len(splitName)-1]
	// Rough match for the method form dump.(*CliDumper).Dump,
	// i.e. the d := dumper(); d.Dump() case.
	if strings.Contains(shouldCalledExpr, ".(") {
		// A simple regexp to guess whether a struct layer is wrapped in between.
		matched, _ := regexp.MatchString(`\w+\.(.+)\.\w+`, shouldCalledExpr)
		if matched {
			// Hard to tell here whether the receiver prefix has the right
			// type; skip it for now. A special leading "." marks this
			// selector as not-to-be-processed.
			shouldCalledSel = ""
			shouldCalledExpr = shouldCalledExpr[strings.LastIndex(shouldCalledExpr, "."):]
		}
	}
	//fmt.Println("userCalledFunc =", userCalledFunc.Name())
	//fmt.Println("shouldCalledSel =", shouldCalledSel)
	//fmt.Println("shouldCalledExpr =", shouldCalledExpr)
	_, file, line, _ := runtime.Caller(skip + 1)
	//fmt.Printf("%v:%v\n", file, line)
	// TODO: when called more than once on one line, the column (also available
	// from runtime) is needed to disambiguate the calls.
	cacheKey := fmt.Sprintf("%s:%d@%s", file, line, shouldCalledExpr)
	return cacheGet(cacheKey, func() interface{} {
		r := []string{}
		found := false
		fset := token.NewFileSet()
		f, _ := parser.ParseFile(fset, file, nil, 0)
		// import alias: rewrite shouldCalledExpr when the caller imported this
		// package under a different name.
		aliasImport := make(map[string]string)
		for _, decl := range f.Decls {
			decl, ok := decl.(*ast.GenDecl)
			if !ok {
				continue
			}
			for _, spec := range decl.Specs {
				is, ok := spec.(*ast.ImportSpec)
				if !ok {
					continue
				}
				if is.Name != nil && strings.Trim(is.Path.Value, `""`) == shouldCalledSel {
					aliasImport[is.Name.Name] = shouldCalledSel
					shouldCalledExpr = is.Name.Name + "." + strings.Split(shouldCalledExpr, ".")[1]
					shouldCalledExpr = strings.TrimLeft(shouldCalledExpr, `.`)
				}
			}
		}
		ast.Inspect(f, func(node ast.Node) (goon bool) {
			if found {
				return false
			}
			if node == nil {
				return false
			}
			call, ok := node.(*ast.CallExpr)
			if !ok {
				return true
			}
			// isArgsNameFunc reports whether expr is a call to the function
			// we are looking for.
			isArgsNameFunc := func(expr *ast.CallExpr, shouldCallName string) bool {
				var equalCall = func(shouldCallName, currentName string) bool {
					if shouldCallName[0] == '.' {
						return strings.HasSuffix(currentName, shouldCallName)
					}
					return shouldCallName == currentName
				}
				if strings.Contains(shouldCallName, ".") {
					fn, ok := call.Fun.(*ast.SelectorExpr)
					if !ok {
						return false
					}
					// For multi-level access like a.b.c(), fn.X is itself a
					// SelectorExpr, which the Ident assertion below rejects.
					lf, ok := fn.X.(*ast.Ident)
					if !ok {
						return false
					}
					currentName := lf.Name + "." + fn.Sel.Name
					return equalCall(shouldCallName, currentName)
				} else {
					fn, ok := call.Fun.(*ast.Ident)
					if !ok {
						return false
					}
					return fn.Name == shouldCallName
				}
			}
			if fset.Position(call.End()).Line != line {
				return true
			}
			if !isArgsNameFunc(call, shouldCalledExpr) {
				return true
			}
			// Collect the source text of each argument expression.
			for _, arg := range call.Args {
				name := astutil.ExprString(arg)
				r = append(r, name)
			}
			found = true
			return false
		})
		return r
	}).([]string)
}
// Compact packs several variables into one map keyed by their source names:
// a,b := 1,2; Compact(a, b) => {"a":1, "b":2}.
// Modeled after http://php.net/manual/zh/function.compact.php
func Compact(args ...interface{}) (paramNames []string, paramAndValues map[string]interface{}) {
	return DepthCompact(1, args...)
}
// DepthCompact is Compact with an explicit number of call frames to skip.
// It is exposed to the dump package via the go:linkname directive below.
//
//go:linkname DepthCompact github.com/Kretech/xgo/dump.DepthCompact
func DepthCompact(depth int, args ...interface{}) (paramNames []string, paramAndValues map[string]interface{}) {
	paramNames = varNameDepth(depth+1, args...)
	// Because of the variable depth, len(paramNames) can be larger than
	// len(args), so map args onto the tail of paramNames in reverse order.
	length := len(args)
	paramAndValues = make(map[string]interface{}, length)
	for i := 1; i <= length; i++ {
		paramAndValues[paramNames[len(paramNames)-i]] = args[len(args)-i]
	}
	return
}
// m caches resolved name lists keyed by "file:line@callee".
var m = newRWMap()

// cacheGet returns the cached value for key, computing and storing it with
// backup on a miss.
// NOTE(review): the get-then-set pair is not atomic, so concurrent misses may
// each run backup; the last write wins. Presumably acceptable since backup is
// deterministic for a given key — confirm.
func cacheGet(key string, backup func() interface{}) interface{} {
	v := m.Get(key)
	if v == nil {
		v = backup()
		m.Set(key, v)
	}
	return v
}
|
package types
import (
"errors"
"fmt"
"testing"
sptypes "github.com/bluele/interchain-simple-packet/types"
"github.com/cosmos/cosmos-sdk/store"
sdk "github.com/cosmos/cosmos-sdk/types"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
"github.com/cosmos/cosmos-sdk/x/ibc/04-channel/exported"
channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/04-channel/types"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/libs/log"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
dbm "github.com/tendermint/tm-db"
)
// TestRouter verifies service-ID based dispatch: a succeeding handler, a
// failing handler, an unregistered service ID, and a packet/ACK round trip.
func TestRouter(t *testing.T) {
	require := require.New(t)
	r := New()
	r.HandleFunc(
		"/srv0",
		func(ctx sdk.Context, p PacketI, pd PacketDataI, sender PacketSender) (*sdk.Result, []byte, error) {
			return &sdk.Result{}, nil, nil
		},
		nil,
	)
	r.HandleFunc(
		"/srv1",
		func(ctx sdk.Context, p PacketI, pd PacketDataI, sender PacketSender) (*sdk.Result, []byte, error) {
			return nil, nil, errors.New("some error")
		},
		nil,
	)
	r.HandleFunc(
		"/srv3",
		func(ctx sdk.Context, p PacketI, pd PacketDataI, sender PacketSender) (*sdk.Result, []byte, error) {
			return &sdk.Result{}, []byte("ok"), nil
		},
		func(ctx sdk.Context, p PacketI, pd PacketDataI, ack []byte, sender PacketSender) (*sdk.Result, error) {
			return &sdk.Result{}, nil
		},
	)
	ctx := makeTestContext()
	// /srv0: handler succeeds.
	h0 := sptypes.Header{}
	SetServiceID(&h0, "/srv0")
	_, _, err := r.ServePacket(ctx, channeltypes.Packet{}, sptypes.PacketData{Header: h0}, &mockPacketSender{})
	require.NoError(err)
	// /srv1: handler returns an error.
	h1 := sptypes.Header{}
	SetServiceID(&h1, "/srv1")
	_, _, err = r.ServePacket(ctx, channeltypes.Packet{}, sptypes.PacketData{Header: h1}, &mockPacketSender{})
	require.Error(err)
	// not found handler
	h2 := sptypes.Header{}
	SetServiceID(&h2, "/srv2")
	_, _, err = r.ServePacket(ctx, channeltypes.Packet{}, sptypes.PacketData{Header: h2}, &mockPacketSender{})
	require.Error(err)
	// /srv3: the packet handler returns an ACK that ServeACK then accepts.
	h3 := sptypes.Header{}
	SetServiceID(&h3, "/srv3")
	_, ack, err := r.ServePacket(ctx, channeltypes.Packet{}, sptypes.PacketData{Header: h3}, &mockPacketSender{})
	require.NoError(err)
	_, err = r.ServeACK(ctx, channeltypes.Packet{}, sptypes.PacketData{Header: h3}, ack, &mockPacketSender{})
	require.NoError(err)
}
// TestMiddleware verifies that a packet middleware can both reject packets
// (here, by source channel) and wrap the PacketSender before the handler runs.
func TestMiddleware(t *testing.T) {
	require := require.New(t)
	// authMiddleware only admits packets arriving on channel "root" and wraps
	// the sender with a pass-through send hook.
	var authMiddleware = func(next PacketHandlerFunc) PacketHandlerFunc {
		return func(ctx sdk.Context, p PacketI, pd PacketDataI, sender PacketSender) (*sdk.Result, []byte, error) {
			if p.GetSourceChannel() != "root" {
				return nil, nil, fmt.Errorf("unexpected channel id '%v'", p.GetSourceChannel())
			}
			sender = NewSendingPacketHandler(
				sender,
				func(ctx sdk.Context, channelCap *capabilitytypes.Capability, packet exported.PacketI) (exported.PacketI, error) {
					return packet, nil
				},
			)
			return next(ctx, p, pd, sender)
		}
	}
	r := New()
	r.UsePacketMiddlewares(authMiddleware)
	r.HandleFunc(
		"/srv0",
		func(ctx sdk.Context, p PacketI, pd PacketDataI, sender PacketSender) (*sdk.Result, []byte, error) {
			return &sdk.Result{}, nil, nil
		},
		nil,
	)
	r.HandleFunc(
		"/srv1/send",
		func(ctx sdk.Context, p PacketI, pd PacketDataI, sender PacketSender) (*sdk.Result, []byte, error) {
			if err := sender.SendPacket(ctx, nil, nil); err != nil {
				return nil, nil, err
			}
			return &sdk.Result{}, nil, nil
		},
		nil,
	)
	ctx := makeTestContext()
	h0 := sptypes.Header{}
	SetServiceID(&h0, "/srv0")
	// Allowed channel passes; any other channel is rejected by the middleware.
	_, _, err := r.ServePacket(ctx, channeltypes.Packet{SourceChannel: "root"}, sptypes.PacketData{Header: h0}, &mockPacketSender{})
	require.NoError(err)
	_, _, err = r.ServePacket(ctx, channeltypes.Packet{SourceChannel: "user0"}, sptypes.PacketData{Header: h0}, &mockPacketSender{})
	require.Error(err)
	h1 := sptypes.Header{}
	SetServiceID(&h1, "/srv1/send")
	_, _, err = r.ServePacket(ctx, channeltypes.Packet{SourceChannel: "root"}, sptypes.PacketData{Header: h1}, &mockPacketSender{})
	require.NoError(err)
}
// mockPacketSender is a no-op PacketSender for tests.
type mockPacketSender struct{}

// Compile-time check that mockPacketSender satisfies PacketSender.
var _ PacketSender = (*mockPacketSender)(nil)

// SendPacket discards the packet and always succeeds.
func (s *mockPacketSender) SendPacket(
	ctx sdk.Context,
	channelCap *capabilitytypes.Capability,
	packet exported.PacketI,
) error {
	return nil
}
// makeTestContext builds an sdk.Context backed by an in-memory database and a
// no-op logger, suitable for unit tests.
func makeTestContext() sdk.Context {
	db := dbm.NewMemDB()
	cms := store.NewCommitMultiStore(db)
	return sdk.NewContext(cms, tmproto.Header{}, false, log.NewNopLogger())
}
|
// Copyright 2021 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
computepb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/compute/compute_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute"
)
// Server implements the gRPC interface for Reservation.
type ReservationServer struct{}

// ProtoToReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum converts a ReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum enum from its proto representation.
func ProtoToComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum(e computepb.ComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum) *compute.ReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum {
	// 0 is the proto "unspecified" value; represent it as nil in DCL.
	if e == 0 {
		return nil
	}
	if n, ok := computepb.ComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum_name[int32(e)]; ok {
		// Strip the proto enum type-name prefix to recover the bare DCL value.
		e := compute.ReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum(n[len("ComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum"):])
		return &e
	}
	return nil
}

// ProtoToReservationStatusEnum converts a ReservationStatusEnum enum from its proto representation.
func ProtoToComputeReservationStatusEnum(e computepb.ComputeReservationStatusEnum) *compute.ReservationStatusEnum {
	if e == 0 {
		return nil
	}
	if n, ok := computepb.ComputeReservationStatusEnum_name[int32(e)]; ok {
		e := compute.ReservationStatusEnum(n[len("ComputeReservationStatusEnum"):])
		return &e
	}
	return nil
}
// ProtoToReservationSpecificReservation converts a ReservationSpecificReservation resource from its proto representation.
func ProtoToComputeReservationSpecificReservation(p *computepb.ComputeReservationSpecificReservation) *compute.ReservationSpecificReservation {
	if p == nil {
		return nil
	}
	obj := &compute.ReservationSpecificReservation{
		InstanceProperties: ProtoToComputeReservationSpecificReservationInstanceProperties(p.GetInstanceProperties()),
		Count:              dcl.Int64OrNil(p.Count),
		InUseCount:         dcl.Int64OrNil(p.InUseCount),
	}
	return obj
}

// ProtoToReservationSpecificReservationInstanceProperties converts a ReservationSpecificReservationInstanceProperties resource from its proto representation.
func ProtoToComputeReservationSpecificReservationInstanceProperties(p *computepb.ComputeReservationSpecificReservationInstanceProperties) *compute.ReservationSpecificReservationInstanceProperties {
	if p == nil {
		return nil
	}
	obj := &compute.ReservationSpecificReservationInstanceProperties{
		MachineType:    dcl.StringOrNil(p.MachineType),
		MinCpuPlatform: dcl.StringOrNil(p.MinCpuPlatform),
	}
	// Repeated fields are converted element by element.
	for _, r := range p.GetGuestAccelerators() {
		obj.GuestAccelerators = append(obj.GuestAccelerators, *ProtoToComputeReservationSpecificReservationInstancePropertiesGuestAccelerators(r))
	}
	for _, r := range p.GetLocalSsds() {
		obj.LocalSsds = append(obj.LocalSsds, *ProtoToComputeReservationSpecificReservationInstancePropertiesLocalSsds(r))
	}
	return obj
}

// ProtoToReservationSpecificReservationInstancePropertiesGuestAccelerators converts a ReservationSpecificReservationInstancePropertiesGuestAccelerators resource from its proto representation.
func ProtoToComputeReservationSpecificReservationInstancePropertiesGuestAccelerators(p *computepb.ComputeReservationSpecificReservationInstancePropertiesGuestAccelerators) *compute.ReservationSpecificReservationInstancePropertiesGuestAccelerators {
	if p == nil {
		return nil
	}
	obj := &compute.ReservationSpecificReservationInstancePropertiesGuestAccelerators{
		AcceleratorType:  dcl.StringOrNil(p.AcceleratorType),
		AcceleratorCount: dcl.Int64OrNil(p.AcceleratorCount),
	}
	return obj
}

// ProtoToReservationSpecificReservationInstancePropertiesLocalSsds converts a ReservationSpecificReservationInstancePropertiesLocalSsds resource from its proto representation.
func ProtoToComputeReservationSpecificReservationInstancePropertiesLocalSsds(p *computepb.ComputeReservationSpecificReservationInstancePropertiesLocalSsds) *compute.ReservationSpecificReservationInstancePropertiesLocalSsds {
	if p == nil {
		return nil
	}
	obj := &compute.ReservationSpecificReservationInstancePropertiesLocalSsds{
		DiskSizeGb: dcl.Int64OrNil(p.DiskSizeGb),
		Interface:  ProtoToComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum(p.GetInterface()),
	}
	return obj
}
// ProtoToReservation converts a Reservation resource from its proto representation.
// Note: unlike the helpers above, this top-level converter has no nil guard
// on p.
func ProtoToReservation(p *computepb.ComputeReservation) *compute.Reservation {
	obj := &compute.Reservation{
		Id:                          dcl.Int64OrNil(p.Id),
		SelfLink:                    dcl.StringOrNil(p.SelfLink),
		Zone:                        dcl.StringOrNil(p.Zone),
		Description:                 dcl.StringOrNil(p.Description),
		Name:                        dcl.StringOrNil(p.Name),
		SpecificReservation:         ProtoToComputeReservationSpecificReservation(p.GetSpecificReservation()),
		Commitment:                  dcl.StringOrNil(p.Commitment),
		SpecificReservationRequired: dcl.Bool(p.SpecificReservationRequired),
		Status:                      ProtoToComputeReservationStatusEnum(p.GetStatus()),
		Project:                     dcl.StringOrNil(p.Project),
	}
	return obj
}
// ReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnumToProto converts a ReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum enum to its proto representation.
// nil maps to the proto zero ("unspecified") value.
func ComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnumToProto(e *compute.ReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum) computepb.ComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum {
	if e == nil {
		return computepb.ComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum(0)
	}
	if v, ok := computepb.ComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum_value["ReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum"+string(*e)]; ok {
		return computepb.ComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum(v)
	}
	return computepb.ComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum(0)
}

// ReservationStatusEnumToProto converts a ReservationStatusEnum enum to its proto representation.
func ComputeReservationStatusEnumToProto(e *compute.ReservationStatusEnum) computepb.ComputeReservationStatusEnum {
	if e == nil {
		return computepb.ComputeReservationStatusEnum(0)
	}
	if v, ok := computepb.ComputeReservationStatusEnum_value["ReservationStatusEnum"+string(*e)]; ok {
		return computepb.ComputeReservationStatusEnum(v)
	}
	return computepb.ComputeReservationStatusEnum(0)
}
// ReservationSpecificReservationToProto converts a ReservationSpecificReservation resource to its proto representation.
func ComputeReservationSpecificReservationToProto(o *compute.ReservationSpecificReservation) *computepb.ComputeReservationSpecificReservation {
	if o == nil {
		return nil
	}
	p := &computepb.ComputeReservationSpecificReservation{
		InstanceProperties: ComputeReservationSpecificReservationInstancePropertiesToProto(o.InstanceProperties),
		Count:              dcl.ValueOrEmptyInt64(o.Count),
		InUseCount:         dcl.ValueOrEmptyInt64(o.InUseCount),
	}
	return p
}

// ReservationSpecificReservationInstancePropertiesToProto converts a ReservationSpecificReservationInstanceProperties resource to its proto representation.
func ComputeReservationSpecificReservationInstancePropertiesToProto(o *compute.ReservationSpecificReservationInstanceProperties) *computepb.ComputeReservationSpecificReservationInstanceProperties {
	if o == nil {
		return nil
	}
	p := &computepb.ComputeReservationSpecificReservationInstanceProperties{
		MachineType:    dcl.ValueOrEmptyString(o.MachineType),
		MinCpuPlatform: dcl.ValueOrEmptyString(o.MinCpuPlatform),
	}
	// Taking &r of the loop variable is safe here: the callee reads the
	// fields and returns a freshly allocated proto before the next iteration.
	for _, r := range o.GuestAccelerators {
		p.GuestAccelerators = append(p.GuestAccelerators, ComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsToProto(&r))
	}
	for _, r := range o.LocalSsds {
		p.LocalSsds = append(p.LocalSsds, ComputeReservationSpecificReservationInstancePropertiesLocalSsdsToProto(&r))
	}
	return p
}

// ReservationSpecificReservationInstancePropertiesGuestAcceleratorsToProto converts a ReservationSpecificReservationInstancePropertiesGuestAccelerators resource to its proto representation.
func ComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsToProto(o *compute.ReservationSpecificReservationInstancePropertiesGuestAccelerators) *computepb.ComputeReservationSpecificReservationInstancePropertiesGuestAccelerators {
	if o == nil {
		return nil
	}
	p := &computepb.ComputeReservationSpecificReservationInstancePropertiesGuestAccelerators{
		AcceleratorType:  dcl.ValueOrEmptyString(o.AcceleratorType),
		AcceleratorCount: dcl.ValueOrEmptyInt64(o.AcceleratorCount),
	}
	return p
}

// ReservationSpecificReservationInstancePropertiesLocalSsdsToProto converts a ReservationSpecificReservationInstancePropertiesLocalSsds resource to its proto representation.
func ComputeReservationSpecificReservationInstancePropertiesLocalSsdsToProto(o *compute.ReservationSpecificReservationInstancePropertiesLocalSsds) *computepb.ComputeReservationSpecificReservationInstancePropertiesLocalSsds {
	if o == nil {
		return nil
	}
	p := &computepb.ComputeReservationSpecificReservationInstancePropertiesLocalSsds{
		DiskSizeGb: dcl.ValueOrEmptyInt64(o.DiskSizeGb),
		Interface:  ComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnumToProto(o.Interface),
	}
	return p
}
// ReservationToProto converts a Reservation resource to its proto representation.
func ReservationToProto(resource *compute.Reservation) *computepb.ComputeReservation {
	p := &computepb.ComputeReservation{
		Id:                          dcl.ValueOrEmptyInt64(resource.Id),
		SelfLink:                    dcl.ValueOrEmptyString(resource.SelfLink),
		Zone:                        dcl.ValueOrEmptyString(resource.Zone),
		Description:                 dcl.ValueOrEmptyString(resource.Description),
		Name:                        dcl.ValueOrEmptyString(resource.Name),
		SpecificReservation:         ComputeReservationSpecificReservationToProto(resource.SpecificReservation),
		Commitment:                  dcl.ValueOrEmptyString(resource.Commitment),
		SpecificReservationRequired: dcl.ValueOrEmptyBool(resource.SpecificReservationRequired),
		Status:                      ComputeReservationStatusEnumToProto(resource.Status),
		Project:                     dcl.ValueOrEmptyString(resource.Project),
	}
	return p
}
// ApplyReservation handles the gRPC request by passing it to the underlying Reservation Apply() method.
func (s *ReservationServer) applyReservation(ctx context.Context, c *compute.Client, request *computepb.ApplyComputeReservationRequest) (*computepb.ComputeReservation, error) {
	p := ProtoToReservation(request.GetResource())
	res, err := c.ApplyReservation(ctx, p)
	if err != nil {
		return nil, err
	}
	r := ReservationToProto(res)
	return r, nil
}

// ApplyReservation handles the gRPC request by passing it to the underlying Reservation Apply() method.
func (s *ReservationServer) ApplyComputeReservation(ctx context.Context, request *computepb.ApplyComputeReservationRequest) (*computepb.ComputeReservation, error) {
	cl, err := createConfigReservation(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return s.applyReservation(ctx, cl, request)
}

// DeleteReservation handles the gRPC request by passing it to the underlying Reservation Delete() method.
func (s *ReservationServer) DeleteComputeReservation(ctx context.Context, request *computepb.DeleteComputeReservationRequest) (*emptypb.Empty, error) {
	cl, err := createConfigReservation(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteReservation(ctx, ProtoToReservation(request.GetResource()))
}

// ListComputeReservation handles the gRPC request by passing it to the underlying ReservationList() method.
func (s *ReservationServer) ListComputeReservation(ctx context.Context, request *computepb.ListComputeReservationRequest) (*computepb.ListComputeReservationResponse, error) {
	cl, err := createConfigReservation(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListReservation(ctx, request.Project, request.Zone)
	if err != nil {
		return nil, err
	}
	var protos []*computepb.ComputeReservation
	for _, r := range resources.Items {
		rp := ReservationToProto(r)
		protos = append(protos, rp)
	}
	return &computepb.ListComputeReservationResponse{Items: protos}, nil
}

// createConfigReservation builds a DCL compute client authenticated with the
// given service-account credentials file.
func createConfigReservation(ctx context.Context, service_account_file string) (*compute.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file))
	return compute.NewClient(conf), nil
}
|
// SPDX-License-Identifier: Unlicense OR MIT
package headless
import "github.com/gop9/olt/gio/app/internal/gl"
/*
#cgo CFLAGS: -DGL_SILENCE_DEPRECATION -Werror -Wno-deprecated-declarations -fmodules -fobjc-arc -x objective-c
#include <CoreFoundation/CoreFoundation.h>
#include "headless_darwin.h"
*/
import "C"
// nsContext is a headless OpenGL context backed by a native macOS context
// object (held as a Core Foundation type reference).
type nsContext struct {
	c *gl.Functions
	ctx C.CFTypeRef // native context handle; 0 after Release
	prepared bool // whether gio_headless_prepareContext has run yet
}
// newContext creates a new headless native GL context.
func newContext() (context, error) {
	ctx := C.gio_headless_newContext()
	return &nsContext{ctx: ctx, c: new(gl.Functions)}, nil
}
// MakeCurrent makes the context current and lazily runs the one-time
// prepare step on first use.
func (c *nsContext) MakeCurrent() error {
	C.gio_headless_makeCurrentContext(c.ctx)
	if !c.prepared {
		C.gio_headless_prepareContext(c.ctx)
		c.prepared = true
	}
	return nil
}
// ReleaseCurrent clears the current-context binding.
func (c *nsContext) ReleaseCurrent() {
	C.gio_headless_clearCurrentContext(c.ctx)
}
// Functions returns the GL function table associated with this context.
func (c *nsContext) Functions() *gl.Functions {
	return c.c
}
// Release destroys the underlying native context, if any, and zeroes the
// handle so a second Release is a no-op. Receiver renamed from d to c for
// consistency with the other nsContext methods.
func (c *nsContext) Release() {
	if c.ctx != 0 {
		C.gio_headless_releaseContext(c.ctx)
		c.ctx = 0
	}
}
|
package main
import "fmt"
// Operasi Boolean
// main demonstrates the boolean AND operator: both scores must be >= 80.
func main() {
	ujian := 80
	absensi := 88
	lulus := ujian >= 80 && absensi >= 80
	fmt.Println(lulus)
}
|
package server
import (
"log"
"net/http"
)
// LogMiddleware returns handler decorated with log statement.
func LogMiddleware(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer log.Printf("[HTTP] %s %s", r.Method, r.RequestURI)
h.ServeHTTP(w, r)
})
}
|
package trie
import (
"fmt"
"github.com/openacid/low/bitmap"
)
// levelInfo records node count upto every level(inclusive).
// Slim has a slice []levelInfo to track node counts.
// These nodes count info helps to speed up in finding out the original position
// of a key in the creating key value array, with the help with another bitmap
// NodeTypeBM, in which a "1" indicates an inner node.
//
// The 0-th elt is always 0,0,0. An empty slim has only one level.
//
// The 1th elt describe root node in slim.
// The 1th elt is:
// - 1,0,1 if the slim has only one(must be a leaf) node.
// - 1,1,0 if the slim has more than one nodes.
//
//
// With a slim in the following structure:
//
// node-0(root)
// +--a--> node-1
// | +--x--> node-4(leaf)
// | `--y--> node-5(leaf)
// |
// +--b--> node-2(leaf)
// |
// `--c--> node-3
// +--u--> node-6(leaf)
// `--v--> node-7(leaf)
//
// The node count of every level is:
//
// [[0, 0, 0],
// [1, 1, 0],
// [4, 3, 1],
// [8, 2, 5],
// ]
//
// The NodeTypeBM is:
// 1 101 0000
//
// To find out the position of node-2(nodeId=2, level=2):
//
// rank0(NodeTypeBM, nodeId=2) - levels[2].leaf // the count of leaf at level 2.
// +rank0(NodeTypeBM, nodeId=6) - levels[3].leaf // the count of leaf at level 3.
//
// E.g., at every level, count the leaf nodes and sum them.
// When reaching a leaf, find the next inner node at this level(in our case
// node-3) and walks to its first child.
//
// Since 0.5.12
type levelInfo struct {
	// total number of nodes
	// number of inner nodes
	// number of leaf nodes
	// total = inner + leaf
	total, inner, leaf int32

	// cache of per-inner-node leaf counts at this level; see innerCache.
	cache []innerCache
}
// innerCache pairs an inner node id with a running leaf count, used to map a
// node id back to its position in the original key array.
type innerCache struct {
	nodeId int32
	// N.O. leaves from this node or from preceding node at the same level
	leafCount int32
}
// levelsStr renders one line per level in the form:
//     <i>: <total> = <inner> + <leaf>   <total'> = <inner'> + <leaf'>
// where the primed columns are per-level deltas against the previous level.
//
// Since 0.5.12
func levelsStr(l []levelInfo) []string {
	const lineFmt = "%2d: %8d =%8d + %-8d %8d =%8d + %-8d"
	out := make([]string, 0, len(l))
	out = append(out, " 0: total = inner + leaf total'= inner' + leaf'")
	for i := 1; i < len(l); i++ {
		cur, prev := l[i], l[i-1]
		line := fmt.Sprintf(lineFmt,
			i,
			cur.total, cur.inner, cur.leaf,
			cur.total-prev.total, cur.inner-prev.inner, cur.leaf-prev.leaf,
		)
		out = append(out, line)
	}
	return out
}
// initLevels builds the levelInfo slice.
//
// Starting from the root it repeatedly jumps to the left-most node of the
// next level, recording cumulative node/inner/leaf counts per level from
// NodeTypeBM (the inner-node bitmap) and Inners (the child bitmap).
//
// Since 0.5.12
func (st *SlimTrie) initLevels() {
	ns := st.inner
	// No inner-node bitmap: an empty slim, which has only the sentinel level.
	if ns.NodeTypeBM == nil {
		st.levels = []levelInfo{{0, 0, 0, nil}}
		return
	}
	st.levels = make([]levelInfo, 0)
	// Count of inner nodes = rank up to the last bit, plus the last bit itself.
	totalInner, b := bitmap.Rank64(ns.NodeTypeBM.Words, ns.NodeTypeBM.RankIndex, int32(len(ns.NodeTypeBM.Words)*64-1))
	totalInner += b
	// single leaf slim
	total := int32(1)
	if totalInner > 0 {
		var b int32
		// Total node count: presumably the number of children over all inner
		// nodes (+1 for the root) — derived from the Inners bitmap rank.
		total, b = bitmap.Rank128(ns.Inners.Words, ns.Inners.RankIndex, int32(len(ns.Inners.Words)*64-1))
		total += b + 1
	}
	// From root node, walks to the first node at next level, until there is no
	// inner node at next level.
	currId := int32(0)
	qr := &querySession{}
	for {
		// currId is the first node id at current level
		nextInnerIdx, _ := bitmap.Rank64(ns.NodeTypeBM.Words, ns.NodeTypeBM.RankIndex, currId)
		// update prev level
		st.levels = append(st.levels, levelInfo{total: currId, inner: nextInnerIdx, leaf: currId - nextInnerIdx})
		if nextInnerIdx == totalInner {
			// no more inner node at this level, this is the bottom level
			break
		}
		// Locate the first inner node at/after currId and jump to its first
		// child: that child is the first node of the next level.
		st.getIthInnerFrom(nextInnerIdx, qr)
		leftMostChild, _ := bitmap.Rank128(ns.Inners.Words, ns.Inners.RankIndex, qr.from)
		currId = leftMostChild + 1
	}
	st.levels = append(st.levels, levelInfo{total: total, inner: totalInner, leaf: total - totalInner})
}
|
package sdk
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"time"
rmTesting "github.com/brigadecore/brigade/sdk/v3/internal/restmachinery/testing" // nolint: lll
"github.com/stretchr/testify/require"
)
// TestNewLogsClient verifies the constructor returns the concrete
// *logsClient implementation with a correctly configured base client.
func TestNewLogsClient(t *testing.T) {
	client, ok := NewLogsClient(
		rmTesting.TestAPIAddress,
		rmTesting.TestAPIToken,
		nil,
	).(*logsClient)
	require.True(t, ok)
	rmTesting.RequireBaseClient(t, client.BaseClient)
}
// TestLogsClientStream exercises Stream against a stub server that emits a
// single server-sent log entry. Two cases are covered: a nil selector
// (only the "follow" query parameter must be sent) and a non-nil selector
// (job/container must be forwarded as query parameters).
func TestLogsClientStream(t *testing.T) {
	const testEventID = "12345"
	testSelector := LogsSelector{
		Job: "farpoint",
		Container: "enterprise",
	}
	testOpts := LogStreamOptions{
		Follow: true,
	}
	testLogEntry := LogEntry{
		Message: "Captain's log, Stardate 41153.7. Our destination is Planet " +
			"Deneb IV, beyond which lies the great unexplored mass of the galaxy...",
	}
	t.Run("nil logs selector", func(t *testing.T) {
		server := httptest.NewServer(
			http.HandlerFunc(
				func(w http.ResponseWriter, r *http.Request) {
					require.Equal(t, http.MethodGet, r.Method)
					require.Equal(
						t,
						fmt.Sprintf("/v2/events/%s/logs", testEventID),
						r.URL.Path,
					)
					// With no selector, "follow" must be the ONLY query param.
					require.Equal(
						t,
						1,
						len(r.URL.Query()),
					)
					require.Equal(
						t,
						strconv.FormatBool(testOpts.Follow),
						r.URL.Query().Get("follow"),
					)
					bodyBytes, err := json.Marshal(testLogEntry)
					require.NoError(t, err)
					// Emit one SSE-style chunk, flushing so the client's
					// streaming reader sees it immediately.
					w.Header().Set("Content-Type", "text/event-stream")
					flusher, ok := w.(http.Flusher)
					require.True(t, ok)
					flusher.Flush()
					fmt.Fprintln(w, string(bodyBytes))
					flusher.Flush()
				},
			),
		)
		defer server.Close()
		client := NewLogsClient(server.URL, rmTesting.TestAPIToken, nil)
		logsCh, _, err := client.Stream(
			context.Background(),
			testEventID,
			nil,
			&testOpts,
		)
		require.NoError(t, err)
		// The entry must arrive on the channel promptly; time out otherwise.
		select {
		case logEntry := <-logsCh:
			require.Equal(t, testLogEntry, logEntry)
		case <-time.After(3 * time.Second):
			require.Fail(t, "timed out waiting for logs")
		}
	})
	t.Run("non-nil logs selector", func(t *testing.T) {
		server := httptest.NewServer(
			http.HandlerFunc(
				func(w http.ResponseWriter, r *http.Request) {
					require.Equal(t, http.MethodGet, r.Method)
					require.Equal(
						t,
						fmt.Sprintf("/v2/events/%s/logs", testEventID),
						r.URL.Path,
					)
					// Selector fields must be forwarded as query parameters.
					require.Equal(
						t,
						testSelector.Job,
						r.URL.Query().Get("job"),
					)
					require.Equal(
						t,
						testSelector.Container,
						r.URL.Query().Get("container"),
					)
					require.Equal(
						t,
						strconv.FormatBool(testOpts.Follow),
						r.URL.Query().Get("follow"),
					)
					bodyBytes, err := json.Marshal(testLogEntry)
					require.NoError(t, err)
					w.Header().Set("Content-Type", "text/event-stream")
					flusher, ok := w.(http.Flusher)
					require.True(t, ok)
					flusher.Flush()
					fmt.Fprintln(w, string(bodyBytes))
					flusher.Flush()
				},
			),
		)
		defer server.Close()
		client := NewLogsClient(server.URL, rmTesting.TestAPIToken, nil)
		logsCh, _, err := client.Stream(
			context.Background(),
			testEventID,
			&testSelector,
			&testOpts,
		)
		require.NoError(t, err)
		select {
		case logEntry := <-logsCh:
			require.Equal(t, testLogEntry, logEntry)
		case <-time.After(3 * time.Second):
			require.Fail(t, "timed out waiting for logs")
		}
	})
}
|
package cmd
import (
"github.com/benjlevesque/task/pkg/db"
"github.com/benjlevesque/task/pkg/tasks"
"github.com/benjlevesque/task/pkg/util"
"github.com/spf13/cobra"
)
// doCmd implements "task do": every argument is a task identifier that is
// toggled to the done state. Shell completion offers only undone tasks.
var doCmd = &cobra.Command{
	Use: "do",
	Short: "Marks a task as done",
	ValidArgsFunction: util.GetTaskListValidArgs(db.Undone, true),
	Run: func(cmd *cobra.Command, args []string) {
		tasks.ToggleTasks(db.GetStore(), args, true)
	},
}

// init registers the subcommand on the root command at package load time.
func init() {
	rootCmd.AddCommand(doCmd)
}
|
package common
const (
	// MaxEditctBits is the bit width reserved for an index; IndexMask keeps
	// the low MaxEditctBits bits of a packed value.
	MaxEditctBits = 11
	IndexMask = ((1 << MaxEditctBits) - 1)
)

// weaponPrefix is the common prefix of weapon entity names
// (e.g. "weapon_ak47"); presumably stripped/added when mapping entity
// names to EquipmentElement values — confirm at call sites.
const weaponPrefix = "weapon_"
// Enumeration types used throughout the demo parser. Each is a small named
// integer; the *Strings maps below supply display names.
type (
	RoundMVPReason byte
	Hitgroup byte
	RoundEndReason byte
	Team byte
	EquipmentElement int
	EquipmentClass int
)
// Reasons a player can be awarded the round's MVP star.
// Values start at 1 (iota + 1); the zero value means no reason recorded.
const (
	MVPReasonMostEliminations RoundMVPReason = iota + 1
	MVPReasonBombDefused
	MVPReasonBombPlanted
)
// MVPReasonStrings maps each RoundMVPReason to its display name.
// Lookups of unmapped values yield the empty string.
var MVPReasonStrings = map[RoundMVPReason]string{
	MVPReasonMostEliminations: "Most Eliminations",
	MVPReasonBombDefused: "Bomb Defused",
	MVPReasonBombPlanted: "Bomb Planted",
}
// String returns the display name for c, or "" for unmapped values.
func (c RoundMVPReason) String() string {
	if name, ok := MVPReasonStrings[c]; ok {
		return name
	}
	return ""
}
// Hitgroup values identify which part of a player model was hit.
// Note the gap: 8 and 9 are unused, 10 is gear.
const (
	HGGeneric Hitgroup = 0
	HGHead Hitgroup = 1
	HGChest Hitgroup = 2
	HGStomach Hitgroup = 3
	HGLeftArm Hitgroup = 4
	HGRightArm Hitgroup = 5
	HGLeftLeg Hitgroup = 6
	HGRightLeg Hitgroup = 7
	HGGear Hitgroup = 10
)
// HGStrings maps each Hitgroup to its display name.
// Lookups of unmapped values yield the empty string.
var HGStrings = map[Hitgroup]string{
	HGGeneric: "Generic",
	HGHead: "Head",
	HGChest: "Chest",
	HGStomach: "Stomach",
	HGLeftArm: "Left Arm",
	HGRightArm: "Right Arm",
	HGLeftLeg: "Left Leg",
	// Fix: was "Right leg" — inconsistent capitalization vs every sibling entry.
	HGRightLeg: "Right Leg",
	HGGear: "Gear",
}

// String returns the display name for c, or "" for unmapped values.
// Added for consistency: every other enum in this file (RoundMVPReason,
// RoundEndReason, Team, EquipmentElement) exposes String() over its map.
func (c Hitgroup) String() string {
	return HGStrings[c]
}
// RoundEndReason values describe how a round concluded.
// Values start at 1 (iota + 1); the zero value means no reason recorded.
const (
	RERTargetBombed RoundEndReason = iota + 1
	RERVIPEscaped
	RERVIPKilled
	RERTerroristsEscaped
	RERCTStoppedEscape
	RERTerroristsStopped
	RERBombDefused
	RERCTWin
	RERTerroristsWin
	RERDraw
	RERHostagesRescued
	RERTargetSaved
	RERHostagesNotRescued
	RERTerroristsNotEscaped
	RERVIPNotEscaped
	RERGameStart
	RERTerroristsSurrender
	RERCTSurrender
)
// RERStrings maps each RoundEndReason to its display name.
// Lookups of unmapped values yield the empty string.
var RERStrings = map[RoundEndReason]string{
	RERTargetBombed: "Target Bombed",
	RERVIPEscaped: "VIP Escaped",
	RERVIPKilled: "VIP Killed",
	RERTerroristsEscaped: "Terrorists Escaped",
	RERCTStoppedEscape: "CT Stopped Escape",
	RERTerroristsStopped: "Terrorists Stopped",
	RERBombDefused: "Bomb Defused",
	RERCTWin: "CT Win",
	RERTerroristsWin: "Terrorists Win",
	RERDraw: "Draw",
	RERHostagesRescued: "Hostages Rescued",
	RERTargetSaved: "Target Saved",
	RERHostagesNotRescued: "Hostages Not Rescued",
	RERTerroristsNotEscaped: "Terrorists Not Escaped",
	RERVIPNotEscaped: "VIP Not Escaped",
	RERGameStart: "Game Start",
	RERTerroristsSurrender: "Terrorists Surrender",
	RERCTSurrender: "CT Surrender",
}
// String returns the display name for c, or "" for unmapped values.
func (c RoundEndReason) String() string {
	if name, ok := RERStrings[c]; ok {
		return name
	}
	return ""
}
// Team values identify the side a player belongs to.
// The zero value is "unassigned".
const (
	TeamUnassigned Team = iota
	TeamSpectators
	TeamTerrorists
	TeamCounterTerrorists
)
// TeamStrings maps each Team to its display name.
// Lookups of unmapped values yield the empty string.
var TeamStrings = map[Team]string{
	TeamUnassigned: "Unassigned",
	TeamSpectators: "Spectators",
	TeamTerrorists: "Terrorists",
	TeamCounterTerrorists: "Counter Terrorists",
}
// String returns the display name for c, or "" for unmapped values.
func (c Team) String() string {
	if name, ok := TeamStrings[c]; ok {
		return name
	}
	return ""
}
// EquipmentElement values identify every weapon and equipment item.
// Numbering is grouped by category in blocks of 100: pistols 1+, SMGs 101+,
// heavy 201+, rifles 301+, gear 401+, grenades 501+.
const (
	EEUnknown EquipmentElement = 0
	// Pistols
	EEP2000 EquipmentElement = 1
	EEGlock EquipmentElement = 2
	EEP250 EquipmentElement = 3
	EEDeagle EquipmentElement = 4
	EEFiveSeven EquipmentElement = 5
	EEDualBarettas EquipmentElement = 6
	EETec9 EquipmentElement = 7
	EECZ EquipmentElement = 8
	EEUSP EquipmentElement = 9
	EERevolver EquipmentElement = 10
	// SMGs
	EEMP7 EquipmentElement = 101
	EEMP9 EquipmentElement = 102
	EEBizon EquipmentElement = 103
	EEMac10 EquipmentElement = 104
	EEUMP EquipmentElement = 105
	EEP90 EquipmentElement = 106
	// Heavy
	EESawedOff EquipmentElement = 201
	EENova EquipmentElement = 202
	EEMag7 EquipmentElement = 203
	EEXM1014 EquipmentElement = 204
	EEM249 EquipmentElement = 205
	EENegev EquipmentElement = 206
	// Rifles
	EEGallil EquipmentElement = 301
	EEFamas EquipmentElement = 302
	EEAK47 EquipmentElement = 303
	EEM4A4 EquipmentElement = 304
	EEM4A1 EquipmentElement = 305
	EEScout EquipmentElement = 306
	EESG556 EquipmentElement = 307
	EEAUG EquipmentElement = 308
	EEAWP EquipmentElement = 309
	EEScar20 EquipmentElement = 310
	EEG3SG1 EquipmentElement = 311
	// Equipment
	EEZeus EquipmentElement = 401
	EEKevlar EquipmentElement = 402
	EEHelmet EquipmentElement = 403
	EEBomb EquipmentElement = 404
	EEKnife EquipmentElement = 405
	EEDefuseKit EquipmentElement = 406
	EEWorld EquipmentElement = 407
	// Grenades
	EEDecoy EquipmentElement = 501
	EEMolotov EquipmentElement = 502
	EEIncendiary EquipmentElement = 503
	EEFlash EquipmentElement = 504
	EESmoke EquipmentElement = 505
	EEHE EquipmentElement = 506
)
// EquipmentElementStrings maps each EquipmentElement to its display name.
// Lookups of unmapped values yield the empty string.
// NOTE(review): "Dual Barettas" follows the constant's spelling; the actual
// brand is "Beretta" — changing the displayed text would alter output, so
// it is kept as-is.
var EquipmentElementStrings = map[EquipmentElement]string{
	EEUnknown: "Unknown",
	// Pistols
	EEP2000: "P2000",
	EEGlock: "Glock",
	EEP250: "P250",
	EEDeagle: "Desert Eagle",
	EEFiveSeven: "Five-Seven",
	EEDualBarettas: "Dual Barettas",
	EETec9: "Tec-9",
	EECZ: "CZ75-Auto",
	EEUSP: "USP-S",
	EERevolver: "R8 Revolver",
	// SMGs
	EEMP7: "MP7",
	EEMP9: "MP9",
	EEBizon: "PP-Bizon",
	EEMac10: "Mac-10",
	EEUMP: "UMP-45",
	EEP90: "P90",
	// Heavy
	EESawedOff: "Sawed-Off",
	EENova: "Nova",
	EEMag7: "Mag-7",
	EEXM1014: "XM1014",
	EEM249: "M249",
	EENegev: "Negev",
	// Rifles
	EEGallil: "Galil AR",
	EEFamas: "Famas",
	EEAK47: "AK-47",
	EEM4A4: "M4A4",
	EEM4A1: "M4A1-S",
	EEScout: "SSG 08",
	EESG556: "SG 553",
	EEAUG: "Aug",
	EEAWP: "AWP",
	EEScar20: "Scar-20",
	EEG3SG1: "G3SG1",
	// Equipment
	EEZeus: "Zeus",
	EEKevlar: "Kevlar",
	EEHelmet: "Helmet",
	EEBomb: "C4",
	EEKnife: "Knife",
	EEDefuseKit: "Defuse Kit",
	EEWorld: "World",
	// Grenades
	EEDecoy: "Decoy",
	EEMolotov: "Molotov",
	EEIncendiary: "Incendiary",
	EEFlash: "Flash",
	EESmoke: "Smoke",
	EEHE: "HE",
}
// String returns the display name for c, or "" for unmapped values.
func (c EquipmentElement) String() string {
	if name, ok := EquipmentElementStrings[c]; ok {
		return name
	}
	return ""
}
// EquipmentClass groups EquipmentElement values into broad categories.
// NOTE(review): unlike the other enums in this file, no strings map or
// String() method is defined for EquipmentClass here — confirm whether it
// exists elsewhere before adding one.
const (
	ECUnknown EquipmentClass = iota
	ECPistols
	ECSMG
	ECHeavy
	ECRifle
	ECEquipment
	ECGrenade
)
|
package pkg
import (
"bytes"
"crypto/tls"
"github.com/wonderivan/logger"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"os"
"path"
"strings"
)
// HarborCfg holds the connection settings for a Harbor chart repository:
// the base URL and the basic-auth credentials.
type HarborCfg struct {
	URL string
	UserName string
	Password string
}
// userAgent returns the User-Agent header value sent with upload requests:
// the Helm client version without its "v" prefix.
func userAgent() string {
	const helmVersion = "v3.0"
	return "Helm/" + strings.TrimPrefix(helmVersion, "v")
}
// UploadChartPackage POSTs the chart archive at chartPackagePath to the
// Harbor chart API ("api/<base path>/charts" under cfg.URL) using basic
// auth, and returns the raw response body. When force is true, "?force" is
// appended so an existing chart version is overwritten.
//
// NOTE(review): TLS verification is disabled (InsecureSkipVerify) and the
// client has no timeout — confirm both are intended for the target
// deployment.
func (cfg *HarborCfg) UploadChartPackage(chartPackagePath string, force bool) ([]byte, error) {
	parsedURL, err := url.Parse(cfg.URL)
	if err != nil {
		return nil, err
	}
	// Keep RawPath in sync with Path so escaped segments survive String().
	parsedURL.RawPath = path.Join("api", parsedURL.RawPath, "charts")
	parsedURL.Path = path.Join("api", parsedURL.Path, "charts")
	logger.Debug("fetch url:", parsedURL.Path)
	indexURL := parsedURL.String()
	req, err := http.NewRequest("POST", indexURL, nil)
	if err != nil {
		return nil, err
	}
	// Add ?force to request querystring to force an upload if chart version already exists
	if force {
		req.URL.RawQuery = "force"
	}
	if err := setUploadChartPackageRequestBody(req, chartPackagePath); err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", userAgent())
	req.SetBasicAuth(cfg.UserName, cfg.Password)
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	// Fix: the body was never closed and the io.Copy error was discarded;
	// the extra ioutil.ReadAll over a bytes.Buffer was a no-op re-read.
	defer resp.Body.Close()
	buf := bytes.NewBuffer(nil)
	if _, err := io.Copy(buf, resp.Body); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
func setUploadChartPackageRequestBody(req *http.Request, chartPackagePath string) error {
var body bytes.Buffer
w := multipart.NewWriter(&body)
defer w.Close()
fw, err := w.CreateFormFile("chart", chartPackagePath)
if err != nil {
return err
}
w.FormDataContentType()
fd, err := os.Open(chartPackagePath)
if err != nil {
return err
}
defer fd.Close()
_, err = io.Copy(fw, fd)
if err != nil {
return err
}
req.Header.Set("Content-Type", w.FormDataContentType())
req.Body = ioutil.NopCloser(&body)
return nil
}
|
package ddtracer
import (
"context"
"fmt"
stdlog "log"
"math/rand"
"os"
"time"
"github.com/DataDog/dd-trace-go/tracer"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/opentracing/opentracing-go/log"
)
// init seeds the global math/rand source once per process — presumably so
// IDs derived from it differ between runs; confirm what in this package
// consumes the global source.
// NOTE(review): rand.Seed is deprecated as of Go 1.20 — check the
// toolchain before modernizing.
func init() {
	rand.Seed(time.Now().UnixNano())
}
// defaultHostname returns the machine's hostname, or the empty string when
// it cannot be determined.
func defaultHostname() string {
	if host, err := os.Hostname(); err == nil {
		return host
	}
	return ""
}
var (
	// DefaultService is the service name applied to root spans; it defaults
	// to the machine's hostname.
	DefaultService = defaultHostname()
	// DefaultResource is the resource name used when none is given.
	DefaultResource = "/"
	// EnvTag sets the environment for a given span,
	// e.g. EnvTag.Set(span, "development").
	EnvTag = stringTagName("env")
)
// Tracer extends the DataDog tracer supplying our own text propagator
// for HTTP-header inject/extract.
type Tracer struct {
	*tracer.Tracer
	textPropagator *textMapPropagator
}
// NewTracer creates a new Tracer backed by the default DataDog transport.
func NewTracer() opentracing.Tracer {
	return NewTracerTransport(nil)
}

// NewTracerTransport creates a new Tracer that reports through tr; a nil
// transport selects the DataDog default.
func NewTracerTransport(tr tracer.Transport) opentracing.Tracer {
	var driver *tracer.Tracer
	switch {
	case tr == nil:
		driver = tracer.NewTracer()
	default:
		driver = tracer.NewTracerTransport(tr)
	}
	wrapped := &Tracer{Tracer: driver}
	wrapped.textPropagator = &textMapPropagator{wrapped}
	return wrapped
}
// StartSpan creates a new span named op, after applying the supplied start
// options.
func (t *Tracer) StartSpan(op string, opts ...opentracing.StartSpanOption) opentracing.Span {
	options := opentracing.StartSpanOptions{}
	for _, opt := range opts {
		opt.Apply(&options)
	}
	return t.startSpanWithOptions(op, &options)
}
// startSpanWithOptions builds the underlying DataDog span. If a ChildOf
// reference to one of our SpanContexts is present, the span becomes a
// child of it and trace IDs are reconciled; otherwise a fresh root span is
// created with the package defaults. Tags from opts are applied last.
func (t *Tracer) startSpanWithOptions(op string, opts *opentracing.StartSpanOptions) opentracing.Span {
	var span *tracer.Span
	for _, ref := range opts.References {
		if ref.Type == opentracing.ChildOfRef {
			if p, ok := ref.ReferencedContext.(*SpanContext); ok {
				span = tracer.NewChildSpanFromContext(op, p.ctx)
				// If this is true we're making a DD opentracing span from
				// a standard open tracing span
				if span.TraceID == 0 {
					span.TraceID = span.SpanID // at the very least set the trace ID, with no other changes this becomes a root span
					pSpan := tracer.SpanFromContextDefault(p.ctx)
					if pSpan.TraceID == 0 { // if the parent span doesn't have a trace ID set it
						pSpan.TraceID = pSpan.SpanID
					}
					if span.ParentID == pSpan.SpanID { // if this is in fact our parent, inherit its trace id
						span.TraceID = pSpan.TraceID
					}
				}
			}
		}
	}
	if span == nil {
		span = t.NewRootSpan(op, DefaultService, DefaultResource)
	}
	s := &Span{span, t}
	for key, value := range opts.Tags {
		s.SetTag(key, value)
	}
	return s
}
// Inject writes sm into carrier using the given format; only the
// HTTPHeaders format is supported.
func (t *Tracer) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error {
	sc, ok := sm.(*SpanContext)
	if !ok {
		return opentracing.ErrInvalidSpanContext
	}
	span, ok := tracer.SpanFromContext(sc.ctx)
	if !ok {
		return opentracing.ErrInvalidSpanContext
	}
	if format == opentracing.HTTPHeaders {
		return t.textPropagator.Inject(span, carrier)
	}
	return opentracing.ErrUnsupportedFormat
}
// Extract reads a span context out of carrier; only the HTTPHeaders format
// is supported.
func (t *Tracer) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) {
	if format == opentracing.HTTPHeaders {
		return t.textPropagator.Extract(carrier)
	}
	return nil, opentracing.ErrUnsupportedFormat
}
// Close shuts down the embedded DataDog tracer and always reports nil.
//
// Fix: the original called Stop twice — *Tracer declares no Stop of its
// own in this file, so t.Stop() resolves to the promoted t.Tracer.Stop()
// and both statements invoked the same method.
func (t *Tracer) Close() error {
	t.Tracer.Stop()
	return nil
}
// Span extends the DataDog span, keeping a reference to the Tracer that
// created it so Tracer() can be answered.
type Span struct {
	*tracer.Span
	tracer *Tracer
}
// Finish closes the span with default finish options.
func (s *Span) Finish() {
	s.FinishWithOptions(opentracing.FinishOptions{})
}

// FinishWithOptions closes the span; a non-zero FinishTime overrides the
// span duration before closing.
func (s *Span) FinishWithOptions(opts opentracing.FinishOptions) {
	if ft := opts.FinishTime; !ft.IsZero() {
		s.Duration = ft.UTC().UnixNano() - s.Start
	}
	s.Span.Finish()
}
// Context returns this span wrapped as an opentracing.SpanContext.
func (s *Span) Context() opentracing.SpanContext {
	return &SpanContext{ctx: s.Span.Context(context.Background())}
}

// SetOperationName renames the span's operation and returns the span.
func (s *Span) SetOperationName(operationName string) opentracing.Span {
	s.Name = operationName
	return s
}
// setTag maps the well-known opentracing keys PeerService and Component
// onto the DataDog service/resource fields; every other key becomes span
// metadata. The value is rendered with fmt.Sprint in all cases.
func (s *Span) setTag(key string, value interface{}) opentracing.Span {
	val := fmt.Sprint(value)
	if key == string(ext.PeerService) {
		s.Service = val
	} else if key == string(ext.Component) {
		s.Resource = val
	} else {
		s.SetMeta(key, val)
	}
	return s
}

// SetTag adds a key/value pair to the span: float64 values are stored as
// metrics, everything else as metadata via setTag.
func (s *Span) SetTag(key string, value interface{}) opentracing.Span {
	if f, ok := value.(float64); ok {
		s.SetMetric(key, f)
		return s
	}
	return s.setTag(key, value)
}
// LogFields records structured fields on the span. A field keyed "error"
// marks the span as errored (its value must be an error); all other
// fields are recorded as tags.
func (s *Span) LogFields(fields ...log.Field) {
	for _, field := range fields {
		if field.Key() == "error" {
			s.SetError(field.Value().(error))
			continue
		}
		s.SetTag(field.Key(), field.Value())
	}
}
// LogKV converts alternating key/value pairs into fields and records them
// via LogFields. Malformed pair lists are silently dropped — the error
// from InterleavedKVToFields is intentionally discarded.
func (s *Span) LogKV(alternatingKeyValues ...interface{}) {
	fields, err := log.InterleavedKVToFields(alternatingKeyValues...)
	if err != nil {
		return
	}
	s.LogFields(fields...)
}
// LogEvent logs event as a key with a nil value.
//
// Deprecated: use LogFields or LogKV.
func (s *Span) LogEvent(event string) {
	stdlog.Println("Span.LogEvent() has been deprecated, use LogFields or LogKV")
	s.LogKV(event, nil)
}

// LogEventWithPayload logs event with payload as its value.
//
// Deprecated: use LogFields or LogKV.
func (s *Span) LogEventWithPayload(event string, payload interface{}) {
	stdlog.Println("Span.LogEventWithPayload() has been deprecated, use LogFields or LogKV")
	s.LogKV(event, payload)
}

// Log only prints a deprecation warning; data is discarded.
//
// Deprecated: use LogFields or LogKV.
func (s *Span) Log(data opentracing.LogData) {
	stdlog.Println("Span.Log() has been deprecated, use LogFields or LogKV")
}
// SetBaggageItem hasn't been implemented; it logs a warning and returns
// the span unchanged.
func (s *Span) SetBaggageItem(restrictedKey string, value string) opentracing.Span {
	stdlog.Println("WARNING - SetBaggageItem not implemented")
	return s
}

// BaggageItem hasn't been implemented; it logs a warning and returns "".
func (s *Span) BaggageItem(restrictedKey string) string {
	stdlog.Println("WARNING - BaggageItem not implemented")
	return ""
}
// Tracer returns the tracer that created this span.
func (s *Span) Tracer() opentracing.Tracer {
	return s.tracer
}
// SpanContext adapts a context.Context carrying a DataDog span to the
// opentracing.SpanContext interface.
type SpanContext struct {
	ctx context.Context
}

// ForeachBaggageItem hasn't been implemented; it logs a warning and never
// invokes handler.
func (ctx *SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
	stdlog.Println("WARNING - ForeachBaggageItem not implemented")
}
// stringTagName is a typed tag key whose Set helper writes a string value
// onto a span (see EnvTag for an example).
type stringTagName string

// Set records value under this tag name on the supplied span.
func (tag stringTagName) Set(span opentracing.Span, value string) {
	span.SetTag(string(tag), value)
}
|
package io
import (
"encoding/binary"
"io"
"strings"
"testing"
)
// TestReadFull demonstrates io.ReadFull's length-prefixed read pattern:
// first a 2-byte big-endian length header is read, then that many payload
// bytes are requested. With the short input "hello" the header decodes to
// a length far larger than the remaining bytes, so the second ReadFull
// reports an error (logged, not asserted).
func TestReadFull(t *testing.T) {
	var scratch [4]byte
	header := scratch[:2]
	src := strings.NewReader("hello")
	if n, err := io.ReadFull(src, header); err != nil {
		t.Log(err)
	} else {
		t.Log(n)
	}
	payloadLen := uint32(binary.BigEndian.Uint16(header))
	payload := make([]byte, payloadLen)
	if n, err := io.ReadFull(src, payload); err != nil {
		t.Log(err)
	} else {
		t.Log(n)
	}
}
|
package templateresolution
import (
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo/errors"
wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1"
typed "github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1"
"github.com/argoproj/argo/workflow/common"
)
// maxResolveDepth is the limit of template reference resolution.
const maxResolveDepth int = 10

// workflowTemplateInterfaceWrapper is an internal struct to wrap clientset.
type workflowTemplateInterfaceWrapper struct {
	clientset typed.WorkflowTemplateInterface
}

// WrapWorkflowTemplateInterface adapts a clientset interface to the
// WorkflowTemplateNamespacedGetter used by template resolution.
func WrapWorkflowTemplateInterface(clientset v1alpha1.WorkflowTemplateInterface) WorkflowTemplateNamespacedGetter {
	return &workflowTemplateInterfaceWrapper{clientset: clientset}
}

// Get retrieves the WorkflowTemplate of a given name.
func (wrapper *workflowTemplateInterfaceWrapper) Get(name string) (*wfv1.WorkflowTemplate, error) {
	return wrapper.clientset.Get(name, metav1.GetOptions{})
}

// WorkflowTemplateNamespacedGetter helps get WorkflowTemplates.
type WorkflowTemplateNamespacedGetter interface {
	// Get retrieves the WorkflowTemplate from the indexer for a given name.
	Get(name string) (*wfv1.WorkflowTemplate, error)
}
// clusterWorkflowTemplateInterfaceWrapper is an internal struct to wrap clientset.
type clusterWorkflowTemplateInterfaceWrapper struct {
	clientset typed.ClusterWorkflowTemplateInterface
}

// ClusterWorkflowTemplateGetter helps get ClusterWorkflowTemplates.
type ClusterWorkflowTemplateGetter interface {
	// Get retrieves the ClusterWorkflowTemplate from the indexer for a given name.
	Get(name string) (*wfv1.ClusterWorkflowTemplate, error)
}

// WrapClusterWorkflowTemplateInterface adapts a clientset interface to
// ClusterWorkflowTemplateGetter.
func WrapClusterWorkflowTemplateInterface(clusterClientset v1alpha1.ClusterWorkflowTemplateInterface) ClusterWorkflowTemplateGetter {
	return &clusterWorkflowTemplateInterfaceWrapper{clientset: clusterClientset}
}

// NullClusterWorkflowTemplateGetter is used when cluster-scoped templates
// are not accessible; its Get always fails with a forbidden-style error.
type NullClusterWorkflowTemplateGetter struct{}

// Get always returns a "forbidden" error for the named template.
// NOTE(review): the first argument to errors.Errorf is an empty error
// code — confirm whether a concrete code was intended here.
func (n *NullClusterWorkflowTemplateGetter) Get(name string) (*wfv1.ClusterWorkflowTemplate, error) {
	return nil, errors.Errorf("", "invalid spec: clusterworkflowtemplates.argoproj.io `%s` is "+
		"forbidden: User cannot get resource 'clusterworkflowtemplates' in API group argoproj.io at the cluster scope", name)
}

// Get retrieves the ClusterWorkflowTemplate of a given name.
func (wrapper *clusterWorkflowTemplateInterfaceWrapper) Get(name string) (*wfv1.ClusterWorkflowTemplate, error) {
	return wrapper.clientset.Get(name, metav1.GetOptions{})
}
// Context is a context of template search.
type Context struct {
	// wftmplGetter is an interface to get WorkflowTemplates.
	wftmplGetter WorkflowTemplateNamespacedGetter
	// cwftmplGetter is an interface to get ClusterWorkflowTemplates
	cwftmplGetter ClusterWorkflowTemplateGetter
	// tmplBase is the base of local template search.
	tmplBase wfv1.TemplateHolder
	// workflow is the Workflow where templates will be stored
	workflow *wfv1.Workflow
	// log is a logrus entry.
	log *logrus.Entry
}

// NewContext returns new Context.
func NewContext(wftmplGetter WorkflowTemplateNamespacedGetter, cwftmplGetter ClusterWorkflowTemplateGetter, tmplBase wfv1.TemplateHolder, workflow *wfv1.Workflow) *Context {
	return &Context{
		wftmplGetter: wftmplGetter,
		cwftmplGetter: cwftmplGetter,
		tmplBase: tmplBase,
		workflow: workflow,
		log: log.WithFields(logrus.Fields{}),
	}
}

// NewContextFromClientset returns a new Context, wrapping the raw
// clientset interfaces in the getter adapters.
func NewContextFromClientset(wftmplClientset typed.WorkflowTemplateInterface, clusterWftmplClient typed.ClusterWorkflowTemplateInterface, tmplBase wfv1.TemplateHolder, workflow *wfv1.Workflow) *Context {
	return &Context{
		wftmplGetter: WrapWorkflowTemplateInterface(wftmplClientset),
		cwftmplGetter: WrapClusterWorkflowTemplateInterface(clusterWftmplClient),
		tmplBase: tmplBase,
		workflow: workflow,
		log: log.WithFields(logrus.Fields{}),
	}
}
// GetTemplateByName returns a deep copy of the named template from the
// current template base, or a CodeNotFound error when it does not exist.
func (ctx *Context) GetTemplateByName(name string) (*wfv1.Template, error) {
	ctx.log.Debug("Getting the template by name")
	if tmpl := ctx.tmplBase.GetTemplateByName(name); tmpl != nil {
		return tmpl.DeepCopy(), nil
	}
	return nil, errors.Errorf(errors.CodeNotFound, "template %s not found", name)
}
// GetTemplateGetterFromRef fetches the (cluster-scoped or namespaced)
// workflow template a TemplateRef points into.
func (ctx *Context) GetTemplateGetterFromRef(tmplRef *wfv1.TemplateRef) (wfv1.TemplateHolder, error) {
	if !tmplRef.ClusterScope {
		return ctx.wftmplGetter.Get(tmplRef.Name)
	}
	return ctx.cwftmplGetter.Get(tmplRef.Name)
}
// GetTemplateFromRef returns a deep copy of the template a TemplateRef
// points to. A missing workflow template or a missing template inside it
// yields a CodeNotFound error.
//
// Improvement: the cluster-vs-namespace branch duplicated the existing
// GetTemplateGetterFromRef helper; delegate to it instead.
func (ctx *Context) GetTemplateFromRef(tmplRef *wfv1.TemplateRef) (*wfv1.Template, error) {
	ctx.log.Debug("Getting the template from ref")
	wftmpl, err := ctx.GetTemplateGetterFromRef(tmplRef)
	if err != nil {
		if apierr.IsNotFound(err) {
			return nil, errors.Errorf(errors.CodeNotFound, "workflow template %s not found", tmplRef.Name)
		}
		return nil, err
	}
	template := wftmpl.GetTemplateByName(tmplRef.Template)
	if template == nil {
		return nil, errors.Errorf(errors.CodeNotFound, "template %s not found in workflow template %s", tmplRef.Template, tmplRef.Name)
	}
	return template.DeepCopy(), nil
}
// GetTemplate resolves tmplHolder to a concrete template: an explicit
// template ref wins, then a template name, and finally the holder itself
// when it already is a concrete inline template.
func (ctx *Context) GetTemplate(tmplHolder wfv1.TemplateReferenceHolder) (*wfv1.Template, error) {
	ctx.log.Debug("Getting the template")
	tmplName := tmplHolder.GetTemplateName()
	tmplRef := tmplHolder.GetTemplateRef()
	switch {
	case tmplRef != nil:
		return ctx.GetTemplateFromRef(tmplRef)
	case tmplName != "":
		return ctx.GetTemplateByName(tmplName)
	}
	if tmpl, ok := tmplHolder.(*wfv1.Template); ok {
		if tmpl.GetType() == wfv1.TemplateTypeUnknown {
			return nil, errors.Errorf(errors.CodeNotFound, "template %s is not a concrete template", tmpl.Name)
		}
		return tmpl.DeepCopy(), nil
	}
	return nil, errors.Errorf(errors.CodeInternal, "failed to get a template")
}
// GetCurrentTemplateBase returns the current template base of the context.
func (ctx *Context) GetCurrentTemplateBase() wfv1.TemplateHolder {
	return ctx.tmplBase
}

// GetTemplateScope returns "<scope>/<name>" identifying the resource that
// templates are currently being resolved from.
func (ctx *Context) GetTemplateScope() string {
	return string(ctx.tmplBase.GetResourceScope()) + "/" + ctx.tmplBase.GetName()
}

// ResolveTemplate digs into references and returns a merged template.
// This method is the public start point of template resolution.
func (ctx *Context) ResolveTemplate(tmplHolder wfv1.TemplateReferenceHolder) (*Context, *wfv1.Template, bool, error) {
	return ctx.resolveTemplateImpl(tmplHolder, 0)
}
// resolveTemplateImpl digs into references and returns a merged template.
// This method processes inputs and arguments so the inputs of the final
// resolved template include intermediate parameter passing.
// The other fields are just merged and shallower templates overwrite deeper.
//
// The bool result reports whether any template was newly stored on the
// workflow during resolution.
func (ctx *Context) resolveTemplateImpl(tmplHolder wfv1.TemplateReferenceHolder, depth int) (*Context, *wfv1.Template, bool, error) {
	ctx.log = ctx.log.WithFields(logrus.Fields{
		"depth": depth,
		"base": common.GetTemplateGetterString(ctx.tmplBase),
		"tmpl": common.GetTemplateHolderString(tmplHolder),
	})
	// Avoid infinite references
	if depth > maxResolveDepth {
		return nil, nil, false, errors.Errorf(errors.CodeBadRequest, "template reference exceeded max depth (%d)", maxResolveDepth)
	}
	ctx.log.Debug("Resolving the template")
	templateStored := false
	var tmpl *wfv1.Template
	if ctx.workflow != nil {
		// Check if the template has been stored.
		scope := ctx.tmplBase.GetResourceScope()
		resourceName := ctx.tmplBase.GetName()
		tmpl = ctx.workflow.GetStoredTemplate(scope, resourceName, tmplHolder)
	}
	if tmpl != nil {
		ctx.log.Debug("Found stored template")
	} else {
		// Find newly appeared template.
		newTmpl, err := ctx.GetTemplate(tmplHolder)
		if err != nil {
			return nil, nil, false, err
		}
		// Store the found template on the workflow for later lookups.
		if ctx.workflow != nil {
			scope := ctx.tmplBase.GetResourceScope()
			resourceName := ctx.tmplBase.GetName()
			stored, err := ctx.workflow.SetStoredTemplate(scope, resourceName, tmplHolder, newTmpl)
			if err != nil {
				return nil, nil, false, err
			}
			if stored {
				ctx.log.Debug("Stored the template")
				templateStored = true
			}
		}
		tmpl = newTmpl
	}
	// Update the template base of the context.
	newTmplCtx, err := ctx.WithTemplateHolder(tmplHolder)
	if err != nil {
		return nil, nil, false, err
	}
	// Return a concrete template without digging into it.
	if tmpl.GetType() != wfv1.TemplateTypeUnknown {
		return newTmplCtx, tmpl, templateStored, nil
	}
	// Dig into nested references with new template base.
	finalTmplCtx, resolvedTmpl, templateStoredInCall, err := newTmplCtx.resolveTemplateImpl(tmpl, depth+1)
	if err != nil {
		return nil, nil, false, err
	}
	if templateStoredInCall {
		templateStored = true
	}
	// Merge the referred template into the original.
	mergedTmpl, err := common.MergeReferredTemplate(tmpl, resolvedTmpl)
	if err != nil {
		return nil, nil, false, err
	}
	return finalTmplCtx, mergedTmpl, templateStored, nil
}
// WithTemplateHolder returns a context whose template base matches
// tmplHolder: the referenced (cluster) workflow template when a ref is
// present, otherwise the current base unchanged.
func (ctx *Context) WithTemplateHolder(tmplHolder wfv1.TemplateReferenceHolder) (*Context, error) {
	tmplRef := tmplHolder.GetTemplateRef()
	if tmplRef == nil {
		return ctx.WithTemplateBase(ctx.tmplBase), nil
	}
	if tmplRef.ClusterScope {
		return ctx.WithClusterWorkflowTemplate(tmplRef.Name)
	}
	return ctx.WithWorkflowTemplate(tmplRef.Name)
}
// WithTemplateBase creates a new context with the given template base.
func (ctx *Context) WithTemplateBase(tmplBase wfv1.TemplateHolder) *Context {
	return NewContext(ctx.wftmplGetter, ctx.cwftmplGetter, tmplBase, ctx.workflow)
}

// WithWorkflowTemplate creates a new context based on the named
// (namespaced) WorkflowTemplate.
func (ctx *Context) WithWorkflowTemplate(name string) (*Context, error) {
	wftmpl, err := ctx.wftmplGetter.Get(name)
	if err != nil {
		return nil, err
	}
	return ctx.WithTemplateBase(wftmpl), nil
}

// WithClusterWorkflowTemplate creates a new context based on the named
// ClusterWorkflowTemplate.
func (ctx *Context) WithClusterWorkflowTemplate(name string) (*Context, error) {
	cwftmpl, err := ctx.cwftmplGetter.Get(name)
	if err != nil {
		return nil, err
	}
	return ctx.WithTemplateBase(cwftmpl), nil
}
|
// Copyright 2018 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xml
import (
"io/ioutil"
"os"
"testing"
"android/soong/android"
"android/soong/etc"
)
// buildDir is the throwaway build output directory shared by all tests in
// this package; it is created by setUp and removed by tearDown.
var buildDir string

// setUp creates the temporary build directory. It panics on failure
// because it runs from TestMain, where no *testing.T is available.
func setUp() {
	dir, err := ioutil.TempDir("", "soong_xml_test")
	if err != nil {
		panic(err)
	}
	buildDir = dir
}
// tearDown removes the temporary build directory created by setUp.
func tearDown() {
	os.RemoveAll(buildDir)
}
// TestMain wraps the whole test run with directory setup/teardown. The
// helper closure exists so the deferred tearDown fires before os.Exit
// (defers placed directly in TestMain would be skipped by os.Exit).
func TestMain(m *testing.M) {
	runSuite := func() int {
		setUp()
		defer tearDown()
		return m.Run()
	}
	os.Exit(runSuite())
}
// testXml builds a Soong test context for the given Blueprint snippet bp,
// registering the prebuilt_etc and prebuilt_etc_xml module types and a
// small in-memory file system, then parses and prepares build actions,
// failing the test on any errors.
func testXml(t *testing.T, bp string) *android.TestContext {
	fs := map[string][]byte{
		"foo.xml": nil,
		"foo.dtd": nil,
		"bar.xml": nil,
		"bar.xsd": nil,
		"baz.xml": nil,
	}
	config := android.TestArchConfig(buildDir, nil, bp, fs)
	ctx := android.NewTestArchContext()
	ctx.RegisterModuleType("prebuilt_etc", etc.PrebuiltEtcFactory)
	ctx.RegisterModuleType("prebuilt_etc_xml", PrebuiltEtcXmlFactory)
	ctx.Register(config)
	_, errs := ctx.ParseFileList(".", []string{"Android.bp"})
	android.FailIfErrored(t, errs)
	_, errs = ctx.PrepareBuildActions(config)
	android.FailIfErrored(t, errs)
	return ctx
}
// assertEqual fails the test (without stopping it) when actual differs
// from expected, labeling the message with name.
func assertEqual(t *testing.T, name, expected, actual string) {
	t.Helper()
	if expected == actual {
		return
	}
	t.Errorf(name+" expected %q != got %q", expected, actual)
}
// TestPrebuiltEtcXml is a minimal end-to-end check of prebuilt_etc_xml:
// a DTD-validated module, an XSD-validated module, and a schema-less one.
// It asserts each module's xmllint rule input/schema and the install path.
func TestPrebuiltEtcXml(t *testing.T) {
	ctx := testXml(t, `
		prebuilt_etc_xml {
			name: "foo.xml",
			src: "foo.xml",
			schema: "foo.dtd",
		}
		prebuilt_etc_xml {
			name: "bar.xml",
			src: "bar.xml",
			schema: "bar.xsd",
		}
		prebuilt_etc_xml {
			name: "baz.xml",
			src: "baz.xml",
		}
	`)
	for _, tc := range []struct {
		rule, input, schemaType, schema string
	}{
		{rule: "xmllint-dtd", input: "foo.xml", schemaType: "dtd", schema: "foo.dtd"},
		{rule: "xmllint-xsd", input: "bar.xml", schemaType: "xsd", schema: "bar.xsd"},
		{rule: "xmllint-minimal", input: "baz.xml"},
	} {
		t.Run(tc.schemaType, func(t *testing.T) {
			rule := ctx.ModuleForTests(tc.input, "android_arm64_armv8-a").Rule(tc.rule)
			assertEqual(t, "input", tc.input, rule.Input.String())
			if tc.schemaType != "" {
				assertEqual(t, "schema", tc.schema, rule.Args[tc.schemaType])
			}
		})
	}
	m := ctx.ModuleForTests("foo.xml", "android_arm64_armv8-a").Module().(*prebuiltEtcXml)
	assertEqual(t, "installDir", buildDir+"/target/product/test_device/system/etc", m.InstallDirPath().String())
}
|
package api
import (
"github.com/blang/semver/v4"
v "github.com/go-playground/validator/v10"
)
var validate *v.Validate
// init configures the package-level validator: tags are read from `binding`
// struct tags, and a custom "version" rule accepts strings that parse as
// semantic versions.
func init() {
	validate = v.New()
	validate.SetTagName("binding")
	// RegisterValidation returns an error (e.g. for an empty tag name); the
	// original silently discarded it. Failing fast here is correct for init.
	err := validate.RegisterValidation("version", func(fl v.FieldLevel) bool {
		version, ok := fl.Field().Interface().(string)
		if !ok {
			return false
		}
		_, parseErr := semver.Parse(version)
		return parseErr == nil
	})
	if err != nil {
		panic(err)
	}
}
|
package main
import (
"fmt"
"github.com/kavenegar/kavenegar-go"
)
// main demonstrates querying message status by ID via the Kavenegar API,
// distinguishing API-level and HTTP-level failures when printing errors.
func main() {
	api := kavenegar.New(" your apikey ")
	messageid := []string{"", ""}
	res, err := api.Message.Select(messageid)
	if err != nil {
		switch e := err.(type) {
		case *kavenegar.APIError:
			fmt.Println(e.Error())
		case *kavenegar.HTTPError:
			fmt.Println(e.Error())
		default:
			fmt.Println(e.Error())
		}
		return
	}
	for _, r := range res {
		fmt.Println("MessageID = ", r.MessageID)
		fmt.Println("Status = ", r.Status)
		//...
	}
}
|
package vpx
import (
"fmt"
"image"
ourcodec "github.com/trevor403/gostream/codec"
"github.com/edaniels/golog"
"github.com/trevor403/mediadevices/pkg/codec"
"github.com/trevor403/mediadevices/pkg/codec/vpx"
"github.com/trevor403/mediadevices/pkg/prop"
)
// encoder adapts the mediadevices vpx codec to the gostream Encoder
// interface: Encode stores the frame in img, and the codec pulls it back
// out through Read.
type encoder struct {
	codec codec.ReadCloser
	img image.Image
	logger golog.Logger
}

// Version determines the version of a vpx codec.
type Version string

// The set of allowed vpx versions.
const (
	Version8 Version = "vp8"
	Version9 Version = "vp9"
)

// Gives suitable results. Probably want to make this configurable this in the future.
// Bitrate in bits per second — TODO confirm units against vpx.Params docs.
const bitrate = 3_600_000
// NewEncoder returns a vpx encoder of the given type that can encode images of the given width and height. It will
// also ensure that it produces key frames at the given interval.
func NewEncoder(codecVersion Version, width, height, keyFrameInterval int, logger golog.Logger) (ourcodec.Encoder, error) {
enc := &encoder{logger: logger}
var builder codec.VideoEncoderBuilder
switch codecVersion {
case Version8:
params, err := vpx.NewVP8Params()
if err != nil {
return nil, err
}
builder = ¶ms
params.BitRate = bitrate
params.KeyFrameInterval = keyFrameInterval
case Version9:
params, err := vpx.NewVP9Params()
if err != nil {
return nil, err
}
builder = ¶ms
params.BitRate = bitrate
params.KeyFrameInterval = keyFrameInterval
default:
return nil, fmt.Errorf("unsupported vpx version: %s", codecVersion)
}
codec, err := builder.BuildVideoEncoder(enc, prop.Media{
Video: prop.Video{
Width: width,
Height: height,
},
})
if err != nil {
return nil, err
}
enc.codec = codec
return enc, nil
}
// Read returns an image for codec to process. It hands the codec the frame
// most recently stored by Encode; release is nil because the encoder does
// not own the image's buffer.
func (v *encoder) Read() (img image.Image, release func(), err error) {
	return v.img, nil, nil
}
// Encode asks the codec to process the given image and returns the encoded
// bytes. The returned slice is a copy, so it remains valid after the codec's
// internal buffer is released.
func (v *encoder) Encode(img image.Image) ([]byte, error) {
	v.img = img
	data, release, err := v.codec.Read()
	if err != nil {
		// Check the error before touching data/release; the original copied
		// the buffer and invoked release unconditionally, which could panic
		// on a nil release when the read failed.
		return nil, err
	}
	dataCopy := make([]byte, len(data))
	copy(dataCopy, data)
	if release != nil {
		release()
	}
	return dataCopy, nil
}
|
// Copyright 2022 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
betapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/healthcare/beta/healthcare_beta_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/healthcare/beta"
)
// FhirStoreServer implements the gRPC interface for FhirStore.
// NOTE: this file is DCL-generated converter code; keep edits mechanical.
type FhirStoreServer struct{}

// ProtoToFhirStoreVersionEnum converts a FhirStoreVersionEnum enum from its proto representation.
func ProtoToHealthcareBetaFhirStoreVersionEnum(e betapb.HealthcareBetaFhirStoreVersionEnum) *beta.FhirStoreVersionEnum {
	if e == 0 {
		// 0 is the proto "unspecified" sentinel; modeled as nil in the DCL type.
		return nil
	}
	if n, ok := betapb.HealthcareBetaFhirStoreVersionEnum_name[int32(e)]; ok {
		// Strip the proto type-name prefix to recover the bare enum value name.
		e := beta.FhirStoreVersionEnum(n[len("HealthcareBetaFhirStoreVersionEnum"):])
		return &e
	}
	return nil
}
// ProtoToFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum converts a FhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum enum from its proto representation.
// Returns nil for the unspecified (0) value or an unknown enum number.
func ProtoToHealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum(e betapb.HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum) *beta.FhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum {
	if e == 0 {
		return nil
	}
	if n, ok := betapb.HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum_name[int32(e)]; ok {
		// Slice off the proto type-name prefix, leaving the bare value name.
		e := beta.FhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum(n[len("HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum"):])
		return &e
	}
	return nil
}

// ProtoToFhirStoreComplexDataTypeReferenceParsingEnum converts a FhirStoreComplexDataTypeReferenceParsingEnum enum from its proto representation.
// Returns nil for the unspecified (0) value or an unknown enum number.
func ProtoToHealthcareBetaFhirStoreComplexDataTypeReferenceParsingEnum(e betapb.HealthcareBetaFhirStoreComplexDataTypeReferenceParsingEnum) *beta.FhirStoreComplexDataTypeReferenceParsingEnum {
	if e == 0 {
		return nil
	}
	if n, ok := betapb.HealthcareBetaFhirStoreComplexDataTypeReferenceParsingEnum_name[int32(e)]; ok {
		// Slice off the proto type-name prefix, leaving the bare value name.
		e := beta.FhirStoreComplexDataTypeReferenceParsingEnum(n[len("HealthcareBetaFhirStoreComplexDataTypeReferenceParsingEnum"):])
		return &e
	}
	return nil
}
// ProtoToFhirStoreNotificationConfig converts a FhirStoreNotificationConfig object from its proto representation.
// Nil-safe: a nil proto message maps to a nil DCL object.
func ProtoToHealthcareBetaFhirStoreNotificationConfig(p *betapb.HealthcareBetaFhirStoreNotificationConfig) *beta.FhirStoreNotificationConfig {
	if p == nil {
		return nil
	}
	obj := &beta.FhirStoreNotificationConfig{
		PubsubTopic: dcl.StringOrNil(p.GetPubsubTopic()),
	}
	return obj
}

// ProtoToFhirStoreStreamConfigs converts a FhirStoreStreamConfigs object from its proto representation.
// Nil-safe: a nil proto message maps to a nil DCL object.
func ProtoToHealthcareBetaFhirStoreStreamConfigs(p *betapb.HealthcareBetaFhirStoreStreamConfigs) *beta.FhirStoreStreamConfigs {
	if p == nil {
		return nil
	}
	obj := &beta.FhirStoreStreamConfigs{
		BigqueryDestination: ProtoToHealthcareBetaFhirStoreStreamConfigsBigqueryDestination(p.GetBigqueryDestination()),
	}
	// Repeated fields are appended one by one (nil slice stays nil when empty).
	for _, r := range p.GetResourceTypes() {
		obj.ResourceTypes = append(obj.ResourceTypes, r)
	}
	return obj
}

// ProtoToFhirStoreStreamConfigsBigqueryDestination converts a FhirStoreStreamConfigsBigqueryDestination object from its proto representation.
// Nil-safe: a nil proto message maps to a nil DCL object.
func ProtoToHealthcareBetaFhirStoreStreamConfigsBigqueryDestination(p *betapb.HealthcareBetaFhirStoreStreamConfigsBigqueryDestination) *beta.FhirStoreStreamConfigsBigqueryDestination {
	if p == nil {
		return nil
	}
	obj := &beta.FhirStoreStreamConfigsBigqueryDestination{
		DatasetUri:   dcl.StringOrNil(p.GetDatasetUri()),
		SchemaConfig: ProtoToHealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfig(p.GetSchemaConfig()),
	}
	return obj
}

// ProtoToFhirStoreStreamConfigsBigqueryDestinationSchemaConfig converts a FhirStoreStreamConfigsBigqueryDestinationSchemaConfig object from its proto representation.
// Nil-safe: a nil proto message maps to a nil DCL object.
func ProtoToHealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfig(p *betapb.HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfig) *beta.FhirStoreStreamConfigsBigqueryDestinationSchemaConfig {
	if p == nil {
		return nil
	}
	obj := &beta.FhirStoreStreamConfigsBigqueryDestinationSchemaConfig{
		SchemaType:              ProtoToHealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum(p.GetSchemaType()),
		RecursiveStructureDepth: dcl.Int64OrNil(p.GetRecursiveStructureDepth()),
	}
	return obj
}

// ProtoToFhirStoreValidationConfig converts a FhirStoreValidationConfig object from its proto representation.
// Nil-safe: a nil proto message maps to a nil DCL object.
func ProtoToHealthcareBetaFhirStoreValidationConfig(p *betapb.HealthcareBetaFhirStoreValidationConfig) *beta.FhirStoreValidationConfig {
	if p == nil {
		return nil
	}
	obj := &beta.FhirStoreValidationConfig{
		DisableProfileValidation:       dcl.Bool(p.GetDisableProfileValidation()),
		DisableRequiredFieldValidation: dcl.Bool(p.GetDisableRequiredFieldValidation()),
		DisableReferenceTypeValidation: dcl.Bool(p.GetDisableReferenceTypeValidation()),
		DisableFhirpathValidation:      dcl.Bool(p.GetDisableFhirpathValidation()),
	}
	for _, r := range p.GetEnabledImplementationGuides() {
		obj.EnabledImplementationGuides = append(obj.EnabledImplementationGuides, r)
	}
	return obj
}
// ProtoToFhirStore converts a FhirStore resource from its proto representation.
// p is not nil-checked here — presumably generated proto getters tolerate a
// nil receiver; confirm against the generated betapb code.
func ProtoToFhirStore(p *betapb.HealthcareBetaFhirStore) *beta.FhirStore {
	obj := &beta.FhirStore{
		Name:                            dcl.StringOrNil(p.GetName()),
		EnableUpdateCreate:              dcl.Bool(p.GetEnableUpdateCreate()),
		NotificationConfig:              ProtoToHealthcareBetaFhirStoreNotificationConfig(p.GetNotificationConfig()),
		DisableReferentialIntegrity:     dcl.Bool(p.GetDisableReferentialIntegrity()),
		ShardNum:                        dcl.Int64OrNil(p.GetShardNum()),
		DisableResourceVersioning:       dcl.Bool(p.GetDisableResourceVersioning()),
		Version:                         ProtoToHealthcareBetaFhirStoreVersionEnum(p.GetVersion()),
		ValidationConfig:                ProtoToHealthcareBetaFhirStoreValidationConfig(p.GetValidationConfig()),
		DefaultSearchHandlingStrict:     dcl.Bool(p.GetDefaultSearchHandlingStrict()),
		ComplexDataTypeReferenceParsing: ProtoToHealthcareBetaFhirStoreComplexDataTypeReferenceParsingEnum(p.GetComplexDataTypeReferenceParsing()),
		Project:                         dcl.StringOrNil(p.GetProject()),
		Location:                        dcl.StringOrNil(p.GetLocation()),
		Dataset:                         dcl.StringOrNil(p.GetDataset()),
	}
	// Nested stream configs are converted individually and copied by value.
	for _, r := range p.GetStreamConfigs() {
		obj.StreamConfigs = append(obj.StreamConfigs, *ProtoToHealthcareBetaFhirStoreStreamConfigs(r))
	}
	return obj
}
// FhirStoreVersionEnumToProto converts a FhirStoreVersionEnum enum to its proto representation.
// A nil or unknown value maps to the proto unspecified (0) value.
func HealthcareBetaFhirStoreVersionEnumToProto(e *beta.FhirStoreVersionEnum) betapb.HealthcareBetaFhirStoreVersionEnum {
	if e == nil {
		return betapb.HealthcareBetaFhirStoreVersionEnum(0)
	}
	// Re-prefix the bare value name before looking it up in the proto value map.
	if v, ok := betapb.HealthcareBetaFhirStoreVersionEnum_value["FhirStoreVersionEnum"+string(*e)]; ok {
		return betapb.HealthcareBetaFhirStoreVersionEnum(v)
	}
	return betapb.HealthcareBetaFhirStoreVersionEnum(0)
}

// FhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnumToProto converts a FhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum enum to its proto representation.
// A nil or unknown value maps to the proto unspecified (0) value.
func HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnumToProto(e *beta.FhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum) betapb.HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum {
	if e == nil {
		return betapb.HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum(0)
	}
	if v, ok := betapb.HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum_value["FhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum"+string(*e)]; ok {
		return betapb.HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum(v)
	}
	return betapb.HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnum(0)
}

// FhirStoreComplexDataTypeReferenceParsingEnumToProto converts a FhirStoreComplexDataTypeReferenceParsingEnum enum to its proto representation.
// A nil or unknown value maps to the proto unspecified (0) value.
func HealthcareBetaFhirStoreComplexDataTypeReferenceParsingEnumToProto(e *beta.FhirStoreComplexDataTypeReferenceParsingEnum) betapb.HealthcareBetaFhirStoreComplexDataTypeReferenceParsingEnum {
	if e == nil {
		return betapb.HealthcareBetaFhirStoreComplexDataTypeReferenceParsingEnum(0)
	}
	if v, ok := betapb.HealthcareBetaFhirStoreComplexDataTypeReferenceParsingEnum_value["FhirStoreComplexDataTypeReferenceParsingEnum"+string(*e)]; ok {
		return betapb.HealthcareBetaFhirStoreComplexDataTypeReferenceParsingEnum(v)
	}
	return betapb.HealthcareBetaFhirStoreComplexDataTypeReferenceParsingEnum(0)
}
// FhirStoreNotificationConfigToProto converts a FhirStoreNotificationConfig object to its proto representation.
// Nil-safe: a nil DCL object maps to a nil proto message.
func HealthcareBetaFhirStoreNotificationConfigToProto(o *beta.FhirStoreNotificationConfig) *betapb.HealthcareBetaFhirStoreNotificationConfig {
	if o == nil {
		return nil
	}
	p := &betapb.HealthcareBetaFhirStoreNotificationConfig{}
	p.SetPubsubTopic(dcl.ValueOrEmptyString(o.PubsubTopic))
	return p
}

// FhirStoreStreamConfigsToProto converts a FhirStoreStreamConfigs object to its proto representation.
// Nil-safe: a nil DCL object maps to a nil proto message.
func HealthcareBetaFhirStoreStreamConfigsToProto(o *beta.FhirStoreStreamConfigs) *betapb.HealthcareBetaFhirStoreStreamConfigs {
	if o == nil {
		return nil
	}
	p := &betapb.HealthcareBetaFhirStoreStreamConfigs{}
	p.SetBigqueryDestination(HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationToProto(o.BigqueryDestination))
	// Repeated fields are copied into a freshly sized slice before setting.
	sResourceTypes := make([]string, len(o.ResourceTypes))
	for i, r := range o.ResourceTypes {
		sResourceTypes[i] = r
	}
	p.SetResourceTypes(sResourceTypes)
	return p
}

// FhirStoreStreamConfigsBigqueryDestinationToProto converts a FhirStoreStreamConfigsBigqueryDestination object to its proto representation.
// Nil-safe: a nil DCL object maps to a nil proto message.
func HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationToProto(o *beta.FhirStoreStreamConfigsBigqueryDestination) *betapb.HealthcareBetaFhirStoreStreamConfigsBigqueryDestination {
	if o == nil {
		return nil
	}
	p := &betapb.HealthcareBetaFhirStoreStreamConfigsBigqueryDestination{}
	p.SetDatasetUri(dcl.ValueOrEmptyString(o.DatasetUri))
	p.SetSchemaConfig(HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfigToProto(o.SchemaConfig))
	return p
}

// FhirStoreStreamConfigsBigqueryDestinationSchemaConfigToProto converts a FhirStoreStreamConfigsBigqueryDestinationSchemaConfig object to its proto representation.
// Nil-safe: a nil DCL object maps to a nil proto message.
func HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfigToProto(o *beta.FhirStoreStreamConfigsBigqueryDestinationSchemaConfig) *betapb.HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfig {
	if o == nil {
		return nil
	}
	p := &betapb.HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfig{}
	p.SetSchemaType(HealthcareBetaFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaTypeEnumToProto(o.SchemaType))
	p.SetRecursiveStructureDepth(dcl.ValueOrEmptyInt64(o.RecursiveStructureDepth))
	return p
}

// FhirStoreValidationConfigToProto converts a FhirStoreValidationConfig object to its proto representation.
// Nil-safe: a nil DCL object maps to a nil proto message.
func HealthcareBetaFhirStoreValidationConfigToProto(o *beta.FhirStoreValidationConfig) *betapb.HealthcareBetaFhirStoreValidationConfig {
	if o == nil {
		return nil
	}
	p := &betapb.HealthcareBetaFhirStoreValidationConfig{}
	p.SetDisableProfileValidation(dcl.ValueOrEmptyBool(o.DisableProfileValidation))
	p.SetDisableRequiredFieldValidation(dcl.ValueOrEmptyBool(o.DisableRequiredFieldValidation))
	p.SetDisableReferenceTypeValidation(dcl.ValueOrEmptyBool(o.DisableReferenceTypeValidation))
	p.SetDisableFhirpathValidation(dcl.ValueOrEmptyBool(o.DisableFhirpathValidation))
	sEnabledImplementationGuides := make([]string, len(o.EnabledImplementationGuides))
	for i, r := range o.EnabledImplementationGuides {
		sEnabledImplementationGuides[i] = r
	}
	p.SetEnabledImplementationGuides(sEnabledImplementationGuides)
	return p
}
// FhirStoreToProto converts a FhirStore resource to its proto representation.
// resource is assumed non-nil (would dereference below) — callers in this
// file always pass a value returned by the client.
func FhirStoreToProto(resource *beta.FhirStore) *betapb.HealthcareBetaFhirStore {
	p := &betapb.HealthcareBetaFhirStore{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetEnableUpdateCreate(dcl.ValueOrEmptyBool(resource.EnableUpdateCreate))
	p.SetNotificationConfig(HealthcareBetaFhirStoreNotificationConfigToProto(resource.NotificationConfig))
	p.SetDisableReferentialIntegrity(dcl.ValueOrEmptyBool(resource.DisableReferentialIntegrity))
	p.SetShardNum(dcl.ValueOrEmptyInt64(resource.ShardNum))
	p.SetDisableResourceVersioning(dcl.ValueOrEmptyBool(resource.DisableResourceVersioning))
	p.SetVersion(HealthcareBetaFhirStoreVersionEnumToProto(resource.Version))
	p.SetValidationConfig(HealthcareBetaFhirStoreValidationConfigToProto(resource.ValidationConfig))
	p.SetDefaultSearchHandlingStrict(dcl.ValueOrEmptyBool(resource.DefaultSearchHandlingStrict))
	p.SetComplexDataTypeReferenceParsing(HealthcareBetaFhirStoreComplexDataTypeReferenceParsingEnumToProto(resource.ComplexDataTypeReferenceParsing))
	p.SetProject(dcl.ValueOrEmptyString(resource.Project))
	p.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	p.SetDataset(dcl.ValueOrEmptyString(resource.Dataset))
	// Map and slice fields are copied element-wise into fresh containers.
	mLabels := make(map[string]string, len(resource.Labels))
	for k, r := range resource.Labels {
		mLabels[k] = r
	}
	p.SetLabels(mLabels)
	sStreamConfigs := make([]*betapb.HealthcareBetaFhirStoreStreamConfigs, len(resource.StreamConfigs))
	for i, r := range resource.StreamConfigs {
		sStreamConfigs[i] = HealthcareBetaFhirStoreStreamConfigsToProto(&r)
	}
	p.SetStreamConfigs(sStreamConfigs)
	return p
}
// applyFhirStore handles the gRPC request by passing it to the underlying FhirStore Apply() method.
func (s *FhirStoreServer) applyFhirStore(ctx context.Context, c *beta.Client, request *betapb.ApplyHealthcareBetaFhirStoreRequest) (*betapb.HealthcareBetaFhirStore, error) {
	p := ProtoToFhirStore(request.GetResource())
	res, err := c.ApplyFhirStore(ctx, p)
	if err != nil {
		return nil, err
	}
	r := FhirStoreToProto(res)
	return r, nil
}

// ApplyHealthcareBetaFhirStore handles the gRPC request by passing it to the underlying FhirStore Apply() method.
// It builds a client from the request's service account file first.
func (s *FhirStoreServer) ApplyHealthcareBetaFhirStore(ctx context.Context, request *betapb.ApplyHealthcareBetaFhirStoreRequest) (*betapb.HealthcareBetaFhirStore, error) {
	cl, err := createConfigFhirStore(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyFhirStore(ctx, cl, request)
}

// DeleteFhirStore handles the gRPC request by passing it to the underlying FhirStore Delete() method.
func (s *FhirStoreServer) DeleteHealthcareBetaFhirStore(ctx context.Context, request *betapb.DeleteHealthcareBetaFhirStoreRequest) (*emptypb.Empty, error) {
	cl, err := createConfigFhirStore(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteFhirStore(ctx, ProtoToFhirStore(request.GetResource()))
}

// ListHealthcareBetaFhirStore handles the gRPC request by passing it to the underlying FhirStoreList() method.
func (s *FhirStoreServer) ListHealthcareBetaFhirStore(ctx context.Context, request *betapb.ListHealthcareBetaFhirStoreRequest) (*betapb.ListHealthcareBetaFhirStoreResponse, error) {
	cl, err := createConfigFhirStore(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListFhirStore(ctx, request.GetProject(), request.GetLocation(), request.GetDataset())
	if err != nil {
		return nil, err
	}
	// Convert each listed resource back to its proto form for the response.
	var protos []*betapb.HealthcareBetaFhirStore
	for _, r := range resources.Items {
		rp := FhirStoreToProto(r)
		protos = append(protos, rp)
	}
	p := &betapb.ListHealthcareBetaFhirStoreResponse{}
	p.SetItems(protos)
	return p, nil
}

// createConfigFhirStore builds a beta client authenticated with the given
// service account file. (snake_case parameter name is generator style.)
func createConfigFhirStore(ctx context.Context, service_account_file string) (*beta.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file))
	return beta.NewClient(conf), nil
}
|
package main
import "fmt"
// Test is a minimal interface used to demonstrate implicit interface
// satisfaction; any type with a Print method implements it.
type Test interface {
	Print()
	//Write()
}
// Student is a simple record with unexported demo fields.
type Student struct {
	name string
	age int
	score int
}

// In Go, any type that implements every method of an interface implicitly
// implements that interface; *Student therefore satisfies Test via Print.
// Print writes each field to stdout on its own line.
func (p *Student) Print() {
	fmt.Println("name:", p.name)
	fmt.Println("age:", p.age)
	fmt.Println("score:", p.score)
}
func main() {
var t Test
var student Student = Student{
name:"heylink",
age:16,
score:99,
}
t = &student
t.Print()
}
|
package powerdns
import "testing"
// TestBool verifies Bool returns a pointer whose target equals the input.
func TestBool(t *testing.T) {
	src := true
	got := Bool(src)
	if *got != src {
		t.Error("Invalid return value")
	}
}

// TestBoolValue verifies dereferencing, including the nil → false fallback.
func TestBoolValue(t *testing.T) {
	src := true
	if got := BoolValue(&src); got != src {
		t.Error("Invalid return value")
	}
	if got := BoolValue(nil); got != false {
		t.Error("Unexpected return value")
	}
}
// TestUint32 verifies Uint32 returns a pointer whose target equals the input.
func TestUint32(t *testing.T) {
	src := uint32(1337)
	got := Uint32(src)
	if *got != src {
		t.Error("Invalid return value")
	}
}

// TestUint32Value verifies dereferencing, including the nil → 0 fallback.
func TestUint32Value(t *testing.T) {
	src := uint32(1337)
	if got := Uint32Value(&src); got != src {
		t.Error("Invalid return value")
	}
	if got := Uint32Value(nil); got != 0 {
		t.Error("Unexpected return value")
	}
}
// TestUint64 verifies Uint64 returns a pointer whose target equals the input.
func TestUint64(t *testing.T) {
	src := uint64(1337)
	got := Uint64(src)
	if *got != src {
		t.Error("Invalid return value")
	}
}

// TestUint64Value verifies dereferencing, including the nil → 0 fallback.
func TestUint64Value(t *testing.T) {
	src := uint64(1337)
	if got := Uint64Value(&src); got != src {
		t.Error("Invalid return value")
	}
	if got := Uint64Value(nil); got != 0 {
		t.Error("Unexpected return value")
	}
}
// TestString verifies String returns a pointer whose target equals the input.
func TestString(t *testing.T) {
	src := "foo"
	got := String(src)
	if *got != src {
		t.Error("Invalid return value")
	}
}

// TestStringValue verifies dereferencing, including the nil → "" fallback.
func TestStringValue(t *testing.T) {
	src := "foo"
	if got := StringValue(&src); got != src {
		t.Error("Invalid return value")
	}
	if got := StringValue(nil); got != "" {
		t.Error("Unexpected return value")
	}
}
|
package bitmap
// BitMap is a fixed-size bit set backed by a byte slice.
type BitMap struct {
	bytes []byte
	size  int
}

// NewBitMap creates a bitmap that can hold size bits (indices 0..size-1).
func NewBitMap(size int) *BitMap {
	// (size+7)/8 bytes are required for size bits. The original allocated
	// size/32 bytes while indexing by k/8 below, so set/get panicked with
	// index-out-of-range for any k >= size/4.
	return &BitMap{bytes: make([]byte, (size+7)/8), size: size}
}

// set turns bit k on; out-of-range indices are ignored.
func (m *BitMap) set(k int) {
	// The original check (k > m.size) admitted k == size and negative k,
	// both of which index outside the slice.
	if k < 0 || k >= m.size {
		return
	}
	m.bytes[k/8] |= 1 << uint(k%8)
}

// get reports whether bit k is on; out-of-range indices report false.
func (m *BitMap) get(k int) bool {
	if k < 0 || k >= m.size {
		return false
	}
	return m.bytes[k/8]&(1<<uint(k%8)) != 0
}
|
package main
import (
	"fmt"
	"time"

	"github.com/garyburd/redigo/redis"
)
var pool *redis.Pool
// init creates the shared redis connection pool.
func init() {
	pool = &redis.Pool{
		MaxIdle:   16,
		MaxActive: 1024,
		// IdleTimeout is a time.Duration: the original bare 300 meant
		// 300 nanoseconds, which effectively expired every idle connection.
		IdleTimeout: 300 * time.Second,
		Dial: func() (redis.Conn, error) {
			return redis.Dial("tcp", "localhost:6379")
		},
	}
}
// main performs a Set/Get round trip against redis through the pool.
func main() {
	connect := pool.Get()
	// LIFO defers: the connection is closed before the pool. The original
	// called pool.Close() at the end of the function, before the deferred
	// connect.Close() ran, and skipped it entirely on the error paths.
	defer pool.Close()
	defer connect.Close()
	_, err := connect.Do("Set", "abc", 100)
	if err != nil {
		fmt.Println("set err:", err)
		return
	}
	r, err := redis.Int(connect.Do("Get", "abc"))
	if err != nil {
		fmt.Println("get abc err:", err)
		return
	}
	fmt.Println("result:", r)
}
|
package circuit
// Gate is a logic-circuit node: connections are wired in with AddConnection
// and the node's output signal is computed by Evaluate.
type Gate interface {
	AddConnection(Connection)
	Evaluate() Signal
}
// SignalGate is a source node: it always emits its configured signal,
// regardless of any connections wired into it.
type SignalGate struct {
	Connections []Connection
	Signal      Signal
}

// AddConnection records an incoming connection. It does not affect the
// emitted signal.
func (gate *SignalGate) AddConnection(connection Connection) {
	gate.Connections = append(gate.Connections, connection)
}

// Evaluate returns the gate's fixed signal.
func (gate SignalGate) Evaluate() Signal {
	return gate.Signal
}

// NewSignalGate builds a SignalGate emitting the given signal.
func NewSignalGate(signal Signal) SignalGate {
	return SignalGate{Connections: []Connection{}, Signal: signal}
}
// ORGate emits HI when at least one wired input evaluates to HI.
type ORGate struct {
	Connections     [2]Connection
	ConnectionCount int
}

// AddConnection wires an input into the next free slot; inputs beyond the
// second are silently dropped.
func (gate *ORGate) AddConnection(connection Connection) {
	if gate.ConnectionCount < 2 {
		gate.Connections[gate.ConnectionCount] = connection
		gate.ConnectionCount++
	}
}

// Evaluate ORs together the inputs that were actually wired.
func (gate ORGate) Evaluate() Signal {
	resultSignal := LOW
	// Bound the loop by ConnectionCount: the original iterated over the full
	// fixed-size array, evaluating zero-value Connections when fewer than
	// two inputs had been added.
	for i := 0; i < gate.ConnectionCount; i++ {
		if gate.Connections[i].Evaluate() == HI {
			resultSignal = HI
		}
	}
	return resultSignal
}

// NewORGate builds an ORGate with no inputs wired.
func NewORGate() ORGate {
	return ORGate{[2]Connection{}, 0}
}
// ANDGate emits HI only when every wired input evaluates to HI.
type ANDGate struct {
	Connections     [2]Connection
	ConnectionCount int
}

// AddConnection wires an input into the next free slot; inputs beyond the
// second are silently dropped.
func (gate *ANDGate) AddConnection(connection Connection) {
	if gate.ConnectionCount < 2 {
		gate.Connections[gate.ConnectionCount] = connection
		gate.ConnectionCount++
	}
}

// Evaluate ANDs together the inputs that were actually wired.
func (gate ANDGate) Evaluate() Signal {
	resultSignal := HI
	// Bound the loop by ConnectionCount: the original iterated over the full
	// fixed-size array, evaluating zero-value Connections when fewer than
	// two inputs had been added.
	for i := 0; i < gate.ConnectionCount; i++ {
		if gate.Connections[i].Evaluate() == LOW {
			resultSignal = LOW
		}
	}
	return resultSignal
}

// NewANDGate builds an ANDGate with no inputs wired.
func NewANDGate() ANDGate {
	return ANDGate{[2]Connection{}, 0}
}
// NOTGate inverts its single input.
type NOTGate struct {
	Connection Connection
}

// AddConnection replaces the gate's input with the given connection.
func (gate *NOTGate) AddConnection(connection Connection) {
	gate.Connection = connection
}

// Evaluate returns the inverse of the input signal.
// NOTE(review): if no connection was ever added, this evaluates the
// zero-value Connection — confirm that is well-defined.
func (gate NOTGate) Evaluate() Signal {
	return FlipSignal(gate.Connection.Evaluate())
}

// NewNOTGate builds a NOTGate with an unwired (zero-value) input.
func NewNOTGate() NOTGate {
	return NOTGate{Connection{}}
}
|
package main
import (
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
"github.com/aws/aws-sdk-go/aws/session"
"fmt"
"github.com/aws/aws-sdk-go/service/dynamodb"
"encoding/json"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
)
// DynamoDb writes JSON entries to a DynamoDB table. ApiClient is the
// interface type so tests can substitute a fake client.
type DynamoDb struct {
	Configuration *Configuration
	ApiClient dynamodbiface.DynamoDBAPI
}
// NewDynamo builds a DynamoDb writer from the given configuration, backed by
// a fresh AWS session. It panics if the session cannot be created.
func NewDynamo(config *Configuration) *DynamoDb {
	// The original Infof call had no format verb for config, producing an
	// %!(EXTRA ...) artifact in the log output.
	log.Infof("Starting dynamodb writer with configuration: %v", config)
	sess, err := session.NewSession()
	if err != nil {
		fmt.Println("failed to create session,", err)
		panic(err)
	}
	return &DynamoDb{
		Configuration: config,
		ApiClient:     dynamodb.New(sess),
	}
}
// pipeThrough consumes WriteEntry values from inputChan, writes each entry's
// JSON payload to the configured DynamoDB table, and forwards successfully
// written entries to output. Failures are logged and the entry is dropped.
// NOTE(review): one goroutine is spawned per entry with no bound or shutdown
// signal — confirm upstream applies backpressure.
func (dynamo *DynamoDb) pipeThrough(inputChan chan *WriteEntry, output chan *WriteEntry) {
	go func() {
		// range (instead of the original bare receive loop) terminates when
		// inputChan is closed rather than spinning on zero values.
		for entry := range inputChan {
			// entry is passed as an argument so each goroutine sees its own value.
			go func(entry *WriteEntry) {
				log.Debugf("Writing json entry to dynamo: %v", entry)
				var entryJson interface{}
				if err := json.Unmarshal([]byte(entry.Json), &entryJson); err != nil {
					log.Errorf("failed to parse json: %v", err)
					return
				}
				// Guarded assertion: a non-object JSON payload previously
				// panicked this goroutine.
				parsedJson, ok := entryJson.(map[string]interface{})
				if !ok {
					log.Errorf("json entry is not an object: %v", entry.Json)
					return
				}
				att, err := dynamodbattribute.MarshalMap(parsedJson)
				if err != nil {
					log.Errorf("failed to marshal json: %v", err)
					return
				}
				returnValue := dynamodb.ReturnValueNone
				_, err = dynamo.ApiClient.PutItem(&dynamodb.PutItemInput{
					TableName:    &dynamo.Configuration.TableName,
					Item:         att,
					ReturnValues: &returnValue,
				})
				if err != nil {
					log.Errorf("failed to put item to dynamodb: %v", err)
					return
				}
				output <- entry
			}(entry)
		}
	}()
}
|
/*
Your job is to figure out why Daniel likes Wendy, and some other girls. If you look at the Tests tab you'll notice that Daniel doesn't like many girls.
Create a function that returns whether he likes her true, or not false.
Examples
danielLikes("Imani") ➞ false
danielLikes("Margo") ➞ true
danielLikes("Sandra") ➞ false
Notes
Daniel likes his own name.
If you figure it out, prepare to say aha!
You get 500 girl names, enough to try different approaches.
Looking at the resources tab will give you a big hint.
Hint
String.prototype.charCodeAt()developer.mozilla.org
Returns an integer between 0 and 65535 representing the UTF-16 code unit at the given index.
*/
package main
import "strings"
// main checks likes against every recorded preference in the girls table.
func main() {
	for _, girl := range girls {
		assert(likes(girl.name) == girl.like)
	}
}
// assert panics when its condition does not hold.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// likes reports whether Daniel likes the girl with the given name, matching
// case-insensitively against the preference table.
func likes(name string) bool {
	for _, g := range girls {
		// EqualFold performs the case-insensitive comparison the original
		// implemented with two ToLower calls.
		if g.like && strings.EqualFold(g.name, name) {
			return true
		}
	}
	return false
}
var girls = []struct {
name string
like bool
}{
{"Katelyn", false},
{"Maia", false},
{"Celine", true},
{"Cameron", false},
{"Renata", false},
{"Jayleen", false},
{"Charli", true},
{"Emmalyn", false},
{"Holly", true},
{"Azalea", true},
{"Leona", false},
{"Alejandra", false},
{"Bristol", false},
{"Collins", false},
{"Imani", false},
{"Meadow", false},
{"Alexia", true},
{"Edith", false},
{"Kaydence", false},
{"Leslie", false},
{"Lilith", false},
{"Kora", false},
{"Aisha", false},
{"Meredith", false},
{"Danna", false},
{"Wynter", false},
{"Emberly", false},
{"Julieta", false},
{"Michaela", false},
{"Alayah", true},
{"Jemma", false},
{"Reign", true},
{"Colette", false},
{"Kaliyah", false},
{"Elliott", false},
{"Johanna", false},
{"Remy", false},
{"Sutton", false},
{"Emmy", false},
{"Virginia", false},
{"Briana", true},
{"Oaklynn", false},
{"Adelina", false},
{"Everlee", false},
{"Megan", false},
{"Angelica", false},
{"Justice", false},
{"Mariam", true},
{"Khaleesi", false},
{"Macie", false},
{"Karsyn", false},
{"Alanna", true},
{"Aleah", false},
{"Mae", false},
{"Mallory", false},
{"Esme", false},
{"Skyla", true},
{"Madilynn", false},
{"Charley", false},
{"Allyson", false},
{"Hanna", false},
{"Shiloh", false},
{"Henley", false},
{"Macy", false},
{"Maryam", false},
{"Ivanna", false},
{"Ashlynn", false},
{"Lorelai", false},
{"Amora", false},
{"Ashlyn", false},
{"Sasha", false},
{"Baylee", true},
{"Beatrice", false},
{"Itzel", true},
{"Priscilla", false},
{"Marie", false},
{"Jayda", false},
{"Liberty", false},
{"Rory", false},
{"Alessia", false},
{"Alaia", false},
{"Janelle", false},
{"Kalani", true},
{"Gloria", false},
{"Sloan", true},
{"Dorothy", false},
{"Greta", false},
{"Julie", true},
{"Zahra", true},
{"Savanna", false},
{"Annabella", false},
{"Poppy", true},
{"Amalia", true},
{"Zaylee", false},
{"Cecelia", false},
{"Coraline", false},
{"Kimber", false},
{"Emmie", false},
{"Anne", false},
{"Karina", true},
{"Kassidy", false},
{"Kynlee", false},
{"Monroe", false},
{"Anahi", false},
{"Jaliyah", false},
{"Jazmin", false},
{"Maren", false},
{"Monica", true},
{"Siena", false},
{"Marilyn", false},
{"Reyna", true},
{"Kyra", false},
{"Lilian", false},
{"Jamie", false},
{"Melany", false},
{"Alaya", false},
{"Ariya", true},
{"Kelly", true},
{"Rosie", true},
{"Adley", false},
{"Dream", false},
{"Jaylah", false},
{"Laurel", false},
{"Jazmine", false},
{"Mina", false},
{"Karla", false},
{"Bailee", true},
{"Aubrie", false},
{"Katalina", false},
{"Melina", true},
{"Harlee", true},
{"Elliot", false},
{"Hayley", false},
{"Elaine", true},
{"Karen", false},
{"Dallas", true},
{"Irene", false},
{"Lylah", true},
{"Ivory", true},
{"Chaya", false},
{"Rosa", false},
{"Aleena", true},
{"Braelyn", false},
{"Nola", false},
{"Alma", false},
{"Leyla", true},
{"Pearl", true},
{"Addyson", false},
{"Roselyn", false},
{"Lacey", false},
{"Lennox", false},
{"Reina", false},
{"Aurelia", false},
{"Noa", false},
{"Janiyah", false},
{"Jessie", false},
{"Madisyn", false},
{"Saige", false},
{"Alia", false},
{"Tiana", false},
{"Astrid", false},
{"Cassandra", false},
{"Kyleigh", false},
{"Romina", false},
{"Stevie", false},
{"Haylee", false},
{"Zelda", false},
{"Lillie", false},
{"Aileen", true},
{"Brylee", false},
{"Eileen", true},
{"Yara", false},
{"Ensley", false},
{"Lauryn", false},
{"Giuliana", false},
{"Livia", true},
{"Anya", false},
{"Mikaela", false},
{"Palmer", false},
{"Lyra", false},
{"Mara", false},
{"Marina", false},
{"Kailey", false},
{"Liv", false},
{"Clementine", false},
{"Kenna", false},
{"Briar", false},
{"Emerie", true},
{"Galilea", false},
{"Tiffany", false},
{"Bonnie", false},
{"Elyse", true},
{"Cynthia", false},
{"Frida", false},
{"Kinslee", false},
{"Tatiana", false},
{"Joelle", false},
{"Armani", false},
{"Jolie", false},
{"Nalani", true},
{"Rayna", true},
{"Yareli", false},
{"Meghan", true},
{"Rebekah", false},
{"Addilynn", false},
{"Faye", false},
{"Zariyah", false},
{"Lea", false},
{"Aliza", false},
{"Julissa", false},
{"Lilyana", false},
{"Anika", false},
{"Kairi", false},
{"Aniya", false},
{"Noemi", true},
{"Angie", false},
{"Crystal", false},
{"Bridget", false},
{"Ari", false},
{"Davina", true},
{"Amelie", true},
{"Amirah", true},
{"Annika", true},
{"Elora", false},
{"Xiomara", false},
{"Linda", false},
{"Hana", false},
{"Laney", true},
{"Mercy", true},
{"Hadassah", false},
{"Madalyn", false},
{"Louisa", false},
{"Simone", false},
{"Kori", false},
{"Jillian", false},
{"Alena", false},
{"Malaya", true},
{"Miley", true},
{"Milan", false},
{"Sariyah", false},
{"Malani", true},
{"Clarissa", false},
{"Nala", false},
{"Princess", false},
{"Amani", false},
{"Analia", true},
{"Estella", false},
{"Milana", true},
{"Aya", false},
{"Chana", false},
{"Jayde", false},
{"Tenley", false},
{"Zaria", true},
{"Itzayana", false},
{"Penny", true},
{"Ailani", true},
{"Lara", false},
{"Aubriella", false},
{"Clare", false},
{"Lina", false},
{"Rhea", false},
{"Bria", false},
{"Thalia", true},
{"Keyla", true},
{"Haisley", false},
{"Ryann", true},
{"Addisyn", false},
{"Amaia", false},
{"Chanel", true},
{"Ellen", false},
{"Harmoni", false},
{"Aliana", true},
{"Tinsley", false},
{"Landry", false},
{"Paisleigh", false},
{"Lexie", true},
{"Myah", false},
{"Rylan", true},
{"Deborah", false},
{"Emilee", true},
{"Laylah", false},
{"Novalee", false},
{"Ellis", true},
{"Emmeline", false},
{"Avalynn", false},
{"Hadlee", true},
{"Legacy", true},
{"Braylee", false},
{"Elisabeth", false},
{"Kaylie", false},
{"Ansley", false},
{"Dior", false},
{"Paula", false},
{"Belen", false},
{"Corinne", false},
{"Maleah", true},
{"Martha", false},
{"Teresa", false},
{"Salma", false},
{"Louise", false},
{"Averi", true},
{"Lilianna", false},
{"Amiya", false},
{"Milena", true},
{"Royal", true},
{"Aubrielle", false},
{"Calliope", false},
{"Frankie", false},
{"Natasha", false},
{"Kamilah", false},
{"Meilani", false},
{"Raina", false},
{"Amayah", true},
{"Lailah", true},
{"Rayne", true},
{"Zaniyah", false},
{"Isabela", false},
{"Nathalie", false},
{"Miah", false},
{"Opal", false},
{"Kenia", false},
{"Azariah", false},
{"Hunter", false},
{"Tori", false},
{"Andi", false},
{"Keily", true},
{"Leanna", true},
{"Scarlette", false},
{"Jaelyn", false},
{"Saoirse", false},
{"Selene", false},
{"Dalary", false},
{"Lindsey", false},
{"Marianna", false},
{"Ramona", false},
{"Estelle", false},
{"Giovanna", false},
{"Holland", false},
{"Nancy", true},
{"Emmalynn", false},
{"Mylah", true},
{"Rosalee", false},
{"Sariah", false},
{"Zoie", false},
{"Blaire", true},
{"Lyanna", false},
{"Maxine", false},
{"Anais", false},
{"Dana", false},
{"Judith", false},
{"Kiera", false},
{"Jaelynn", false},
{"Noor", false},
{"Kai", false},
{"Adalee", true},
{"Oaklee", true},
{"Amaris", false},
{"Jaycee", true},
{"Belle", false},
{"Carolyn", false},
{"Della", false},
{"Karter", false},
{"Sky", false},
{"Treasure", false},
{"Vienna", false},
{"Jewel", true},
{"Rivka", true},
{"Rosalyn", false},
{"Alannah", false},
{"Ellianna", false},
{"Sunny", true},
{"Claudia", false},
{"Cara", false},
{"Hailee", true},
{"Estrella", false},
{"Harleigh", false},
{"Zhavia", false},
{"Alianna", false},
{"Brittany", false},
{"Jaylene", false},
{"Journi", false},
{"Marissa", false},
{"Mavis", true},
{"Iliana", true},
{"Jurnee", false},
{"Aislinn", false},
{"Alyson", false},
{"Elsa", false},
{"Kamiyah", false},
{"Kiana", false},
{"Lisa", false},
{"Arlette", false},
{"Kadence", false},
{"Kathleen", false},
{"Halle", false},
{"Erika", false},
{"Sylvie", false},
{"Adele", false},
{"Erica", false},
{"Veda", false},
{"Whitney", false},
{"Bexley", false},
{"Emmaline", false},
{"Guadalupe", false},
{"August", false},
{"Brynleigh", false},
{"Gwen", false},
{"Promise", false},
{"Alisson", false},
{"India", false},
{"Madalynn", false},
{"Paloma", false},
{"Patricia", false},
{"Samira", false},
{"Aliya", false},
{"Casey", true},
{"Jazlynn", false},
{"Paulina", false},
{"Dulce", false},
{"Kallie", true},
{"Perla", true},
{"Adrienne", false},
{"Alora", false},
{"Nataly", false},
{"Ayleen", false},
{"Christine", false},
{"Kaiya", false},
{"Ariadne", false},
{"Karlee", true},
{"Barbara", false},
{"Lillianna", false},
{"Raquel", false},
{"Saniyah", false},
{"Yamileth", false},
{"Arely", true},
{"Celia", false},
{"Heavenly", false},
{"Kaylin", false},
{"Marisol", false},
{"Marleigh", false},
{"Avalyn", false},
{"Berkley", false},
{"Kataleya", false},
{"Zainab", true},
{"Dani", false},
{"Egypt", true},
{"Joyce", true},
{"Kenley", false},
{"Annabel", false},
{"Kaelyn", false},
{"Etta", false},
{"Hadleigh", false},
{"Joselyn", false},
{"Luella", false},
{"Jaylee", false},
{"Zola", false},
{"Alisha", true},
{"Ezra", false},
{"Queen", true},
{"Amia", false},
{"Annalee", false},
{"Bellamy", false},
{"Paola", false},
{"Tinley", false},
{"Violeta", false},
{"Jenesis", false},
{"Arden", false},
{"Giana", false},
{"Wendy", true},
{"Ellison", false},
{"Florence", false},
{"Margo", true},
{"Naya", false},
{"Robin", true},
{"Sandra", false},
{"Scout", true},
{"Waverly", false},
{"Janessa", false},
{"Jayden", false},
{"Micah", false},
{"Novah", true},
{"Zora", false},
{"Ann", false},
{"Jana", false},
{"Taliyah", false},
{"Vada", false},
{"Giavanna", false},
{"Ingrid", false},
{"Valery", false},
{"Azaria", false},
{"Emmarie", false},
{"Esperanza", false},
{"Kailyn", false},
{"Aiyana", true},
{"Keilani", false},
{"Austyn", false},
{"Whitley", false},
{"Elina", false},
{"Kimora", false},
{"Maliah", true},
}
|
/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package consumer
import (
"path/filepath"
"testing"
"github.com/apache/rocketmq-client-go/v2/internal"
"github.com/apache/rocketmq-client-go/v2/internal/remote"
"github.com/apache/rocketmq-client-go/v2/primitive"
"github.com/golang/mock/gomock"
. "github.com/smartystreets/goconvey/convey"
)
// TestNewLocalFileOffsetStore checks that the constructor derives the local
// offset-file path from the clientId/group pair, including the cases where
// either component is empty.
func TestNewLocalFileOffsetStore(t *testing.T) {
	Convey("Given some test cases", t, func() {
		type testCase struct {
			clientId       string
			group          string
			expectedResult *localFileOffsetStore
		}
		cases := []testCase{
			{
				clientId: "",
				group:    "testGroup",
				expectedResult: &localFileOffsetStore{
					group: "testGroup",
					path:  filepath.Join(_LocalOffsetStorePath, "/testGroup/offset.json"),
				},
			},
			{
				clientId: "192.168.24.1@default",
				group:    "",
				expectedResult: &localFileOffsetStore{
					group: "",
					path:  filepath.Join(_LocalOffsetStorePath, "/192.168.24.1@default/offset.json"),
				},
			},
			{
				clientId: "192.168.24.1@default",
				group:    "testGroup",
				expectedResult: &localFileOffsetStore{
					group: "testGroup",
					path:  filepath.Join(_LocalOffsetStorePath, "/192.168.24.1@default/testGroup/offset.json"),
				},
			},
		}
		for _, tc := range cases {
			store := NewLocalFileOffsetStore(tc.clientId, tc.group).(*localFileOffsetStore)
			// The constructor allocates a fresh OffsetTable; mirror it into the
			// expectation so ShouldResemble compares only group and path.
			tc.expectedResult.OffsetTable = store.OffsetTable
			So(store, ShouldResemble, tc.expectedResult)
		}
	})
}
// TestLocalFileOffsetStore exercises update, persist and the three read modes
// of the file-backed offset store against a single message queue.
func TestLocalFileOffsetStore(t *testing.T) {
	Convey("Given a local store with a starting value", t, func() {
		localStore := NewLocalFileOffsetStore("192.168.24.1@default", "testGroup")
		// offsetCase pairs the offset written with the offset expected back.
		type offsetCase struct {
			queue          *primitive.MessageQueue
			setOffset      int64
			expectedOffset int64
		}
		mq := &primitive.MessageQueue{
			Topic:      "testTopic",
			BrokerName: "default",
			QueueId:    1,
		}
		Convey("test update", func() {
			Convey("when increaseOnly is false", func() {
				// With increaseOnly=false every write wins, so the later,
				// smaller offset (1) replaces the earlier one (3).
				cases := []offsetCase{
					{
						queue:          mq,
						setOffset:      3,
						expectedOffset: 3,
					}, {
						queue:          mq,
						setOffset:      1,
						expectedOffset: 1,
					},
				}
				for _, value := range cases {
					localStore.update(value.queue, value.setOffset, false)
					offset, _ := localStore.readWithException(value.queue, _ReadFromMemory)
					So(offset, ShouldEqual, value.expectedOffset)
				}
			})
			Convey("when increaseOnly is true", func() {
				// Reset to 0 first; with increaseOnly=true a smaller write (1)
				// must not lower the stored offset (3).
				localStore.update(mq, 0, false)
				cases := []offsetCase{
					{
						queue:          mq,
						setOffset:      3,
						expectedOffset: 3,
					}, {
						queue:          mq,
						setOffset:      1,
						expectedOffset: 3,
					},
				}
				for _, value := range cases {
					localStore.update(value.queue, value.setOffset, true)
					offset, _ := localStore.readWithException(value.queue, _ReadFromMemory)
					So(offset, ShouldEqual, value.expectedOffset)
				}
			})
		})
		Convey("test persist", func() {
			localStore.update(mq, 1, false)
			offset, _ := localStore.readWithException(mq, _ReadFromMemory)
			So(offset, ShouldEqual, 1)
			queues := []*primitive.MessageQueue{mq}
			localStore.persist(queues)
			// After persisting, the value must be readable straight from disk.
			offset, _ = localStore.readWithException(mq, _ReadFromStore)
			So(offset, ShouldEqual, 1)
			// Drop the in-memory entry; _ReadMemoryThenStore should then fall
			// back to the persisted file and still find the offset.
			localStore.(*localFileOffsetStore).OffsetTable.Delete(MessageQueueKey(*mq))
			offset, _ = localStore.readWithException(mq, _ReadMemoryThenStore)
			So(offset, ShouldEqual, 1)
		})
	})
}
// TestRemoteBrokerOffsetStore exercises update, persist and remove against a
// remote (broker-backed) offset store, with the name server and RMQ client
// replaced by gomock mocks.
func TestRemoteBrokerOffsetStore(t *testing.T) {
	Convey("Given a remote store with a starting value", t, func() {
		ctrl := gomock.NewController(t)
		defer ctrl.Finish()
		namesrv := internal.NewMockNamesrvs(ctrl)
		rmqClient := internal.NewMockRMQClient(ctrl)
		remoteStore := NewRemoteOffsetStore("testGroup", rmqClient, namesrv)
		// offsetCase pairs the offset written with the offset expected back.
		type offsetCase struct {
			queue          *primitive.MessageQueue
			setOffset      int64
			expectedOffset int64
		}
		mq := &primitive.MessageQueue{
			Topic:      "testTopic",
			BrokerName: "default",
			QueueId:    1,
		}
		Convey("test update", func() {
			Convey("when increaseOnly is false", func() {
				// Every write wins: the later, smaller offset (1) replaces 3.
				cases := []offsetCase{
					{
						queue:          mq,
						setOffset:      3,
						expectedOffset: 3,
					}, {
						queue:          mq,
						setOffset:      1,
						expectedOffset: 1,
					},
				}
				for _, value := range cases {
					remoteStore.update(value.queue, value.setOffset, false)
					offset, _ := remoteStore.readWithException(value.queue, _ReadFromMemory)
					So(offset, ShouldEqual, value.expectedOffset)
				}
			})
			Convey("when increaseOnly is true", func() {
				// Reset to 0, then verify a smaller write (1) cannot lower the
				// stored offset (3) when increaseOnly is set.
				remoteStore.update(mq, 0, false)
				cases := []offsetCase{
					{
						queue:          mq,
						setOffset:      3,
						expectedOffset: 3,
					}, {
						queue:          mq,
						setOffset:      1,
						expectedOffset: 3,
					},
				}
				for _, value := range cases {
					remoteStore.update(value.queue, value.setOffset, true)
					offset, _ := remoteStore.readWithException(value.queue, _ReadFromMemory)
					So(offset, ShouldEqual, value.expectedOffset)
				}
			})
		})
		Convey("test persist", func() {
			queues := []*primitive.MessageQueue{mq}
			// The store resolves the broker address and then fetches the offset
			// over the mocked remoting client; each may be hit up to twice —
			// once for persist and once for the read-through below.
			namesrv.EXPECT().FindBrokerAddrByName(gomock.Any()).Return("192.168.24.1:10911").MaxTimes(2)
			ret := &remote.RemotingCommand{
				Code: internal.ResSuccess,
				ExtFields: map[string]string{
					"offset": "1",
				},
			}
			rmqClient.EXPECT().InvokeSync(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(ret, nil).MaxTimes(2)
			remoteStore.persist(queues)
			offset, _ := remoteStore.readWithException(mq, _ReadFromStore)
			So(offset, ShouldEqual, 1)
			// After removing the in-memory entry, a memory read misses (-1)
			// but a memory-then-store read falls back to the broker value.
			remoteStore.remove(mq)
			offset, _ = remoteStore.readWithException(mq, _ReadFromMemory)
			So(offset, ShouldEqual, -1)
			offset, _ = remoteStore.readWithException(mq, _ReadMemoryThenStore)
			So(offset, ShouldEqual, 1)
		})
		Convey("test remove", func() {
			remoteStore.update(mq, 1, false)
			offset, _ := remoteStore.readWithException(mq, _ReadFromMemory)
			So(offset, ShouldEqual, 1)
			// remove drops the queue entry; subsequent memory reads miss.
			remoteStore.remove(mq)
			offset, _ = remoteStore.readWithException(mq, _ReadFromMemory)
			So(offset, ShouldEqual, -1)
		})
	})
}
|
/*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under,
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package common
import (
bcscommon "github.com/Tencent/bk-bcs/bcs-common/common"
)
// ResourceType is the kind of cluster-manager resource an operation targets.
type ResourceType string

// String returns the resource type as a plain string.
func (rt ResourceType) String() string {
	return string(rt)
}

var (
	// Cluster type
	Cluster ResourceType = "cluster"
	// AutoScalingOption type
	AutoScalingOption ResourceType = "autoscalingoption"
	// Cloud type
	Cloud ResourceType = "cloud"
	// CloudVPC type
	CloudVPC ResourceType = "cloudvpc"
	// ClusterCredential type
	ClusterCredential ResourceType = "clustercredential"
	// NameSpace type
	NameSpace ResourceType = "namespace"
	// NameSpaceQuota type
	NameSpaceQuota ResourceType = "namespacequota"
	// NodeGroup type
	NodeGroup ResourceType = "nodegroup"
	// Project type
	Project ResourceType = "project"
	// Task type
	Task ResourceType = "task"
)
// NodeType describes where a node is hosted: cloud instance (CVM) or
// physical machine (IDC).
type NodeType string

// String returns the node type as a plain string.
func (nt NodeType) String() string {
	return string(nt)
}

var (
	// CVM cloud instance
	CVM NodeType = "CVM"
	// IDC instance
	IDC NodeType = "IDC"
)
// NodeGroupType classifies a node pool: a normal cloud-instance pool or an
// external (third-party) pool.
type NodeGroupType string

// String returns the node-group type as a plain string.
func (nt NodeGroupType) String() string {
	return string(nt)
}

// NodeGroupTypeMap is the set of valid node-pool types, used for membership
// checks. (Normal and External are declared below; package-level declaration
// order does not matter in Go.)
var NodeGroupTypeMap = map[NodeGroupType]struct{}{
	Normal:   {},
	External: {},
}

var (
	// Normal ordinary cloud-instance node pool
	Normal NodeGroupType = "normal"
	// External third-party node pool
	External NodeGroupType = "external"
)
const (
	// MasterRole is the well-known kubernetes node label marking master nodes.
	MasterRole = "node-role.kubernetes.io/master"
)

// Keys naming the cluster control-plane / node components.
const (
	// KubeAPIServer cluster apiserver key
	KubeAPIServer = "KubeAPIServer"
	// KubeController cluster controller key
	KubeController = "KubeController"
	// KubeScheduler cluster scheduler key
	KubeScheduler = "KubeScheduler"
	// Etcd cluster etcd key
	Etcd = "Etcd"
	// Kubelet cluster kubelet key
	// NOTE(review): value is lower-case "kubelet" while the other component
	// keys are CamelCase — confirm consumers expect this casing.
	Kubelet = "kubelet"
	// RootDir kubelet root-dir para
	RootDir = "root-dir"
	// RootDirValue kubelet root-dir value
	RootDirValue = "/data/bcs/service/kubelet"
)

// DefaultClusterConfig cluster default service config: semicolon-terminated
// "key=value" option strings keyed by component name.
var DefaultClusterConfig = map[string]string{
	Etcd: "node-data-dir=/data/bcs/lib/etcd;",
}

// DefaultNodeConfig default node config, same format as DefaultClusterConfig.
var DefaultNodeConfig = map[string]string{
	Kubelet: "root-dir=/data/bcs/service/kubelet;",
}
var (
	// DefaultDockerRuntime is the runtime descriptor used for docker clusters.
	DefaultDockerRuntime = &RunTimeInfo{
		Runtime: DockerContainerRuntime,
		Version: DockerRuntimeVersion,
	}
	// DefaultContainerdRuntime is the runtime descriptor used for containerd clusters.
	DefaultContainerdRuntime = &RunTimeInfo{
		Runtime: ContainerdRuntime,
		Version: ContainerdRuntimeVersion,
	}
)

// RunTimeInfo describes a container runtime by name and version.
type RunTimeInfo struct {
	Runtime string // runtime name, e.g. "docker" or "containerd"
	Version string // runtime version string
}
// IsDockerRuntime reports whether runtime names the docker container runtime.
func IsDockerRuntime(runtime string) bool {
	switch runtime {
	case DockerContainerRuntime:
		return true
	default:
		return false
	}
}
// IsContainerdRuntime reports whether runtime names the containerd runtime.
func IsContainerdRuntime(runtime string) bool {
	switch runtime {
	case ContainerdRuntime:
		return true
	default:
		return false
	}
}
const (
	// InitClusterID is the placeholder ID used before a real cluster ID is allocated.
	InitClusterID = "BCS-K8S-00000"
	// RuntimeFlag runtime flag key
	RuntimeFlag = "runtime"
	// ShowSharedCluster flag show shared cluster
	ShowSharedCluster = "showSharedCluster"
	// VClusterNetworkKey vcluster network key
	VClusterNetworkKey = "vclusterNetwork"
	// VClusterNamespaceInfo vcluster namespace info key
	VClusterNamespaceInfo = "namespaceInfo"
	// VclusterNetworkMode vcluster network mode key
	VclusterNetworkMode = "vclusterMode"
	// ClusterManager service name
	ClusterManager = "bcs-cluster-manager"
	// Biz business
	Biz = "biz"
	// BizSet business set
	BizSet = "biz_set"
	// Prod prod env
	Prod = "prod"
	// Debug debug env
	Debug = "debug"
	// ClusterAddNodesLimit cluster addNodes limit
	ClusterAddNodesLimit = 100
	// ClusterManagerServiceDomain domain name for service
	ClusterManagerServiceDomain = "clustermanager.bkbcs.tencent.com"
	// ResourceManagerServiceDomain domain name for service
	ResourceManagerServiceDomain = "resourcemanager.bkbcs.tencent.com"
	// ClusterOverlayNetwork overlay
	ClusterOverlayNetwork = "overlay"
	// ClusterUnderlayNetwork underlay
	ClusterUnderlayNetwork = "underlay"
	// KubeletRootDirPath root-dir default path
	KubeletRootDirPath = "/data/bcs/service/kubelet"
	// DockerGraphPath docker path
	DockerGraphPath = "/data/bcs/service/docker"
	// MountTarget default mount path
	MountTarget = "/data"
	// DefaultImageName default image name
	DefaultImageName = "TencentOS Server 2.6 (TK4)"
	// DockerContainerRuntime runtime
	DockerContainerRuntime = "docker"
	// DockerRuntimeVersion runtime version
	DockerRuntimeVersion = "19.3"
	// ContainerdRuntime runtime
	ContainerdRuntime = "containerd"
	// ContainerdRuntimeVersion runtime version
	ContainerdRuntimeVersion = "1.4.3"
	// ClusterEngineTypeMesos mesos cluster
	ClusterEngineTypeMesos = "mesos"
	// ClusterEngineTypeK8s k8s cluster
	ClusterEngineTypeK8s = "k8s"
	// ClusterTypeFederation federation cluster
	ClusterTypeFederation = "federation"
	// ClusterTypeSingle single cluster
	ClusterTypeSingle = "single"
	// ClusterTypeVirtual virtual cluster
	ClusterTypeVirtual = "virtual"
	// MicroMetaKeyHTTPPort http port in micro service meta
	MicroMetaKeyHTTPPort = "httpport"
	// ClusterManageTypeManaged cloud manage cluster
	ClusterManageTypeManaged = "MANAGED_CLUSTER"
	// ClusterManageTypeIndependent BCS manage cluster
	ClusterManageTypeIndependent = "INDEPENDENT_CLUSTER"
	// TkeCidrStatusAvailable available tke cidr status
	TkeCidrStatusAvailable = "available"
	// TkeCidrStatusUsed used tke cidr status
	TkeCidrStatusUsed = "used"
	// TkeCidrStatusReserved reserved tke cidr status
	TkeCidrStatusReserved = "reserved"
	// StatusInitialization node/cluster/nodegroup status
	StatusInitialization = "INITIALIZATION"
	// StatusCreateClusterFailed status create failed
	StatusCreateClusterFailed = "CREATE-FAILURE"
	// StatusImportClusterFailed status import failed
	StatusImportClusterFailed = "IMPORT-FAILURE"
	// StatusRunning status running
	StatusRunning = "RUNNING"
	// StatusDeleting status deleting for scaling down
	StatusDeleting = "DELETING"
	// StatusDeleted status deleted
	StatusDeleted = "DELETED"
	// StatusDeleteClusterFailed status delete failed
	StatusDeleteClusterFailed = "DELETE-FAILURE"
	// StatusAddNodesFailed status add nodes failed
	StatusAddNodesFailed = "ADD-FAILURE"
	// StatusRemoveNodesFailed status remove nodes failed
	StatusRemoveNodesFailed = "REMOVE-FAILURE"
	// StatusNodeRemovable node is removable
	StatusNodeRemovable = "REMOVABLE"
	// StatusNodeUnknown node status is unknown
	StatusNodeUnknown = "UNKNOWN"
	// StatusNodeNotReady node not ready
	StatusNodeNotReady = "NOTREADY"
	// StatusDeleteNodeGroupFailed nodegroup delete failed
	StatusDeleteNodeGroupFailed = "DELETE-FAILURE"
	// StatusCreateNodeGroupCreating nodegroup creating
	StatusCreateNodeGroupCreating = "CREATING"
	// StatusDeleteNodeGroupDeleting nodegroup deleting
	StatusDeleteNodeGroupDeleting = "DELETING"
	// StatusUpdateNodeGroupUpdating nodegroup updating
	StatusUpdateNodeGroupUpdating = "UPDATING"
	// StatusCreateNodeGroupFailed nodegroup create failed
	StatusCreateNodeGroupFailed = "CREATE-FAILURE"
	// StatusAddCANodesFailed status add CA nodes failed
	StatusAddCANodesFailed = "ADD-CA-FAILURE"
	// StatusRemoveCANodesFailed delete CA nodes failure
	StatusRemoveCANodesFailed = "REMOVE-CA-FAILURE"
	// StatusResourceApplying resource-applying status
	StatusResourceApplying = "APPLYING"
	// StatusResourceApplyFailed resource-apply-failed status
	StatusResourceApplyFailed = "APPLY-FAILURE"
	// StatusNodeGroupUpdating nodegroup updating
	StatusNodeGroupUpdating = "UPDATING"
	// StatusNodeGroupUpdateFailed nodegroup update failed
	StatusNodeGroupUpdateFailed = "UPDATE-FAILURE"
	// StatusAutoScalingOptionNormal normal status
	StatusAutoScalingOptionNormal = "NORMAL"
	// StatusAutoScalingOptionUpdating updating status
	StatusAutoScalingOptionUpdating = "UPDATING"
	// StatusAutoScalingOptionUpdateFailed update failed status
	StatusAutoScalingOptionUpdateFailed = "UPDATE-FAILURE"
	// StatusAutoScalingOptionStopped stopped status
	StatusAutoScalingOptionStopped = "STOPPED"
)
// Error codes for the cluster-manager module, offset from the shared
// bcscommon.BCSErrClusterManager base so codes are globally unique.
const (
	// BcsErrClusterManagerSuccess success code
	BcsErrClusterManagerSuccess = 0
	// BcsErrClusterManagerSuccessStr success string
	BcsErrClusterManagerSuccessStr = "success"
	// BcsErrClusterManagerInvalidParameter invalid request parameter
	BcsErrClusterManagerInvalidParameter = bcscommon.BCSErrClusterManager + 1
	// BcsErrClusterManagerStoreOperationFailed storage operation failed
	BcsErrClusterManagerStoreOperationFailed = bcscommon.BCSErrClusterManager + 2
	// BcsErrClusterManagerUnknown unknown error
	BcsErrClusterManagerUnknown = bcscommon.BCSErrClusterManager + 3
	// BcsErrClusterManagerUnknownStr unknown error msg
	BcsErrClusterManagerUnknownStr = "unknown error"
	// BcsErrClusterManagerDatabaseRecordNotFound database record not found
	BcsErrClusterManagerDatabaseRecordNotFound = bcscommon.BCSErrClusterManager + 4
	// BcsErrClusterManagerDatabaseRecordDuplicateKey database index key is duplicate
	BcsErrClusterManagerDatabaseRecordDuplicateKey = bcscommon.BCSErrClusterManager + 5
	// 6~19 is reserved error for database
	// BcsErrClusterManagerDBOperation db operation error
	BcsErrClusterManagerDBOperation = bcscommon.BCSErrClusterManager + 20
	// BcsErrClusterManagerAllocateClusterInCreateQuota allocate cluster error
	BcsErrClusterManagerAllocateClusterInCreateQuota = bcscommon.BCSErrClusterManager + 21
	// BcsErrClusterManagerK8SOpsFailed k8s operation failed
	BcsErrClusterManagerK8SOpsFailed = bcscommon.BCSErrClusterManager + 22
	// BcsErrClusterManagerResourceDuplicated resource duplicated
	BcsErrClusterManagerResourceDuplicated = bcscommon.BCSErrClusterManager + 23
	// BcsErrClusterManagerCommonErr common error
	BcsErrClusterManagerCommonErr = bcscommon.BCSErrClusterManager + 24
	// BcsErrClusterManagerTaskErr Task error
	BcsErrClusterManagerTaskErr = bcscommon.BCSErrClusterManager + 25
	// BcsErrClusterManagerCloudProviderErr cloudprovider error
	BcsErrClusterManagerCloudProviderErr = bcscommon.BCSErrClusterManager + 26
	// BcsErrClusterManagerDataEmptyErr request data empty error
	BcsErrClusterManagerDataEmptyErr = bcscommon.BCSErrClusterManager + 27
	// BcsErrClusterManagerClusterIDBuildErr build clusterID error
	BcsErrClusterManagerClusterIDBuildErr = bcscommon.BCSErrClusterManager + 28
	// BcsErrClusterManagerNodeManagerErr node manager error
	BcsErrClusterManagerNodeManagerErr = bcscommon.BCSErrClusterManager + 29
	// BcsErrClusterManagerTaskDoneErr task doing or done error
	BcsErrClusterManagerTaskDoneErr = bcscommon.BCSErrClusterManager + 30
	// BcsErrClusterManagerSyncCloudErr cloud config sync error
	BcsErrClusterManagerSyncCloudErr = bcscommon.BCSErrClusterManager + 31
	// BcsErrClusterManagerCheckKubeErr kubeconfig check error
	BcsErrClusterManagerCheckKubeErr = bcscommon.BCSErrClusterManager + 32
	// BcsErrClusterManagerCheckCloudClusterResourceErr cloud/cluster resource error
	BcsErrClusterManagerCheckCloudClusterResourceErr = bcscommon.BCSErrClusterManager + 33
	// BcsErrClusterManagerBkSopsInterfaceErr bk-sops interface error
	BcsErrClusterManagerBkSopsInterfaceErr = bcscommon.BCSErrClusterManager + 34
	// BcsErrClusterManagerDecodeBase64ScriptErr base64 error
	BcsErrClusterManagerDecodeBase64ScriptErr = bcscommon.BCSErrClusterManager + 35
	// BcsErrClusterManagerDecodeActionErr decode action error
	BcsErrClusterManagerDecodeActionErr = bcscommon.BCSErrClusterManager + 36
	// BcsErrClusterManagerExternalNodeScriptErr get external script action error
	BcsErrClusterManagerExternalNodeScriptErr = bcscommon.BCSErrClusterManager + 37
	// BcsErrClusterManagerCheckPermErr permission check error
	BcsErrClusterManagerCheckPermErr = bcscommon.BCSErrClusterManager + 38
	// BcsErrClusterManagerGetPermErr get permission error
	BcsErrClusterManagerGetPermErr = bcscommon.BCSErrClusterManager + 39
	// BcsErrClusterManagerCACleanNodesEmptyErr nodegroup clean nodes empty error
	BcsErrClusterManagerCACleanNodesEmptyErr = bcscommon.BCSErrClusterManager + 40
	// BcsErrClusterManagerCheckKubeConnErr kube connectivity check error
	BcsErrClusterManagerCheckKubeConnErr = bcscommon.BCSErrClusterManager + 41
)
// ClusterIDRange maps an engine/environment pair to the numeric range from
// which new cluster IDs are generated.
//
// NOTE(review): the "mesos-prod" upper bound 399999 overlaps the "k8s-prod"
// range [40000, 1000000] — confirm whether 39999 was intended.
var ClusterIDRange = map[string][]int{
	// Redundant per-element "[]int" literals removed (gofmt -s / gocritic).
	"mesos-stag":  {10000, 15000},
	"mesos-debug": {20000, 25000},
	"mesos-prod":  {30000, 399999},
	"k8s-stag":    {15001, 19999},
	"k8s-debug":   {25001, 29999},
	"k8s-prod":    {40000, 1000000},
}
// Develop run environment
// NOTE(review): declared as a var rather than a const, presumably so it can
// be overridden (e.g. via -ldflags) — confirm before changing to const.
var Develop = "dev"

// StagClusterENV stag env
var StagClusterENV = "stag"
// ImageProvider
const (
	// ImageProvider key identifying the image provider
	ImageProvider = "IMAGE_PROVIDER"
	// PublicImageProvider public image
	PublicImageProvider = "PUBLIC_IMAGE"
	// PrivateImageProvider private image
	PrivateImageProvider = "PRIVATE_IMAGE"
	// MarketImageProvider marketplace image
	MarketImageProvider = "MARKET_IMAGE"
)

// Instance sell status
const (
	// InstanceSell SELL status
	InstanceSell = "SELL"
	// InstanceSoldOut SOLD_OUT status
	InstanceSoldOut = "SOLD_OUT"
)

// String forms of booleans, for string-typed flags and fields.
const (
	// True string form of boolean true
	True = "true"
	// False string form of boolean false
	False = "false"
)
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package x509 parses X.509-encoded keys and certificates.
package bitx509
import (
"crypto"
"encoding/asn1"
"errors"
"math/big"
"time"
"github.com/njones/bitcoin-crypto/bitecdsa"
"github.com/njones/bitcoin-crypto/bitelliptic"
"github.com/njones/bitcoin-crypto/bitx509/pkix"
)
// pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo
// in RFC 3280.
type pkixPublicKey struct {
	Algo      pkix.AlgorithmIdentifier // identifies the key algorithm (and, for ECDSA, the curve)
	BitString asn1.BitString           // the encoded public key itself
}
// ParsePKIXPublicKey parses a DER encoded public key. These values are
// typically found in PEM blocks with "BEGIN PUBLIC KEY".
func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) {
	// Decode the outer SubjectPublicKeyInfo wrapper first.
	var info publicKeyInfo
	rest, err := asn1.Unmarshal(derBytes, &info)
	if err != nil {
		return nil, err
	}
	if len(rest) != 0 {
		return nil, errors.New("x509: trailing data after ASN.1 of public-key")
	}
	// Then dispatch on the declared algorithm OID.
	algo := getPublicKeyAlgorithmFromOID(info.Algorithm.Algorithm)
	if algo == UnknownPublicKeyAlgorithm {
		return nil, errors.New("x509: unknown public key algorithm")
	}
	return parsePublicKey(algo, &info)
}
// marshalPublicKey encodes pub into its PKIX byte form plus the matching
// AlgorithmIdentifier. Only *bitecdsa.PublicKey is supported; any other type
// returns an error.
func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) {
	switch pub := pub.(type) {
	case *bitecdsa.PublicKey:
		publicKeyBytes = pub.Marshal(pub.X, pub.Y)
		// TODO(runeaune): Get the actual curve.
		// NOTE(review): the curve OID is hard-coded to secp256k1 regardless of
		// the key's actual curve — confirm all callers only use S256 keys.
		oid, ok := oidFromNamedCurve(bitelliptic.S256())
		if !ok {
			return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: unsupported elliptic curve")
		}
		publicKeyAlgorithm.Algorithm = oidPublicKeyECDSA
		var paramBytes []byte
		// The named-curve OID travels in the AlgorithmIdentifier parameters
		// (RFC 5480, 2.1.1).
		paramBytes, err = asn1.Marshal(oid)
		if err != nil {
			return
		}
		publicKeyAlgorithm.Parameters.FullBytes = paramBytes
	default:
		return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: only ECDSA public keys supported")
	}
	return publicKeyBytes, publicKeyAlgorithm, nil
}
// MarshalPKIXPublicKey serialises a public key to DER-encoded PKIX format.
// Only ECDSA public keys are supported (see marshalPublicKey).
func MarshalPKIXPublicKey(pub interface{}) ([]byte, error) {
	var publicKeyBytes []byte
	var publicKeyAlgorithm pkix.AlgorithmIdentifier
	var err error

	if publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(pub); err != nil {
		return nil, err
	}

	// Renamed from "pkix": the local previously shadowed the imported pkix
	// package inside this function.
	pkixPub := pkixPublicKey{
		Algo: publicKeyAlgorithm,
		BitString: asn1.BitString{
			Bytes:     publicKeyBytes,
			BitLength: 8 * len(publicKeyBytes),
		},
	}

	ret, err := asn1.Marshal(pkixPub)
	if err != nil {
		// Previously this error was silently discarded, which could hand the
		// caller an empty DER blob together with a nil error.
		return nil, err
	}
	return ret, nil
}
// These structures reflect the ASN.1 structure of X.509 certificates.
type certificate struct {
	Raw                asn1.RawContent
	TBSCertificate     tbsCertificate
	SignatureAlgorithm pkix.AlgorithmIdentifier
	SignatureValue     asn1.BitString
}

// tbsCertificate is the "to-be-signed" portion of a certificate (RFC 5280,
// 4.1); Raw preserves the original DER bytes for signature verification.
type tbsCertificate struct {
	Raw                asn1.RawContent
	Version            int `asn1:"optional,explicit,default:1,tag:0"`
	SerialNumber       *big.Int
	SignatureAlgorithm pkix.AlgorithmIdentifier
	Issuer             asn1.RawValue
	Validity           validity
	Subject            asn1.RawValue
	PublicKey          publicKeyInfo
	UniqueId           asn1.BitString   `asn1:"optional,tag:1"`
	SubjectUniqueId    asn1.BitString   `asn1:"optional,tag:2"`
	Extensions         []pkix.Extension `asn1:"optional,explicit,tag:3"`
}

// dsaAlgorithmParameters holds DSA domain parameters (RFC 3279, 2.3.2).
type dsaAlgorithmParameters struct {
	P, Q, G *big.Int
}

// dsaSignature is the ASN.1 form of a DSA signature; ECDSA signatures share
// the same (R, S) structure.
type dsaSignature struct {
	R, S *big.Int
}

type ecdsaSignature dsaSignature

// validity is the certificate validity window.
type validity struct {
	NotBefore, NotAfter time.Time
}

// publicKeyInfo mirrors SubjectPublicKeyInfo; Raw preserves the original DER.
type publicKeyInfo struct {
	Raw       asn1.RawContent
	Algorithm pkix.AlgorithmIdentifier
	PublicKey asn1.BitString
}

// RFC 5280, 4.2.1.1 (authority key identifier extension)
type authKeyId struct {
	Id []byte `asn1:"optional,tag:0"`
}
// SignatureAlgorithm identifies the algorithm used to sign a certificate.
type SignatureAlgorithm int

const (
	UnknownSignatureAlgorithm SignatureAlgorithm = iota
	MD2WithRSA
	MD5WithRSA
	SHA1WithRSA
	SHA256WithRSA
	SHA384WithRSA
	SHA512WithRSA
	DSAWithSHA1
	DSAWithSHA256
	ECDSAWithSHA1
	ECDSAWithSHA256
	ECDSAWithSHA384
	ECDSAWithSHA512
)

// PublicKeyAlgorithm identifies the algorithm of a certificate's public key.
type PublicKeyAlgorithm int

const (
	UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota
	RSA
	DSA
	ECDSA
)
// OIDs for signature algorithms
//
// pkcs-1 OBJECT IDENTIFIER ::= {
// iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 }
//
//
// RFC 3279 2.2.1 RSA Signature Algorithms
//
// md2WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 2 }
//
// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 }
//
// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 }
//
// dsaWithSha1 OBJECT IDENTIFIER ::= {
// iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 }
//
// RFC 3279 2.2.3 ECDSA Signature Algorithm
//
// ecdsa-with-SHA1 OBJECT IDENTIFIER ::= {
// iso(1) member-body(2) us(840) ansi-x962(10045)
// signatures(4) ecdsa-with-SHA1(1)}
//
//
// RFC 4055 5 PKCS #1 Version 1.5
//
// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 }
//
// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 }
//
// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 }
//
//
// RFC 5758 3.1 DSA Signature Algorithms
//
// dsaWithSha256 OBJECT IDENTIFIER ::= {
// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
// csor(3) algorithms(4) id-dsa-with-sha2(3) 2}
//
// RFC 5758 3.2 ECDSA Signature Algorithm
//
// ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 }
//
// ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 }
//
// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 }
// DER OIDs for the signature algorithms documented above.
var (
	oidSignatureMD2WithRSA      = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
	oidSignatureMD5WithRSA      = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
	oidSignatureSHA1WithRSA     = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
	oidSignatureSHA256WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
	oidSignatureSHA384WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
	oidSignatureSHA512WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
	oidSignatureDSAWithSHA1     = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
	oidSignatureDSAWithSHA256   = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 4, 3, 2}
	oidSignatureECDSAWithSHA1   = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
	oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
	oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
	oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
)
// signatureAlgorithmDetails maps each supported SignatureAlgorithm to its
// OID, the public-key algorithm it pairs with, and the digest it uses.
var signatureAlgorithmDetails = []struct {
	algo       SignatureAlgorithm
	oid        asn1.ObjectIdentifier
	pubKeyAlgo PublicKeyAlgorithm
	hash       crypto.Hash
}{
	{MD2WithRSA, oidSignatureMD2WithRSA, RSA, crypto.Hash(0) /* no value for MD2 */},
	{MD5WithRSA, oidSignatureMD5WithRSA, RSA, crypto.MD5},
	{SHA1WithRSA, oidSignatureSHA1WithRSA, RSA, crypto.SHA1},
	{SHA256WithRSA, oidSignatureSHA256WithRSA, RSA, crypto.SHA256},
	{SHA384WithRSA, oidSignatureSHA384WithRSA, RSA, crypto.SHA384},
	{SHA512WithRSA, oidSignatureSHA512WithRSA, RSA, crypto.SHA512},
	{DSAWithSHA1, oidSignatureDSAWithSHA1, DSA, crypto.SHA1},
	{DSAWithSHA256, oidSignatureDSAWithSHA256, DSA, crypto.SHA256},
	{ECDSAWithSHA1, oidSignatureECDSAWithSHA1, ECDSA, crypto.SHA1},
	{ECDSAWithSHA256, oidSignatureECDSAWithSHA256, ECDSA, crypto.SHA256},
	{ECDSAWithSHA384, oidSignatureECDSAWithSHA384, ECDSA, crypto.SHA384},
	{ECDSAWithSHA512, oidSignatureECDSAWithSHA512, ECDSA, crypto.SHA512},
}
// getSignatureAlgorithmFromOID looks up oid in signatureAlgorithmDetails and
// returns the matching algorithm, or UnknownSignatureAlgorithm when absent.
func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) SignatureAlgorithm {
	for i := range signatureAlgorithmDetails {
		if signatureAlgorithmDetails[i].oid.Equal(oid) {
			return signatureAlgorithmDetails[i].algo
		}
	}
	return UnknownSignatureAlgorithm
}
// RFC 3279, 2.3 Public Key Algorithms
//
// pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
// rsadsi(113549) pkcs(1) 1 }
//
// rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 }
//
// id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
// x9-57(10040) x9cm(4) 1 }
//
// RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters
//
// id-ecPublicKey OBJECT IDENTIFIER ::= {
// iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 }
// DER OIDs for the public-key algorithms (RFC 3279, 2.3 / RFC 5480, 2.1.1).
var (
	oidPublicKeyRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
	oidPublicKeyDSA   = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1}
	oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1}
)
// getPublicKeyAlgorithmFromOID maps a public-key OID to its algorithm,
// returning UnknownPublicKeyAlgorithm for anything unrecognised.
func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm {
	if oid.Equal(oidPublicKeyRSA) {
		return RSA
	}
	if oid.Equal(oidPublicKeyDSA) {
		return DSA
	}
	if oid.Equal(oidPublicKeyECDSA) {
		return ECDSA
	}
	return UnknownPublicKeyAlgorithm
}
// RFC 5480, 2.1.1.1. Named Curve
//
// secp256k1 OBJECT IDENTIFIER ::= {
// iso(1) identified-organization(3) certicom(132) curve(0) 10 }
//
var (
	// oidNamedCurveS256 identifies secp256k1 (the Bitcoin curve).
	oidNamedCurveS256 = asn1.ObjectIdentifier{1, 3, 132, 0, 10}
)
// namedCurveFromOID returns the curve named by oid, or nil when the OID is
// not a supported named curve (only secp256k1 is recognised).
func namedCurveFromOID(oid asn1.ObjectIdentifier) *bitelliptic.BitCurve {
	if oid.Equal(oidNamedCurveS256) {
		return bitelliptic.S256()
	}
	return nil
}
// oidFromNamedCurve maps a curve back to its named-curve OID. The switch
// compares curve pointers, which only matches when bitelliptic.S256() hands
// back a shared singleton — NOTE(review): confirm S256 is memoized.
func oidFromNamedCurve(curve *bitelliptic.BitCurve) (asn1.ObjectIdentifier, bool) {
	switch curve {
	case bitelliptic.S256():
		return oidNamedCurveS256, true
	}
	return nil, false
}
// RFC 5280, 4.2.1.14 (CRL distribution points extension)
type distributionPoint struct {
	DistributionPoint distributionPointName `asn1:"optional,tag:0"`
	Reason            asn1.BitString        `asn1:"optional,tag:1"`
	CRLIssuer         asn1.RawValue         `asn1:"optional,tag:2"`
}

// distributionPointName is the CHOICE between a full GeneralNames value and a
// name relative to the CRL issuer.
type distributionPointName struct {
	FullName     asn1.RawValue    `asn1:"optional,tag:0"`
	RelativeName pkix.RDNSequence `asn1:"optional,tag:1"`
}
// parsePublicKey decodes the SubjectPublicKeyInfo payload for the given
// algorithm. Only ECDSA is handled; any other algorithm yields (nil, nil)
// so the caller decides how to treat it.
func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{}, error) {
	if algo != ECDSA {
		return nil, nil
	}
	der := keyData.PublicKey.RightAlign()
	// The algorithm parameters carry the named-curve OID.
	var curveOID asn1.ObjectIdentifier
	rest, err := asn1.Unmarshal(keyData.Algorithm.Parameters.FullBytes, &curveOID)
	if err != nil {
		return nil, err
	}
	if len(rest) != 0 {
		return nil, errors.New("x509: trailing data after ECDSA parameters")
	}
	curve := namedCurveFromOID(curveOID)
	if curve == nil {
		return nil, errors.New("x509: unsupported elliptic curve")
	}
	x, y := curve.Unmarshal(der)
	if x == nil {
		return nil, errors.New("x509: failed to unmarshal elliptic curve point")
	}
	return &bitecdsa.PublicKey{
		curve, x, y,
	}, nil
}
// reverseBitsInAByte returns in with its bit order mirrored: the
// most-significant bit becomes the least-significant and vice versa.
func reverseBitsInAByte(in byte) byte {
	var out byte
	for bit := 0; bit < 8; bit++ {
		out <<= 1
		out |= in & 1
		in >>= 1
	}
	return out
}
// asn1BitLength returns the bit-length of bitString by considering the
// most-significant bit in a byte to be the "first" bit. This convention
// matches ASN.1, but differs from almost everything else: trailing zero
// bits (in ASN.1 order) are not counted.
func asn1BitLength(bitString []byte) int {
	for i := len(bitString) - 1; i >= 0; i-- {
		b := bitString[i]
		if b == 0 {
			continue
		}
		// Count zero bits below the lowest set bit of this byte; those are
		// the trailing bits in ASN.1's MSB-first numbering.
		trailing := 0
		for b&1 == 0 {
			b >>= 1
			trailing++
		}
		return i*8 + (8 - trailing)
	}
	return 0
}
// X.509 certificate extension OIDs (the 2.5.29.* id-ce arc plus the PKIX
// Authority Information Access extension).
var (
	oidExtensionSubjectKeyId          = []int{2, 5, 29, 14}
	oidExtensionKeyUsage              = []int{2, 5, 29, 15}
	oidExtensionExtendedKeyUsage      = []int{2, 5, 29, 37}
	oidExtensionAuthorityKeyId        = []int{2, 5, 29, 35}
	oidExtensionBasicConstraints      = []int{2, 5, 29, 19}
	oidExtensionSubjectAltName        = []int{2, 5, 29, 17}
	oidExtensionCertificatePolicies   = []int{2, 5, 29, 32}
	oidExtensionNameConstraints       = []int{2, 5, 29, 30}
	oidExtensionCRLDistributionPoints = []int{2, 5, 29, 31}
	oidExtensionAuthorityInfoAccess   = []int{1, 3, 6, 1, 5, 5, 7, 1, 1}
)

// Authority Information Access method OIDs (OCSP responder and issuing CA).
var (
	oidAuthorityInfoAccessOcsp    = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1}
	oidAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2}
)
// oidInExtensions reports whether an extension with the given oid exists in
// extensions. (The previous comment carried the stale name
// oidNotInExtensions.)
func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool {
	for _, e := range extensions {
		if e.Id.Equal(oid) {
			return true
		}
	}
	return false
}
// signingParamsForPublicKey returns the hash function and signature
// algorithm identifier to use when signing with the given public key.
// If requestedSigAlgo is not zero it overrides the default and must be
// compatible with the key's type.
//
// Fix: the fallback error previously claimed "only RSA and ECDSA keys
// supported", but the switch below only handles *bitecdsa.PublicKey.
func signingParamsForPublicKey(pub interface{}, requestedSigAlgo SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) {
	var pubType PublicKeyAlgorithm
	switch pub.(type) {
	case *bitecdsa.PublicKey:
		pubType = ECDSA
		// TODO(rune): Allow other curves.
		hashFunc = crypto.SHA256
		sigAlgo.Algorithm = oidSignatureECDSAWithSHA256
	default:
		err = errors.New("x509: only ECDSA keys supported")
	}
	if err != nil {
		return
	}
	if requestedSigAlgo == 0 {
		// Zero means "use the default chosen above".
		return
	}
	// Honor the explicit request, validating it against the key type.
	for _, details := range signatureAlgorithmDetails {
		if details.algo == requestedSigAlgo {
			if details.pubKeyAlgo != pubType {
				err = errors.New("x509: requested SignatureAlgorithm does not match private key type")
				return
			}
			sigAlgo.Algorithm, hashFunc = details.oid, details.hash
			if hashFunc == 0 {
				err = errors.New("x509: cannot sign with hash function requested")
			}
			return
		}
	}
	err = errors.New("x509: unknown SignatureAlgorithm")
	return
}
// newRawAttributes converts AttributeTypeAndValueSETs from a template
// CertificateRequest's Attributes into tbsCertificateRequest RawAttributes
// by round-tripping them through DER.
//
// Fix: the error returned by asn1.Marshal was previously overwritten by the
// subsequent asn1.Unmarshal without ever being checked.
func newRawAttributes(attributes []pkix.AttributeTypeAndValueSET) ([]asn1.RawValue, error) {
	var rawAttributes []asn1.RawValue
	b, err := asn1.Marshal(attributes)
	if err != nil {
		return nil, err
	}
	rest, err := asn1.Unmarshal(b, &rawAttributes)
	if err != nil {
		return nil, err
	}
	if len(rest) != 0 {
		return nil, errors.New("x509: failed to unmarshall raw CSR Attributes")
	}
	return rawAttributes, nil
}
// parseRawAttributes unmarshals RawAttributes into
// AttributeTypeAndValueSETs, silently skipping entries that do not parse
// cleanly (i.e.: challengePassword or unstructuredName).
func parseRawAttributes(rawAttributes []asn1.RawValue) []pkix.AttributeTypeAndValueSET {
	var attributes []pkix.AttributeTypeAndValueSET
	for i := range rawAttributes {
		var attr pkix.AttributeTypeAndValueSET
		if rest, err := asn1.Unmarshal(rawAttributes[i].FullBytes, &attr); err == nil && len(rest) == 0 {
			attributes = append(attributes, attr)
		}
	}
	return attributes
}
|
package controller
import (
"dmicro/common/log"
"dmicro/pkg/context"
passport "dmicro/srv/passport/api"
"dmicro/web/dd/internal/client"
)
// This file defines the passport controller module and its handler
// functions.
// PassportController groups the passport-related HTTP handlers; it carries
// no state of its own.
type PassportController struct {
}
// Login handles the login endpoint. context.DmContext wraps gin.Context
// (the project uses the gin HTTP framework) and adds JSON request parsing.
// The handler decodes the JSON body into a passport.LoginRequest, forwards
// it over RPC to the passport service, and relays the response (or error)
// back to the client. toContext converts the DmContext into a
// context.Context so the RPC call can carry tracing/cancellation.
func (ctl *PassportController) Login(mctx *context.DmContext) {
	log.Debug("Login...")
	request := new(passport.LoginRequest)
	if err := mctx.ParseJSON(request); err != nil {
		log.Error(err)
		mctx.ResponseError(err)
		return
	}
	// TODO: validate request fields (types, required fields) before the RPC.
	response, err := client.PassportClient.Login(toContext(mctx), request)
	if err != nil {
		log.Error(err)
		mctx.ResponseError(err)
		return
	}
	mctx.Response(response)
}
// Sms handles the SMS endpoint; the flow mirrors Login: decode the JSON
// body, call the passport service over RPC, relay the response.
func (ctl *PassportController) Sms(mctx *context.DmContext) {
	log.Debug("Sms...")
	request := new(passport.Request)
	if err := mctx.ParseJSON(request); err != nil {
		log.Error(err)
		mctx.ResponseError(err)
		return
	}
	// TODO: validate request fields before the RPC.
	response, err := client.PassportClient.Sms(toContext(mctx), request)
	if err != nil {
		log.Error(err)
		mctx.ResponseError(err)
		return
	}
	mctx.Response(response)
}
// SmsLogin handles login via SMS verification code; the flow mirrors Login.
func (ctl *PassportController) SmsLogin(mctx *context.DmContext) {
	log.Debug("SmsLogin...")
	request := new(passport.SmsLoginRequest)
	if err := mctx.ParseJSON(request); err != nil {
		log.Error(err)
		mctx.ResponseError(err)
		return
	}
	// TODO: validate request fields before the RPC.
	response, err := client.PassportClient.SmsLogin(toContext(mctx), request)
	if err != nil {
		log.Error(err)
		mctx.ResponseError(err)
		return
	}
	mctx.Response(response)
}
// OauthLogin handles login via an OAuth provider; the flow mirrors Login.
func (ctl *PassportController) OauthLogin(mctx *context.DmContext) {
	log.Debug("OauthLogin...")
	request := new(passport.OAuthLoginRequest)
	if err := mctx.ParseJSON(request); err != nil {
		log.Error(err)
		mctx.ResponseError(err)
		return
	}
	// TODO: validate request fields before the RPC.
	response, err := client.PassportClient.OAuthLogin(toContext(mctx), request)
	if err != nil {
		log.Error(err)
		mctx.ResponseError(err)
		return
	}
	mctx.Response(response)
}
// SetPwd handles setting the account password; the flow mirrors Login.
func (ctl *PassportController) SetPwd(mctx *context.DmContext) {
	log.Debug("SetPwd...")
	request := new(passport.SetPwdRequest)
	if err := mctx.ParseJSON(request); err != nil {
		log.Error(err)
		mctx.ResponseError(err)
		return
	}
	// TODO: validate request fields before the RPC.
	response, err := client.PassportClient.SetPwd(toContext(mctx), request)
	if err != nil {
		log.Error(err)
		mctx.ResponseError(err)
		return
	}
	mctx.Response(response)
}
|
package eyas_forage
import (
"time"
"context"
"github.com/ricky1122alonefe/hawkEye-go/module"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/mvcc/mvccpb"
)
// ForageManager is the job manager: it owns the etcd handles (client, KV,
// lease, watcher) used to read, watch, and lock jobs.
type ForageManager struct {
	client  *clientv3.Client
	kv      clientv3.KV
	lease   clientv3.Lease
	watcher clientv3.Watcher
}
var (
	// forageMgr is the package-level singleton, assigned by InitJobMgr.
	forageMgr *ForageManager
)
// watchForageJobs pushes every job currently stored in etcd to the
// scheduler as a SAVE event, then watches both the cron-job and the
// normal-job prefixes for subsequent changes, translating each etcd event
// into a JobEvent for the scheduler goroutine.
//
// Bugs fixed relative to the previous version:
//   - the second event loop ranged over watchChanSchedule instead of
//     watchChanNormal, so normal-job events were read from the wrong channel;
//   - both event loops ran sequentially in one goroutine; since a range
//     over a watch channel blocks until the channel closes, the second loop
//     was unreachable — each prefix now gets its own goroutine;
//   - a failed UnpackJob during the initial snapshot left err set, making
//     the function report an error even though the watchers started fine.
func (forageMgr *ForageManager) watchForageJobs() (err error) {
	var (
		scheduleResp *clientv3.GetResponse
		normalResp   *clientv3.GetResponse
	)
	// Snapshot all schedule (cron) jobs currently stored in etcd.
	if scheduleResp, err = forageMgr.kv.Get(context.TODO(), module.SCHE_JOB_SAVE_DIR, clientv3.WithPrefix()); err != nil {
		return
	}
	// Snapshot all normal jobs currently stored in etcd.
	if normalResp, err = forageMgr.kv.Get(context.TODO(), module.SIMP_JOB_SAVE_DIR, clientv3.WithPrefix()); err != nil {
		return
	}
	// Feed the existing jobs to the scheduler; entries that fail to unpack
	// are skipped and must not clobber the returned err.
	for _, kvs := range [][]*mvccpb.KeyValue{scheduleResp.Kvs, normalResp.Kvs} {
		for _, kvPair := range kvs {
			if job, unpackErr := module.UnpackJob(kvPair.Value); unpackErr == nil {
				forage_scheduler.PushJobEvent(module.BuildJobEvent(module.JOB_EVENT_SAVE, job))
			}
		}
	}
	// Watch from the revision right after the snapshot.
	// NOTE(review): the schedule snapshot's revision is also used for the
	// normal-job watch, matching the previous behavior — confirm both
	// prefixes live in the same etcd cluster/revision space.
	watchStartRevision := scheduleResp.Header.Revision + 1
	watchPrefix := func(watchChan clientv3.WatchChan) {
		for watchResp := range watchChan {
			for _, watchEvent := range watchResp.Events {
				var jobEvent *module.JobEvent
				switch watchEvent.Type {
				case mvccpb.PUT: // job written to etcd
					job, unpackErr := module.UnpackJob(watchEvent.Kv.Value)
					if unpackErr != nil {
						continue
					}
					jobEvent = module.BuildJobEvent(module.JOB_EVENT_SAVE, job)
				case mvccpb.DELETE: // job removed from etcd
					jobName := module.ExtractJobName(string(watchEvent.Kv.Key))
					jobEvent = module.BuildJobEvent(module.JOB_EVENT_DELETE, &module.ScheduleJob{Name: jobName})
				}
				// Forward the change to the scheduler goroutine.
				forage_scheduler.PushJobEvent(jobEvent)
			}
		}
	}
	go watchPrefix(forageMgr.watcher.Watch(context.TODO(), module.SCHE_JOB_SAVE_DIR, clientv3.WithRev(watchStartRevision), clientv3.WithPrefix()))
	go watchPrefix(forageMgr.watcher.Watch(context.TODO(), module.SIMP_JOB_SAVE_DIR, clientv3.WithRev(watchStartRevision), clientv3.WithPrefix()))
	return
}
// scheduleWatchKiller starts a goroutine that watches the kill-marker
// directory in etcd and forwards a KILL event to the scheduler for every
// marker that is written. Expired (auto-deleted) markers are ignored.
func (forageMgr *ForageManager) scheduleWatchKiller() {
	go func() {
		watchChan := forageMgr.watcher.Watch(context.TODO(), module.JOB_KILLER_DIR, clientv3.WithPrefix())
		for watchResp := range watchChan {
			for _, watchEvent := range watchResp.Events {
				switch watchEvent.Type {
				case mvccpb.PUT:
					// A kill marker was written: tell the scheduler to kill the job.
					jobName := module.ExtractKillerName(string(watchEvent.Kv.Key))
					job := &module.ScheduleJob{Name: jobName}
					forage_scheduler.PushJobEvent(module.BuildJobEvent(module.JOB_EVENT_KILL, job))
				case mvccpb.DELETE:
					// Kill marker expired and was auto-deleted: nothing to do.
				}
			}
		}
	}()
}
// InitJobMgr connects to etcd, initializes the package-level ForageManager
// singleton, and starts the job and killer watchers.
//
// Fix: the error returned by watchForageJobs was previously discarded.
func InitJobMgr() (err error) {
	// Build the etcd client configuration from the global config.
	config := clientv3.Config{
		Endpoints:   G_config.EtcdEndpoints,                                  // cluster addresses
		DialTimeout: time.Duration(G_config.EtcdDialTimeout) * time.Millisecond, // connect timeout
	}
	var client *clientv3.Client
	if client, err = clientv3.New(config); err != nil {
		return
	}
	// Assign the singleton with the KV, lease, and watcher API subsets.
	forageMgr = &ForageManager{
		client:  client,
		kv:      clientv3.NewKV(client),
		lease:   clientv3.NewLease(client),
		watcher: clientv3.NewWatcher(client),
	}
	// Start the job watchers; propagate failure instead of ignoring it.
	if err = forageMgr.watchForageJobs(); err != nil {
		return
	}
	// Start watching for kill markers.
	forageMgr.scheduleWatchKiller()
	return
}
// CreateJobLock returns a distributed execution lock for the named job,
// backed by the manager's etcd KV and lease handles.
func (forageMgr *ForageManager) CreateJobLock(jobName string) (jobLock *JobLock) {
	return InitJobLock(jobName, forageMgr.kv, forageMgr.lease)
}
|
package antminer
import (
"time"
"github.com/ka2n/masminer/sshutil"
"golang.org/x/crypto/ssh"
)
var (
	// sshDialer dials TCP connections with an explicit timeout for the SSH
	// clients built below.
	sshDialer sshutil.TimeoutDialer
)
// NewSSHClient returns *ssh.Client with default setting and no connection
// timeout (it delegates to NewSSHClientTimeout with timeout 0).
func NewSSHClient(host string) (*ssh.Client, error) {
	return NewSSHClientTimeout(host, 0)
}
// NewSSHClientTimeout returns *ssh.Client with default setting, applying
// timeout both to the SSH config and to the TCP dial (0 means no timeout).
func NewSSHClientTimeout(host string, timeout time.Duration) (*ssh.Client, error) {
	var base Client
	addr, cfg := base.SSHConfig(host)
	cfg.Timeout = timeout
	return sshDialer.DialTimeout("tcp", addr, cfg, timeout)
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package azurestack
import (
"context"
"fmt"
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources"
"github.com/Azure/go-autorest/autorest"
log "github.com/sirupsen/logrus"
)
// DeployTemplate implements the TemplateDeployer interface for the AzureClient client
func (az *AzureClient) DeployTemplate(ctx context.Context, resourceGroupName, deploymentName string, template map[string]interface{}, parameters map[string]interface{}) (de resources.DeploymentExtended, err error) {
deployment := resources.Deployment{
Properties: &resources.DeploymentProperties{
Template: &template,
Parameters: ¶meters,
Mode: resources.Incremental,
},
}
log.Infof("Starting ARM Deployment (%s). This will take some time...", deploymentName)
future, err := az.deploymentsClient.CreateOrUpdate(ctx, resourceGroupName, deploymentName, deployment)
if err != nil {
return de, err
}
outcomeText := "Succeeded"
err = future.WaitForCompletionRef(ctx, az.deploymentsClient.Client)
if err != nil {
outcomeText = fmt.Sprintf("Error: %v", err)
log.Infof("Finished ARM Deployment (%s). %s", deploymentName, outcomeText)
return de, err
}
de, err = future.Result(az.deploymentsClient)
if err != nil {
outcomeText = fmt.Sprintf("Error: %v", err)
}
log.Infof("Finished ARM Deployment (%s). %s", deploymentName, outcomeText)
return de, err
}
// ValidateTemplate validate the template and parameters
func (az *AzureClient) ValidateTemplate(
ctx context.Context,
resourceGroupName string,
deploymentName string,
template map[string]interface{},
parameters map[string]interface{}) (result resources.DeploymentValidateResult, err error) {
deployment := resources.Deployment{
Properties: &resources.DeploymentProperties{
Template: &template,
Parameters: ¶meters,
Mode: resources.Incremental,
},
}
return az.deploymentsClient.Validate(ctx, resourceGroupName, deploymentName, deployment)
}
// GetDeployment returns the template deployment with the given name in the
// given resource group.
func (az *AzureClient) GetDeployment(ctx context.Context, resourceGroupName, deploymentName string) (result resources.DeploymentExtended, err error) {
	return az.deploymentsClient.Get(ctx, resourceGroupName, deploymentName)
}
// CheckDeploymentExistence returns whether the deployment already exists in
// the given resource group.
func (az *AzureClient) CheckDeploymentExistence(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error) {
	return az.deploymentsClient.CheckExistence(ctx, resourceGroupName, deploymentName)
}
|
package zapdefaults
import (
"fmt"
"os"
"strings"
"github.com/mattn/go-isatty"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// Preset is an option that specifies a default logger configuration.
type Preset int32

const (
	// Invalid is the zero value; it cannot be applied or marshaled.
	Invalid Preset = iota
	// Development uses a console encoder, colored output, and more friendly string-encoded durations.
	Development
	// Production uses a json encoder with durations in seconds.
	// (This comment previously mislabeled the constant as Development.)
	Production
	// Dynamic uses a Development preset if attached to a TTY and a Production preset if not.
	Dynamic
)

// String returns a string describing the preset ("invalid" for Invalid or
// any unrecognized value).
func (m Preset) String() string {
	switch m {
	case Development:
		return "development"
	case Production:
		return "production"
	case Dynamic:
		return "dynamic"
	default:
		return "invalid"
	}
}

// UnmarshalText initializes a preset from text (case-insensitive).
//
// Preset conforms to the encoding.TextUnmarshaler interface,
// which also lets it support unmarshaling values from json
// structures. Unrecognized text sets the preset to Invalid and returns an
// error.
func (m *Preset) UnmarshalText(text []byte) error {
	switch strings.ToLower(string(text)) {
	case Development.String():
		*m = Development
	case Production.String():
		*m = Production
	case Dynamic.String():
		*m = Dynamic
	default:
		*m = Invalid
		return fmt.Errorf("invalid logger mode: %s", text)
	}
	return nil
}

// MarshalText supports encoding the preset as text.
//
// Preset conforms to the encoding.TextMarshaler interface. Marshaling an
// invalid preset returns its text alongside an error.
func (m Preset) MarshalText() ([]byte, error) {
	s := m.String()
	if s == "invalid" {
		return []byte(s), fmt.Errorf("cannot marshal invalid preset")
	}
	return []byte(s), nil
}
// apply overwrites *config with this preset's defaults. Development and
// Production both reset the config to DefaultConfiguration() before
// layering their encoder and duration settings; Dynamic delegates to one of
// them based on whether stdout is attached to a TTY. Invalid (or any
// unknown value) returns an error without touching config.
func (m Preset) apply(config *zap.Config) error {
	// apply the preset overrides
	switch m {
	case Development:
		// override the current settings to start with a clean slate
		*config = *DefaultConfiguration()
		config.Development = true
		config.Encoding = "console"
		config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
		config.EncoderConfig.EncodeDuration = zapcore.StringDurationEncoder
	case Production:
		// override the current settings to start with a clean slate
		*config = *DefaultConfiguration()
		config.Development = false
		config.Encoding = "json"
		config.EncoderConfig.EncodeLevel = zapcore.LowercaseLevelEncoder
		config.EncoderConfig.EncodeDuration = zapcore.SecondsDurationEncoder
	case Dynamic:
		var err error
		// Pick the concrete preset based on whether stdout is a terminal.
		if isatty.IsTerminal(os.Stdout.Fd()) {
			err = Development.apply(config)
		} else {
			err = Production.apply(config)
		}
		if err != nil {
			return err
		}
	default:
		return fmt.Errorf("cannot apply invalid preset")
	}
	return nil
}
|
package passbook
import (
"crypto/rand"
"fmt"
"io/ioutil"
"os"
)
// PassBook is a codebook made of `blocks` 256-byte blocks. loadData holds a
// previously generated book read back in for encryption; exportData holds a
// freshly generated book waiting to be written out.
type PassBook struct {
	blocks     uint16 // number of 256-byte blocks the book is expected to contain
	loadData   []byte // book contents installed via LoadFromFile/LoadFromData
	exportData []byte // book contents produced by Generate
}

// NewPassBook returns a PassBook configured for the given number of
// 256-byte blocks.
func NewPassBook(blocks uint16) *PassBook {
	return &PassBook{
		blocks: blocks,
	}
}

// getters

// GetLoadData returns the loaded book data (nil if nothing was loaded).
func (book *PassBook) GetLoadData() []byte {
	return book.loadData
}

// GetExportData returns the generated book data (nil before Generate).
func (book *PassBook) GetExportData() []byte {
	return book.exportData
}

// LoadFromFile - load data from file into loadData.
func (book *PassBook) LoadFromFile(file string) error {
	f, err := os.Open(file)
	if err != nil {
		return err
	}
	defer f.Close()
	b, err := ioutil.ReadAll(f)
	if err != nil {
		return err
	}
	book.loadData = b
	return nil
}

// LoadFromData - load data from a byte array into loadData.
func (book *PassBook) LoadFromData(data []byte) error {
	book.loadData = data
	return nil
}

// Generate fills exportData with book.blocks freshly generated 256-byte
// blocks, discarding any previously generated data.
func (book *PassBook) Generate() {
	// clear previous contents but keep the backing array
	book.exportData = book.exportData[:0]
	for index := uint16(0); index < book.blocks; index++ {
		book.exportData = append(book.exportData, generateBlock()...)
	}
}

// Export - export (generated, not loaded) passbook data to file.
func (book *PassBook) Export(file string) error {
	f, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0755)
	if err != nil {
		return err
	}
	defer f.Close()
	if _, errWrite := f.Write(book.exportData); errWrite != nil {
		return errWrite
	}
	return nil
}

// Encrypt maps each byte of vector through the corresponding 256-byte block
// of loadData; reverse selects the inverse (decryption) direction. Each
// block must contain a fixed point (block[j] == j), which Generate
// guarantees for generated books.
//
// Fixes relative to the previous version:
//   - the length check computed book.blocks*256 in uint16 arithmetic, which
//     overflows for blocks >= 256 (blocks == 256 made the required length
//     wrap to 0, later panicking on index-out-of-range);
//   - vector is now length-checked instead of panicking on short input.
func (book *PassBook) Encrypt(vector []byte, reverse bool) ([]byte, error) {
	blocks := int(book.blocks)
	if len(book.loadData) < blocks*256 {
		return nil, fmt.Errorf("loadData length not enough")
	}
	if len(vector) < blocks {
		return nil, fmt.Errorf("vector length not enough")
	}
	result := make([]byte, 0, blocks)
	for index := 0; index < blocks; index++ {
		block := book.loadData[index*256 : index*256+256]
		// Find the block's fixed point; a valid block has at least one.
		fixedPoint := -1
		for j := 0; j < 256; j++ {
			if block[j] == byte(j) {
				fixedPoint = j
				break
			}
		}
		if fixedPoint < 0 {
			return nil, fmt.Errorf("Invalid or corrupted data")
		}
		v := int(block[vector[index]])
		var coor int
		if reverse {
			coor = (fixedPoint - v + 256) % 256
		} else {
			coor = (fixedPoint + v) % 256
		}
		result = append(result, block[coor])
	}
	return result, nil
}

// generateBlock returns a random 256-byte block, regenerating until the
// block contains at least one fixed point (block[j] == j), which Encrypt
// relies on.
func generateBlock() []byte {
	block := make([]byte, 256)
	for {
		// crypto/rand.Read fills block with cryptographically random bytes.
		rand.Read(block)
		for j := 0; j < 256; j++ {
			if block[j] == byte(j) {
				return block
			}
		}
	}
}
|
// +build load
package main
import (
"flag"
"fmt"
"strconv"
"sync"
"time"
)
// func makeConnection(wg *sync.WaitGroup, times int, msg string) {
// conn, err := net.Dial("tcp", ":1774")
// oks := 0
// notoks := 0
// PanicOnError(err)
// defer conn.Close()
// defer wg.Done()
// for i := 0; i < times; i++ {
// m := "Message #" + strconv.Itoa(i+1) + msg
// final_msg, _ := FormatMessage("LOG", []byte(m))
// n, err := conn.Write(final_msg)
// if n < len(final_msg) {
// panic("too short")
// }
// PanicOnError(err)
// tbuf := make([]byte, 2)
// _, err = io.ReadFull(conn, tbuf)
// PanicOnError(err)
// // fmt.Printf("%d '%s'\n", i, tbuf)
// if string(tbuf) != "OK" {
// notoks++
// // panic("Wrong response: " + string(tbuf))
// } else {
// oks++
// }
// // time.Sleep(250000 * time.Nanosecond)
// // if i%1000 == 0 {
// // fmt.Printf("Sent %d messages\n", i)
// // }
// }
// // time.Sleep(3 * time.Second)
// fmt.Printf("%d OKs, %d not\n", oks, notoks)
// }
// makeConnection opens one connection to the log service and sends `times`
// messages in batches of 25, counting per-batch OK / not-OK responses.
//
// Fix: the previous version only sent a batch when i%batchsize == 0, so any
// trailing partial batch (the final messages) was silently dropped.
func makeConnection(wg *sync.WaitGroup, times int, msg string) {
	defer wg.Done()
	ok, notok := 0, 0
	service := NewService()
	service.TCPConn.Addr = ":1774"
	err := service.Connect()
	PanicOnError(err)
	defer service.TCPConn.Conn.Close()
	batchsize := 25
	cmds := make([]string, 0, batchsize)
	args := make([][]byte, 0, batchsize)
	// flush sends the accumulated batch (if any) and resets the buffers.
	flush := func() {
		if len(cmds) == 0 {
			return
		}
		resp, err := service.TCPConn.SendBatchedCommand(cmds, args)
		if err != nil || resp != "OK\r\n" {
			notok++
		} else {
			ok++
		}
		cmds = cmds[:0]
		args = args[:0]
	}
	for i := 0; i < times; i++ {
		m := "Message #" + strconv.Itoa(i+1) + msg
		cmds = append(cmds, "LOG")
		args = append(args, []byte(m))
		if i != 0 && i%batchsize == 0 {
			flush()
		}
	}
	flush() // send any trailing partial batch
	// fmt.Printf("%d OK, %d not\n", ok, notok)
}
// Command-line knobs for the load generator.
var runs = flag.Int("runs", 20, "number of times to run")
var concurrency = flag.Int("concurrency", 5, "number of simultaneous requests")
var requests = flag.Int("requests", 10000, "number of requests per worker")
var requestlen = flag.Int("requestlen", 500, "length of each request")
// main drives the load test: for each run it starts `concurrency` workers,
// each sending `requests` messages of `requestlen` padding, then reports
// aggregate throughput.
//
// Fix: the final report computed float64(totalBytes/1024/1024), performing
// integer division before the conversion and truncating the MB figures.
func main() {
	flag.Parse()
	// Total number of requests across every run and worker.
	num := *runs
	num *= *concurrency
	num *= *requests
	fmt.Printf("Should send %d\n", num)
	totalBytes := num
	totalBytes *= *requestlen
	// Padding appended to every message to reach the requested length.
	msg := ""
	for len(msg) < *requestlen {
		msg += "-"
	}
	start := time.Now()
	for r := 0; r < *runs; r++ {
		var wg sync.WaitGroup
		for i := 0; i < *concurrency; i++ {
			wg.Add(1)
			go makeConnection(&wg, *requests, msg)
		}
		wg.Wait()
	}
	d := time.Since(start)
	seconds := d.Seconds()
	nanos := d.Nanoseconds()
	mb := float64(totalBytes) / (1024 * 1024)
	fmt.Printf("wrote %fMB in %fs, %fns/request %f/s %fMB/s\n",
		mb, seconds, float64(nanos)/float64(num), float64(num)/seconds, mb/seconds)
}
|
package main
import (
"fmt"
"unsafe"
)
// InspectSlice dumps the internals of a slice header and its backing array
// using unsafe pointer arithmetic.
//
// NOTE(review): this assumes the runtime's slice header layout
// {data, len, cap} with the fields one pointer-size apart, and that the
// backing array holds at most 8 strings (the *[8]string cast). Both are
// implementation details, not guaranteed by the language spec — confirm
// before relying on this outside of a demo.
func InspectSlice(slice []string) {
	// Capture the address to the slice structure
	address := unsafe.Pointer(&slice)
	addrSize := unsafe.Sizeof(address)
	// Capture the address where the length and cap size is stored
	lenAddr := uintptr(address) + addrSize
	capAddr := uintptr(address) + (addrSize * 2)
	// Create pointers to the length and cap size
	lenPtr := (*int)(unsafe.Pointer(lenAddr))
	capPtr := (*int)(unsafe.Pointer(capAddr))
	// Create a pointer to the underlying array
	addPtr := (*[8]string)(unsafe.Pointer(*(*uintptr)(address)))
	fmt.Printf("Slice Addr[%p] Len Addr[0x%x] Cap Addr[0x%x]\n",
		address,
		lenAddr,
		capAddr)
	fmt.Printf("Slice Length[%d] Cap[%d]\n",
		*lenPtr,
		*capPtr)
	for index := 0; index < *lenPtr; index++ {
		fmt.Printf("[%d] %p %s\n",
			index,
			&(*addPtr)[index],
			(*addPtr)[index])
	}
	fmt.Printf("\n\n")
}
// main builds a small sample slice and dumps its header and backing array.
func main() {
	sample := []string{"satu", "dua", "tiga"}
	InspectSlice(sample)
}
|
// GO prints a message showing normal, non-panicking execution.
func GO() {
	fmt.Println("我是GO,现在没有发生异常,我是正常执行的。")
}
// PHP demonstrates panic/recover: the deferred anonymous function recovers
// the panic raised below, so control returns to main() after PHP; the
// statement after panic() never executes.
func PHP() {
	defer func() {
		if err := recover(); err != nil {
			fmt.Println("终于捕获到了panic产生的异常:", err) // err is the value that was passed to panic
			fmt.Println("我是defer里的匿名函数,我捕获到panic的异常了,我要recover,恢复过来了。")
		}
	}()
	panic("我是PHP,我要抛出一个异常了,等下defer会通过recover捕获这个异常,捕获到我时,在PHP里是不会输出的,会在defer里被捕获输出,然后正常处理,使后续程序正常运行。但是注意的是,在PHP函数里,排在panic后面的代码也不会执行的。")
	fmt.Println("我是PHP里panic后面要打印出的内容。但是我是永远也打印不出来了。因为逻辑并不会恢复到panic那个点去,函数还是会在defer之后返回,也就是说执行到defer后,程序直接返回到main()里,接下来开始执行PYTHON()")
}
// PYTHON prints a message; it runs normally because PHP's panic was already
// recovered before main() reaches this call.
func PYTHON() {
	fmt.Println("我是PYTHON,没有defer来recover捕获panic的异常,我是不会被正常执行的。")
}
// main runs the three demo functions in order; PHP panics and recovers, so
// PYTHON still executes afterwards.
func main() {
	GO()
	PHP()
	PYTHON()
}
|
package sqlconnector
import "github.com/jinzhu/gorm"
// Connect returns a database handle, preferring the Cloud SQL connection
// and falling back to the local database when Cloud SQL yields no handle
// (and no error).
func Connect() (*gorm.DB, error) {
	db, err := connectCloudSql()
	switch {
	case err != nil:
		return nil, err
	case db != nil:
		return db, nil
	}
	return connectLocalSql()
}
|
package goSolution
// leastBricks returns the minimum number of bricks a vertical line must
// cross in the given wall (LeetCode 554). For every row it records the
// prefix sums where a gap between bricks occurs (excluding the wall's right
// edge); the line through the most popular gap position crosses
// len(wall) - count bricks.
func leastBricks(wall [][]int) int {
	gaps := make(map[int]int)
	best := 0
	for _, row := range wall {
		edge := 0
		for j := 0; j+1 < len(row); j++ {
			edge += row[j]
			gaps[edge]++
			if gaps[edge] > best {
				best = gaps[edge]
			}
		}
	}
	return len(wall) - best
}
|
// Copyright 2016 Kranz. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package base
import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
)
// EncodeSha1 returns the lowercase hex SHA-1 digest of str.
func EncodeSha1(str string) string {
	sum := sha1.Sum([]byte(str))
	return hex.EncodeToString(sum[:])
}
// Marshal renders i as indented JSON (single-space indent, no prefix); it
// returns the empty string when i cannot be marshaled.
func Marshal(i interface{}) string {
	if data, err := json.MarshalIndent(i, "", " "); err == nil {
		return string(data)
	}
	return ""
}
|
// Copyright (C) 2020 VMware, Inc.
// SPDX-License-Identifier: Apache-2.0
package common
import (
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
// BaseSuite is a testify suite that creates a named buildkit builder (with
// optional extra creation flags) before each test and removes it afterwards.
type BaseSuite struct {
	suite.Suite
	Name        string   // builder name, also used to tag the test image
	CreateFlags []string // extra flags passed to the "create" command
}
// SetupTest creates the builder under test, combining the suite name with
// any extra creation flags.
func (s *BaseSuite) SetupTest() {
	logrus.Infof("%s: Setting up builder", s.Name)
	createArgs := append([]string{s.Name}, s.CreateFlags...)
	err := RunBuildkit("create", createArgs)
	require.NoError(s.T(), err, "%s: builder create failed", s.Name)
}
// TearDownTest removes the builder created in SetupTest.
func (s *BaseSuite) TearDownTest() {
	logrus.Infof("%s: Removing builder", s.Name)
	err := RunBuildkit("rm", []string{s.Name})
	require.NoError(s.T(), err, "%s: builder rm failed", s.Name)
}
// TestSimpleBuild runs a basic image build against the suite's builder
// using a temporary build context.
//
// Fix: cleanup was previously deferred before the error check; on a failed
// NewSimpleBuildContext a nil cleanup func would be invoked during the
// test's unwinding. The defer is now registered only after success.
// NOTE(review): if NewSimpleBuildContext returns a non-nil cleanup together
// with an error, that cleanup is now skipped — confirm its contract.
func (s *BaseSuite) TestSimpleBuild() {
	logrus.Infof("%s: Simple Build", s.Name)
	dir, cleanup, err := NewSimpleBuildContext()
	require.NoError(s.T(), err, "Failed to set up temporary build context")
	defer cleanup()
	args := []string{}
	if s.Name != "buildkit" { // TODO wire up the default name variable
		args = append(
			args,
			"--builder", s.Name,
		)
	}
	args = append(
		args,
		"--tag", s.Name+"replaceme:latest",
		dir,
	)
	err = RunBuild(args)
	require.NoError(s.T(), err, "build failed")
}
|
package gogacap
import (
"testing"
)
// TestChanZeroToNumber checks the exact sequence of slices produced by
// ChanZeroToNumber for several bounds, then drains a larger channel to make
// sure generation terminates.
func TestChanZeroToNumber(t *testing.T) {
	cases := []struct {
		number int
		want   [][]int
	}{
		{-1, [][]int{{}}},
		{0, [][]int{{}, {0}}},
		{1, [][]int{
			{}, {1}, {1, 1},
			{0}, {0, 1}, {0, 0},
		}},
		{2, [][]int{
			{}, {2}, {2, 2}, {2, 2, 2},
			{1}, {1, 2}, {1, 2, 2}, {1, 1}, {1, 1, 2}, {1, 1, 1},
			{0}, {0, 2}, {0, 2, 2}, {0, 1}, {0, 1, 2}, {0, 1, 1},
			{0, 0}, {0, 0, 2}, {0, 0, 1}, {0, 0, 0},
		}},
	}
	for _, tc := range cases {
		c := ChanZeroToNumber(tc.number)
		for _, a := range tc.want {
			b := <-c
			if !sliceEq(a, b) {
				t.Errorf("%v != %v", a, b)
			}
		}
	}
	// Larger bound: just drain the channel to completion.
	for range ChanZeroToNumber(10) {
	}
}
|
package main
import (
"bufio"
"container/heap"
"container/list"
"fmt"
"log"
"math"
"os"
"regexp"
"strconv"
)
// point is a nanobot position (x, y, z) plus its signal radius; radius is
// stored as float64 so it can be compared against computed distances.
type point struct {
	x      int
	y      int
	z      int
	radius float64
}
// input selects which puzzle file to read; the commented alternatives are
// the sample inputs.
const (
	input = "day23/input.txt"
	//input = "day23/test.txt"
	//input = "day23/test2.txt"
)
// An Item is something we manage in a priority queue: a candidate position
// with its radius, sampling precision, and search priority.
type Item struct {
	x         int
	y         int
	z         int
	radius    float64
	precision int
	priority  int // The priority of the item in the queue.
	// The index is needed by update and is maintained by the heap.Interface methods.
	index int // The index of the item in the heap.
}

// A PriorityQueue implements heap.Interface and holds Items, popping the
// highest-priority item first.
type PriorityQueue []*Item

// Len reports the number of queued items.
func (q PriorityQueue) Len() int { return len(q) }

// Less orders by descending priority: we want Pop to give us the highest,
// not lowest, priority, so greater-than is used here.
func (q PriorityQueue) Less(i, j int) bool { return q[i].priority > q[j].priority }

// Swap exchanges two items and keeps their heap indices current.
func (q PriorityQueue) Swap(i, j int) {
	q[i], q[j] = q[j], q[i]
	q[i].index, q[j].index = i, j
}

// Push appends x (an *Item) to the queue, recording its heap index.
func (q *PriorityQueue) Push(x interface{}) {
	item := x.(*Item)
	item.index = len(*q)
	*q = append(*q, item)
}

// Pop removes and returns the last element (heap.Pop moves the root there
// first), clearing its index for safety.
func (q *PriorityQueue) Pop() interface{} {
	old := *q
	n := len(old)
	item := old[n-1]
	item.index = -1 // for safety
	*q = old[:n-1]
	return item
}
// update modifies the coordinates and priority of an Item in the queue and
// re-establishes the heap invariant via heap.Fix. The item's radius and
// precision are left unchanged.
func (pq *PriorityQueue) update(item *Item, value *point, priority int) {
	item.x = value.x
	item.y = value.y
	item.z = value.z
	item.priority = priority
	heap.Fix(pq, item.index)
}
// main reads the puzzle input, parses each nanobot line of the form
// "pos=<x,y,z>, r=radius" into a point, and prints the answers to both
// parts. readAll and check are defined elsewhere in the package.
func main() {
	file, err := os.Open(input)
	check(err)
	defer file.Close()
	ls := list.New()
	readAll(file, ls)
	// pos=<0,0,0>, r=4
	regex, err := regexp.Compile(`pos=<(-?\d+),(-?\d+),(-?\d+)>,\s*r=(\d+)`)
	check(err)
	points := make([]*point, 0)
	for line := ls.Front(); line != nil; line = line.Next() {
		matches := regex.FindAllStringSubmatch(line.Value.(string), -1)[0]
		x, err := strconv.Atoi(matches[1])
		check(err)
		y, err := strconv.Atoi(matches[2])
		check(err)
		z, err := strconv.Atoi(matches[3])
		check(err)
		radius, err := strconv.Atoi(matches[4])
		check(err)
		points = append(points, &point{x: x, y: y, z: z, radius: float64(radius)})
	}
	fmt.Printf("Solution to part 1 is: %v\n", part1(points))
	//countP1 := 0
	//p1 := &point{x: 11382527, y: 29059459, z: 39808804}
	//for _, p2 := range points {
	//	d1 := dist(p1, p2)
	//	if d1 <= p2.radius {
	//		countP1++
	//	}
	//}
	//fmt.Printf("Number of bots that match: %v, distance: %v", countP1, int(dist(&point{}, p1)))
	fmt.Printf("Solution to part 2 is: %v\n", int(part2(points)))
}
// part1 finds the bot with the largest radius and counts how many bots
// (including itself) lie within that radius of it.
func part1(points []*point) int {
	var strongest *point
	for _, p := range points {
		if strongest == nil || p.radius > strongest.radius {
			strongest = p
		}
	}
	inRange := 0
	for _, p := range points {
		if dist(p, strongest) <= strongest.radius {
			inRange++
		}
	}
	return inRange
}
// part2 searches the bounding box of all bots at progressively finer
// resolution: a coarse grid scan seeds a priority queue of promising cubes
// (via mapIntoPQ), which are then re-sampled at half precision until
// resolution 1, where the found callback tracks the best position.
//
// NOTE(review): the function always returns -1 — the best position and
// count are only reported through the callback's Printf output (the
// return-value code at the bottom is commented out). Callers should not
// rely on the returned distance.
func part2(points []*point) float64 {
	// All six bounds start at -Inf; the IsInf checks below make the first
	// point initialize each of them.
	var minX, minY, minZ, maxX, maxY, maxZ = math.Inf(-1), math.Inf(-1), math.Inf(-1), math.Inf(-1), math.Inf(-1), math.Inf(-1)
	for _, p := range points {
		if float64(p.x) < minX || math.IsInf(minX, -1) {
			minX = float64(p.x)
		}
		if float64(p.y) < minY || math.IsInf(minY, -1) {
			minY = float64(p.y)
		}
		if float64(p.z) < minZ || math.IsInf(minZ, -1) {
			minZ = float64(p.z)
		}
		if float64(p.x) > maxX || math.IsInf(maxX, -1) {
			maxX = float64(p.x)
		}
		if float64(p.y) > maxY || math.IsInf(maxY, -1) {
			maxY = float64(p.y)
		}
		if float64(p.z) > maxZ || math.IsInf(maxZ, -1) {
			maxZ = float64(p.z)
		}
	}
	// Initial sampling step: 1/100th of the x extent.
	var maxResolution = (maxX - minX) / 100
	pq := make(PriorityQueue, 0)
	heap.Init(&pq)
	var maxRP *point = nil
	var howManyMax = 0
	// found records a full-precision sample: it keeps the sample covered by
	// the most bots, breaking ties by distance to the origin.
	found := func(x, y, z, countP1, found int) {
		fmt.Printf("Floating countP1=%v\n", countP1)
		p1 := &point{x: x, y: y, z: z}
		if countP1 > howManyMax {
			fmt.Printf("New best sample found at %v, %v, %v, count=%v (distance=%v)\n", x, y, z, countP1, int(found))
			howManyMax = countP1
			maxRP = p1
		} else if countP1 == howManyMax {
			if maxRP == nil || dist(zero, p1) < dist(zero, maxRP) {
				fmt.Printf("New best sample found at %v, %v, %v, count=%v (distance=%v)\n", x, y, z, countP1, int(found))
				maxRP = p1
			}
		}
	}
	mapIntoPQ(int(minX), int(maxX), int(minY), int(maxY), int(minZ), int(maxZ), maxResolution, points, &pq, found)
	// Drain the queue, re-sampling each promising cube at half precision.
	for pq.Len() > 0 {
		item := heap.Pop(&pq).(*Item)
		mapIntoPQ(item.x-item.precision, item.x+item.precision, item.y-item.precision, item.y+item.precision, item.z-item.precision, item.z+item.precision, float64(item.precision/2), points, &pq, found)
	}
	//fmt.Printf("Best point is: %v since it is covered by %d bots\n", totalMaxRP, totalHowManyMax)
	//return dist(totalMaxRP, zero)
	return -1
}
// zero is the origin, used as the reference for tie-breaking distances.
var zero = &point{}
// mapIntoPQ samples the box [minX,maxX]x[minY,maxY]x[minZ,maxZ] on a grid of
// the given resolution (clamped to at least 1). For each sample it counts the
// bots whose (padded) radius reaches the sample. At resolution 1 a non-zero
// sample is final and reported through foundHandler; at coarser resolutions
// it is pushed onto pq for later refinement.
func mapIntoPQ(minX int, maxX int, minY int, maxY int, minZ int, maxZ int, resolution float64, points []*point, pq *PriorityQueue, foundHandler func(x, y, z, countP1, found int)) {
	if resolution < 1 {
		resolution = 1
	}
	// p is reused for every sample; only its coordinates change.
	var p = &point{}
	for x := minX; x <= maxX; x += int(resolution) {
		//fmt.Printf("Processed %v%% so far (resolution: %f)\n", 100*(x-minX)/(maxX-minX), resolution)
		p.x = int(x)
		for y := minY; y <= maxY; y += int(resolution) {
			p.y = int(y)
			for z := minZ; z <= maxZ; z += int(resolution) {
				p.z = int(z)
				countP1 := 0
				for _, p2 := range points {
					d1 := dist(p, p2)
					var radius float64
					if resolution == 1 {
						radius = p2.radius
					} else {
						// Pad the radius at coarse resolutions so a cell is
						// not discarded when a covered point lies between
						// grid samples.
						radius = p2.radius + resolution*3
					}
					if d1 <= radius {
						countP1++
					}
				}
				if countP1 == 0 {
					continue
				} else if resolution == 1 {
					// Fully resolved sample: report position, coverage and
					// distance to the origin.
					foundHandler(x, y, z, countP1, int(dist(zero, p)))
				} else {
					fmt.Printf("Resampling at %v, %v, %v (count:%v) with precision %v\n", x, y, z, countP1, resolution)
					item := &Item{
						x: int(x),
						y: int(y),
						z: int(z),
						priority: countP1,
						precision: int(resolution),
					}
					heap.Push(pq, item)
				}
			}
		}
	}
}
// dist returns the Manhattan (taxicab) distance between p1 and p2.
func dist(p1, p2 *point) float64 {
	dx := math.Abs(float64(p1.x) - float64(p2.x))
	dy := math.Abs(float64(p1.y) - float64(p2.y))
	dz := math.Abs(float64(p1.z) - float64(p2.z))
	return dx + dy + dz
}
// check aborts the program via log.Fatal when err is non-nil; a nil error is
// a no-op.
func check(err error) {
	if err == nil {
		return
	}
	log.Fatal(err)
}
func readAll(file *os.File, list *list.List) {
scanner := bufio.NewScanner(file)
for scanner.Scan() {
val := scanner.Text()
list.PushBack(val)
}
if err := scanner.Err(); err != nil {
log.Fatal(err)
}
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resourcegrouptag
import (
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/coprocessor"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/tablecodec/rowindexcodec"
"github.com/pingcap/tipb/go-tipb"
"github.com/tikv/client-go/v2/tikvrpc"
)
// EncodeResourceGroupTag encodes sql digest and plan digest into a resource
// group tag. It returns nil when both digests are nil or marshalling fails.
func EncodeResourceGroupTag(sqlDigest, planDigest *parser.Digest, label tipb.ResourceGroupTagLabel) []byte {
	if sqlDigest == nil && planDigest == nil {
		return nil
	}
	var tag tipb.ResourceGroupTag
	tag.Label = &label
	if sqlDigest != nil {
		tag.SqlDigest = sqlDigest.Bytes()
	}
	if planDigest != nil {
		tag.PlanDigest = planDigest.Bytes()
	}
	encoded, err := tag.Marshal()
	if err != nil {
		return nil
	}
	return encoded
}
// DecodeResourceGroupTag decodes a resource group tag and returns its sql
// digest. Empty input yields (nil, nil); undecodable input yields an error.
func DecodeResourceGroupTag(data []byte) (sqlDigest []byte, err error) {
	if len(data) == 0 {
		return nil, nil
	}
	var tag tipb.ResourceGroupTag
	if err = tag.Unmarshal(data); err != nil {
		return nil, errors.Errorf("invalid resource group tag data %x", data)
	}
	return tag.SqlDigest, nil
}
// GetResourceGroupLabelByKey determines the tipb.ResourceGroupTagLabel of key
// from the key's kind (row, index, or anything else).
func GetResourceGroupLabelByKey(key []byte) tipb.ResourceGroupTagLabel {
	kind := rowindexcodec.GetKeyKind(key)
	if kind == rowindexcodec.KeyKindRow {
		return tipb.ResourceGroupTagLabel_ResourceGroupTagLabelRow
	}
	if kind == rowindexcodec.KeyKindIndex {
		return tipb.ResourceGroupTagLabel_ResourceGroupTagLabelIndex
	}
	return tipb.ResourceGroupTagLabel_ResourceGroupTagLabelUnknown
}
// GetFirstKeyFromRequest gets the first Key of the request from
// tikvrpc.Request. It returns nil when req is nil, when the request type
// carries no key, or when its key container is empty.
func GetFirstKeyFromRequest(req *tikvrpc.Request) (firstKey []byte) {
	if req == nil {
		return
	}
	// Bind the concrete request in the type switch instead of re-asserting
	// req.Req in every case (idiomatic Go; one assertion instead of two).
	switch r := req.Req.(type) {
	case *kvrpcpb.GetRequest:
		if r != nil {
			firstKey = r.Key
		}
	case *kvrpcpb.BatchGetRequest:
		if r != nil && len(r.Keys) > 0 {
			firstKey = r.Keys[0]
		}
	case *kvrpcpb.ScanRequest:
		if r != nil {
			firstKey = r.StartKey
		}
	case *kvrpcpb.PrewriteRequest:
		if r != nil && len(r.Mutations) > 0 {
			if mutation := r.Mutations[0]; mutation != nil {
				firstKey = mutation.Key
			}
		}
	case *kvrpcpb.CommitRequest:
		if r != nil && len(r.Keys) > 0 {
			firstKey = r.Keys[0]
		}
	case *kvrpcpb.BatchRollbackRequest:
		if r != nil && len(r.Keys) > 0 {
			firstKey = r.Keys[0]
		}
	case *coprocessor.Request:
		// Coprocessor requests carry ranges; use the start of the first one.
		if r != nil && len(r.Ranges) > 0 {
			if keyRange := r.Ranges[0]; keyRange != nil {
				firstKey = keyRange.Start
			}
		}
	case *coprocessor.BatchRequest:
		if r != nil && len(r.Regions) > 0 {
			if region := r.Regions[0]; region != nil {
				if len(region.Ranges) > 0 {
					if keyRange := region.Ranges[0]; keyRange != nil {
						firstKey = keyRange.Start
					}
				}
			}
		}
	}
	return
}
|
package response
import (
"net/http"
"github.com/GoAdminGroup/go-admin/context"
"github.com/GoAdminGroup/go-admin/modules/auth"
"github.com/GoAdminGroup/go-admin/modules/config"
"github.com/GoAdminGroup/go-admin/modules/db"
"github.com/GoAdminGroup/go-admin/modules/errors"
"github.com/GoAdminGroup/go-admin/modules/language"
"github.com/GoAdminGroup/go-admin/modules/menu"
"github.com/GoAdminGroup/go-admin/template"
"github.com/GoAdminGroup/go-admin/template/types"
)
// Ok replies 200 with a minimal {"code":200,"msg":"ok"} JSON body.
func Ok(ctx *context.Context) {
	body := map[string]interface{}{
		"code": http.StatusOK,
		"msg":  "ok",
	}
	ctx.JSON(http.StatusOK, body)
}
// OkWithMsg replies 200 with the caller-supplied message.
func OkWithMsg(ctx *context.Context, msg string) {
	body := map[string]interface{}{
		"code": http.StatusOK,
		"msg":  msg,
	}
	ctx.JSON(http.StatusOK, body)
}
// OkWithData replies 200 with "ok" and the given payload under "data".
func OkWithData(ctx *context.Context, data map[string]interface{}) {
	body := map[string]interface{}{
		"code": http.StatusOK,
		"msg":  "ok",
		"data": data,
	}
	ctx.JSON(http.StatusOK, body)
}
// BadRequest replies 400 with the localized message.
func BadRequest(ctx *context.Context, msg string) {
	body := map[string]interface{}{
		"code": http.StatusBadRequest,
		"msg":  language.Get(msg),
	}
	ctx.JSON(http.StatusBadRequest, body)
}
// Alert renders a full admin error/alert page (default: the 500 page) for the
// authenticated user, including the global menu, and writes it as HTML.
// desc/title/msg feed the page content; an optional pageType overrides the
// default template.
// NOTE(review): btns is dereferenced unconditionally below (*btns) — a nil
// *types.Buttons would panic; confirm callers always pass a non-nil value.
func Alert(ctx *context.Context, desc, title, msg string, conn db.Connection, btns *types.Buttons,
	pageType ...template.PageType) {
	user := auth.Auth(ctx)
	// Default to the 500 error page unless the caller chose another type.
	pt := template.Error500Page
	if len(pageType) > 0 {
		pt = pageType[0]
	}
	pageTitle, description, content := template.GetPageContentFromPageType(title, desc, msg, pt)
	// Pjax requests get the partial template, full requests the complete one.
	tmpl, tmplName := template.Default().GetTemplate(ctx.IsPjax())
	buf := template.Execute(&template.ExecuteParam{
		User:     user,
		TmplName: tmplName,
		Tmpl:     tmpl,
		Panel: types.Panel{
			Content:     content,
			Description: description,
			Title:       pageTitle,
		},
		Config:    config.Get(),
		Menu:      menu.GetGlobalMenu(user, conn, ctx.Lang()).SetActiveClass(config.URLRemovePrefix(ctx.Path())),
		Animation: true,
		Buttons:   *btns,
		IsPjax:    ctx.IsPjax(),
		Iframe:    ctx.IsIframe(),
	})
	ctx.HTML(http.StatusOK, buf.String())
}
// Error replies 500 with the localized message; when a data map is supplied,
// only the first one is attached under "data".
func Error(ctx *context.Context, msg string, datas ...map[string]interface{}) {
	body := map[string]interface{}{
		"code": http.StatusInternalServerError,
		"msg":  language.Get(msg),
	}
	if len(datas) > 0 {
		body["data"] = datas[0]
	}
	ctx.JSON(http.StatusInternalServerError, body)
}
// Denied replies with a permission-denied payload.
// NOTE(review): the HTTP status line is 500 while the JSON "code" field is
// 403 — looks inconsistent; confirm whether existing clients depend on the
// 500 before changing it.
func Denied(ctx *context.Context, msg string) {
	ctx.JSON(http.StatusInternalServerError, map[string]interface{}{
		"code": http.StatusForbidden,
		"msg":  language.Get(msg),
	})
}
// OffLineHandler short-circuits every request while the site is switched off:
// HTML clients get a static offline page, API clients a 403 JSON payload.
var OffLineHandler = func(ctx *context.Context) {
	if !config.GetSiteOff() {
		return
	}
	if ctx.WantHTML() {
		ctx.HTML(http.StatusOK, `<html><body><h1>The website is offline</h1></body></html>`)
	} else {
		ctx.JSON(http.StatusForbidden, map[string]interface{}{
			"code": http.StatusForbidden,
			"msg":  language.Get(errors.SiteOff),
		})
	}
	ctx.Abort()
}
|
//go:build darwin
// +build darwin
package envoy
import (
"context"
"syscall"
"github.com/pomerium/pomerium/internal/log"
)
// sysProcAttr is applied to the spawned envoy process; Setpgid places the
// child in its own process group.
var sysProcAttr = &syscall.SysProcAttr{
	Setpgid: true,
}
// runProcessCollector is a no-op on darwin builds.
func (srv *Server) runProcessCollector(_ context.Context) {}
// prepareRunEnvoyCommand kills any previously started envoy process and
// returns the envoy executable path together with a copy of the shared
// arguments.
func (srv *Server) prepareRunEnvoyCommand(ctx context.Context, sharedArgs []string) (exePath string, args []string) {
	if srv.cmd != nil && srv.cmd.Process != nil {
		log.Info(ctx).Msg("envoy: terminating previous envoy process")
		_ = srv.cmd.Process.Kill()
	}
	// Hand back a copy so callers cannot mutate the shared slice.
	args = append([]string{}, sharedArgs...)
	return srv.envoyPath, args
}
|
package ionic
import (
"bytes"
"encoding/json"
"fmt"
"net/url"
"github.com/ion-channel/ionic/pagination"
"github.com/ion-channel/ionic/products"
"github.com/ion-channel/ionic/responses"
)
// GetProducts takes a product ID search string and token. It returns the
// products found, and any API errors it may encounter.
func (ic *IonClient) GetProducts(idSearch, token string) ([]products.Product, error) {
	params := url.Values{}
	params.Set("external_id", idSearch)
	raw, _, err := ic.Get(products.GetProductEndpoint, token, params, nil, pagination.Pagination{})
	if err != nil {
		return nil, fmt.Errorf("failed to get raw product: %v", err.Error())
	}
	var ps []products.Product
	if err := json.Unmarshal(raw, &ps); err != nil {
		return nil, fmt.Errorf("failed to get products: %v", err.Error())
	}
	return ps, nil
}
// GetProductVersions takes a product name, version, and token. It returns
// the product versions found, and any API errors it may encounter. An empty
// version matches all versions.
func (ic *IonClient) GetProductVersions(name, version, token string) ([]products.Product, error) {
	params := url.Values{}
	params.Set("name", name)
	if version != "" {
		params.Set("version", version)
	}
	raw, _, err := ic.Get(products.GetProductVersionsEndpoint, token, params, nil, pagination.Pagination{})
	if err != nil {
		return nil, fmt.Errorf("failed to get product versions: %v", err.Error())
	}
	var ps []products.Product
	if err := json.Unmarshal(raw, &ps); err != nil {
		return nil, fmt.Errorf("failed to get product versions: %v", err.Error())
	}
	return ps, nil
}
// ProductSearch takes a search query. It returns all matching products in
// the Bunsen dependencies table, or an error when the query is invalid, the
// request fails, or the response cannot be decoded.
func (ic *IonClient) ProductSearch(searchInput products.ProductSearchQuery, token string) ([]products.Product, error) {
	if !searchInput.IsValid() {
		// Error strings are lowercase by Go convention (staticcheck ST1005).
		return nil, fmt.Errorf("product search request not valid")
	}
	bodyBytes, err := json.Marshal(searchInput)
	if err != nil {
		return nil, err
	}
	buffer := bytes.NewBuffer(bodyBytes)
	b, err := ic.Post(products.ProductSearchEndpoint, token, nil, *buffer, nil)
	if err != nil {
		return nil, err
	}
	var ps []products.Product
	err = json.Unmarshal(b, &ps)
	if err != nil {
		return nil, err
	}
	return ps, nil
}
// GetRawProducts takes a product ID search string and token. It returns a
// raw json message of the product found, and any API errors it may encounter.
func (ic *IonClient) GetRawProducts(idSearch, token string) (json.RawMessage, error) {
	params := url.Values{}
	params.Set("external_id", idSearch)
	raw, _, err := ic.Get(products.GetProductEndpoint, token, params, nil, pagination.Pagination{})
	if err != nil {
		return nil, fmt.Errorf("failed to get raw product: %v", err.Error())
	}
	return raw, nil
}
// GetProductSearch takes a search query. It returns all matching products in
// the Bunsen dependencies table together with the response metadata.
func (ic *IonClient) GetProductSearch(query string, page pagination.Pagination, token string) ([]products.Product, *responses.Meta, error) {
	params := url.Values{}
	params.Set("q", query)
	b, m, err := ic.Get(products.ProductSearchEndpoint, token, params, nil, page)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to GetProductSearch: %v", err.Error())
	}
	// Named "ps" (not "products") so the local does not shadow the imported
	// products package, consistent with the sibling Get* functions.
	var ps []products.Product
	err = json.Unmarshal(b, &ps)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to parse products: %v", err.Error())
	}
	return ps, m, nil
}
|
package loadbalancer_api
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
)
// prepareRequest JSON-encodes body into a fresh buffer suitable for use as an
// HTTP request body. The returned buffer always ends with the newline that
// json.Encoder appends.
func prepareRequest(body interface{}) (bodyBuf *bytes.Buffer, err error) {
	// The named return starts out nil, so allocate unconditionally — the old
	// `if bodyBuf == nil` guard was always true and only obscured that.
	bodyBuf = &bytes.Buffer{}
	err = json.NewEncoder(bodyBuf).Encode(body)
	return
}
// GetPathListAll fetches the initial load-balancer path list from host over
// HTTPS. It fails when the request errors, the server does not answer
// 200 OK, or the payload cannot be decoded.
func GetPathListAll(client *http.Client, host string) (response *PathListAll, err error) {
	resp, err := client.Get(fmt.Sprintf("https://%s/loadbalancer/init", host))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// Explicit "%s" verb: fmt.Errorf(resp.Status) treats the status text
		// as a format string (go vet printf finding) and would garble any
		// literal '%' in it.
		return nil, fmt.Errorf("%s", resp.Status)
	}
	rspbody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	err = json.Unmarshal(rspbody, &response)
	return
}
// SendPduSessionPathRequest POSTs the session info for supi to host and
// decodes the created session from the response. Any status other than
// 201 Created is reported as an error.
func SendPduSessionPathRequest(client *http.Client, host string, supi string, request SessionInfo) (response *SessionInfo, err error) {
	body, err := prepareRequest(request)
	if err != nil {
		return nil, err
	}
	resp, err := client.Post(fmt.Sprintf("https://%s/loadbalancer/user/%s", host, supi), "application/json", body)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		// "%s" verb avoids using the status text as a format string
		// (go vet printf finding).
		return nil, fmt.Errorf("%s", resp.Status)
	}
	rspbody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	err = json.Unmarshal(rspbody, &response)
	return
}
// SendPduSessionPathDeleteRequest issues a DELETE for supi's session on host.
// Any status other than 204 No Content is reported as an error.
func SendPduSessionPathDeleteRequest(client *http.Client, host string, supi string, request SessionInfo) error {
	body, err := prepareRequest(request)
	if err != nil {
		return err
	}
	req, err := http.NewRequest("DELETE", fmt.Sprintf("https://%s/loadbalancer/user/%s", host, supi), body)
	if err != nil {
		return err
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		// "%s" verb avoids using the status text as a format string
		// (go vet printf finding).
		return fmt.Errorf("%s", resp.Status)
	}
	return nil
}
// func UpdateWorstPathList(client *http.Client, host string, request WorstPathList) error {
// body, err := prepareRequest(request)
// if err != nil {
// return err
// }
// resp, err := client.Post(fmt.Sprintf("https://%s/loadbalancer/path", host), "application/json", body)
// if err != nil {
// return err
// }
// defer resp.Body.Close()
// if resp.StatusCode != http.StatusNoContent {
// return fmt.Errorf(resp.Status)
// }
// return nil
// }
|
// Copyright 2014 The Sporting Exchange Limited. All rights reserved.
// Use of this source code is governed by a free license that can be
// found in the LICENSE file.
// Package config implements shared configuration-related routines.
package config
import (
"bytes"
"crypto/tls"
"encoding/json"
"expvar"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"path"
"reflect"
"strings"
"text/template"
"time"
)
// Debug, when set, receives verbose request logging from this package.
var Debug *log.Logger
// httpClient fetches server-side configuration over plain or TLS HTTP.
var httpClient = &http.Client{
	Transport: &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true, // TODO: make this configurable.
		},
	},
	Timeout: 15 * time.Second,
}
// getInterval determines frequency of server config lookups.
const getInterval = 60 * time.Second
// maxBytes limits the size of server config response.
const maxBytes = 1 << 20
// statLoadErrors counts load failures by kind (Decode/Validation/Client).
var statLoadErrors = expvar.NewMap("config.load.Errors")
// Struct is an interface satisfied by every configuration struct. Reset resets
// struct by setting default values and zeroing others. Validate returns nil if
// the configuration is valid for processing by the client.
type Struct interface {
	Reset()
	Validate() error
}
// saved remembers the paths passed to Load; non-nil means Load has run.
var saved *loadPath
// loadPath pairs the local file path with the optional server path.
type loadPath struct {
	filePath string
	serverPath string
}
// Load loads the config struct with values stored in the given file. The file's
// encoding is JSON.
//
// In addition, depending on environment details, a second file is loaded over
// network. The remote file overrides those settings of the local file which have
// been marked as dynamic in the corresponding struct field. To mark field as
// dynamic, tag it with config=dynamic, for example:
//
// RelayHost string `config:"dynamic"`
//
// The server path must be a valid HTTP path except the order of query parameters
// is significant. The path may include the template action {{.Hostname}} which
// expands to local host name, for example:
//
// "/tsp/forwarder?host={{.Hostname}}"
//
// The test for config=dynamic tag is not performed recursively.
//
// At least one path must be provided. If a path is empty, it is ignored.
func Load(config Struct, filePath, serverPath string) {
	if saved != nil {
		log.Panicf("Load called twice")
	}
	// Without a control host there is nobody to fetch server config from,
	// so the server path is dropped.
	if serverPath != "" && os.Getenv("CONTROL_HOST") == "" {
		serverPath = ""
	}
	saved = &loadPath{filePath, serverPath}
	load(config, filePath, serverPath)
}
// Next blocks until new configuration values arrive over network. Once they do,
// the config struct is rewritten to contain the new settings.
//
// If called before Load, Next will panic. In some environments, Next may block
// forever.
func Next(config Struct) {
	if saved.serverPath == "" {
		// No server source configured: there will never be an update.
		select {}
	}
	load(config, saved.filePath, saved.serverPath)
}
// load populates config from the configured sources. File-only failures are
// fatal; any path that involves the server retries forever (rate-limited to
// once per second) until a decodable, valid configuration arrives.
func load(config Struct, filePath, serverPath string) {
	// At most one load attempt per second across the retry loops below.
	rateLimit := time.NewTicker(1 * time.Second)
	defer rateLimit.Stop()
	switch {
	default:
		log.Fatalf("config: no path defined")
	case filePath != "" && serverPath == "":
		// File only: a single attempt; exit the process on failure.
		config.Reset()
		ok := decodeFileTry(config, filePath)
		if !ok {
			os.Exit(1)
		}
		if !isValid(config) {
			os.Exit(1)
		}
	case filePath == "" && serverPath != "":
		// Server only: block and retry until a valid payload arrives.
		client := dial(serverPath)
		for ; ; <-rateLimit.C {
			config.Reset()
			ok := decodeServerTry(config, client)
			if !ok {
				continue
			}
			if !isValid(config) {
				continue
			}
			break
		}
		defaultClient = client
	case filePath != "" && serverPath != "":
		// File then server: the server payload overlays the file values
		// (dynamic fields only); retry the whole sequence until valid.
		client := dial(serverPath)
		for ; ; <-rateLimit.C {
			config.Reset()
			ok := decodeFileTry(config, filePath)
			if !ok {
				continue
			}
			ok = decodeServerTry(config, client)
			if !ok {
				continue
			}
			if !isValid(config) {
				continue
			}
			break
		}
		defaultClient = client
	}
}
// decodeFileTry reports whether the file decoded cleanly, logging and
// counting the error otherwise.
func decodeFileTry(config Struct, path string) (ok bool) {
	err := decodeFile(config, path)
	if err == nil {
		return true
	}
	statLoadErrors.Add("type=Decode", 1)
	log.Printf("config: file decode error: %v", err)
	return false
}
// decodeFile reads the JSON file at path into config. A missing file is not
// an error: the config simply keeps its current (reset) values.
func decodeFile(config Struct, path string) error {
	file, err := os.Open(path)
	if os.IsNotExist(err) {
		return nil
	}
	if err != nil {
		return err
	}
	defer file.Close()
	buf, err := ioutil.ReadAll(file)
	if err != nil {
		return err
	}
	return unmarshal(config, buf)
}
// decodeServerTry reports whether the next server payload decoded cleanly,
// logging and counting the error otherwise.
func decodeServerTry(config Struct, client *client) (ok bool) {
	err := decodeServer(config, client)
	if err == nil {
		return true
	}
	statLoadErrors.Add("type=Decode", 1)
	log.Printf("config: server decode error: %v", err)
	return false
}
// decodeServer blocks for the next server payload and applies it to config,
// restricted to fields tagged config:"dynamic" via a fieldFilter.
func decodeServer(config Struct, client *client) error {
	buf := client.NextUpdate()
	return unmarshal(newFieldFilter(config), buf)
}
// unmarshal decodes buf into config, annotating JSON syntax errors with the
// offending line number.
func unmarshal(config interface{}, buf []byte) error {
	err := json.Unmarshal(buf, config)
	if err == nil {
		return nil
	}
	return addLineNum(err, buf)
}
// isValid reports whether config passes its own validation, logging and
// counting the failure otherwise.
func isValid(config Struct) bool {
	err := config.Validate()
	if err == nil {
		return true
	}
	statLoadErrors.Add("type=Validation", 1)
	log.Printf("config: validation error: %v", err)
	return false
}
func addLineNum(err error, buf []byte) error {
syntax, ok := err.(*json.SyntaxError)
if !ok {
return err
}
start := bytes.LastIndex(buf[:syntax.Offset], []byte("\n")) + 1
num := bytes.Count(buf[:start], []byte("\n")) + 1
return fmt.Errorf("line %d: %v", num, err)
}
// defaultClient is the process-wide config client; dial reuses it once set.
var defaultClient *client
// client polls the control server at addr/path and publishes changed
// payloads on nextUpdate.
type client struct {
	addr, path string
	nextUpdate chan []byte
}
// dial returns the process-wide config client, creating it (and starting its
// polling goroutine) on first use.
func dial(path string) *client {
	if defaultClient != nil {
		return defaultClient
	}
	c := &client{
		addr:       os.Getenv("CONTROL_HOST"),
		path:       path,
		nextUpdate: make(chan []byte),
	}
	go c.mainloop()
	return c
}
// NextUpdate blocks until a payload arrives that differs from the previous one.
func (c *client) NextUpdate() []byte {
	return <-c.nextUpdate
}
// mainloop polls the control server every getInterval and publishes each
// payload that differs from the previously delivered one. It never returns;
// the time.Tick ticker is intentionally never stopped.
func (c *client) mainloop() {
	var last []byte
	tick := time.Tick(getInterval)
	for ; ; <-tick {
		buf, ok := c.getTry()
		if !ok {
			continue
		}
		// Suppress duplicates so NextUpdate only wakes on real changes.
		if last != nil && bytes.Equal(buf, last) {
			continue
		}
		c.nextUpdate <- buf
		last = buf
	}
}
// get performs one config fetch, reading at most maxBytes of the body.
func (c *client) get() (buf []byte, err error) {
	resp, err := c.getPlainOrTLS()
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	limited := io.LimitReader(resp.Body, maxBytes)
	return ioutil.ReadAll(limited)
}
// requestURL builds the control-server URL for the given scheme. The query
// part of c.path (after "?") is template-expanded; a path without "?" is
// treated as having an empty query.
func (c *client) requestURL(scheme string) string {
	rawPath, rawQuery := c.path, ""
	// The previous code sliced at strings.Index unconditionally, which
	// panics (slice bounds out of range with i == -1) when the path has no
	// "?" — guard against that.
	if i := strings.Index(c.path, "?"); i >= 0 {
		rawPath, rawQuery = c.path[:i], c.path[i+1:]
	}
	URL := url.URL{
		Scheme:   scheme,
		Host:     c.addr,
		Path:     path.Join("/control/v1", rawPath),
		RawQuery: expand(rawQuery),
	}
	return URL.String()
}
// getPlainOrTLS tries HTTPS first, then plain HTTP, and returns the first
// response with status 200 OK. When both attempts fail, the collected errors
// are folded into a single error.
func (c *client) getPlainOrTLS() (*http.Response, error) {
	candidates := []string{
		c.requestURL("https"),
		c.requestURL("http"),
	}
	var errs []error
	for _, u := range candidates {
		if Debug != nil {
			Debug.Printf("get %s", u)
		}
		resp, err := httpClient.Get(u)
		if err != nil {
			errs = append(errs, err)
			continue
		}
		if resp.StatusCode != http.StatusOK {
			errs = append(errs, fmt.Errorf("Get %s: got status code %d (%q)",
				u, resp.StatusCode, resp.Status))
			continue
		}
		return resp, nil
	}
	return nil, fmt.Errorf("%q", errs)
}
func expand(s string) string {
var data struct {
Hostname string
}
var err error
data.Hostname, err = os.Hostname()
if err != nil {
log.Panic(err)
}
t := template.Must(template.New("query").Parse(s))
var buf bytes.Buffer
if err := t.Execute(&buf, data); err != nil {
log.Panic(err)
}
return buf.String()
}
// getTry reports whether one fetch succeeded, logging and counting the
// error otherwise.
func (c *client) getTry() (buf []byte, ok bool) {
	buf, err := c.get()
	if err == nil {
		return buf, true
	}
	statLoadErrors.Add("type=Client", 1)
	log.Printf("config: client error: %v", err)
	return nil, false
}
// fieldFilter blocks illegal writes to Struct: only fields tagged
// config:"dynamic" may be overwritten by a server payload.
type fieldFilter struct {
	v     interface{}
	field []string
}

// newFieldFilter records which top-level fields of the struct v are allowed
// to be dynamically updated. It panics when v is not a (pointer to) struct.
func newFieldFilter(v interface{}) *fieldFilter {
	typ := reflect.Indirect(reflect.ValueOf(v)).Type()
	if typ.Kind() != reflect.Struct {
		panic("not a struct")
	}
	ff := &fieldFilter{v: v}
	n := typ.NumField()
	for i := 0; i < n; i++ {
		if typ.Field(i).Tag.Get("config") == "dynamic" {
			ff.field = append(ff.field, typ.Field(i).Name)
		}
	}
	return ff
}
// UnmarshalJSON copies only the whitelisted (dynamic) fields from buf into
// the wrapped struct, silently ignoring every other key in the payload.
func (filter *fieldFilter) UnmarshalJSON(buf []byte) error {
	var got map[string]json.RawMessage
	if err := json.Unmarshal(buf, &got); err != nil {
		return err
	}
	for _, name := range filter.field {
		raw := got[name]
		if raw == nil {
			continue
		}
		// Re-wrap the single field as its own object so the standard
		// decoder applies it to the target struct.
		single := []byte(fmt.Sprintf("{%q: %s}", name, raw))
		if err := json.Unmarshal(single, filter.v); err != nil {
			return err
		}
	}
	return nil
}
|
package node
import (
"reflect"
"babyboy-dag/rpc"
"babyboy-dag/event"
"babyboy-dag/accounts"
"errors"
)
// Service is implemented by every registered node service and exposes the
// RPC APIs the service offers.
type Service interface {
	APIs() []rpc.API
}
// ServiceConstructor builds a Service from its ServiceContext.
type ServiceConstructor func(ctx *ServiceContext) (Service, error)
// ServiceContext carries the dependencies handed to service constructors.
type ServiceContext struct {
	Services map[reflect.Type]Service // Index of the already constructed services
	EventMux *event.TypeMux // Event multiplexer used for decoupled notifications
	AccountManager *accounts.Manager // Account manager created by the node.
	Node *Node
}
// Service retrieves a currently running service registered of a specific
// type. The argument must be a pointer to a variable of the service's type;
// on success that variable is set to the running instance.
func (ctx *ServiceContext) Service(service interface{}) error {
	element := reflect.ValueOf(service).Elem()
	running, ok := ctx.Services[element.Type()]
	if !ok {
		return errors.New("unknown service")
	}
	element.Set(reflect.ValueOf(running))
	return nil
}
|
package bydefine
// ProductModel identifies a device by model name and serial number.
type ProductModel struct{
	Name string `json:"name"` // device model name
	SerialNo string `json:"serial_no"` // serial number
}
|
package main
import (
"fmt"
"io"
"strconv"
"time"
)
// Point wraps a single data point. It stores database-agnostic data
// representing one point in time of one measurement.
//
// Internally, Point uses byte slices instead of strings to try to minimize
// overhead.
type Point struct {
	MeasurementName []byte          // measurement this point belongs to
	TagKeys         [][]byte        // parallel to TagValues
	TagValues       [][]byte
	FieldKeys       [][]byte        // parallel to FieldValues
	FieldValues     []interface{}   // int, int64, float32/64, bool, string, or []byte
	Timestamp       *time.Time
}
// Using these literals prevents the slices from escaping to the heap, saving
// a few micros per call:
var (
	charComma  = byte(',')
	charEquals = byte('=')
	charSpace  = byte(' ')
)
// Reset clears the point for reuse. The slices are truncated to length zero
// (not nil) so their backing arrays are kept for subsequent Append calls.
func (p *Point) Reset() {
	p.MeasurementName = nil
	p.TagKeys = p.TagKeys[:0]
	p.TagValues = p.TagValues[:0]
	p.FieldKeys = p.FieldKeys[:0]
	p.FieldValues = p.FieldValues[:0]
	p.Timestamp = nil
}
// SetTimestamp sets the point's timestamp (stored by pointer, not copied).
func (p *Point) SetTimestamp(t *time.Time) {
	p.Timestamp = t
}
// SetMeasurementName sets the measurement name (the slice is aliased, not
// copied).
func (p *Point) SetMeasurementName(s []byte) {
	p.MeasurementName = s
}
// AppendTag appends one tag key/value pair (slices are aliased, not copied).
func (p *Point) AppendTag(key, value []byte) {
	p.TagKeys = append(p.TagKeys, key)
	p.TagValues = append(p.TagValues, value)
}
// AppendField appends one field key/value pair; value must be one of the
// types handled by fastFormatAppend.
func (p *Point) AppendField(key []byte, value interface{}) {
	p.FieldKeys = append(p.FieldKeys, key)
	p.FieldValues = append(p.FieldValues, value)
}
// SerializeInfluxBulk writes Point data to the given writer, conforming to the
// InfluxDB wire protocol.
//
// This function writes output that looks like:
// <measurement>,<tag key>=<tag value> <field name>=<field value> <timestamp>\n
//
// For example:
// foo,tag0=bar baz=-1.0 100\n
//
// NOTE(review): tag keys/values are written unescaped; assumes they contain
// no commas, spaces, or equals signs — confirm with the data generators.
func (p *Point) SerializeInfluxBulk(w io.Writer) (err error) {
	buf := make([]byte, 0, 256)
	buf = append(buf, p.MeasurementName...)
	// Tag set: ",key=value" pairs directly after the measurement name.
	for i := 0; i < len(p.TagKeys); i++ {
		buf = append(buf, charComma)
		buf = append(buf, p.TagKeys[i]...)
		buf = append(buf, charEquals)
		buf = append(buf, p.TagValues[i]...)
	}
	if len(p.FieldKeys) > 0 {
		buf = append(buf, charSpace)
	}
	for i := 0; i < len(p.FieldKeys); i++ {
		buf = append(buf, p.FieldKeys[i]...)
		buf = append(buf, charEquals)
		v := p.FieldValues[i]
		buf = fastFormatAppend(v, buf)
		// Influx uses 'i' to indicate integers:
		switch v.(type) {
		case int, int64:
			buf = append(buf, byte('i'))
		}
		if i+1 < len(p.FieldKeys) {
			buf = append(buf, charComma)
		}
	}
	// Append the timestamp with strconv.AppendInt instead of fmt.Sprintf:
	// same output bytes, but no fmt reflection/allocation on this hot path
	// (see the perf TODO that used to sit on this function).
	buf = append(buf, charSpace)
	buf = strconv.AppendInt(buf, p.Timestamp.UTC().UnixNano(), 10)
	buf = append(buf, '\n')
	_, err = w.Write(buf)
	return err
}
// SerializeESBulk writes Point data to the given writer, conforming to the
// ElasticSearch bulk load protocol.
//
// This function writes output that looks like:
// <action line>
// <tags, fields, and timestamp>
//
// For example:
// { "create" : { "_index" : "measurement_otqio", "_type" : "point" } }\n
// { "tag_launx": "btkuw", "tag_gaijk": "jiypr", "field_wokxf": 0.08463898963964356, "field_zqstf": -0.043641533500086316, "timestamp": 171300 }\n
//
// TODO(rw): Speed up this function. The bulk of time is spent in strconv.
// SerializeESBulk writes the two-line ElasticSearch bulk form of the point:
// an action line naming the index, then one JSON object with tags, fields,
// and a millisecond-precision timestamp.
// NOTE(review): tag/field keys and values are interpolated without JSON
// escaping; assumes they contain no quotes or backslashes — confirm with the
// data generators.
func (p *Point) SerializeESBulk(w io.Writer) error {
	// Action line: tells ES which index the following document goes into.
	action := "{ \"create\" : { \"_index\" : \"%s\", \"_type\" : \"point\" } }\n"
	_, err := fmt.Fprintf(w, action, p.MeasurementName)
	if err != nil {
		return err
	}
	buf := make([]byte, 0, 256)
	buf = append(buf, []byte("{")...)
	for i := 0; i < len(p.TagKeys); i++ {
		if i > 0 {
			buf = append(buf, []byte(", ")...)
		}
		buf = append(buf, []byte(fmt.Sprintf("\"%s\": ", p.TagKeys[i]))...)
		buf = append(buf, []byte(fmt.Sprintf("\"%s\"", p.TagValues[i]))...)
	}
	// Separator between the tag group and the field group, if both exist.
	if len(p.TagKeys) > 0 && len(p.FieldKeys) > 0 {
		buf = append(buf, []byte(", ")...)
	}
	for i := 0; i < len(p.FieldKeys); i++ {
		if i > 0 {
			buf = append(buf, []byte(", ")...)
		}
		buf = append(buf, "\""...)
		buf = append(buf, p.FieldKeys[i]...)
		buf = append(buf, "\": "...)
		v := p.FieldValues[i]
		buf = fastFormatAppend(v, buf)
	}
	if len(p.TagKeys) > 0 || len(p.FieldKeys) > 0 {
		buf = append(buf, []byte(", ")...)
	}
	// Timestamps in ES must be millisecond precision:
	buf = append(buf, []byte(fmt.Sprintf("\"timestamp\": %d }\n", p.Timestamp.UTC().UnixNano()/1e6))...)
	_, err = w.Write(buf)
	if err != nil {
		return err
	}
	return nil
}
// fastFormatAppend appends a textual representation of v to buf and returns
// the extended slice. It supports the numeric, bool, string, and []byte
// field types the serializers produce, and panics on anything else.
func fastFormatAppend(v interface{}, buf []byte) []byte {
	// A binding type switch avoids the duplicated assertions (v.(int),
	// v.(int64), ...) of the original — one assertion instead of two per
	// value on this hot path.
	switch x := v.(type) {
	case int:
		return strconv.AppendInt(buf, int64(x), 10)
	case int64:
		return strconv.AppendInt(buf, x, 10)
	case float64:
		return strconv.AppendFloat(buf, x, 'f', 16, 64)
	case float32:
		return strconv.AppendFloat(buf, float64(x), 'f', 16, 32)
	case bool:
		return strconv.AppendBool(buf, x)
	case []byte:
		return append(buf, x...)
	case string:
		return append(buf, x...)
	default:
		panic(fmt.Sprintf("unknown field type for %#v", v))
	}
}
|
package LongSteps
// Operand is a value in the expression language: it can evaluate itself and
// coerce to int or bool within an Environment.
type Operand interface {
	Evaluate(environment Environment) (Operand, Environment)
	Int(Environment) int
	Bool(Environment) bool
}
// IntNumber is an integer literal operand.
type IntNumber int

// Evaluate returns the literal itself; the returned environment is nil.
func (i IntNumber) Evaluate(environment Environment) (Operand, Environment) {
	return i, nil
}

// Int returns the literal's integer value.
func (i IntNumber) Int(Environment) int {
	return int(i)
}

// Bool coerces the number to a bool: strictly positive values are true.
func (i IntNumber) Bool(environment Environment) bool {
	// Return the condition directly rather than the original
	// `if cond { return true } else { return false }`.
	return int(i) > 0
}
// BoolOperand is a boolean literal operand.
type BoolOperand bool

// Evaluate returns the literal itself; the returned environment is nil.
func (b BoolOperand) Evaluate(environment Environment) (Operand, Environment) {
	return b, nil
}

// Int maps true to 1 and false to 0.
func (b BoolOperand) Int(Environment) int {
	if b {
		return 1
	}
	return 0
}

// Bool returns the underlying boolean value.
func (b BoolOperand) Bool(environment Environment) bool {
	return bool(b)
}
|
package main
import (
"encoding/json"
"fmt"
"net/http"
"github.com/graphql-go/graphql"
"github.com/graphql-go/graphql/examples/todo/schema"
)
// postData is the JSON shape of a GraphQL POST request body.
type postData struct {
	Query     string                 `json:"query"`         // GraphQL query/mutation text
	Operation string                 `json:"operationName"` // optional operation to execute
	Variables map[string]interface{} `json:"variables"`     // optional query variables
}
// main serves the todo GraphQL schema on :8080 and prints example curl
// invocations for exploring it.
func main() {
	http.HandleFunc("/graphql", func(w http.ResponseWriter, req *http.Request) {
		var p postData
		// Malformed request bodies are rejected with 400 Bad Request.
		if err := json.NewDecoder(req.Body).Decode(&p); err != nil {
			w.WriteHeader(400)
			return
		}
		result := graphql.Do(graphql.Params{
			Context:        req.Context(),
			Schema:         schema.TodoSchema,
			RequestString:  p.Query,
			VariableValues: p.Variables,
			OperationName:  p.Operation,
		})
		if err := json.NewEncoder(w).Encode(result); err != nil {
			fmt.Printf("could not write result to response: %s", err)
		}
	})
	fmt.Println("Now server is running on port 8080")
	fmt.Println("")
	fmt.Println(`Get single todo:
curl \
-X POST \
-H "Content-Type: application/json" \
--data '{ "query": "{ todo(id:\"b\") { id text done } }" }' \
http://localhost:8080/graphql`)
	fmt.Println("")
	fmt.Println(`Create new todo:
curl \
-X POST \
-H "Content-Type: application/json" \
--data '{ "query": "mutation { createTodo(text:\"My New todo\") { id text done } }" }' \
http://localhost:8080/graphql`)
	fmt.Println("")
	fmt.Println(`Update todo:
curl \
-X POST \
-H "Content-Type: application/json" \
--data '{ "query": "mutation { updateTodo(id:\"a\", done: true) { id text done } }" }' \
http://localhost:8080/graphql`)
	fmt.Println("")
	fmt.Println(`Load todo list:
curl \
-X POST \
-H "Content-Type: application/json" \
--data '{ "query": "{ todoList { id text done } }" }' \
http://localhost:8080/graphql`)
	// ListenAndServe only returns on failure; don't silently drop that error.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Println("server error:", err)
	}
}
|
package lingua
import (
"bytes"
"testing"
)
// TestSummary pins down the exact text rendered by Summary.Print:
// the headword (with pronunciation only when one exists), each
// definition's part of speech and meaning, and any usage examples.
func TestSummary(t *testing.T) {
	testCases := []struct {
		description, want string
		input             Summary
	}{
		{
			description: "It outputs a proper definition",
			input: Summary{
				Word:          "jejune",
				Pronunciation: "/jay-june/",
				Definitions: []Definition{
					{
						PartOfSpeech: "adjective",
						Meaning:      "naive, simplistic, and superficial",
						UsageExamples: []string{
							"their entirely predicatable and usually jejune opinions",
							"the poem seems to me rather jejune",
						},
					},
				},
			},
			// Expected output is a raw string: lines are flush-left and the
			// leading newline is part of the expected rendering.
			want: `
jejune (/jay-june/)
[adjective]
naive, simplistic, and superficial
e.g. "their entirely predicatable and usually jejune opinions"
e.g. "the poem seems to me rather jejune"
`,
		},
		{
			description: "It does not include a pronunciation in the output if it doesn't exist",
			input: Summary{
				Word:          "jejune",
				Pronunciation: "",
				Definitions: []Definition{
					{
						PartOfSpeech: "adjective",
						Meaning:      "naive, simplistic, and superficial",
						UsageExamples: []string{
							"their entirely predicatable and usually jejune opinions",
							"the poem seems to me rather jejune",
						},
					},
				},
			},
			want: `
jejune
[adjective]
naive, simplistic, and superficial
e.g. "their entirely predicatable and usually jejune opinions"
e.g. "the poem seems to me rather jejune"
`,
		},
		{
			description: "It includes multiple definitions",
			input: Summary{
				Word:          "jejune",
				Pronunciation: "",
				Definitions: []Definition{
					{
						PartOfSpeech: "adjective",
						Meaning:      "naive, simplistic, and superficial",
						UsageExamples: []string{
							"their entirely predicatable and usually jejune opinions",
						},
					},
					{
						PartOfSpeech: "adjective",
						Meaning:      "(of ideas or writings) dry and uninteresting.",
						UsageExamples: []string{
							"the poem seems to me rather jejune",
						},
					},
				},
			},
			want: `
jejune
[adjective]
naive, simplistic, and superficial
e.g. "their entirely predicatable and usually jejune opinions"
[adjective]
(of ideas or writings) dry and uninteresting.
e.g. "the poem seems to me rather jejune"
`,
		},
		{
			description: "It works without usage examples",
			input: Summary{
				Word:          "jejune",
				Pronunciation: "",
				Definitions: []Definition{
					{
						PartOfSpeech: "adjective",
						Meaning:      "naive, simplistic, and superficial",
					},
					{
						PartOfSpeech: "adjective",
						Meaning:      "(of ideas or writings) dry and uninteresting.",
					},
				},
			},
			want: `
jejune
[adjective]
naive, simplistic, and superficial
[adjective]
(of ideas or writings) dry and uninteresting.
`,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			// Render into an in-memory buffer and compare the exact output.
			buffer := bytes.Buffer{}
			tc.input.Print(&buffer)
			got := buffer.String()
			if got != tc.want {
				t.Errorf("\ngot %q,\nwant %q", got, tc.want)
			}
		})
	}
}
|
/*
Package params contains implementation for QueryParameters api
interface.
*/
package params
import (
"net/http"
"github.com/go-chi/chi/v5"
)
// Chi implements QueryParameters api interface for
// chi router.
//
// You can safely use new built-in function to allocate
// new Chi instance.
type Chi struct{}

// ID returns the "id" URL parameter of the request, as registered
// in the chi route pattern (e.g. "/items/{id}").
func (c *Chi) ID(r *http.Request) string {
	return chi.URLParam(r, "id")
}
|
package escaping
import (
"testing"
)
func Test_EscapeString_UnescapeString(t *testing.T) {
mapping := map[string]string{
// spaces
"": "",
" ": " ",
" ": " ",
// doublequotes
`\"`: "\"",
`\" \"`: "\" \"",
// newlines
`\n`: "\n",
`\n \n`: "\n \n",
// backslashes
`\\`: "\\",
` \\ \\ `: " \\ \\ ",
// blackslashes + newlines
`\\n`: "\\n",
}
for a, b := range mapping {
var intermediary string
var output string
var err error
output, err = UnescapeString(a)
if err != nil {
t.Error(err)
}
if output != b {
t.Errorf("unescapeString() failed. Input: `%s`. Expected output: `%s`. Actual output: `%s`.", a, b, output)
}
output = EscapeString(b)
if output != a {
t.Errorf("escapeString() failed. Input: `%s`. Expected output: `%s`. Actual output: `%s`.", b, a, output)
}
output, err = UnescapeString(EscapeString(b))
if err != nil {
t.Error(err)
}
if output != b {
t.Errorf("Conversion failed. Expected output: `%s`. Actual output: `%s`.", b, output)
}
intermediary, err = UnescapeString(a)
if err != nil {
t.Error(err)
}
output = EscapeString(intermediary)
if output != a {
t.Errorf("Conversion failed. Expected output: `%s`. Actual output: `%s`.", a, output)
}
}
}
// Test_UnescapeString verifies that inputs with a dangling backslash
// are rejected with an error.
func Test_UnescapeString(t *testing.T) {
	for _, input := range []string{`\`, `\\\`} {
		if _, err := UnescapeString(input); err == nil {
			t.Error("Should have failed.")
		}
	}
}
|
package config
import (
"database/sql"
"fmt"
"testing"
"github.com/DemoHn/obsidian-panel/pkg/dbmigrate"
// init migrations
_ "github.com/DemoHn/obsidian-panel/app/migrations"
// import sqlite3
_ "github.com/mattn/go-sqlite3"
)
// TestConfigDBLoad is a smoke test of the config DB round trip:
// migrate a scratch sqlite database, write two values, then read
// one back and load the whole config table.
func TestConfigDBLoad(t *testing.T) {
	// TODO: add testcase
	db, err := sql.Open("sqlite3", "/tmp/b.db")
	if err != nil {
		t.Fatalf("open test db: %v", err)
	}
	defer db.Close()
	// migration up (Down is best-effort cleanup of a previous run,
	// so its error is deliberately ignored)
	dbmigrate.Down(db)
	if err := dbmigrate.Up(db); err != nil {
		t.Fatalf("migration up: %v", err)
	}
	// insert data
	if err := writeValueToDB(db, "url.port", newInt(8080)); err != nil {
		t.Fatalf("write url.port: %v", err)
	}
	if err := writeValueToDB(db, "url.host", newString("Hello World")); err != nil {
		t.Fatalf("write url.host: %v", err)
	}
	// read data
	v, err := readValueFromDB(db, "url.host")
	if err != nil {
		t.Fatalf("read url.host: %v", err)
	}
	fmt.Println(v)
	// read all data
	vv, err := loadConfigData(db)
	if err != nil {
		t.Fatalf("load config data: %v", err)
	}
	fmt.Println(vv)
}
|
package mockingjay
import (
	"encoding/json"
	"fmt"
	"regexp"
)
// RegexYAML allows you to work with regex fields in YAML.
// It embeds the compiled *regexp.Regexp, which is populated
// from the YAML string value by UnmarshalYAML.
type RegexYAML struct {
	*regexp.Regexp
}
// UnmarshalYAML will unmarshal a YAML string field into a compiled
// regexp, returning any decode or compile error unchanged.
func (r *RegexYAML) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var raw string
	if err := unmarshal(&raw); err != nil {
		return err
	}
	compiled, err := regexp.Compile(raw)
	if err != nil {
		return err
	}
	r.Regexp = compiled
	return nil
}
// MarshalJSON returns the regex pattern as a JSON string.
//
// The original implementation built the JSON by hand with
// fmt.Sprintf(`"%s"`), which produces invalid JSON whenever the
// pattern contains a double quote or a backslash escape such as \d;
// json.Marshal performs the required escaping.
func (r *RegexYAML) MarshalJSON() ([]byte, error) {
	// A zero-value RegexYAML has a nil Regexp; encode it as an empty
	// pattern instead of panicking on the nil dereference.
	if r.Regexp == nil {
		return []byte(`""`), nil
	}
	encoded, err := json.Marshal(r.Regexp.String())
	if err != nil {
		return nil, fmt.Errorf("marshaling regex %q: %w", r.Regexp.String(), err)
	}
	return encoded, nil
}
|
package run
import (
"reflect"
"sync"
)
// LazyRunner run Run only when there is at least one supervisor
type LazyRunner struct {
	// Run is the long-running task; it is expected to return after
	// the stopCh it receives is closed.
	Run func(stopCh <-chan struct{})
	// Locker must support both exclusive and shared locking
	// (e.g. *sync.RWMutex).
	Locker interface {
		sync.Locker
		RLock()
		RUnlock()
	} // protect supervisorStopChs, running, stoppedCh
	supervisorStopChs []<-chan struct{} // one entry per live supervisor
	running bool // whether a run() goroutine is currently active
	stoppedCh chan struct{} // closed when the current run() finishes; nil while not running
}
// AddSupervisor registers a supervisor stop channel and starts the
// runner goroutine if it is not already active.
func (lr *LazyRunner) AddSupervisor(stopCh <-chan struct{}) {
	lr.Locker.Lock()
	defer lr.Locker.Unlock()

	lr.supervisorStopChs = append(lr.supervisorStopChs, stopCh)
	if lr.running {
		return
	}
	lr.running = true
	lr.stoppedCh = make(chan struct{})
	go lr.run()
}
// run executes lr.Run while at least one supervisor stop channel is
// registered, then marks the runner stopped.
//
// A watcher goroutine blocks on the registered supervisor channels;
// each time one fires (or is closed) its entry is removed. When the
// list becomes empty the watcher closes stopCh, telling lr.Run to
// return.
func (lr *LazyRunner) run() {
	stopCh := make(chan struct{})
	go func() {
		defer close(stopCh)
		for {
			lr.Locker.RLock()
			stopChs := lr.supervisorStopChs
			lr.Locker.RUnlock()
			if len(stopChs) == 0 {
				// optimization: use rlock to check, and use lock to check and set
				lr.Locker.Lock()
				if len(lr.supervisorStopChs) == 0 {
					// No supervisors left: clear running while still holding
					// the lock so AddSupervisor can restart us safely.
					lr.running = false
					lr.Locker.Unlock()
					return
				}
				// A supervisor was added between the RLock check and here;
				// re-read the slice under the exclusive lock.
				stopChs = lr.supervisorStopChs
				lr.Locker.Unlock()
			}
			// Block until any supervisor channel fires, then drop that entry.
			chosen, _, _ := selectChannels(stopChs)
			lr.Locker.Lock()
			lr.supervisorStopChs = append(lr.supervisorStopChs[:chosen], lr.supervisorStopChs[chosen+1:]...)
			lr.Locker.Unlock()
		}
	}()
	lr.Run(stopCh)
	// Run has returned: wake any Wait()ers and clear the channel.
	lr.Locker.Lock()
	defer lr.Locker.Unlock()
	close(lr.stoppedCh)
	lr.stoppedCh = nil
}
// Wait blocks until the current run (if any) has finished; it returns
// immediately when the runner is idle.
func (lr *LazyRunner) Wait() {
	lr.Locker.RLock()
	done := lr.stoppedCh
	lr.Locker.RUnlock()
	if done == nil {
		return
	}
	<-done
}
// selectChannels performs a dynamic select over an arbitrary number of
// receive channels via reflect.Select. It returns the index of the
// channel that fired, the received value, and ok=false when that
// channel was closed (value is then the zero value of T).
func selectChannels[T any](chans []<-chan T) (chosenIndex int, value T, ok bool) {
	selectCases := make([]reflect.SelectCase, 0, len(chans))
	for _, c := range chans {
		selectCases = append(selectCases, reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(c),
		})
	}
	var received reflect.Value
	chosenIndex, received, ok = reflect.Select(selectCases)
	if ok {
		// not closed, so a real value was received
		value = received.Interface().(T)
	}
	return chosenIndex, value, ok
}
|
package main
import (
"fmt"
)
// main demonstrates a basic switch with a default case, using the
// switch statement's initializer form.
func main() {
	switch a := 2; a {
	case 0:
		fmt.Println("a = 0")
	case 1:
		fmt.Println("a = 1")
	case 2:
		fmt.Println("a = 2")
	default:
		fmt.Println("a != 0, 1 or, 2")
	}
}
|
package day12
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestRotation applies a sequence of turn commands to a fresh ferry
// and checks the heading after each one.
func TestRotation(t *testing.T) {
	f := NewFerry()
	require.Equal(t, "E", f.Direction)
	steps := []struct {
		command string
		heading string
	}{
		{"R90", "S"},
		{"L90", "E"},
		{"R180", "W"},
		{"L180", "E"},
		{"R270", "N"},
		{"L270", "E"},
	}
	for _, step := range steps {
		require.NoError(t, f.MoveNormal(step.command))
		require.Equal(t, step.heading, f.Direction)
	}
}
// TestMovement runs the AoC 2020 day 12 part 1 example and checks the
// resulting Manhattan distance.
func TestMovement(t *testing.T) {
	f := NewFerry()
	for _, command := range []string{"F10", "N3", "F7", "R90", "F11"} {
		require.NoError(t, f.MoveNormal(command))
	}
	require.Equal(t, 25, f.Position.DistanceFromOrigin())
}
// TestWaypointMovement runs the same example with waypoint semantics
// (part 2) and checks the resulting Manhattan distance.
func TestWaypointMovement(t *testing.T) {
	f := NewFerry()
	for _, command := range []string{"F10", "N3", "F7", "R90", "F11"} {
		require.NoError(t, f.MoveWaypointed(command))
	}
	require.Equal(t, 286, f.Position.DistanceFromOrigin())
}
// TestRotatePoint checks a 90-degree clockwise rotation and that
// Rotate does not mutate its receiver.
func TestRotatePoint(t *testing.T) {
	original := Point{X: 10, Y: 4}
	rotated := original.Rotate(90)
	require.Equal(t, 4, rotated.X)
	require.Equal(t, -10, rotated.Y)
	// the source point must be left untouched
	require.Equal(t, 10, original.X)
	require.Equal(t, 4, original.Y)
}
|
package main
import "fmt"
// Adder returns a closure that accumulates a running sum: each call
// adds its argument to the captured total and returns the new total.
func Adder() func(int) int {
	total := 0
	return func(delta int) int {
		total += delta
		return total
	}
}
// main drives the Adder closure, printing the running sum of 0..9.
func main() {
	accumulate := Adder()
	for i := 0; i < 10; i++ {
		fmt.Println(accumulate(i))
	}
}
|
package gobyexample
import (
"fmt"
)
// Make demonstrates the make built-in: it allocates an int slice with
// length 0 and capacity 10 and prints it in several equivalent ways.
func Make() {
	fmt.Printf("go make keyword\n")
	s := make([]int, 0, 10)
	fmt.Printf("x: %v\n", s)
	fmt.Println("x:", s)
	fmt.Printf("type: %T", s)
}
|
package main
import (
"fmt"
"time"
"os"
"errors"
endPoint "go_chat/src/core"
"flag"
)
// getGreeting maps an hour of day (0-23) to a greeting. Hours before
// 7 are rejected with an error; otherwise morning / afternoon /
// evening greetings are returned.
func getGreeting(hour int) (string, error) {
	switch {
	case hour < 7:
		return "", errors.New("Too early, we're closed!")
	case hour < 12:
		return "Good Morning", nil
	case hour < 18:
		return "Good Afternoon", nil
	default:
		return "Good evening", nil
	}
}
// main greets the user based on the current hour, then starts the chat
// program as either host (-listen) or guest, connecting to the IP
// address given as the first non-flag argument.
func main() {
	args := os.Args
	hourOfDay := time.Now().Hour()
	greeting, err := getGreeting(hourOfDay)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	if len(args) > 1 {
		fmt.Println(args[1])
	} else {
		fmt.Println("Hello, I am Gopher")
	}
	fmt.Println(greeting)
	isHost := flag.Bool("listen", false, "Listen on the given ip address")
	flag.Parse()
	// The IP is the first non-flag argument. The previous code indexed
	// os.Args[1]/os.Args[2] directly, which panics with an
	// index-out-of-range when no address is supplied.
	ip := flag.Arg(0)
	if ip == "" {
		fmt.Println("usage: go_chat [-listen] <ip>")
		os.Exit(1)
	}
	if *isHost {
		host := endPoint.Host{Ip: ip}
		host.Run()
	} else {
		guest := endPoint.Guest{Ip: ip}
		guest.Run()
	}
}
|
package core
// Evaluation is a job evaluation record exchanged as JSON.
// NOTE(review): field names suggest a scheduler-style job queue —
// confirm against the producer of these records.
type Evaluation struct {
	Id string `json:"id"`
	JobKind string `json:"job_kind"`
	JobId string `json:"job_id"`
	Priority int `json:"priority"`
	Status string `json:"status"`
}

// NodeAllocationState pairs the desired and actual status of one job
// allocation on a node, together with its status trace.
type NodeAllocationState struct {
	NodeId string `json:"node_id"`
	AllocationId string `json:"allocation_id"`
	JobKind string `json:"job_kind"`
	JobId string `json:"job_id"`
	DesiredStatus string `json:"desired_status"`
	ActualStatus string `json:"actual_status"`
	Trace []*Trace `json:"trace"`
}

// Trace is a single timestamped event in an allocation's history.
type Trace struct {
	Type string `json:"type"`
	Timestamp int64 `json:"timestamp"`
	Message string `json:"message"`
}
|
package bot
import (
"log"
"fmt"
)
// contextLogger tags log lines with the bot account they relate to.
type contextLogger struct {
	Account *BotAccount // account printed with every log line
	Context *Context // NOTE(review): carried but never printed by log() — confirm intended
}
// newContextLogger builds a logger bound to the given account and context.
func newContextLogger(account *BotAccount, context *Context) *contextLogger {
	log.Println("Bot::newContextLogger")
	logger := &contextLogger{
		Account: account,
		Context: context,
	}
	return logger
}
// debug logs msg at DEBUG level; msg is a fmt format string for v.
func (l *contextLogger) debug(msg string, v ...interface{}) {
	l.log(msg, "DEBUG", v...)
}

// info logs msg at INFO level; msg is a fmt format string for v.
func (l *contextLogger) info(msg string, v ...interface{}) {
	l.log(msg, "INFO", v...)
}

// err logs msg at ERROR level; msg is a fmt format string for v.
func (l *contextLogger) err(msg string, v ...interface{}) {
	l.log(msg, "ERROR", v...)
}
// log formats msg with v and writes it prefixed by the level and
// suffixed by the owning account.
func (l *contextLogger) log(msg, level string, v ...interface{}) {
	log.Printf("%v: %v | acc: %+v\n", level, fmt.Sprintf(msg, v...), l.Account)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.