text
stringlengths 11
4.05M
|
|---|
package spec
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/kaitai-io/kaitai_struct_go_runtime/kaitai"
. "test_formats"
)
// TestStrEncodings reads the shared str_encodings.bin fixture and checks
// that the generated StrEncodings struct decodes each string field to the
// expected value (ASCII, two fields that both decode to the same Japanese
// greeting, and block-element glyphs).
// NOTE(review): f is never closed — consider `defer f.Close()`.
func TestStrEncodings(t *testing.T) {
	f, err := os.Open("../../src/str_encodings.bin")
	if err != nil {
		t.Fatal(err)
	}
	s := kaitai.NewStream(f)
	var h StrEncodings
	// Read takes the stream plus parent and root; a top-level struct is
	// its own parent and root.
	err = h.Read(s, &h, &h)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, "Some ASCII", h.Str1)
	assert.Equal(t, "こんにちは", h.Str2)
	assert.Equal(t, "こんにちは", h.Str3)
	assert.Equal(t, "░▒▓", h.Str4)
}
|
package conf
import (
"fmt"
"log"
"math/rand"
"mi_com_tool_dataset/util"
"strconv"
"strings"
"github.com/Maxgis/tree"
"github.com/go-ini/ini"
)
var (
	// Tags maps an INI section name to its parsed Group configuration.
	Tags = make(map[string]*Group)
	// DEFAULE_TIMEOUT is the fallback timeout value (units not stated in
	// this file — presumably milliseconds; confirm with consumers).
	// (sic: the name misspells "DEFAULT"; kept for caller compatibility.)
	DEFAULE_TIMEOUT uint64 = 10000
	// DEFAULT_PROTOCOL is used when a section defines no protocol.
	DEFAULT_PROTOCOL = "http"
	// DEFAULT_MAXRETRY is used when a section defines no max_retry.
	DEFAULT_MAXRETRY uint = 1
)
// Group is the resolved configuration of one INI section.
type Group struct {
	Timeout  uint64
	Protocol string
	// Balance selects the machine-picking strategy (see getMachine).
	// NOTE(review): initGroup never populates this field.
	Balance  string
	MaxRetry uint
	Host     string
	Machines *[]*Machine
}
// Machine is one backend host:port entry parsed from a "machines" list.
type Machine struct {
	Host string
	Port int
	// Enable is always true when created by getMachines.
	Enable bool
	// FailTime is never written in this file — presumably maintained by
	// callers elsewhere; confirm.
	FailTime int
}
// init loads conf/group.ini (relative to the process directory),
// populates the package-level Tags map with one Group per INI section,
// logs every configured machine, and pretty-prints the resulting tree.
func init() {
	file := util.GetCurrentDirectory() + "/conf/group.ini"
	var err error
	// Fix: the original passed the literal string "filename" as an extra
	// data source, making ini attempt to load a file literally named
	// "filename" from the working directory.
	cfg, err = ini.LooseLoad(file)
	if err != nil {
		log.Fatal(err)
	}
	sections := cfg.Sections()
	for _, section := range sections {
		Tags[section.Name()] = initGroup(section)
		//log.Printf("%#v\n", Tags[section.Name()])
		if Tags[section.Name()].Machines == nil {
			continue
		}
		for _, machine := range *Tags[section.Name()].Machines {
			log.Printf("%#v\n", machine)
		}
	}
	tree.Print(Tags)
}
// initGroup builds a Group from one INI section, applying package
// defaults for any missing keys. Note that no "balance" key is read, so
// Balance stays at its zero value.
func initGroup(section *ini.Section) *Group {
	return &Group{
		Timeout:  getTimeout(section),
		Protocol: getProtocol(section),
		MaxRetry: getMaxRetry(section),
		Host:     getHost(section),
		Machines: getMachines(section),
	}
}
// getTimeout reads the section's "timeout" key, falling back to
// DEFAULE_TIMEOUT when the key is absent or not a valid uint64.
func getTimeout(section *ini.Section) uint64 {
	key, err := section.GetKey("timeout")
	if err != nil {
		return DEFAULE_TIMEOUT
	}
	return key.MustUint64(DEFAULE_TIMEOUT)
}
// getProtocol reads the section's "protocol" key, defaulting to
// DEFAULT_PROTOCOL when the key is missing or its value is empty.
func getProtocol(section *ini.Section) string {
	key, err := section.GetKey("protocol")
	if err != nil {
		return DEFAULT_PROTOCOL
	}
	if p := key.String(); p != "" {
		return p
	}
	return DEFAULT_PROTOCOL
}
// getMaxRetry reads the section's "max_retry" key, defaulting to
// DEFAULT_MAXRETRY when the key is absent or malformed.
func getMaxRetry(section *ini.Section) uint {
	key, err := section.GetKey("max_retry")
	if err != nil {
		return DEFAULT_MAXRETRY
	}
	return key.MustUint(DEFAULT_MAXRETRY)
}
// getHost reads the section's "host" key; a missing key yields "".
func getHost(section *ini.Section) string {
	key, err := section.GetKey("host")
	if err != nil {
		return ""
	}
	return key.MustString("")
}
// getMachines parses the section's "machines" key, a comma-separated
// list of "host:port" entries, into Machine values. It returns nil when
// the key is missing or empty. An entry without a port (or with an
// unparsable one) gets port 0.
func getMachines(section *ini.Section) *[]*Machine {
	machinesKey, err := section.GetKey("machines")
	if err != nil {
		return nil
	}
	machinesStr := machinesKey.MustString("")
	// Fix: strings.Split("", ",") returns [""], so the original's
	// `length == 0` check was unreachable and an empty value produced a
	// bogus Machine with an empty host. Return nil instead.
	if machinesStr == "" {
		return nil
	}
	machinesArr := strings.Split(machinesStr, ",")
	machines := make([]*Machine, len(machinesArr))
	for i, machineStr := range machinesArr {
		machineInfo := strings.Split(machineStr, ":")
		var port int
		if len(machineInfo) > 1 {
			// Error deliberately ignored: a malformed port leaves port
			// at 0 (same effective behavior as the original).
			port, _ = strconv.Atoi(machineInfo[1])
		}
		machines[i] = &Machine{Host: machineInfo[0], Port: port, Enable: true}
	}
	return &machines
}
// BuildURI resolves tagName to its Group, picks one of the group's
// machines via the configured balance strategy, and formats a full URI
// for path, e.g. "http://host:port/path".
// NOTE(review): assumes tagName exists in Tags and the group has at
// least one machine; a missing tag or empty machine list panics here or
// inside getRandomMachine — confirm callers guarantee this.
func BuildURI(tagName string, path string) string {
	group := Tags[tagName]
	machine := getMachine(group.Balance, group.Machines)
	return fmt.Sprintf("%s://%s:%d%s", group.Protocol, machine.Host, machine.Port, path)
}
// getMachine picks a machine according to the balance strategy. Only
// "random" is implemented and it is also the fallback for any unknown
// strategy, so every path currently dispatches randomly.
func getMachine(balance string, machines *[]*Machine) *Machine {
	if balance == "random" {
		return getRandomMachine(machines)
	}
	return getRandomMachine(machines)
}
// getRandomMachine returns a uniformly random element of machines.
// NOTE(review): panics when machines is nil (nil dereference) or empty
// (rand.Intn(0) panics) — callers must only pass groups that have
// machines configured.
func getRandomMachine(machines *[]*Machine) *Machine {
	length := len((*machines))
	index := rand.Intn(length)
	return (*machines)[index]
}
|
package logserver
import (
"bytes"
"net/http"
"openRPA-basic-module/models"
"io/ioutil"
json2 "encoding/json"
"strconv"
"fmt"
"github.com/davecgh/go-spew/spew"
"github.com/gin-gonic/gin/json"
"github.com/pkg/errors"
)
// Task lifecycle states reported to the log server.
const (
	started    = "started"
	processing = "processing"
	succeeded  = "succeeded"
	failed     = "failed"
)
// Start records the beginning of a task on the log server and returns
// the created log entry.
func Start(JSON *models.JSON, endPoint string) (log *models.Log, err error) {
	created, createErr := createLog(JSON, endPoint, started)
	if createErr != nil {
		fmt.Println(createErr)
		return nil, createErr
	}
	return created, nil
}
// Update marks an existing task as still processing on the log server
// and returns the refreshed log entry.
func Update(JSON *models.JSON, oldLog *models.Log) (log *models.Log, err error) {
	updated, updateErr := updateLog(JSON, oldLog, processing)
	if updateErr != nil {
		fmt.Println(updateErr)
		return nil, updateErr
	}
	return updated, nil
}
// Success marks an existing task as succeeded on the log server and
// returns the refreshed log entry.
func Success(JSON *models.JSON, oldLog *models.Log) (log *models.Log, err error) {
	updated, updateErr := updateLog(JSON, oldLog, succeeded)
	if updateErr != nil {
		fmt.Println(updateErr)
		return nil, updateErr
	}
	return updated, nil
}
// Fail marks an existing task as failed on the log server and returns
// the refreshed log entry.
func Fail(JSON *models.JSON, oldLog *models.Log) (log *models.Log, err error) {
	updated, updateErr := updateLog(JSON, oldLog, failed)
	if updateErr != nil {
		fmt.Println(updateErr)
		return nil, updateErr
	}
	return updated, nil
}
// createLog registers a new task with the local log server and returns
// the Log record the server created. The payload is a copy of *JSON
// with file contents stripped.
//
// Fixes over the original:
//   - a non-200 status now returns a real error (the original returned
//     `nil, err` where err was nil, so callers saw a nil log as success);
//   - resp.Body is closed on every path, not only on full success.
func createLog(JSON *models.JSON, endPoint string, status string) (log *models.Log, err error) {
	url := "http://localhost:9001/tasks/"
	j := *JSON
	j.Input.Files = nil
	// NOTE(review): if j.Exports is a slice of values (not pointers),
	// this loop mutates copies and strips nothing — confirm the model.
	for _, v := range j.Exports {
		v.Files = nil
	}
	log = &models.Log{
		EndPoint: endPoint,
		Input:    j.Input,
		Exports:  j.Exports,
		Roots:    j.Roots,
		Status:   status,
	}
	resp, err := HttpPost(url, log)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return nil, errors.New("ログサーバとの接続に失敗しました。StatusCode:" + strconv.Itoa(resp.StatusCode))
	}
	var logResp *models.LogResponse
	err = json2.Unmarshal(body, &logResp)
	if err != nil {
		return nil, err
	}
	log = &logResp.Data.Task
	return log, nil
}
// updateLog PUTs the new status (with stripped input/exports and empty
// roots) for an existing task to the log server and returns the updated
// Log.
//
// Fix over the original: resp.Body is closed via defer immediately
// after the request, so it is no longer leaked on the early error
// returns (read error, bad status, unmarshal error).
func updateLog(JSON *models.JSON, oldLog *models.Log, status string) (log *models.Log, err error) {
	url := "http://localhost:9001/tasks/"
	j := *JSON
	j.Input.Files = nil
	// NOTE(review): if j.Exports holds values rather than pointers this
	// strips nothing — confirm against models.JSON.
	for _, v := range j.Exports {
		v.Files = nil
	}
	// Roots are not resent on update.
	j.Roots = []models.Root{}
	log = &models.Log{
		Input:    j.Input,
		Exports:  j.Exports,
		Roots:    j.Roots,
		Status:   status,
		Model:    oldLog.Model,
		EndPoint: oldLog.EndPoint,
	}
	resp, err := HttpPut(url, oldLog.ID, log)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	spew.Dump(oldLog.ID) // debug trace left from development; consider removing
	if resp.StatusCode != http.StatusOK {
		return nil, errors.New("ログサーバとの接続に失敗しました。StatusCode:" + strconv.Itoa(resp.StatusCode))
	}
	err = json2.Unmarshal(body, &log)
	if err != nil {
		return nil, err
	}
	return log, nil
}
// HttpPost sends body, JSON-encoded, to url via an HTTP POST and
// returns the raw response. The caller owns resp.Body and must close it.
func HttpPost(url string, body interface{}) (*http.Response, error) {
	payload, err := json.Marshal(body)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(payload))
	if err != nil {
		return nil, err
	}
	// Tell the server we are sending JSON.
	req.Header.Set("Content-Type", "application/json")
	return (&http.Client{}).Do(req)
}
// HttpPut sends body, JSON-encoded, to url+<id> via an HTTP PUT and
// returns the raw response. The caller owns resp.Body and must close it.
func HttpPut(url string, id uint64, body interface{}) (*http.Response, error) {
	payload, err := json.Marshal(body)
	if err != nil {
		return nil, err
	}
	target := url + strconv.FormatUint(id, 10)
	req, err := http.NewRequest("PUT", target, bytes.NewBuffer(payload))
	if err != nil {
		return nil, err
	}
	// Tell the server we are sending JSON.
	req.Header.Set("Content-Type", "application/json")
	return (&http.Client{}).Do(req)
}
|
package http
import (
"time"
"net/http"
"../../package/db"
)
// OutputData is the access-log record emitted for each request.
type OutputData struct {
	Time       float64 // request timestamp, seconds since the Unix epoch
	Path       string  `json:"Path"`
	RemoteAddr string
	//ContentLength int64
	Message string
	Table   db.Table // NOTE(review): not populated by GetOutputData — confirm who fills it
}
// GetOutputData builds an OutputData snapshot for an incoming request:
// the current time in fractional seconds, the request path, and the
// remote address. Message and Table are left at their zero values.
func GetOutputData(req *http.Request) OutputData {
	seconds := float64(time.Now().UnixNano()) / (1000 * 1000 * 1000)
	return OutputData{
		Time:       seconds,
		Path:       req.URL.Path,
		RemoteAddr: req.RemoteAddr,
	}
}
|
package main
import "fmt"
// main demonstrates maxProfit on a sample price series; the best
// single-transaction profit here is 28 (buy at 2, sell at 30).
func main() {
	s := []int{6, 8, 30, 2, 30, 7, 8, 7, 7}
	fmt.Println(maxProfit(s))
}
// maxProfit returns the maximum profit achievable from one buy/sell
// transaction over prices, where the buy must precede the sell. It
// returns 0 when no profitable transaction exists (including empty or
// single-element input).
//
// Single-pass O(n): track the minimum price seen so far and the best
// spread against it, replacing the original O(n^2) pairwise scan.
func maxProfit(prices []int) int {
	if len(prices) == 0 {
		return 0
	}
	minPrice := prices[0]
	best := 0
	for _, p := range prices[1:] {
		if p < minPrice {
			minPrice = p
		} else if p-minPrice > best {
			best = p - minPrice
		}
	}
	return best
}
// min returns the smaller of x and y.
func min(x, y int) int {
	if x < y {
		return x
	}
	return y
}
|
/*
* @file
* @copyright defined in aergo/LICENSE.txt
*/
package p2putil
import (
"bytes"
"fmt"
"github.com/aergoio/aergo/internal/network"
"github.com/aergoio/aergo/p2p/p2pcommon"
"github.com/aergoio/aergo/types"
"github.com/libp2p/go-libp2p-core"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/test"
"github.com/multiformats/go-multiaddr"
"io/ioutil"
"net"
"os"
"path/filepath"
"strconv"
"strings"
)
// RandomPeerID returns a freshly generated random peer id.
// NOTE use only in test
func RandomPeerID() types.PeerID {
	// error deliberately discarded: test helper, presumed infallible
	id,_ := test.RandPeerID()
	return id
}
// PeerMetaToMultiAddr make libp2p compatible Multiaddr object from peermeta.
// m.IPAddress must resolve to a single IP; the resulting address pairs
// that IP with m.Port.
func PeerMetaToMultiAddr(m p2pcommon.PeerMeta) (multiaddr.Multiaddr, error) {
	ipAddr, err := network.GetSingleIPAddress(m.IPAddress)
	if err != nil {
		return nil, err
	}
	return types.ToMultiAddr(ipAddr, m.Port)
}
// FromMultiAddr extracts a PeerMeta from a multiaddr of the exact shape
// /ip4|ip6/<addr>/<proto>/<port>/<p2p>/<peer-id> (7 slash-separated
// fields, counting the leading empty field).
func FromMultiAddr(targetAddr multiaddr.Multiaddr) (p2pcommon.PeerMeta, error) {
	empty := p2pcommon.PeerMeta{}
	fields := strings.Split(targetAddr.String(), "/")
	if len(fields) != 7 {
		return empty, fmt.Errorf("invalid NPAddPeer addr format %s", targetAddr.String())
	}
	if fields[1] != "ip4" && fields[1] != "ip6" {
		return empty, fmt.Errorf("invalid NPAddPeer addr type %s", fields[1])
	}
	port, err := strconv.Atoi(fields[4])
	if err != nil {
		return empty, fmt.Errorf("invalid Peer port %s", fields[4])
	}
	id, err := types.IDB58Decode(fields[6])
	if err != nil {
		return empty, fmt.Errorf("invalid PeerID %s", fields[6])
	}
	return p2pcommon.PeerMeta{
		ID:        id,
		Port:      uint32(port),
		IPAddress: fields[2],
	}, nil
}
// FromMultiAddrString parses str (resolving DNS names where needed) and
// converts the result into a PeerMeta.
func FromMultiAddrString(str string) (p2pcommon.PeerMeta, error) {
	ma, err := types.ParseMultiaddrWithResolve(str)
	if err == nil {
		return FromMultiAddr(ma)
	}
	return p2pcommon.PeerMeta{}, err
}
// FromMultiAddrStringWithPID parses str as a multiaddr, appends a
// /p2p/<id> component for the given peer id, and converts the joined
// address into a PeerMeta.
func FromMultiAddrStringWithPID(str string, id types.PeerID) (p2pcommon.PeerMeta, error) {
	addr1, err := types.ParseMultiaddrWithResolve(str)
	if err != nil {
		return p2pcommon.PeerMeta{}, err
	}
	pidAddr, err := multiaddr.NewComponent(multiaddr.ProtocolWithCode(multiaddr.P_P2P).Name, id.Pretty())
	if err != nil {
		return p2pcommon.PeerMeta{}, err
	}
	ma := multiaddr.Join(addr1, pidAddr)
	return FromMultiAddr(ma)
}
// ExtractIPAddress returns the IP address contained in ma, preferring
// an ip4 component over ip6. It returns nil when ma carries neither.
func ExtractIPAddress(ma multiaddr.Multiaddr) net.IP {
	for _, proto := range []int{multiaddr.P_IP4, multiaddr.P_IP6} {
		if ipStr, err := ma.ValueForProtocol(proto); err == nil {
			return net.ParseIP(ipStr)
		}
	}
	return nil
}
// LoadKeyFile reads a marshaled libp2p private key from keyFile and
// returns the private key together with its derived public key.
func LoadKeyFile(keyFile string) (crypto.PrivKey, crypto.PubKey, error) {
	dat, err := ioutil.ReadFile(keyFile)
	if err != nil {
		return nil, nil, fmt.Errorf("Invalid keyfile path '"+ keyFile +"'. Check the key file exists.")
	}
	priv, err := crypto.UnmarshalPrivateKey(dat)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid keyfile. It's not private key file")
	}
	return priv, priv.GetPublic(), nil
}
// GenerateKeyFile creates a fresh secp256k1 key pair, persists it under
// dir as <prefix>.{key,id} files, and returns the pair. The directory
// is created first when it does not exist.
func GenerateKeyFile(dir, prefix string) (crypto.PrivKey, crypto.PubKey, error) {
	// invariant: key file must not exists.
	if _, err2 := os.Stat(dir); os.IsNotExist(err2) {
		mkdirErr := os.MkdirAll(dir, os.ModePerm)
		if mkdirErr != nil {
			return nil, nil, mkdirErr
		}
	}
	// file not exist and create new file
	priv, pub, err := crypto.GenerateKeyPair(crypto.Secp256k1, 256)
	if err != nil {
		return nil, nil, err
	}
	err = writeToKeyFiles(priv, pub, dir, prefix)
	if err != nil {
		// Fix: wrap with %w (instead of %v on err.Error()) so callers
		// can inspect the cause via errors.Unwrap/Is/As. Rendered text
		// is unchanged.
		return nil, nil, fmt.Errorf("Failed to generate files %s.{key,id}: %w", prefix, err)
	}
	return priv, priv.GetPublic(), nil
}
// writeToKeyFiles persists the marshaled private key to
// <dir>/<prefix><DefaultPkKeyExt> and the base58 peer id derived from
// pub to <dir>/<prefix><DefaultPeerIDExt>.
//
// Fixes over the original: both files are now closed (they were leaked
// before) and write/sync/close errors are no longer silently discarded.
func writeToKeyFiles(priv crypto.PrivKey, pub crypto.PubKey, dir, prefix string) error {
	pkBytes, err := priv.Bytes()
	if err != nil {
		return err
	}
	// Write private key file
	pkFile := filepath.Join(dir, prefix+p2pcommon.DefaultPkKeyExt)
	pkf, err := os.Create(pkFile)
	if err != nil {
		return err
	}
	if _, err := pkf.Write(pkBytes); err != nil {
		pkf.Close()
		return err
	}
	if err := pkf.Sync(); err != nil {
		pkf.Close()
		return err
	}
	if err := pkf.Close(); err != nil {
		return err
	}
	// Write id file
	idFile := filepath.Join(dir, prefix+p2pcommon.DefaultPeerIDExt)
	idf, err := os.Create(idFile)
	if err != nil {
		return err
	}
	// error deliberately ignored: pub was just generated above and is
	// presumed always convertible — confirm if keys can come from elsewhere.
	pid, _ := types.IDFromPublicKey(pub)
	if _, err := idf.Write([]byte(types.IDB58Encode(pid))); err != nil {
		idf.Close()
		return err
	}
	if err := idf.Sync(); err != nil {
		idf.Close()
		return err
	}
	return idf.Close()
}
// ProtocolIDsToString renders a slice of protocol ids as
// "[id1,id2,...]" with no spaces; an empty slice yields "[]".
// Simplified from a manual bytes.Buffer loop to strings.Join.
func ProtocolIDsToString(sli []core.ProtocolID) string {
	parts := make([]string, len(sli))
	for i, id := range sli {
		parts[i] = string(id)
	}
	return "[" + strings.Join(parts, ",") + "]"
}
|
package main
import (
"fmt"
hyperclient "github.com/Cloud-Foundations/Dominator/hypervisor/client"
"github.com/Cloud-Foundations/Dominator/lib/log"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
)
// registerExternalLeasesSubcommand is the CLI entry point for the
// register-external-leases subcommand; args is unused.
func registerExternalLeasesSubcommand(args []string,
	logger log.DebugLogger) error {
	err := registerExternalLeases(logger)
	if err != nil {
		// Fix: wrap with %w so the underlying cause stays inspectable;
		// rendered message text is unchanged.
		return fmt.Errorf("error registering external leases: %w", err)
	}
	return nil
}
// registerExternalLeases dials the hypervisor (address taken from the
// package-level hostname/port flags) over SRPC and registers the
// externally managed lease addresses and hostnames.
func registerExternalLeases(logger log.DebugLogger) error {
	clientName := fmt.Sprintf("%s:%d", *hypervisorHostname, *hypervisorPortNum)
	client, err := srpc.DialHTTP("tcp", clientName, 0)
	if err != nil {
		return err
	}
	defer client.Close()
	return hyperclient.RegisterExternalLeases(client, externalLeaseAddresses,
		externalLeaseHostnames)
}
|
package main
import (
"log"
"net/http"
"fmt"
"time"
"encoding/json"
)
//Customer is the data structure that defines a customer record as
//serialized on the JSON API.
type Customer struct {
	CustID    int       `json:"cust_id"`
	FirstName string    `json:"first_name"`
	LastName  string    `json:"last_name"`
	CreatedAt time.Time `json:"created_at"`
	LastLogin time.Time `json:"last_login"`
	FaveGame  string    `json:"fave_game"`
}
//Customers is a slice of Customer records — the payload returned by
//the /customers endpoint.
type Customers []Customer
// allCustomers writes the full (currently hard-coded) customer list as
// a JSON array.
//
// Fixes over the original: the Content-Type header is set before
// encoding, and an encoding failure is logged instead of silently
// dropped.
func allCustomers(w http.ResponseWriter, r *http.Request) {
	customers := Customers{
		Customer{CustID: 00000, FirstName: "Test", LastName: "Tested", CreatedAt: time.Now(), LastLogin: time.Now(), FaveGame: "Test Game"},
	}
	fmt.Println("Reached all customer api endpoint")
	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(customers); err != nil {
		log.Printf("encoding customers: %v", err)
	}
}
// home is a catch-all handler that answers with a small JSON message
// whose content and status code depend on the HTTP method.
//
// Fix: the original called w.WriteHeader(http.StatusOK) up front, then
// set Content-Type, then called WriteHeader again per method. Headers
// set after WriteHeader are ignored and only the first WriteHeader
// takes effect, so every response went out as 200 with no Content-Type.
// Set the header first and write the status exactly once.
func home(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	switch r.Method {
	case "GET":
		w.WriteHeader(http.StatusOK)
		w.Write([]byte(`{"message": "get called"}`))
	case "POST":
		w.WriteHeader(http.StatusCreated)
		w.Write([]byte(`{"message": "post called"}`))
	case "PUT":
		w.WriteHeader(http.StatusAccepted)
		w.Write([]byte(`{"message": "put called"}`))
	case "DELETE":
		w.WriteHeader(http.StatusOK)
		w.Write([]byte(`{"message": "delete called"}`))
	default:
		w.WriteHeader(http.StatusNotFound)
		w.Write([]byte(`{"message": "not found"}`))
	}
}
// handleRequests registers the HTTP routes and serves on :8080. It
// blocks forever; a terminal ListenAndServe error exits via log.Fatal.
func handleRequests() {
	http.HandleFunc("/", home)
	http.HandleFunc("/customers", allCustomers)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
// main starts the HTTP server; never returns under normal operation.
func main() {
	handleRequests()
}
|
package techpalace
import (
"strings"
)
// WelcomeMessage returns a welcome message for the customer, with the
// customer's name upper-cased for emphasis.
func WelcomeMessage(customer string) string {
	shouted := strings.ToUpper(customer)
	return "Welcome to the Tech Palace, " + shouted
}
// AddBorder frames a welcome message with a line of numStarsPerLine
// stars above and below.
func AddBorder(welcomeMsg string, numStarsPerLine int) string {
	border := strings.Repeat("*", numStarsPerLine)
	return strings.Join([]string{border, welcomeMsg, border}, "\n")
}
// CleanupMessage strips the star border and all newlines from an old
// marketing message, then trims surrounding spaces.
func CleanupMessage(oldMsg string) string {
	// Both replacements map to "" and cannot interact, so a single
	// Replacer pass matches the original two ReplaceAll passes.
	replacer := strings.NewReplacer("\n", "", "*", "")
	return strings.Trim(replacer.Replace(oldMsg), " ")
}
|
/*
* @lc app=leetcode.cn id=461 lang=golang
*
* [461] 汉明距离
*/
package main
import "fmt"
// @lc code=start
// hammingDistance returns the number of bit positions at which x and y
// differ, i.e. the population count of x XOR y. Uses Kernighan's trick:
// diff &= diff-1 clears the lowest set bit per iteration.
func hammingDistance(x int, y int) int {
	count := 0
	for diff := x ^ y; diff != 0; diff &= diff - 1 {
		count++
	}
	return count
}
// @lc code=end
// main prints the Hamming distance of 3 and 1 (expected: 1).
func main() {
	fmt.Println(hammingDistance(3, 1))
}
|
/*
Given a binary message, and the number of parity bits, generate the associated parity bits.
A parity bit is a simple form of error detection. It's generated by counting the number of 1's in the message, if it's even attach a 0 to the end, if it's odd attach 1.
That way, if there's a 1-bit error, 3-bit error, 5-bit error, ... in the message, because of the parity-bit you know the message has been altered.
Although if there were an even number of bits altered, the parity stays the same, so you wouldn't know if the message has been changed.
Only 50% of the time you'd know if bits have been altered with one parity bit.
Generate Parity bits
To generate n parity bits for a given binary message:
Count the number of 1's in the message
Modulo by 2^n
Attach the remainder to the message
For example, using three parity bits (n=3) and the message 10110111110110111:
10110111110110111 -> 13 1's
13 mod 2^3 -> 5
10110111110110111 with 5 attached (in binary) -> 10110111110110111101
The last three digits act as parity bits.
The advantage with parity bits is, that they can't detect errors only when a multiple of 2^n bits have been altered. With three parity bits, you can't detect errors when 8, 16, 24, 32, 40, ... bits have been altered.
(1 - 1/2^n)% of the time you'd know when bits have been altered, significantly more than with just one parity bit.
Rules
Take as input a binary string or a binary array and an integer (0<n, the number of parity bits)
Output a binary string/binary array with n parity bits attached
The parity bits should be padded with n zeros
Leading zeros are allowed
This is code-golf, so the shortest answer wins
Test Cases
[In]: 10110, 1
[Out]: 101101
[In]: 0110101, 2
[Out]: 011010100
[In]: 1011101110, 3
[Out]: 1011101110111
[In]: 0011001100111101111010011111, 4
[Out]: 00110011001111011110100111110010
*/
package main
import (
"fmt"
"strings"
)
// main exercises parity against the four cases from the problem
// statement; any mismatch panics via assert.
func main() {
	assert(parity("10110", 1) == "101101")
	assert(parity("0110101", 2) == "011010100")
	assert(parity("1011101110", 3) == "1011101110111")
	assert(parity("0011001100111101111010011111", 4) == "00110011001111011110100111110010")
}
// assert panics when the condition x is false; a minimal test harness
// for main.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// parity appends n parity bits to the binary string s: the count of '1'
// characters in s, reduced modulo 2^n and zero-padded to exactly n
// binary digits.
func parity(s string, n int) string {
	ones := strings.Count(s, "1")
	suffix := fmt.Sprintf("%0*b", n, ones%(1<<n))
	return s + suffix
}
|
package agent_mgr
import (
"github.com/kataras/iris/core/errors"
"gosconf"
"goslib/gen_server"
"goslib/logger"
"sync"
"time"
)
// connectApp is the dispatcher's view of one connect (agent) app
// instance and its current load.
type connectApp struct {
	Uuid     string
	Host     string
	Port     string
	Ccu      int32 // current concurrent users
	CcuMax   int32 // capacity; Ccu >= CcuMax means the agent is full
	ActiveAt int64 // unix seconds of the last heartbeat
}
// DispatchCache remembers which agent an account was last routed to, so
// repeat dispatches stay sticky while the assignment is still valid.
type DispatchCache struct {
	app      *connectApp
	activeAt int64 // unix seconds of the last use of this cache entry
}
// SERVER is the gen_server registration name of the dispatcher process.
const SERVER = "ConnectAppDispatcher"

// agentInfos collects heartbeat reports keyed by agent uuid; written by
// handleReportAgentInfo and reconciled by the dispatcher's agent check.
var agentInfos = &sync.Map{}

// AgentInfo is one raw heartbeat report from a connect agent.
type AgentInfo struct {
	uuid     string
	host     string
	port     string
	ccu      int32
	activeAt int64 // unix seconds when the report arrived
}
// startAgentDispatcher spawns the dispatcher gen_server under SERVER.
func startAgentDispatcher() {
	gen_server.Start(SERVER, new(Dispatcher))
}
// handleReportAgentInfo ingests one heartbeat from agent uuid: a known
// agent gets its ccu and activeAt refreshed, an unknown one is stored
// as a fresh AgentInfo.
// NOTE(review): the field writes on the loaded *AgentInfo are not
// synchronized with readers in the dispatcher's agent check — confirm
// access is serialized or the race is acceptable.
func handleReportAgentInfo(uuid string, host string, port string, ccu int32) {
	if agentInfo, ok := agentInfos.Load(uuid); ok {
		agentInfo.(*AgentInfo).ccu = ccu
		agentInfo.(*AgentInfo).activeAt = time.Now().Unix()
	} else {
		agentInfo = &AgentInfo{
			uuid:     uuid,
			host:     host,
			port:     port,
			ccu:      ccu,
			activeAt: time.Now().Unix(),
		}
		agentInfos.Store(uuid, agentInfo)
	}
}
// dispatchAgent synchronously asks the dispatcher which connect app
// should serve accountId/groupId and unpacks its address. A nil app
// (no agent available at all) yields empty strings with a nil error.
// NOTE(review): result.(*connectApp) panics if the call ever returns a
// value of a different dynamic type — confirm HandleCall's contract.
func dispatchAgent(accountId string, groupId string) (appId string, host string, port string, err error) {
	result, err := gen_server.Call(SERVER, "Dispatch", accountId, groupId)
	if err != nil {
		logger.ERR("connectApp Dispatch failed: %v", err)
		return "", "", "", err
	}
	app := result.(*connectApp)
	if app == nil {
		return "", "", "", nil
	}
	appId = app.Uuid
	host = app.Host
	port = app.Port
	return
}
/*
GenServer Callbacks
*/
// Dispatcher is the gen_server state: the live agent set, the sticky
// account-to-agent cache, the group/agent relations, and the two
// housekeeping timers.
type Dispatcher struct {
	apps            map[string]*connectApp
	dispatchCache   map[string]*DispatchCache
	appMapGroups    map[string][]string // agent uuid -> group ids it serves
	groupMapApps    map[string][]string // group id -> agent uuids serving it
	printTimer      *time.Timer
	agentCheckTimer *time.Timer
}
// startPrintTimer schedules a one-shot "printStatus" cast 5s from now;
// the handler re-arms it, producing a periodic status dump.
//
// Fix: the original ALSO armed an "agentCheck" timer here, duplicating
// startAgentCheckTimer. Because the printStatus handler calls
// startPrintTimer again on every tick, that leaked one extra agentCheck
// timer per print cycle, so agent checks multiplied without bound.
func (self *Dispatcher) startPrintTimer() {
	self.printTimer = time.AfterFunc(5*time.Second, func() {
		gen_server.Cast(SERVER, "printStatus")
	})
}
// startAgentCheckTimer schedules a one-shot "agentCheck" cast 5s from
// now; the handler re-arms it, making the check periodic.
func (self *Dispatcher) startAgentCheckTimer() {
	self.agentCheckTimer = time.AfterFunc(5*time.Second, func() {
		gen_server.Cast(SERVER, "agentCheck")
	})
}
// Init is the gen_server init callback: allocates the dispatcher state
// maps and starts both housekeeping timers. args is unused.
func (self *Dispatcher) Init(args []interface{}) (err error) {
	self.apps = make(map[string]*connectApp)
	self.dispatchCache = make(map[string]*DispatchCache)
	self.appMapGroups = make(map[string][]string)
	self.groupMapApps = make(map[string][]string)
	self.startPrintTimer()
	self.startAgentCheckTimer()
	return nil
}
// HandleCast dispatches asynchronous gen_server messages. args[0] is
// the message name: "printStatus" dumps agent status, "agentCheck"
// reconciles heartbeat reports with the live agent set. Each handler
// re-arms its own timer. Decomposed into helpers for readability.
func (self *Dispatcher) HandleCast(args []interface{}) {
	switch args[0].(string) {
	case "printStatus":
		self.printStatus()
	case "agentCheck":
		self.agentCheck()
	}
}

// printStatus logs one line per known connect app, then schedules the
// next print tick.
func (self *Dispatcher) printStatus() {
	for _, app := range self.apps {
		activeAt := time.Unix(app.ActiveAt, 0)
		logger.INFO("Agent uuid: ", app.Uuid, " address: ", app.Host, ":", app.Port, " ccu: ", app.Ccu, " activeAt: ", activeAt)
	}
	self.startPrintTimer()
}

// agentCheck folds the latest reports from agentInfos into self.apps:
// live agents are refreshed or added, dead ones are removed from both
// self.apps and the shared map. Re-arms the check timer when done.
func (self *Dispatcher) agentCheck() {
	now := time.Now().Unix()
	var needDelIds = make([]string, 0)
	agentInfos.Range(func(key, value interface{}) bool {
		agentInfo := value.(*AgentInfo)
		if isAgentAlive(now, agentInfo.activeAt) {
			if agent, ok := self.apps[agentInfo.uuid]; ok {
				agent.Ccu = agentInfo.ccu
				agent.ActiveAt = now
			} else {
				logger.WARN("addAgent: ", agentInfo.uuid)
				self.addAgent(agentInfo)
			}
		} else {
			logger.WARN("delAgent: ", agentInfo.uuid)
			needDelIds = append(needDelIds, agentInfo.uuid)
			self.delAgent(agentInfo.uuid)
		}
		return true
	})
	// Delete after Range so the map is not mutated mid-iteration.
	for _, needDelId := range needDelIds {
		agentInfos.Delete(needDelId)
	}
	self.startAgentCheckTimer()
}
// isAgentAlive reports whether a heartbeat at activeAt is still within
// the service-dead window relative to now.
func isAgentAlive(now int64, activeAt int64) bool {
	deadline := activeAt + gosconf.SERVICE_DEAD_DURATION
	return now < deadline
}
// addAgent registers a newly seen agent with zero load and a fresh
// heartbeat timestamp; capacity comes from gosconf.AGENT_CCU_MAX.
func (self *Dispatcher) addAgent(info *AgentInfo) {
	agent := &connectApp{
		Uuid:     info.uuid,
		Host:     info.host,
		Port:     info.port,
		Ccu:      0,
		CcuMax:   gosconf.AGENT_CCU_MAX,
		ActiveAt: time.Now().Unix(),
	}
	self.apps[agent.Uuid] = agent
}
// delAgent forgets an agent. NOTE(review): stale references may remain
// in groupMapApps/appMapGroups — readers must tolerate missing uuids.
func (self *Dispatcher) delAgent(uuid string) {
	delete(self.apps, uuid)
}
// HandleCall dispatches synchronous gen_server calls. Only "Dispatch"
// (args: account id, group id) is understood; anything else yields
// (nil, nil).
func (self *Dispatcher) HandleCall(args []interface{}) (interface{}, error) {
	switch args[0].(string) {
	case "Dispatch":
		return self.doDispatch(args[1].(string), args[2].(string))
	default:
		return nil, nil
	}
}
// Terminate is the gen_server shutdown callback; nothing to clean up.
func (self *Dispatcher) Terminate(reason string) (err error) {
	return nil
}
/*
 * Routing policy for connection services:
 * - with a groupId, prefer packing players of the same group onto the
 *   same agent; when it is full, fall back to the least-loaded agent
 *   and start a new branch of the group there.
 * - without a groupId, pick any agent with spare capacity, else the
 *   least-loaded agent.
 */
// doDispatch resolves the connect app for accountId, consulting and
// refreshing the per-account sticky cache.
//
// Fix: the nil check on the dispatch result now happens BEFORE the
// group bookkeeping. The original called dispatchApp.Uuid first, which
// dereferenced a nil pointer whenever no agent was available for a
// grouped account.
func (self *Dispatcher) doDispatch(accountId string, groupId string) (*connectApp, error) {
	if cache, ok := self.dispatchCache[accountId]; ok {
		if cache == nil {
			delete(self.dispatchCache, accountId)
		} else if self.matchDispatch(accountId, groupId, cache.app) {
			cache.activeAt = time.Now().Unix()
			return cache.app, nil
		}
	}
	var dispatchApp *connectApp
	if groupId == "" {
		dispatchApp = self.dispatchByAccountId(accountId)
	} else {
		dispatchApp = self.dispatchByGroupId(accountId, groupId)
	}
	if dispatchApp == nil {
		return nil, errors.New("No working agent found!")
	}
	if groupId != "" {
		self.appendAppIdToGroup(dispatchApp.Uuid, groupId)
		self.appendGroupIdToApp(dispatchApp.Uuid, groupId)
	}
	dispatchApp.Ccu++
	self.dispatchCache[accountId] = &DispatchCache{
		dispatchApp,
		time.Now().Unix(),
	}
	return dispatchApp, nil
}
// appendAppIdToGroup records that appId serves groupId, avoiding
// duplicate entries.
func (self *Dispatcher) appendAppIdToGroup(appId string, groupId string) {
	existing, found := self.groupMapApps[groupId]
	if !found {
		self.groupMapApps[groupId] = []string{appId}
		return
	}
	for _, known := range existing {
		if known == appId {
			return
		}
	}
	self.groupMapApps[groupId] = append(existing, appId)
}
// appendGroupIdToApp records that groupId is served by appId, avoiding
// duplicate entries.
func (self *Dispatcher) appendGroupIdToApp(appId string, groupId string) {
	existing, found := self.appMapGroups[appId]
	if !found {
		self.appMapGroups[appId] = []string{groupId}
		return
	}
	for _, known := range existing {
		if known == groupId {
			return
		}
	}
	self.appMapGroups[appId] = append(existing, groupId)
}
// dispatchByAccountId returns the first app with spare capacity, or the
// least-loaded app when every one is full; nil when no apps exist.
// NOTE(review): Go map iteration order is random, so "first with
// capacity" is effectively a random pick among non-full apps.
func (self *Dispatcher) dispatchByAccountId(accountId string) *connectApp {
	var minPressureApp *connectApp
	for _, app := range self.apps {
		if app.Ccu < app.CcuMax {
			return app
		}
		minPressureApp = chooseLessPresure(minPressureApp, app, 0)
	}
	return minPressureApp
}
// dispatchByGroupId finds an agent for a grouped account: first an
// agent already serving the group with spare capacity, otherwise the
// globally least-loaded agent, with a bias (weight 0.3) toward staying
// on a full agent that already hosts the group.
//
// Fix: appIds recorded in groupMapApps may be stale after delAgent; the
// original dereferenced self.apps[appId] without a nil check.
func (self *Dispatcher) dispatchByGroupId(accountId string, groupId string) *connectApp {
	appIds, ok := self.groupMapApps[groupId]
	var minPresureApp *connectApp
	var minGroupedPresureApp *connectApp
	// Dispatch to old group
	if ok {
		for _, appId := range appIds {
			app := self.apps[appId]
			if app == nil {
				continue // agent was removed; stale group entry
			}
			if app.Ccu < app.CcuMax {
				return app
			}
			minGroupedPresureApp = chooseLessPresure(minGroupedPresureApp, app, 0)
		}
	}
	// Dispatch to min presure app
	for _, app := range self.apps {
		minPresureApp = chooseLessPresure(minPresureApp, app, 0)
	}
	return chooseLessPresure(minPresureApp, minGroupedPresureApp, 0.3)
}
/*
 * Check targetApp is a valid agent for account
 * 1.has enough space
 * 2.is same group
 */
// matchDispatch reports whether targetApp remains a valid agent for the
// account: by spare capacity alone when no group is given, otherwise by
// group membership.
func (self *Dispatcher) matchDispatch(accountId string, groupId string, targetApp *connectApp) bool {
	if groupId == "" {
		return self.matchDispatchByAccountId(accountId, targetApp)
	}
	return self.matchDispatchByGroupId(groupId, targetApp)
}
// matchDispatchByAccountId: a cached agent stays valid for an ungrouped
// account as long as it has spare capacity.
func (self *Dispatcher) matchDispatchByAccountId(accountId string, targetApp *connectApp) bool {
	return targetApp.Ccu < targetApp.CcuMax
}
// matchDispatchByGroupId reports whether targetApp is one of the agents
// already serving groupId.
// NOTE(review): like the original, this dereferences self.apps[appId]
// without a nil check; stale group entries would panic here.
func (self *Dispatcher) matchDispatchByGroupId(groupId string, targetApp *connectApp) bool {
	appIds, ok := self.groupMapApps[groupId]
	if !ok {
		return false
	}
	for _, appId := range appIds {
		if self.apps[appId].Uuid == targetApp.Uuid {
			return true
		}
	}
	return false
}
// chooseLessPresure returns whichever app is under less relative load
// (Ccu/CcuMax); a nil argument loses automatically. weightB discounts
// appB's load: appA wins only when its load is below appB's divided by
// (1+weightB), biasing the choice toward appB.
func chooseLessPresure(appA *connectApp, appB *connectApp, weightB float32) *connectApp {
	if appA == nil {
		return appB
	}
	if appB == nil {
		return appA
	}
	loadA := float32(appA.Ccu) / float32(appA.CcuMax)
	loadB := float32(appB.Ccu) / float32(appB.CcuMax)
	if loadA < loadB/(1+weightB) {
		return appA
	}
	return appB
}
|
package db
import (
"database/sql"
_ "github.com/go-sql-driver/mysql"
)
// MySQLConnect opens a handle to the local "serviciotest" MySQL
// database as root. sql.Open only validates the DSN (it does not dial),
// so an unreachable server is NOT detected here; only a malformed DSN
// panics. NOTE(review): callers should Ping to verify connectivity and
// Close the returned handle.
func MySQLConnect() (dbs *sql.DB) {
	dbs, err := sql.Open("mysql", "root@tcp(127.0.0.1:3306)/serviciotest")
	if err != nil {
		panic(err.Error())
	}
	return dbs
}
|
package kdtree
import "math"
// Point abstracts a k-dimensional point: Dimensions reports k and
// Dimension(i) returns the i-th coordinate (0-based).
type Point interface {
	Dimensions() int
	Dimension(i int) float64
}
// equals reports whether p1 and p2 have the same dimensionality and
// identical coordinates in every dimension.
func equals(p1, p2 Point) bool {
	if p1.Dimensions() != p2.Dimensions() {
		return false
	}
	for d := 0; d < p1.Dimensions(); d++ {
		if p1.Dimension(d) != p2.Dimension(d) {
			return false
		}
	}
	return true
}
// distance2 returns the squared Euclidean distance between p1 and p2.
// Mismatched dimensionalities yield math.MaxFloat64 as a sentinel.
func distance2(p1, p2 Point) float64 {
	if p1.Dimensions() != p2.Dimensions() {
		return math.MaxFloat64
	}
	sum := 0.0
	for d := 0; d < p1.Dimensions(); d++ {
		diff := p1.Dimension(d) - p2.Dimension(d)
		sum += diff * diff
	}
	return sum
}
// distance returns the Euclidean distance between p1 and p2 (the square
// root of distance2).
func distance(p1, p2 Point) float64 {
	return math.Sqrt(distance2(p1,p2))
}
|
// Copyright (c) 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package walletdb
import (
"errors"
)
// Errors that can occur during driver registration.
var (
	// ErrDbTypeRegistered is returned when two different database drivers
	// attempt to register with the same database type.
	ErrDbTypeRegistered = errors.New("database type already registered")
)

// Errors that the various database functions may return.
var (
	// ErrDbUnknownType is returned when there is no driver registered for
	// the specified database type.
	ErrDbUnknownType = errors.New("unknown database type")
	// ErrDbDoesNotExist is returned when open is called for a database that
	// does not exist.
	ErrDbDoesNotExist = errors.New("database does not exist")
	// ErrDbExists is returned when create is called for a database that
	// already exists.
	ErrDbExists = errors.New("database already exists")
	// ErrDbNotOpen is returned when a database instance is accessed before
	// it is opened or after it is closed.
	ErrDbNotOpen = errors.New("database not open")
	// ErrDbAlreadyOpen is returned when open is called on a database that
	// is already open.
	ErrDbAlreadyOpen = errors.New("database already open")
	// ErrInvalid is returned if the specified database is not valid.
	ErrInvalid = errors.New("invalid database")
	// ErrDryRunRollBack is returned if a database transaction should be
	// rolled back because its changes were a dry-run only.
	ErrDryRunRollBack = errors.New("dry run only; should roll back")
)

// Errors that can occur when beginning or committing a transaction.
var (
	// ErrTxClosed is returned when attempting to commit or rollback a
	// transaction that has already had one of those operations performed.
	ErrTxClosed = errors.New("tx closed")
	// ErrTxNotWritable is returned when an operation that requires write
	// access to the database is attempted against a read-only transaction.
	ErrTxNotWritable = errors.New("tx not writable")
)

// Errors that can occur when putting or deleting a value or bucket.
var (
	// ErrBucketNotFound is returned when trying to access a bucket that has
	// not been created yet.
	ErrBucketNotFound = errors.New("bucket not found")
	// ErrBucketExists is returned when creating a bucket that already exists.
	ErrBucketExists = errors.New("bucket already exists")
	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
	ErrBucketNameRequired = errors.New("bucket name required")
	// ErrKeyRequired is returned when inserting a zero-length key.
	ErrKeyRequired = errors.New("key required")
	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
	ErrKeyTooLarge = errors.New("key too large")
	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
	ErrValueTooLarge = errors.New("value too large")
	// ErrIncompatibleValue is returned when trying to create or delete a
	// bucket on an existing non-bucket key or when trying to create or
	// delete a non-bucket key on an existing bucket key.
	ErrIncompatibleValue = errors.New("incompatible value")
)
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package importer
import (
"bytes"
"context"
"database/sql"
"fmt"
"io"
"strings"
mysql_sql_driver "github.com/go-sql-driver/mysql"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/br/pkg/lightning/backend"
"github.com/pingcap/tidb/br/pkg/lightning/backend/encode"
"github.com/pingcap/tidb/br/pkg/lightning/backend/kv"
"github.com/pingcap/tidb/br/pkg/lightning/backend/local"
"github.com/pingcap/tidb/br/pkg/lightning/backend/tidb"
"github.com/pingcap/tidb/br/pkg/lightning/checkpoints"
"github.com/pingcap/tidb/br/pkg/lightning/common"
"github.com/pingcap/tidb/br/pkg/lightning/config"
"github.com/pingcap/tidb/br/pkg/lightning/errormanager"
ropts "github.com/pingcap/tidb/br/pkg/lightning/importer/opts"
"github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/pingcap/tidb/br/pkg/lightning/mydump"
"github.com/pingcap/tidb/br/pkg/lightning/verification"
"github.com/pingcap/tidb/br/pkg/lightning/worker"
"github.com/pingcap/tidb/br/pkg/storage"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/model"
_ "github.com/pingcap/tidb/planner/core" // to setup expression.EvalAstExpr. Otherwise we cannot parse the default value
"github.com/pingcap/tidb/store/pdtypes"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/dbterror"
"github.com/pingcap/tidb/util/mock"
pd "github.com/tikv/pd/client"
"go.uber.org/zap"
"golang.org/x/exp/maps"
)
// compressionRatio is the tikv/tiflash's compression ratio. It is used by
// EstimateSourceDataSize to scale the raw KV size down to an estimated
// on-disk size for the local backend.
const compressionRatio = float64(1) / 3
// EstimateSourceDataSizeResult is the object for estimated data size result.
type EstimateSourceDataSizeResult struct {
	// SizeWithIndex is the tikv size with the index.
	SizeWithIndex int64
	// SizeWithoutIndex is the tikv size without the index
	// (i.e. the total size of all the source files).
	SizeWithoutIndex int64
	// HasUnsortedBigTables indicates whether the source data has unsorted big tables or not.
	HasUnsortedBigTables bool
	// TiFlashSize is the estimated size to be replicated to tiflash.
	TiFlashSize int64
}
// PreImportInfoGetter defines the operations to get information from sources and target.
// This information is used in the preparation of the import ( like precheck ).
type PreImportInfoGetter interface {
	TargetInfoGetter
	// GetAllTableStructures gets all the table structures with the information from both the source and the target.
	GetAllTableStructures(ctx context.Context, opts ...ropts.GetPreInfoOption) (map[string]*checkpoints.TidbDBInfo, error)
	// ReadFirstNRowsByTableName reads the first N rows of data of an importing source table.
	ReadFirstNRowsByTableName(ctx context.Context, schemaName string, tableName string, n int) (cols []string, rows [][]types.Datum, err error)
	// ReadFirstNRowsByFileMeta reads the first N rows of a data file.
	ReadFirstNRowsByFileMeta(ctx context.Context, dataFileMeta mydump.SourceFileMeta, n int) (cols []string, rows [][]types.Datum, err error)
	// EstimateSourceDataSize estimates the data size to generate during the import as well as some other sub-information.
	// It will return:
	// * the estimated data size to generate during the import,
	//   which might include some extra index data to generate besides the source file data
	// * the total data size of all the source files,
	// * whether there are some unsorted big tables
	EstimateSourceDataSize(ctx context.Context, opts ...ropts.GetPreInfoOption) (*EstimateSourceDataSizeResult, error)
}
// TargetInfoGetter defines the operations to get information from target.
type TargetInfoGetter interface {
	// FetchRemoteTableModels fetches the table structures from the remote target.
	FetchRemoteTableModels(ctx context.Context, schemaName string) ([]*model.TableInfo, error)
	// CheckVersionRequirements performs the check whether the target satisfies the version requirements.
	CheckVersionRequirements(ctx context.Context) error
	// IsTableEmpty checks whether the specified table on the target DB contains data or not.
	IsTableEmpty(ctx context.Context, schemaName string, tableName string) (*bool, error)
	// GetTargetSysVariablesForImport gets some important system variables for importing on the target.
	GetTargetSysVariablesForImport(ctx context.Context, opts ...ropts.GetPreInfoOption) map[string]string
	// GetReplicationConfig gets the replication config on the target.
	GetReplicationConfig(ctx context.Context) (*pdtypes.ReplicationConfig, error)
	// GetStorageInfo gets the storage information on the target.
	GetStorageInfo(ctx context.Context) (*pdtypes.StoresInfo, error)
	// GetEmptyRegionsInfo gets the region information of all the empty regions on the target.
	GetEmptyRegionsInfo(ctx context.Context) (*pdtypes.RegionsInfo, error)
}
// preInfoGetterKey is a dedicated context-key type for values stored by this
// package, preventing collisions with keys defined elsewhere.
type preInfoGetterKey string

const (
	// preInfoGetterKeyDBMetas is the context key under which the mydump
	// database metas are stored (see WithPreInfoGetterDBMetas).
	preInfoGetterKeyDBMetas preInfoGetterKey = "PRE_INFO_GETTER/DB_METAS"
)
// WithPreInfoGetterDBMetas returns a new context with the specified dbMetas.
// The metas are later read back by CheckVersionRequirements.
func WithPreInfoGetterDBMetas(ctx context.Context, dbMetas []*mydump.MDDatabaseMeta) context.Context {
	return context.WithValue(ctx, preInfoGetterKeyDBMetas, dbMetas)
}
// TargetInfoGetterImpl implements the operations to get information from the target.
type TargetInfoGetterImpl struct {
	// cfg is the import configuration.
	cfg *config.Config
	// db is the SQL connection to the target.
	db *sql.DB
	// tls is used for the HTTP requests against PD.
	tls *common.TLS
	// backend is the backend-specific (tidb/local) info getter.
	backend backend.TargetInfoGetter
	// pdCli is the PD client; required for the local backend.
	pdCli pd.Client
}
// NewTargetInfoGetterImpl creates a TargetInfoGetterImpl object.
//
// The backend-specific info getter is selected from the backend configured in
// cfg (TiDB or local); any other backend name is rejected. A PD client is
// mandatory when the local backend is in use.
func NewTargetInfoGetterImpl(
	cfg *config.Config,
	targetDB *sql.DB,
	pdCli pd.Client,
) (*TargetInfoGetterImpl, error) {
	tlsConfig, err := cfg.ToTLS()
	if err != nil {
		return nil, errors.Trace(err)
	}
	var infoGetter backend.TargetInfoGetter
	switch cfg.TikvImporter.Backend {
	case config.BackendTiDB:
		infoGetter = tidb.NewTargetInfoGetter(targetDB)
	case config.BackendLocal:
		if pdCli == nil {
			return nil, common.ErrUnknown.GenWithStack("pd client is required when using local backend")
		}
		infoGetter = local.NewTargetInfoGetter(tlsConfig, targetDB, pdCli)
	default:
		return nil, common.ErrUnknownBackend.GenWithStackByArgs(cfg.TikvImporter.Backend)
	}
	return &TargetInfoGetterImpl{
		cfg:     cfg,
		tls:     tlsConfig,
		db:      targetDB,
		backend: infoGetter,
		pdCli:   pdCli,
	}, nil
}
// FetchRemoteTableModels fetches the table structures from the remote target.
// It implements the TargetInfoGetter interface.
// The call is delegated to the backend-specific info getter.
func (g *TargetInfoGetterImpl) FetchRemoteTableModels(ctx context.Context, schemaName string) ([]*model.TableInfo, error) {
	return g.backend.FetchRemoteTableModels(ctx, schemaName)
}
// CheckVersionRequirements performs the check whether the target satisfies the version requirements.
// It implements the TargetInfoGetter interface.
// Mydump database metas are retrieved from the context; when the context
// carries no metas (or a value of an unexpected type), a nil slice is used.
func (g *TargetInfoGetterImpl) CheckVersionRequirements(ctx context.Context) error {
	// A failed comma-ok type assertion leaves dbMetas at its zero value
	// (nil), which is exactly the behavior of the explicit nil checks the
	// previous version performed.
	dbMetas, _ := ctx.Value(preInfoGetterKeyDBMetas).([]*mydump.MDDatabaseMeta)
	checkCtx := &backend.CheckCtx{
		DBMetas: dbMetas,
	}
	return g.backend.CheckRequirements(ctx, checkCtx)
}
// IsTableEmpty checks whether the specified table on the target DB contains data or not.
// It implements the TargetInfoGetter interface.
// It tries to select a single row from the target table; a missing table or
// an empty result set both count as "empty".
func (g *TargetInfoGetterImpl) IsTableEmpty(ctx context.Context, schemaName string, tableName string) (*bool, error) {
	failpoint.Inject("CheckTableEmptyFailed", func() {
		failpoint.Return(nil, errors.New("mock error"))
	})
	exec := common.SQLWithRetry{
		DB:     g.db,
		Logger: log.FromContext(ctx),
	}
	// Here we use the `USE INDEX()` hint to skip fetch the record from index.
	// In Lightning, if previous importing is halted half-way, it is possible that
	// the data is partially imported, but the index data has not been imported.
	// In this situation, if no hint is added, the SQL executor might fetch the record from index,
	// which is empty. This will result in missing check.
	query := fmt.Sprintf("SELECT 1 FROM %s USE INDEX() LIMIT 1", common.UniqueTable(schemaName, tableName))
	var dummy int
	err := exec.QueryRow(ctx, "check table empty", query, &dummy)
	// A missing table is reported as "empty" rather than as an error.
	if mysqlErr, ok := errors.Cause(err).(*mysql_sql_driver.MySQLError); ok && mysqlErr.Number == errno.ErrNoSuchTable {
		empty := true
		return &empty, nil
	}
	// No row back means the table scan found nothing.
	if errors.ErrorEqual(err, sql.ErrNoRows) {
		empty := true
		return &empty, nil
	}
	if err != nil {
		return nil, errors.Trace(err)
	}
	// A row was returned, so the table has data.
	empty := false
	return &empty, nil
}
// GetTargetSysVariablesForImport gets some important system variables for importing on the target.
// It implements the TargetInfoGetter interface.
// It uses the SQL to fetch sys variables from the target.
func (g *TargetInfoGetterImpl) GetTargetSysVariablesForImport(ctx context.Context, _ ...ropts.GetPreInfoOption) map[string]string {
	sysVars := ObtainImportantVariables(ctx, g.db, !isTiDBBackend(g.cfg))
	// override by manually set vars: user-configured values take precedence
	// over the values fetched from the target.
	maps.Copy(sysVars, g.cfg.TiDB.Vars)
	return sysVars
}
// GetReplicationConfig gets the replication config on the target.
// It implements the TargetInfoGetter interface.
// It uses the PD interface through TLS to get the information.
func (g *TargetInfoGetterImpl) GetReplicationConfig(ctx context.Context) (*pdtypes.ReplicationConfig, error) {
	result := new(pdtypes.ReplicationConfig)
	// Pass the pointer itself rather than its address: result is already a
	// *pdtypes.ReplicationConfig. The previous `&result` handed GetJSON a
	// **T, which json unmarshalling tolerates but which was inconsistent
	// with GetStorageInfo.
	if err := g.tls.WithHost(g.pdCli.GetLeaderAddr()).GetJSON(ctx, pdReplicate, result); err != nil {
		return nil, errors.Trace(err)
	}
	return result, nil
}
// GetStorageInfo gets the storage information on the target.
// It implements the TargetInfoGetter interface.
// It uses the PD interface through TLS to get the information.
func (g *TargetInfoGetterImpl) GetStorageInfo(ctx context.Context) (*pdtypes.StoresInfo, error) {
	result := new(pdtypes.StoresInfo)
	if err := g.tls.WithHost(g.pdCli.GetLeaderAddr()).GetJSON(ctx, pdStores, result); err != nil {
		return nil, errors.Trace(err)
	}
	return result, nil
}
// GetEmptyRegionsInfo gets the region information of all the empty regions on the target.
// It implements the TargetInfoGetter interface.
// It uses the PD interface through TLS to get the information.
func (g *TargetInfoGetterImpl) GetEmptyRegionsInfo(ctx context.Context) (*pdtypes.RegionsInfo, error) {
	result := new(pdtypes.RegionsInfo)
	// Pass the pointer itself rather than its address (see GetStorageInfo):
	// result is already a *pdtypes.RegionsInfo, and `&result` created an
	// unnecessary **T.
	if err := g.tls.WithHost(g.pdCli.GetLeaderAddr()).GetJSON(ctx, pdEmptyRegions, result); err != nil {
		return nil, errors.Trace(err)
	}
	return result, nil
}
// PreImportInfoGetterImpl implements the operations to get information used in importing preparation.
type PreImportInfoGetterImpl struct {
	cfg *config.Config
	// getPreInfoCfg holds the default options; per-call options are applied
	// on a clone of it.
	getPreInfoCfg    *ropts.GetPreInfoConfig
	srcStorage       storage.ExternalStorage
	ioWorkers        *worker.Pool
	encBuilder       encode.EncodingBuilder
	targetInfoGetter TargetInfoGetter

	dbMetas []*mydump.MDDatabaseMeta
	// mdDBMetaMap and mdDBTableMetaMap are lookup indexes over dbMetas,
	// built by Init.
	mdDBMetaMap      map[string]*mydump.MDDatabaseMeta
	mdDBTableMetaMap map[string]map[string]*mydump.MDTableMeta

	// Caches populated lazily by the corresponding getters; refreshed when
	// the ForceReloadCache option is passed.
	dbInfosCache       map[string]*checkpoints.TidbDBInfo
	sysVarsCache       map[string]string
	estimatedSizeCache *EstimateSourceDataSizeResult
}
// NewPreImportInfoGetter creates a PreImportInfoGetterImpl object.
// Callers may pass nil for ioWorkers and encBuilder; suitable defaults are
// then derived from the configuration.
func NewPreImportInfoGetter(
	cfg *config.Config,
	dbMetas []*mydump.MDDatabaseMeta,
	srcStorage storage.ExternalStorage,
	targetInfoGetter TargetInfoGetter,
	ioWorkers *worker.Pool,
	encBuilder encode.EncodingBuilder,
	opts ...ropts.GetPreInfoOption,
) (*PreImportInfoGetterImpl, error) {
	// Fall back to a dedicated IO worker pool when none is supplied.
	if ioWorkers == nil {
		ioWorkers = worker.NewPool(context.Background(), cfg.App.IOConcurrency, "pre_info_getter_io")
	}
	// Likewise derive an encoding builder matching the configured backend.
	if encBuilder == nil {
		switch cfg.TikvImporter.Backend {
		case config.BackendTiDB:
			encBuilder = tidb.NewEncodingBuilder()
		case config.BackendLocal:
			encBuilder = local.NewEncodingBuilder(context.Background())
		default:
			return nil, common.ErrUnknownBackend.GenWithStackByArgs(cfg.TikvImporter.Backend)
		}
	}
	preInfoCfg := ropts.NewDefaultGetPreInfoConfig()
	for _, applyOpt := range opts {
		applyOpt(preInfoCfg)
	}
	getter := &PreImportInfoGetterImpl{
		cfg:              cfg,
		getPreInfoCfg:    preInfoCfg,
		dbMetas:          dbMetas,
		srcStorage:       srcStorage,
		ioWorkers:        ioWorkers,
		encBuilder:       encBuilder,
		targetInfoGetter: targetInfoGetter,
	}
	getter.Init()
	return getter, nil
}
// Init initializes some internal data and states for PreImportInfoGetterImpl.
// It builds two lookup indexes over dbMetas: database name -> database meta,
// and database name -> (table name -> table meta).
func (p *PreImportInfoGetterImpl) Init() {
	dbMap := make(map[string]*mydump.MDDatabaseMeta)
	tableMap := make(map[string]map[string]*mydump.MDTableMeta)
	for _, dbMeta := range p.dbMetas {
		name := dbMeta.Name
		dbMap[name] = dbMeta
		tables, exists := tableMap[name]
		if !exists {
			tables = make(map[string]*mydump.MDTableMeta)
			tableMap[name] = tables
		}
		for _, tblMeta := range dbMeta.Tables {
			tables[tblMeta.Name] = tblMeta
		}
	}
	p.mdDBMetaMap = dbMap
	p.mdDBTableMetaMap = tableMap
}
// GetAllTableStructures gets all the table structures with the information from both the source and the target.
// It implements the PreImportInfoGetter interface.
// It has a caching mechanism: the table structures will be obtained from the source only once,
// unless the ForceReloadCache option is passed.
func (p *PreImportInfoGetterImpl) GetAllTableStructures(ctx context.Context, opts ...ropts.GetPreInfoOption) (map[string]*checkpoints.TidbDBInfo, error) {
	cfg := p.getPreInfoCfg.Clone()
	for _, applyOpt := range opts {
		applyOpt(cfg)
	}
	// Serve from the cache unless the caller explicitly forces a reload.
	if cached := p.dbInfosCache; cached != nil && !cfg.ForceReloadCache {
		return cached, nil
	}
	dbInfos, err := LoadSchemaInfo(ctx, p.dbMetas, func(ctx context.Context, dbName string) ([]*model.TableInfo, error) {
		return p.getTableStructuresByFileMeta(ctx, p.mdDBMetaMap[dbName], cfg)
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	p.dbInfosCache = dbInfos
	return dbInfos, nil
}
// getTableStructuresByFileMeta obtains the table structures for one source
// database: tables already present on the target are taken from the fetched
// remote models, and the remaining ones are built from the CREATE TABLE
// statements stored in the source schema files.
func (p *PreImportInfoGetterImpl) getTableStructuresByFileMeta(ctx context.Context, dbSrcFileMeta *mydump.MDDatabaseMeta, getPreInfoCfg *ropts.GetPreInfoConfig) ([]*model.TableInfo, error) {
	dbName := dbSrcFileMeta.Name
	failpoint.Inject(
		"getTableStructuresByFileMeta_BeforeFetchRemoteTableModels",
		func(v failpoint.Value) {
			fmt.Println("failpoint: getTableStructuresByFileMeta_BeforeFetchRemoteTableModels")
			const defaultMilliSeconds int = 5000
			sleepMilliSeconds, ok := v.(int)
			if !ok || sleepMilliSeconds <= 0 || sleepMilliSeconds > 30000 {
				sleepMilliSeconds = defaultMilliSeconds
			}
			//nolint: errcheck
			failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/tidb/FetchRemoteTableModels_BeforeFetchTableAutoIDInfos", fmt.Sprintf("sleep(%d)", sleepMilliSeconds))
		},
	)
	currentTableInfosFromDB, err := p.targetInfoGetter.FetchRemoteTableModels(ctx, dbName)
	if err != nil {
		// A missing target DB may be tolerated (structures are then built
		// from the source only); every other fetch error is fatal. This
		// replaces the previous `goto get_struct_from_src` with a plain
		// fall-through flag, which is the more idiomatic shape in Go.
		ignorable := false
		if getPreInfoCfg != nil && getPreInfoCfg.IgnoreDBNotExist {
			dbNotExistErr := dbterror.ClassSchema.NewStd(errno.ErrBadDB).FastGenByArgs(dbName)
			// The returned error is an error showing get info request error,
			// and attaches the detailed error response as a string.
			// So we cannot get the error chain and use error comparison,
			// and instead, we use the string comparison on error messages.
			if strings.Contains(err.Error(), dbNotExistErr.Error()) {
				log.L().Warn("DB not exists. But ignore it", zap.Error(err))
				ignorable = true
			}
		}
		if !ignorable {
			return nil, errors.Trace(err)
		}
	}
	currentTableInfosMap := make(map[string]*model.TableInfo)
	for _, tblInfo := range currentTableInfosFromDB {
		currentTableInfosMap[tblInfo.Name.L] = tblInfo
	}
	resultInfos := make([]*model.TableInfo, len(dbSrcFileMeta.Tables))
	for i, tableFileMeta := range dbSrcFileMeta.Tables {
		// Prefer the structure already present on the target.
		if curTblInfo, ok := currentTableInfosMap[strings.ToLower(tableFileMeta.Name)]; ok {
			resultInfos[i] = curTblInfo
			continue
		}
		// Otherwise parse the CREATE TABLE statement from the source.
		createTblSQL, err := tableFileMeta.GetSchema(ctx, p.srcStorage)
		if err != nil {
			return nil, errors.Annotatef(err, "get create table statement from schema file error: %s", tableFileMeta.Name)
		}
		theTableInfo, err := newTableInfo(createTblSQL, 0)
		log.L().Info("generate table info from SQL", zap.Error(err), zap.String("sql", createTblSQL), zap.String("table_name", tableFileMeta.Name), zap.String("db_name", dbSrcFileMeta.Name))
		if err != nil {
			errMsg := "generate table info from SQL error"
			log.L().Error(errMsg, zap.Error(err), zap.String("sql", createTblSQL), zap.String("table_name", tableFileMeta.Name))
			return nil, errors.Annotatef(err, "%s: %s", errMsg, tableFileMeta.Name)
		}
		resultInfos[i] = theTableInfo
	}
	return resultInfos, nil
}
// newTableInfo builds a model.TableInfo from a CREATE TABLE statement,
// assigning it the given table ID and marking it public so it is immediately
// usable.
func newTableInfo(createTblSQL string, tableID int64) (*model.TableInfo, error) {
	// The local variable is named `p` (not `parser`) so it does not shadow
	// the imported parser package.
	p := parser.New()
	astNode, err := p.ParseOneStmt(createTblSQL, "", "")
	if err != nil {
		log.L().Error("parse sql statement error", zap.Error(err), zap.String("sql", createTblSQL))
		return nil, errors.Trace(err)
	}
	createTableStmt, ok := astNode.(*ast.CreateTableStmt)
	if !ok {
		return nil, errors.New("cannot transfer the parsed SQL as an CREATE TABLE statement")
	}
	sctx := mock.NewContext()
	info, err := ddl.MockTableInfo(sctx, createTableStmt, tableID)
	if err != nil {
		return nil, errors.Trace(err)
	}
	info.State = model.StatePublic
	return info, nil
}
// ReadFirstNRowsByTableName reads the first N rows of data of an importing source table.
// It implements the PreImportInfoGetter interface.
// The table's first data file is used as the sample source; a table with no
// data files yields nil columns and an empty row set.
func (p *PreImportInfoGetterImpl) ReadFirstNRowsByTableName(ctx context.Context, schemaName string, tableName string, n int) ([]string, [][]types.Datum, error) {
	tablesInSchema, found := p.mdDBTableMetaMap[schemaName]
	if !found {
		return nil, nil, errors.Errorf("cannot find the schema: %s", schemaName)
	}
	tableMeta, found := tablesInSchema[tableName]
	if !found {
		return nil, nil, errors.Errorf("cannot find the table: %s.%s", schemaName, tableName)
	}
	if len(tableMeta.DataFiles) <= 0 {
		return nil, [][]types.Datum{}, nil
	}
	return p.ReadFirstNRowsByFileMeta(ctx, tableMeta.DataFiles[0].FileMeta, n)
}
// ReadFirstNRowsByFileMeta reads the first N rows of an data file.
// It implements the PreImportInfoGetter interface.
// A parser matching the file type (CSV / SQL dump / Parquet) is chosen; the
// read stops early at EOF, so fewer than n rows may be returned.
func (p *PreImportInfoGetterImpl) ReadFirstNRowsByFileMeta(ctx context.Context, dataFileMeta mydump.SourceFileMeta, n int) ([]string, [][]types.Datum, error) {
	reader, err := mydump.OpenReader(ctx, &dataFileMeta, p.srcStorage)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	var parser mydump.Parser
	blockBufSize := int64(p.cfg.Mydumper.ReadBlockSize)
	switch dataFileMeta.Type {
	case mydump.SourceTypeCSV:
		hasHeader := p.cfg.Mydumper.CSV.Header
		// Create a utf8mb4 convertor to encode and decode data with the charset of CSV files.
		charsetConvertor, err := mydump.NewCharsetConvertor(p.cfg.Mydumper.DataCharacterSet, p.cfg.Mydumper.DataInvalidCharReplace)
		if err != nil {
			return nil, nil, errors.Trace(err)
		}
		parser, err = mydump.NewCSVParser(ctx, &p.cfg.Mydumper.CSV, reader, blockBufSize, p.ioWorkers, hasHeader, charsetConvertor)
		if err != nil {
			return nil, nil, errors.Trace(err)
		}
	case mydump.SourceTypeSQL:
		parser = mydump.NewChunkParser(ctx, p.cfg.TiDB.SQLMode, reader, blockBufSize, p.ioWorkers)
	case mydump.SourceTypeParquet:
		parser, err = mydump.NewParquetParser(ctx, p.srcStorage, reader, dataFileMeta.Path)
		if err != nil {
			return nil, nil, errors.Trace(err)
		}
	default:
		// NOTE(review): on this panic (and on the error returns above)
		// `reader` is never closed; parser.Close below presumably closes it
		// on the happy path — confirm and consider closing reader on the
		// failure paths as well.
		panic(fmt.Sprintf("unknown file type '%s'", dataFileMeta.Type))
	}
	//nolint: errcheck
	defer parser.Close()
	rows := [][]types.Datum{}
	for i := 0; i < n; i++ {
		err := parser.ReadRow()
		if err != nil {
			// EOF just means the file has fewer than n rows; anything else
			// is a real read error.
			if errors.Cause(err) != io.EOF {
				return nil, nil, errors.Trace(err)
			}
			break
		}
		// Copy the datums out of LastRow; its backing storage may be reused
		// by subsequent reads (cf. parser.RecycleRow usage elsewhere).
		lastRowDatums := append([]types.Datum{}, parser.LastRow().Row...)
		rows = append(rows, lastRowDatums)
	}
	return parser.Columns(), rows, nil
}
// EstimateSourceDataSize estimates the data size to generate during the import as well as some other sub-information.
// It implements the PreImportInfoGetter interface.
// It has a cache mechanism. The estimated size will only be calculated once.
// The caching behavior can be changed by appending the `ForceReloadCache(true)` option.
func (p *PreImportInfoGetterImpl) EstimateSourceDataSize(ctx context.Context, opts ...ropts.GetPreInfoOption) (*EstimateSourceDataSizeResult, error) {
	var result *EstimateSourceDataSizeResult
	getPreInfoCfg := p.getPreInfoCfg.Clone()
	for _, o := range opts {
		o(getPreInfoCfg)
	}
	// Serve from the cache unless a reload is forced.
	result = p.estimatedSizeCache
	if result != nil && !getPreInfoCfg.ForceReloadCache {
		return result, nil
	}
	var (
		sizeWithIndex         = int64(0)
		tiflashSize           = int64(0)
		sourceTotalSize       = int64(0)
		tableCount            = 0
		unSortedBigTableCount = 0
		errMgr                = errormanager.New(nil, p.cfg, log.FromContext(ctx))
	)
	dbInfos, err := p.GetAllTableStructures(ctx)
	if err != nil {
		return nil, errors.Trace(err)
	}
	sysVars := p.GetTargetSysVariablesForImport(ctx)
	for _, db := range p.dbMetas {
		info, ok := dbInfos[db.Name]
		if !ok {
			continue
		}
		for _, tbl := range db.Tables {
			sourceTotalSize += tbl.TotalSize
			tableInfo, ok := info.Tables[tbl.Name]
			if ok {
				tableSize := tbl.TotalSize
				// Do not sample small tables because there may be a large number of them,
				// and it would take a long time to sample data for all of them.
				if isTiDBBackend(p.cfg) || tbl.TotalSize < int64(config.SplitRegionSize) {
					tbl.IndexRatio = 1.0
					tbl.IsRowOrdered = false
				} else {
					// For big tables under the local backend, sample the data to
					// learn the index-size ratio and whether rows arrive ordered.
					sampledIndexRatio, isRowOrderedFromSample, err := p.sampleDataFromTable(ctx, db.Name, tbl, tableInfo.Core, errMgr, sysVars)
					if err != nil {
						return nil, errors.Trace(err)
					}
					tbl.IndexRatio = sampledIndexRatio
					tbl.IsRowOrdered = isRowOrderedFromSample
					tableSize = int64(float64(tbl.TotalSize) * tbl.IndexRatio)
					if tbl.TotalSize > int64(config.DefaultBatchSize)*2 && !tbl.IsRowOrdered {
						unSortedBigTableCount++
					}
				}
				sizeWithIndex += tableSize
				// Account for tiflash replicas of the table, when available.
				if tableInfo.Core.TiFlashReplica != nil && tableInfo.Core.TiFlashReplica.Available {
					tiflashSize += tableSize * int64(tableInfo.Core.TiFlashReplica.Count)
				}
				tableCount++
			}
		}
	}
	// The local backend writes compressed SSTs, so scale by the assumed
	// compression ratio.
	if isLocalBackend(p.cfg) {
		sizeWithIndex = int64(float64(sizeWithIndex) * compressionRatio)
		tiflashSize = int64(float64(tiflashSize) * compressionRatio)
	}
	result = &EstimateSourceDataSizeResult{
		SizeWithIndex:        sizeWithIndex,
		SizeWithoutIndex:     sourceTotalSize,
		HasUnsortedBigTables: (unSortedBigTableCount > 0),
		TiFlashSize:          tiflashSize,
	}
	p.estimatedSizeCache = result
	return result, nil
}
// sampleDataFromTable samples the source data file to get the extra data ratio for the index.
// It returns:
// * the extra data ratio with index size accounted
// * whether the sampled data is ordered by row
func (p *PreImportInfoGetterImpl) sampleDataFromTable(
	ctx context.Context,
	dbName string,
	tableMeta *mydump.MDTableMeta,
	tableInfo *model.TableInfo,
	errMgr *errormanager.ErrorManager,
	sysVars map[string]string,
) (float64, bool, error) {
	resultIndexRatio := 1.0
	isRowOrdered := false
	// Nothing to sample for a table without data files.
	if len(tableMeta.DataFiles) == 0 {
		return resultIndexRatio, isRowOrdered, nil
	}
	sampleFile := tableMeta.DataFiles[0].FileMeta
	reader, err := mydump.OpenReader(ctx, &sampleFile, p.srcStorage)
	if err != nil {
		return 0.0, false, errors.Trace(err)
	}
	idAlloc := kv.NewPanickingAllocators(0)
	tbl, err := tables.TableFromMeta(idAlloc, tableInfo)
	if err != nil {
		return 0.0, false, errors.Trace(err)
	}
	logger := log.FromContext(ctx).With(zap.String("table", tableMeta.Name))
	kvEncoder, err := p.encBuilder.NewEncoder(ctx, &encode.EncodingConfig{
		SessionOptions: encode.SessionOptions{
			SQLMode:        p.cfg.TiDB.SQLMode,
			Timestamp:      0,
			SysVars:        sysVars,
			AutoRandomSeed: 0,
		},
		Table:  tbl,
		Logger: logger,
	})
	if err != nil {
		return 0.0, false, errors.Trace(err)
	}
	blockBufSize := int64(p.cfg.Mydumper.ReadBlockSize)
	var parser mydump.Parser
	switch tableMeta.DataFiles[0].FileMeta.Type {
	case mydump.SourceTypeCSV:
		hasHeader := p.cfg.Mydumper.CSV.Header
		// Create a utf8mb4 convertor to encode and decode data with the charset of CSV files.
		charsetConvertor, err := mydump.NewCharsetConvertor(p.cfg.Mydumper.DataCharacterSet, p.cfg.Mydumper.DataInvalidCharReplace)
		if err != nil {
			return 0.0, false, errors.Trace(err)
		}
		parser, err = mydump.NewCSVParser(ctx, &p.cfg.Mydumper.CSV, reader, blockBufSize, p.ioWorkers, hasHeader, charsetConvertor)
		if err != nil {
			return 0.0, false, errors.Trace(err)
		}
	case mydump.SourceTypeSQL:
		parser = mydump.NewChunkParser(ctx, p.cfg.TiDB.SQLMode, reader, blockBufSize, p.ioWorkers)
	case mydump.SourceTypeParquet:
		parser, err = mydump.NewParquetParser(ctx, p.srcStorage, reader, sampleFile.Path)
		if err != nil {
			return 0.0, false, errors.Trace(err)
		}
	default:
		panic(fmt.Sprintf("file '%s' with unknown source type '%s'", sampleFile.Path, sampleFile.Type.String()))
	}
	//nolint: errcheck
	defer parser.Close()
	// NOTE(review): no matching End/completion call for this Begin is visible
	// in this function — confirm whether the task log is intentionally left open.
	logger.Begin(zap.InfoLevel, "sample file")
	igCols, err := p.cfg.Mydumper.IgnoreColumns.GetIgnoreColumns(dbName, tableMeta.Name, p.cfg.Mydumper.CaseSensitive)
	if err != nil {
		return 0.0, false, errors.Trace(err)
	}
	initializedColumns := false
	var (
		columnPermutation []int
		kvSize            uint64
		rowSize           uint64
		extendVals        []types.Datum
	)
	rowCount := 0
	dataKVs := p.encBuilder.MakeEmptyRows()
	indexKVs := p.encBuilder.MakeEmptyRows()
	lastKey := make([]byte, 0)
	isRowOrdered = true
outloop:
	for {
		offset, _ := parser.Pos()
		err = parser.ReadRow()
		columnNames := parser.Columns()
		switch errors.Cause(err) {
		case nil:
			// Lazily set up the column permutation (and extend-column values)
			// on the first successfully read row.
			if !initializedColumns {
				ignoreColsMap := igCols.ColumnsMap()
				if len(columnPermutation) == 0 {
					columnPermutation, err = createColumnPermutation(
						columnNames,
						ignoreColsMap,
						tableInfo,
						log.FromContext(ctx))
					if err != nil {
						return 0.0, false, errors.Trace(err)
					}
				}
				if len(sampleFile.ExtendData.Columns) > 0 {
					_, extendVals = filterColumns(columnNames, sampleFile.ExtendData, ignoreColsMap, tableInfo)
				}
				initializedColumns = true
				lastRow := parser.LastRow()
				lastRowLen := len(lastRow.Row)
				extendColsMap := make(map[string]int)
				for i, c := range sampleFile.ExtendData.Columns {
					extendColsMap[c] = lastRowLen + i
				}
				for i, col := range tableInfo.Columns {
					if p, ok := extendColsMap[col.Name.O]; ok {
						columnPermutation[i] = p
					}
				}
			}
		case io.EOF:
			break outloop
		default:
			err = errors.Annotatef(err, "in file offset %d", offset)
			return 0.0, false, errors.Trace(err)
		}
		lastRow := parser.LastRow()
		rowCount++
		lastRow.Row = append(lastRow.Row, extendVals...)
		var dataChecksum, indexChecksum verification.KVChecksum
		kvs, encodeErr := kvEncoder.Encode(lastRow.Row, lastRow.RowID, columnPermutation, offset)
		if encodeErr != nil {
			// Type errors are recorded (best-effort) and the row is skipped;
			// only unrecordable errors abort the sampling.
			encodeErr = errMgr.RecordTypeError(ctx, log.FromContext(ctx), tableInfo.Name.O, sampleFile.Path, offset,
				"" /* use a empty string here because we don't actually record */, encodeErr)
			if encodeErr != nil {
				return 0.0, false, errors.Annotatef(encodeErr, "in file at offset %d", offset)
			}
			if rowCount < maxSampleRowCount {
				continue
			}
			break
		}
		// Track whether the encoded data keys keep arriving in ascending
		// order; one out-of-order key settles the question.
		if isRowOrdered {
			kvs.ClassifyAndAppend(&dataKVs, &dataChecksum, &indexKVs, &indexChecksum)
			for _, kv := range kv.Rows2KvPairs(dataKVs) {
				if len(lastKey) == 0 {
					lastKey = kv.Key
				} else if bytes.Compare(lastKey, kv.Key) > 0 {
					isRowOrdered = false
					break
				}
			}
			dataKVs = dataKVs.Clear()
			indexKVs = indexKVs.Clear()
		}
		kvSize += kvs.Size()
		rowSize += uint64(lastRow.Length)
		parser.RecycleRow(lastRow)
		failpoint.Inject("mock-kv-size", func(val failpoint.Value) {
			kvSize += uint64(val.(int))
		})
		// Stop after enough bytes or rows have been sampled.
		if rowSize > maxSampleDataSize || rowCount > maxSampleRowCount {
			break
		}
	}
	if rowSize > 0 && kvSize > rowSize {
		resultIndexRatio = float64(kvSize) / float64(rowSize)
	}
	// Fix: log the values computed by this sampling run. The previous code
	// logged tableMeta.IndexRatio / tableMeta.IsRowOrdered, which are only
	// assigned by the caller after this function returns, so the log always
	// reported stale values.
	log.FromContext(ctx).Info("Sample source data", zap.String("table", tableMeta.Name), zap.Float64("IndexRatio", resultIndexRatio), zap.Bool("IsSourceOrder", isRowOrdered))
	return resultIndexRatio, isRowOrdered, nil
}
// GetReplicationConfig gets the replication config on the target.
// It implements the PreImportInfoGetter interface.
// The call is delegated to the underlying TargetInfoGetter.
func (p *PreImportInfoGetterImpl) GetReplicationConfig(ctx context.Context) (*pdtypes.ReplicationConfig, error) {
	return p.targetInfoGetter.GetReplicationConfig(ctx)
}
// GetStorageInfo gets the storage information on the target.
// It implements the PreImportInfoGetter interface.
// The call is delegated to the underlying TargetInfoGetter.
func (p *PreImportInfoGetterImpl) GetStorageInfo(ctx context.Context) (*pdtypes.StoresInfo, error) {
	return p.targetInfoGetter.GetStorageInfo(ctx)
}
// GetEmptyRegionsInfo gets the region information of all the empty regions on the target.
// It implements the PreImportInfoGetter interface.
// The call is delegated to the underlying TargetInfoGetter.
func (p *PreImportInfoGetterImpl) GetEmptyRegionsInfo(ctx context.Context) (*pdtypes.RegionsInfo, error) {
	return p.targetInfoGetter.GetEmptyRegionsInfo(ctx)
}
// IsTableEmpty checks whether the specified table on the target DB contains data or not.
// It implements the PreImportInfoGetter interface.
// The call is delegated to the underlying TargetInfoGetter.
func (p *PreImportInfoGetterImpl) IsTableEmpty(ctx context.Context, schemaName string, tableName string) (*bool, error) {
	return p.targetInfoGetter.IsTableEmpty(ctx, schemaName, tableName)
}
// FetchRemoteTableModels fetches the table structures from the remote target.
// It implements the PreImportInfoGetter interface.
// The call is delegated to the underlying TargetInfoGetter.
func (p *PreImportInfoGetterImpl) FetchRemoteTableModels(ctx context.Context, schemaName string) ([]*model.TableInfo, error) {
	return p.targetInfoGetter.FetchRemoteTableModels(ctx, schemaName)
}
// CheckVersionRequirements performs the check whether the target satisfies the version requirements.
// It implements the PreImportInfoGetter interface.
// Mydump database metas are retrieved from the context.
// The call is delegated to the underlying TargetInfoGetter.
func (p *PreImportInfoGetterImpl) CheckVersionRequirements(ctx context.Context) error {
	return p.targetInfoGetter.CheckVersionRequirements(ctx)
}
// GetTargetSysVariablesForImport gets some important system variables for importing on the target.
// It implements the PreImportInfoGetter interface.
// It has a caching mechanism: the variables are fetched from the target only
// once, unless the ForceReloadCache option is passed.
func (p *PreImportInfoGetterImpl) GetTargetSysVariablesForImport(ctx context.Context, opts ...ropts.GetPreInfoOption) map[string]string {
	cfg := p.getPreInfoCfg.Clone()
	for _, applyOpt := range opts {
		applyOpt(cfg)
	}
	if cached := p.sysVarsCache; cached != nil && !cfg.ForceReloadCache {
		return cached
	}
	sysVars := p.targetInfoGetter.GetTargetSysVariablesForImport(ctx)
	p.sysVarsCache = sysVars
	return sysVars
}
|
/*
Package logger handles application logging
*/
package logger
import (
"context"
"errors"
"fmt"
"github.com/sirupsen/logrus"
"net/url"
"os"
"strings"
)
const (
	// TraceLevel represents the TRACE logging level
	TraceLevel = "trace"
	// DebugLevel represents the DEBUG logging level
	DebugLevel = "debug"
	// InfoLevel represents the INFO logging level
	InfoLevel = "info"
	// WarnLevel represents the WARN logging level
	WarnLevel = "warn"
	// ErrorLevel represents the ERROR logging level
	ErrorLevel = "error"
	// FatalLevel represents the FATAL logging level
	FatalLevel = "fatal"
	// PanicLevel represents the PANIC logging level
	PanicLevel = "panic"

	// JSONFormat represents the JSON logging format
	JSONFormat = "json"
	// TextFormat represents the text logging format
	TextFormat = "text"
	// ECSFormat represents the Elastic Common Schema (ECS) JSON logging format
	ECSFormat = "elastic"
)
var (
	// rootLogger is the singleton application logger; lazily created by Root()
	rootLogger *logrus.Logger
	// rootLoggerFields is the map of fields to include in every log message
	rootLoggerFields = make(logrus.Fields)
)
// SetFormat configures the logger message format. The format name is
// whitespace-trimmed and case-insensitive; unknown names are reported via the
// logger itself and leave the current formatter untouched.
func SetFormat(format string) {
	normalized := strings.TrimSpace(strings.ToLower(format))
	switch normalized {
	case ECSFormat:
		// ECS output is JSON with ECS-compatible field names, plus a hook
		// that supplies the remaining ECS fields.
		formatter := &logrus.JSONFormatter{
			FieldMap: logrus.FieldMap{
				logrus.FieldKeyTime: "@timestamp",
				logrus.FieldKeyMsg:  "message",
			},
		}
		Root().SetFormatter(formatter)
		Root().AddHook(NewElasticHook())
	case JSONFormat:
		Root().SetFormatter(&logrus.JSONFormatter{})
	case TextFormat:
		Root().SetFormatter(&logrus.TextFormatter{FullTimestamp: true})
	default:
		Root().WithField("function", "SetFormat").Errorf("unknown format: %s", normalized)
	}
}
// SetLevel configures the logger logging level
func SetLevel(level string) {
levelStr := strings.TrimSpace(strings.ToLower(level))
switch levelStr {
case TraceLevel:
Root().SetLevel(logrus.TraceLevel)
case DebugLevel:
Root().SetLevel(logrus.DebugLevel)
case InfoLevel:
Root().SetLevel(logrus.InfoLevel)
case WarnLevel:
Root().SetLevel(logrus.WarnLevel)
case ErrorLevel:
Root().SetLevel(logrus.ErrorLevel)
case FatalLevel:
Root().SetLevel(logrus.FatalLevel)
case PanicLevel:
Root().SetLevel(logrus.PanicLevel)
default:
Root().WithField("function", "SetLevel").Errorf("unknown level: %s", level)
}
}
// Log returns a logrus FieldLogger including the root fields and an optional context.
// Arguments are scanned for a context.Context; when one is present, the
// returned entry carries it.
func Log(args ...interface{}) logrus.FieldLogger {
	// Use Root() instead of reading rootLogger directly so the logger is
	// lazily initialized even when Log is the first call into this package
	// (previously this dereferenced a nil rootLogger and panicked).
	logger := Root()
	var entry *logrus.Entry
	for _, arg := range args {
		switch obj := arg.(type) {
		case context.Context:
			entry = logger.WithContext(obj)
		}
	}
	if entry == nil {
		entry = logger.WithFields(rootLoggerFields)
	} else {
		entry = entry.WithFields(rootLoggerFields)
	}
	return entry
}
// ErrorfAsError logs an Error message to the supplied logger and then returns a
// new error object initialized with the message. The message is formatted with
// fmt.Sprintf() before passing to the logger and the error object.
func ErrorfAsError(log logrus.FieldLogger, format string, args ...interface{}) error {
	msg := fmt.Sprintf(format, args...)
	log.Error(msg)
	return errors.New(msg)
}
// SanitizedURLString returns a parsed URL string with user credentials removed.
// Only the password is stripped; the username is preserved. When the input
// cannot be parsed as a URL, it is returned unchanged (and the failure is logged).
func SanitizedURLString(urlWithCreds string) string {
	logEntry := Log().WithField("function", "SanitizedURLString")
	parsed, err := url.Parse(urlWithCreds)
	if err != nil {
		logEntry.Errorf("unable to clone url: %s", err)
		return urlWithCreds
	}
	if parsed.User != nil {
		// Rebuild the userinfo with the username only, dropping any password.
		parsed.User = url.User(parsed.User.Username())
	}
	return parsed.String()
}
|
package main
import (
"crypto/tls"
"log"
"net"
"time"
"gopkg.in/mgo.v2"
)
var dbSession *mgo.Session
var db *mgo.Database
// initDb initialises the dbSession and db variables with a new mongodb session
// and the default database. A non-nil error means neither variable is usable.
func initDb() (err error) {
	dbInfo := &mgo.DialInfo{
		Addrs:          config.DB.Addrs,
		ReplicaSetName: config.DB.ReplicaSetName,
		Database:       config.DB.AuthDB,
		Username:       config.DB.Username,
		Password:       config.DB.Password,
		Timeout:        time.Second * 5,
	}
	// Dial every server over TLS.
	// NOTE(review): an empty tls.Config relies on system root CAs with no
	// ServerName override — confirm the mongo hosts present certificates
	// matching their dial addresses.
	dbInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
		return tls.Dial("tcp", addr.String(), &tls.Config{})
	}
	dbSession, err = mgo.DialWithInfo(dbInfo)
	if err != nil {
		// Return early: the previous code fell through and dereferenced the
		// nil dbSession below, panicking on any dial failure.
		return err
	}
	log.Println("successfully connected to the database")
	db = dbSession.DB(config.DB.AppDB)
	return nil
}
|
package schema
import (
"crypto/tls"
"net/mail"
"net/url"
"time"
)
// Notifier represents the configuration of the notifier to use when sending notifications to users.
// Exactly one of FileSystem or SMTP is expected to be set by the operator.
type Notifier struct {
	DisableStartupCheck bool `koanf:"disable_startup_check" json:"disable_startup_check" jsonschema:"default=false,title=Disable Startup Check" jsonschema_description:"Disables the notifier startup checks"`
	FileSystem *NotifierFileSystem `koanf:"filesystem" json:"filesystem" jsonschema:"title=File System" jsonschema_description:"The File System notifier"`
	SMTP *NotifierSMTP `koanf:"smtp" json:"smtp" jsonschema:"title=SMTP" jsonschema_description:"The SMTP notifier"`
	TemplatePath string `koanf:"template_path" json:"template_path" jsonschema:"title=Template Path" jsonschema_description:"The path for notifier template overrides"`
}
// NotifierFileSystem represents the configuration of the notifier writing emails in a file.
type NotifierFileSystem struct {
	Filename string `koanf:"filename" json:"filename" jsonschema:"title=Filename" jsonschema_description:"The file path of the notifications"`
}
// NotifierSMTP represents the configuration of the SMTP server to send emails with.
type NotifierSMTP struct {
	Address *AddressSMTP `koanf:"address" json:"address" jsonschema:"default=smtp://localhost:25,title=Address" jsonschema_description:"The SMTP server address"`
	Timeout time.Duration `koanf:"timeout" json:"timeout" jsonschema:"default=5 seconds,title=Timeout" jsonschema_description:"The SMTP server connection timeout"`
	Username string `koanf:"username" json:"username" jsonschema:"title=Username" jsonschema_description:"The username for SMTP authentication"`
	Password string `koanf:"password" json:"password" jsonschema:"title=Password" jsonschema_description:"The password for SMTP authentication"`
	Identifier string `koanf:"identifier" json:"identifier" jsonschema:"default=localhost,title=Identifier" jsonschema_description:"The identifier used during the HELO/EHLO command"`
	Sender mail.Address `koanf:"sender" json:"sender" jsonschema:"title=Sender" jsonschema_description:"The sender used for SMTP"`
	Subject string `koanf:"subject" json:"subject" jsonschema:"default=[Authelia] {title},title=Subject" jsonschema_description:"The subject format used"`
	StartupCheckAddress mail.Address `koanf:"startup_check_address" json:"startup_check_address" jsonschema:"default=Authelia Test <test@authelia.com>,title=Startup Check Address" jsonschema_description:"The address used for the recipient in the startup check"`
	DisableRequireTLS bool `koanf:"disable_require_tls" json:"disable_require_tls" jsonschema:"default=false,title=Disable Require TLS" jsonschema_description:"Disables the requirement to use TLS"`
	DisableHTMLEmails bool `koanf:"disable_html_emails" json:"disable_html_emails" jsonschema:"default=false,title=Disable HTML Emails" jsonschema_description:"Disables the mixed content type of emails and only sends the plaintext version"`
	DisableStartTLS bool `koanf:"disable_starttls" json:"disable_starttls" jsonschema:"default=false,title=Disable StartTLS" jsonschema_description:"Disables the opportunistic StartTLS functionality which is useful for bad SMTP servers which advertise support for it but don't actually support it'"`
	TLS *TLS `koanf:"tls" json:"tls" jsonschema:"title=TLS" jsonschema_description:"The SMTP server TLS connection properties"`
	// Deprecated: use address instead.
	Host string `koanf:"host" json:"host" jsonschema:"deprecated"`
	// Deprecated: use address instead.
	Port int `koanf:"port" json:"port" jsonschema:"deprecated"`
}
// DefaultSMTPNotifierConfiguration represents default configuration parameters for the SMTP notifier.
// The defaults here mirror the jsonschema defaults declared on NotifierSMTP above.
var DefaultSMTPNotifierConfiguration = NotifierSMTP{
	Address: &AddressSMTP{Address{true, false, -1, 25, &url.URL{Scheme: AddressSchemeSMTP, Host: "localhost:25"}}},
	Timeout: time.Second * 5,
	Subject: "[Authelia] {title}",
	Identifier: "localhost",
	StartupCheckAddress: mail.Address{Name: "Authelia Test", Address: "test@authelia.com"},
	TLS: &TLS{
		MinimumVersion: TLSVersion{tls.VersionTLS12},
	},
}
|
package graphicgo
import (
"errors"
)
// Pen widths accepted by DrawDot (and propagated by DrawLine/DrawCircle).
const (
	Slim = iota // a single pixel
	Middle      // a 5-pixel plus shape
	Bold        // a 13-pixel diamond
)
// abs returns the absolute value of x.
//
// Note: the result overflows for math.MinInt64, whose negation cannot be
// represented in an int64.
func abs(x int64) int64 {
	// The previous version declared a named result shadowing the function
	// name and never used it; a plain unnamed result is clearer.
	if x < 0 {
		return -x
	}
	return x
}
// DrawDot draws a dot at (x, y) with the given color and pen width (Slim,
// Middle or Bold). An error is returned for an unknown width.
func DrawDot(x int64, y int64, color [4]byte, width int) (err error) {
	switch width {
	case Slim:
		dot(x, y, color)
	case Middle:
		// 5-pixel plus shape.
		dot(x-1, y, color)
		dot(x, y, color)
		dot(x+1, y, color)
		dot(x, y-1, color)
		dot(x, y+1, color)
	case Bold:
		// 13-pixel diamond. The final call was previously a duplicate of
		// dot(x+2, y) instead of dot(x, y+2), leaving the diamond asymmetric
		// (no pixel two rows below the center).
		dot(x, y-2, color)
		dot(x-1, y-1, color)
		dot(x, y-1, color)
		dot(x+1, y-1, color)
		dot(x-2, y, color)
		dot(x-1, y, color)
		dot(x, y, color)
		dot(x+1, y, color)
		dot(x+2, y, color)
		dot(x-1, y+1, color)
		dot(x, y+1, color)
		dot(x+1, y+1, color)
		dot(x, y+2, color)
	default:
		return errors.New("width type not found")
	}
	return nil
}
// DrawLine draws a line from (x1, y1) to (x2, y2) using Bresenham's integer
// line algorithm, plotting each point with DrawDot at the given width.
//
// NOTE(review): the loop updates the error term before plotting and stops when
// cx reaches x2, so the exact endpoint pixels differ slightly from textbook
// Bresenham — confirm this matches the intended rendering.
func DrawLine(x1 int64, y1 int64, x2 int64, y2 int64, color [4]byte, width int) {
	var dx int64 = abs(x2 - x1)
	var dy int64 = abs(y2 - y1)
	// For steep lines (slope > 45 degrees) swap the axes so the loop can
	// always step along x; the swap is undone when plotting.
	greater_than_45 := false
	if dx < dy {
		greater_than_45 = true
		x1, y1 = y1, x1
		x2, y2 = y2, x2
		dx, dy = dy, dx
	}
	// Step direction along each axis.
	var ix, iy int64 = 1, 1
	if x2-x1 < 0 {
		ix = -1
	}
	if y2-y1 < 0 {
		iy = -1
	}
	cx := x1
	cy := y1
	// Incremental error terms (pure integer arithmetic).
	n2dy := dy * 2
	n2DyDx := (dy - dx) * 2
	d := dy*2 - dx
	for cx != x2 {
		if d < 0 {
			d += n2dy
		} else {
			cy += iy
			d += n2DyDx
		}
		// Undo the axis swap for steep lines before plotting.
		if greater_than_45 {
			DrawDot(cy, cx, color, width)
		} else {
			DrawDot(cx, cy, color, width)
		}
		cx += ix
	}
}
// drawCircle8 plots the eight octant-symmetric images of the offset (x, y)
// around the circle center (xc, yc).
func drawCircle8(xc int64, yc int64, x int64, y int64, color [4]byte, width int) {
	offsets := [8][2]int64{
		{x, -y}, {-x, y}, {-x, -y}, {x, y},
		{y, x}, {-y, x}, {y, -x}, {-y, -x},
	}
	for _, off := range offsets {
		DrawDot(xc+off[0], yc+off[1], color, width)
	}
}
// DrawCircle draws a circle of radius r centered at (xc, yc) using the
// midpoint circle algorithm. When fill is true the interior is covered by
// plotting every symmetric point between x and the current y in each step.
func DrawCircle(xc int64, yc int64, r int64, color [4]byte, width int, fill bool) {
	// Skip circles whose bounding box lies entirely off screen.
	if xc+r < 0 ||
		xc-r >= screenWidth ||
		yc+r < 0 ||
		yc-r >= screenHeight {
		return
	}
	var x int64 = 0
	var y int64 = r
	// Midpoint decision variable.
	var d int64 = 3 - 2*r
	var yi int64
	for x <= y {
		if fill {
			// Fill the band between the diagonal and the octant point.
			for yi = x; yi <= y; yi++ {
				drawCircle8(xc, yc, x, yi, color, width)
			}
		} else {
			drawCircle8(xc, yc, x, y, color, width)
		}
		if d < 0 {
			d = d + 4*x + 6
		} else {
			d = d + 4*(x-y) + 10
			y--
		}
		x++
	}
}
|
//sandbox is command line interface for the Sandbox without docker wrapped.
// Example:
// compile before running
// sandbox --lang=c -c -s src/main.c -b bin/main --memory=10000 --time=1000 --input=judge/input --output==judge/output
// running without compiling
// sandbox --lang=c -b bin/main -i judge/input -o judge/output
// if input or output not set, use /dev/null instead
// sandbox --lang=c -b bin/main
// result:
// output fllows the order below,if result is wrong answer,5th argument will be attached.
// status:time:memory:times:wrong_answer
package main
import (
"bytes"
"fmt"
"io"
"log"
"os"
"path"
"github.com/pjudge/sandbox"
"github.com/codegangsta/cli"
)
//render more information
// Flag names, supported languages, the test-case delimiter and the maximum
// number of output bytes kept for a wrong-answer report.
const (
	BINARY = "binary"
	COMPILE = "compile"
	SOURCE = "source"
	TIME = "time"
	MEMORY = "memory"
	INPUT = "input"
	OUTPUT = "output"
	LANG = "lang"
	C = "c"
	CPP = "cpp"
	GO = "go"
	DELIM = "!-_-\n" // separates individual test cases inside input/output files
	OUTPUT_LIMIT = 255
)
// panicErr logs e when it is non-nil.
//
// NOTE(review): despite its name this helper only logs — it neither panics
// nor exits, so callers continue running with possibly-invalid state.
func panicErr(e error) {
	if e != nil {
		log.Println(e)
	}
}
// readFile reads the whole file into memory and returns its bytes. Read
// errors (including EOF) terminate the loop; whatever was read so far is
// returned.
func readFile(f *os.File) (testOut []byte) {
	tmp := make([]byte, 256)
	for {
		n, err := f.Read(tmp)
		testOut = append(testOut, tmp[:n]...)
		if err == io.EOF {
			break
		}
		if err != nil {
			// The previous loop only stopped on io.EOF, so a persistent
			// non-EOF read error spun forever; bail out instead.
			break
		}
	}
	return testOut
}
// checkStatus reports abnormal process results: obj records the run and nTh is
// the nth test (0 means all tests passed). For memory-limit, time-limit and
// runtime errors a status line is printed and true is returned.
func checkStatus(obj *sandbox.RunningObject, nTh int) (hasErr bool) {
	var label string
	switch obj.Status {
	case sandbox.MLE:
		label = "ML"
	case sandbox.TLE:
		label = "TL"
	case sandbox.RE:
		label = "RE"
	default:
		return false
	}
	fmt.Printf(label+":%d:%d:%d:", obj.Memory, obj.Time, nTh)
	return true
}
// main wires up the CLI, optionally compiles the source, runs the target
// binary against every test case and prints a single
// "status:time:memory:times[:wrong_answer]" result line.
func main() {
	app := cli.NewApp()
	app.Name = "sandbox"
	app.Usage = `test untrusted source code'
	example:
	compile before running with -c option
		sandbox --lang=c -c -s src/main.c -b bin/main --memory=10000 --time=1000 --input=judge/input --output=judge/output
	running without compile
		sandbox --lang=c -b bin/main -i judge/input -o judge/output
	if input or output not set, use /dev/null instead
		sandbox --lang=c -b bin/main
	note: input file and output file is splited by flag "!-_-\n"
	result:
	output fllows the format,if result is wrong answer,5th argument will be attached.
	do not gurantee no more ':' appears
	status(error or AC):time(MS):memory(KB):times(int):wrong_answer(string)`
	app.Author = "pjudge"
	app.Version = "0.0.3"
	app.Email = "gaopeg01@gmail.com"
	app.Flags = []cli.Flag{
		cli.StringFlag{Name: "lang,l", Value: "c,cpp,go",
			Usage: "source code languge"},
		cli.IntFlag{Name: "time,t", Value: 1000,
			Usage: "time limit in MS"},
		cli.IntFlag{Name: "memory,m", Value: 10000,
			Usage: "memory limit in KB"},
		cli.BoolFlag{Name: "compile,c",
			Usage: "wether complie before running", EnvVar: ""},
		cli.StringFlag{Name: "input,i", Value: "",
			Usage: "input file path"},
		cli.StringFlag{Name: "output,o", Value: "",
			Usage: "output file path"},
		cli.StringFlag{Name: "source,s", Value: "",
			Usage: "source file path"},
		cli.StringFlag{Name: "binary,b", Value: "",
			Usage: "binary file path"},
	}
	app.Action = func(c *cli.Context) {
		var in *os.File //input file instance
		var out *os.File //output file instance
		var src string //source file path
		var bin string //binary file path
		var err error
		pwd, err := os.Getwd()
		panicErr(err)
		if c.String(LANG) == "" {
			fmt.Println("Needs a specified language,use tag -h for help")
			return
		}
		//target binary file path is neccessary
		if c.String(BINARY) != "" {
			// Relative paths are resolved against the working directory.
			p := c.String(BINARY)
			if path.IsAbs(p) {
				bin = p
			} else {
				bin = path.Join(pwd, p)
			}
		} else {
			fmt.Println("Needs target binary file" +
				" path as argument,user tag -h for help")
			return
		}
		//if input is not set , use /dev/null as input
		if c.String(INPUT) == "" {
			in, err = os.Open(os.DevNull)
		} else {
			p := c.String(INPUT)
			if path.IsAbs(p) {
				in, err = os.Open(p)
			} else {
				in, err = os.Open(path.Join(pwd, p))
			}
		}
		panicErr(err)
		defer in.Close()
		if c.Bool(COMPILE) {
			if c.String(SOURCE) == "" {
				fmt.Println("compiler needs source file!")
				return
			} else {
				//get source file path
				p := c.String(SOURCE)
				if path.IsAbs(p) {
					src = p
				} else {
					src = path.Join(pwd, p)
				}
				//compile code ,if compile set , not compile
				if c.Bool(COMPILE) {
					// Map the language flag to the sandbox language constant.
					var language uint64
					switch c.String(LANG) {
					case C:
						language = sandbox.C
					case CPP:
						language = sandbox.CPP
					case GO:
						language = sandbox.GO
					}
					if err = sandbox.Complie(src, bin, language); err != nil {
						// Compile error: report and stop.
						fmt.Printf("CE:0:0:0:")
						return
					}
				}
			}
		}
		var obj = &sandbox.RunningObject{}
		time := int64(c.Int(TIME))
		memory := int64(c.Int(MEMORY))
		if c.String(OUTPUT) != "" {
			//get out test and check if every output matches the single input
			outPath := c.String(OUTPUT)
			if !path.IsAbs(outPath) {
				outPath = path.Join(pwd, outPath)
			}
			out, err = os.Open(outPath)
		} else {
			out, err = os.Open(os.DevNull)
		}
		panicErr(err)
		defer out.Close()
		//form a scope
		if c.String(OUTPUT) != "" {
			//get input tests and run every test one by one
			i := readFile(in)
			inputs := bytes.Split(i, []byte(DELIM))
			o := readFile(out)
			outputs := bytes.Split(o, []byte(DELIM))
			for i, v := range inputs {
				inBytes := bytes.NewBuffer(v)
				out := bytes.Buffer{}
				obj = sandbox.Run(bin, inBytes, &out, []string{""}, time, memory)
				if checkStatus(obj, 0) {
					return
				}
				// Output-limit exceeded.
				if len(out.Bytes()) > OUTPUT_LIMIT {
					fmt.Printf("OL:%d:%d:%d:", obj.Memory, obj.Time, i+1)
					return
				}
				if !bytes.Equal(out.Bytes(), outputs[i]) {
					// Compare again ignoring all whitespace to distinguish a
					// format error (FE) from a genuine wrong answer (WA).
					o1F := bytes.Fields(out.Bytes())
					o1J := bytes.Join(o1F, []byte(""))
					o2F := bytes.Fields(outputs[i])
					o2J := bytes.Join(o2F, []byte(""))
					if bytes.Equal(o1J, o2J) {
						fmt.Printf("FE:%d:%d:%d:%s",
							obj.Memory,
							obj.Time,
							i+1, //0 represent no error
							out.Bytes())
					} else {
						fmt.Printf("WA:%d:%d:%d:%s",
							obj.Memory,
							obj.Time,
							i+1, //o represent no error
							out.Bytes())
					}
					return
				}
			}
		} else {
			// No expected output: run once with empty stdin and only check
			// the process status.
			out := bytes.Buffer{}
			in := bytes.NewBuffer([]byte{})
			obj = sandbox.Run(bin, in, &out,
				[]string{""}, time, memory)
			if checkStatus(obj, 0) {
				return
			}
		}
		//if there is no problem for all checks
		fmt.Printf("AC:%d:%d:%d:", obj.Memory, obj.Time, 0)
		return
	}
	app.Run(os.Args)
}
|
package build
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"reflect"
"sort"
"strconv"
"strings"
"github.com/pkg/errors"
"github.com/outofforest/ioc/v2"
"github.com/outofforest/libexec"
"github.com/outofforest/logger"
"github.com/outofforest/run"
"github.com/ridge/must"
)
// maxStack bounds the depth of the command dependency stack in Execute.
const maxStack = 100

// Command describes a registered build command: a human-readable description
// and the function to run (its parameters are resolved from the IoC container).
type Command struct {
	Description string
	Fn interface{}
}
// DepsFunc represents function for executing dependencies
type DepsFunc func(deps ...interface{})
// Executor defines interface of command executor
type Executor interface {
	// Execute executes commands by their paths
	Execute(ctx context.Context, name string, paths []string) error
}
// NewIoCExecutor returns new executor using IoC container to resolve parameters of commands
func NewIoCExecutor(commands map[string]Command, c *ioc.Container) Executor {
	return &iocExecutor{c: c, commands: commands}
}
// iocExecutor runs commands, resolving their arguments from the container.
type iocExecutor struct {
	c *ioc.Container
	commands map[string]Command
}
// Execute runs the commands registered under paths. Each command's parameters
// are resolved from a sub-container; a command's dependencies run first via
// the injected DepsFunc. Executed commands are memoized, the stack map detects
// dependency cycles and bounds depth at maxStack, and the first error aborts
// the whole run.
func (e *iocExecutor) Execute(ctx context.Context, name string, paths []string) error {
	executed := map[reflect.Value]bool{}
	stack := map[reflect.Value]bool{}
	c := e.c.SubContainer()
	c.Singleton(func() context.Context {
		return withName(ctx, name)
	})
	// errReturn is a sentinel panic value used to unwind nested depsFunc
	// calls once a real error has been recorded in errChan.
	errReturn := errors.New("return")
	errChan := make(chan error, 1)
	worker := func(queue <-chan interface{}, done chan<- struct{}) {
		defer close(done)
		defer func() {
			// Convert command panics into errors; let the sentinel pass.
			if r := recover(); r != nil {
				var err error
				if err2, ok := r.(error); ok {
					if err2 == errReturn {
						return
					}
					err = err2
				} else {
					err = errors.Errorf("command panicked: %v", r)
				}
				errChan <- err
				close(errChan)
			}
		}()
		for {
			select {
			case <-ctx.Done():
				errChan <- ctx.Err()
				close(errChan)
				return
			case cmd, ok := <-queue:
				if !ok {
					return
				}
				cmdValue := reflect.ValueOf(cmd)
				// Skip commands that already ran (diamond dependencies).
				if executed[cmdValue] {
					continue
				}
				var err error
				switch {
				case stack[cmdValue]:
					err = errors.New("build: dependency cycle detected")
				case len(stack) >= maxStack:
					err = errors.New("build: maximum length of stack reached")
				default:
					stack[cmdValue] = true
					c.Call(cmd, &err)
					delete(stack, cmdValue)
					executed[cmdValue] = true
				}
				if err != nil {
					errChan <- err
					close(errChan)
					return
				}
			}
		}
	}
	// depsFunc feeds the requested dependencies to a fresh worker, waits for
	// it to drain them, and panics with the errReturn sentinel if the worker
	// recorded an error, unwinding the calling command.
	depsFunc := func(deps ...interface{}) {
		queue := make(chan interface{})
		done := make(chan struct{})
		go worker(queue, done)
	loop:
		for _, d := range deps {
			select {
			case <-done:
				break loop
			case queue <- d:
			}
		}
		close(queue)
		<-done
		if len(errChan) > 0 {
			panic(errReturn)
		}
	}
	c.Singleton(func() DepsFunc {
		return depsFunc
	})
	// Resolve the requested paths to command functions up front.
	initDeps := make([]interface{}, 0, len(paths))
	for _, p := range paths {
		cmd, exists := e.commands[p]
		if !exists {
			return errors.Errorf("build: command %s does not exist", p)
		}
		initDeps = append(initDeps, cmd.Fn)
	}
	// Run the top-level commands; swallow only the errReturn sentinel — the
	// real error is delivered through errChan below.
	func() {
		defer func() {
			if r := recover(); r != nil {
				if err, ok := r.(error); ok && err == errReturn {
					return
				}
				panic(r)
			}
		}()
		depsFunc(initDeps...)
	}()
	if len(errChan) > 0 {
		return <-errChan
	}
	return nil
}
// Main receives configuration and runs commands
func Main(name string, containerBuilder func(c *ioc.Container), commands map[string]Command) {
	run.Tool("build", containerBuilder, func(ctx context.Context, c *ioc.Container) error {
		flags := logger.Flags(logger.ToolDefaultConfig, "build")
		if err := flags.Parse(os.Args[1:]); err != nil {
			return err
		}
		// "@" as the first argument lists the registered commands and exits.
		if len(os.Args) >= 2 && os.Args[1] == "@" {
			listCommands(commands)
			return nil
		}
		executor := NewIoCExecutor(commands, c)
		// Bash-completion invocation: emit candidates and exit.
		if isAutocomplete() {
			autocompleteDo(commands)
			return nil
		}
		ctx = withName(ctx, name)
		changeWorkingDir()
		setPath()
		// No command paths given: drop into an interactive environment shell.
		if len(flags.Args()) == 0 {
			return activate(ctx, name)
		}
		return execute(ctx, name, flags.Args(), executor)
	})
}
// isAutocomplete reports whether this process was invoked by bash completion
// (detected via the COMP_LINE/COMP_POINT environment variables).
func isAutocomplete() bool {
	if _, ok := autocompletePrefix(); ok {
		return true
	}
	return false
}
// listCommands prints every registered command with its description, aligned
// in a column wide enough for the longest command path.
func listCommands(commands map[string]Command) {
	paths := paths(commands)
	// Find the longest path to size the description column.
	var maxLen int
	for _, path := range paths {
		if len(path) > maxLen {
			maxLen = len(path)
		}
	}
	fmt.Println("\n Available commands:")
	fmt.Println()
	for _, path := range paths {
		// The inner Sprintf builds a "%-<maxLen>s" left-aligned format.
		fmt.Printf(fmt.Sprintf(` %%-%ds`, maxLen)+" %s\n", path, commands[path].Description)
	}
	fmt.Println("")
}
// setPath rebuilds PATH so the project's bin tool directories take precedence,
// dropping any existing entries rooted in binDir.
func setPath() {
	binDir := binDir()
	var rebuilt string
	for _, entry := range strings.Split(os.Getenv("PATH"), ":") {
		if strings.HasPrefix(entry, binDir) {
			continue
		}
		if rebuilt != "" {
			rebuilt += ":"
		}
		rebuilt += entry
	}
	must.OK(os.Setenv("PATH", binToolsDir()+":"+binDir+":"+rebuilt))
}
// activate starts an interactive bash subshell with a prompt tagged by name,
// inheriting the current (already adjusted) environment. It blocks until the
// shell exits.
func activate(ctx context.Context, name string) error {
	bash := exec.Command("bash")
	bash.Env = append(os.Environ(),
		fmt.Sprintf("PS1=%s", "("+name+`) [\u@\h \W]\$ `),
	)
	bash.Stdin = os.Stdin
	bash.Stdout = os.Stdout
	bash.Stderr = os.Stderr
	err := libexec.Exec(ctx, bash)
	// A nonzero exit status from the interactive shell is not our error;
	// only report failures to launch or wait.
	if bash.ProcessState != nil && bash.ProcessState.ExitCode() != 0 {
		return nil
	}
	return err
}
// execute normalizes the given command paths by stripping a single trailing
// slash and delegates to the executor.
func execute(ctx context.Context, name string, paths []string, executor Executor) error {
	pathsTrimmed := make([]string, 0, len(paths))
	for _, p := range paths {
		// strings.TrimSuffix also handles the empty string, which the
		// previous p[len(p)-1] indexing panicked on.
		pathsTrimmed = append(pathsTrimmed, strings.TrimSuffix(p, "/"))
	}
	return executor.Execute(ctx, name, pathsTrimmed)
}
// autocompletePrefix extracts the word currently being completed from the
// COMP_LINE and COMP_POINT environment variables set by bash completion.
// The boolean result reports whether a completion is in progress.
func autocompletePrefix() (string, bool) {
	exeName := os.Args[0]
	cLine := os.Getenv("COMP_LINE")
	cPoint := os.Getenv("COMP_POINT")
	if cLine == "" || cPoint == "" {
		return "", false
	}
	cPointInt, err := strconv.ParseInt(cPoint, 10, 64)
	if err != nil {
		panic(err)
	}
	// Clamp so a stale COMP_POINT past the end of COMP_LINE cannot make the
	// slice expression below panic.
	if cPointInt > int64(len(cLine)) {
		cPointInt = int64(len(cLine))
	}
	// TrimPrefix, not TrimLeft: TrimLeft treats exeName as a character SET
	// and would also eat leading argument characters that happen to occur in
	// the executable name (staticcheck SA1024).
	prefix := strings.TrimPrefix(cLine[:cPointInt], exeName)
	lastSpace := strings.LastIndex(prefix, " ") + 1
	return prefix[lastSpace:], true
}
// autocompleteDo prints completion candidates for the current word to stdout.
// COMP_TYPE "9" (TAB) completes in place — the single candidate or the longest
// common prefix; COMP_TYPE "63" (double TAB) lists all candidates.
func autocompleteDo(commands map[string]Command) {
	prefix, _ := autocompletePrefix()
	choices := choicesForPrefix(paths(commands), prefix)
	switch os.Getenv("COMP_TYPE") {
	case "9":
		// Keep only the already-typed directory part of the word.
		startPos := strings.LastIndex(prefix, "/") + 1
		prefix = prefix[:startPos]
		if len(choices) == 1 {
			for choice, children := range choices {
				// A trailing "/" continues completion into children; a space
				// finishes the word.
				if children {
					choice += "/"
				} else {
					choice += " "
				}
				fmt.Println(prefix + choice)
			}
		} else if chPrefix := longestPrefix(choices); chPrefix != "" {
			fmt.Println(prefix + chPrefix)
		}
	case "63":
		if len(choices) > 1 {
			for choice, children := range choices {
				if children {
					choice += "/"
				}
				fmt.Println(choice)
			}
		}
	}
}
// paths returns the registered command paths in sorted order.
func paths(commands map[string]Command) []string {
	result := make([]string, 0, len(commands))
	for key := range commands {
		result = append(result, key)
	}
	sort.Strings(result)
	return result
}
// choicesForPrefix returns the completion choices for prefix. Keys are the
// candidate path segments after the last typed "/"; a true value marks a
// segment that has children (another "/" follows it in some command path).
func choicesForPrefix(paths []string, prefix string) map[string]bool {
	segStart := strings.LastIndex(prefix, "/") + 1
	result := map[string]bool{}
	for _, p := range paths {
		if !strings.HasPrefix(p, prefix) {
			continue
		}
		segment := p[segStart:]
		hasChildren := false
		if slash := strings.Index(segment, "/"); slash != -1 {
			segment = segment[:slash]
			hasChildren = true
		}
		// Record the segment; a "has children" result wins over a plain one.
		if _, seen := result[segment]; !seen || hasChildren {
			result[segment] = hasChildren
		}
	}
	return result
}
// longestPrefix returns the longest common leading byte sequence shared by
// all keys of choices; it is empty when choices is empty.
func longestPrefix(choices map[string]bool) string {
	if len(choices) == 0 {
		return ""
	}
	common := ""
	for i := 0; ; i++ {
		var current uint8
		for choice := range choices {
			if i >= len(choice) {
				return common
			}
			// current == 0 marks "not yet seen" for this column.
			if current == 0 {
				current = choice[i]
				continue
			}
			if choice[i] != current {
				return common
			}
		}
		common += string(current)
	}
}
// changeWorkingDir switches to the parent of the directory holding the
// (symlink-resolved) executable — i.e. the project root for a bin/<tool>
// layout. Any failure aborts via must.OK.
func changeWorkingDir() {
	must.OK(os.Chdir(filepath.Dir(filepath.Dir(must.String(filepath.EvalSymlinks(must.String(os.Executable())))))))
}
|
package types
import (
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
)
// Module sentinel errors registered with the Cosmos SDK under ModuleName.
// The numeric codes are part of the module's public interface and must stay
// stable across releases.
var (
	ErrInvalidBasicMsg = sdkerrors.Register(ModuleName, 1, "InvalidBasicMsg")
	ErrBadDataValue = sdkerrors.Register(ModuleName, 2, "BadDataValue")
	ErrUnauthorizedPermission = sdkerrors.Register(ModuleName, 3, "UnauthorizedPermission")
	ErrItemDuplication = sdkerrors.Register(ModuleName, 4, "ItemDuplication")
	ErrItemNotFound = sdkerrors.Register(ModuleName, 5, "ItemNotFound")
	ErrInvalidState = sdkerrors.Register(ModuleName, 6, "InvalidState")
	ErrBadWasmExecution = sdkerrors.Register(ModuleName, 7, "BadWasmExecution")
	ErrOnlyOneDenomAllowed = sdkerrors.Register(ModuleName, 8, "OnlyOneDenomAllowed")
	ErrInvalidDenom = sdkerrors.Register(ModuleName, 9, "InvalidDenom")
	ErrUnknownClientID = sdkerrors.Register(ModuleName, 10, "UnknownClientID")
	// Error is a catch-all; prefer one of the specific errors above.
	Error = sdkerrors.Register(ModuleName, 11, "Error")
)
|
package ssh
import (
"bufio"
"errors"
"net"
"os"
"time"
gossh "github.com/coreos/fleet/third_party/code.google.com/p/go.crypto/ssh"
"github.com/coreos/fleet/third_party/code.google.com/p/go.crypto/ssh/terminal"
)
// Execute starts cmd on a new SSH session and returns a buffered reader over
// its stdout. The session is reaped by a background Wait; on any setup error
// the session is closed and the error returned.
func Execute(client *gossh.ClientConn, cmd string) (*bufio.Reader, error) {
	session, err := client.NewSession()
	if err != nil {
		return nil, err
	}
	stdout, err := session.StdoutPipe()
	if err != nil {
		// Previously ignored; a failed pipe produced a nil reader downstream.
		session.Close()
		return nil, err
	}
	bstdout := bufio.NewReader(stdout)
	if err := session.Start(cmd); err != nil {
		// Previously ignored; callers then read from a command that never ran.
		session.Close()
		return nil, err
	}
	go session.Wait()
	return bstdout, nil
}
// Shell opens an interactive shell on the remote host, putting the local
// terminal into raw mode and wiring it to a remote PTY until the session ends.
func Shell(client *gossh.ClientConn) error {
	session, err := client.NewSession()
	if err != nil {
		return err
	}
	defer session.Close()
	modes := gossh.TerminalModes{
		gossh.ECHO:          1,     // enable echoing
		gossh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud
		gossh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud
	}
	fd := int(os.Stdin.Fd())
	oldState, err := terminal.MakeRaw(fd)
	if err != nil {
		// Previously unchecked: a failed MakeRaw left oldState invalid yet it
		// was still handed to the deferred Restore below.
		return err
	}
	defer terminal.Restore(fd, oldState)
	termWidth, termHeight, err := terminal.GetSize(fd)
	if err != nil {
		return err
	}
	session.Stdout = os.Stdout
	session.Stderr = os.Stderr
	session.Stdin = os.Stdin
	if err := session.RequestPty("xterm-256color", termHeight, termWidth, modes); err != nil {
		return err
	}
	if err = session.Shell(); err != nil {
		return err
	}
	session.Wait()
	return nil
}
// sshClientConfig builds a ClientConfig for user that authenticates through
// the local ssh-agent reachable at SSH_AUTH_SOCK.
func sshClientConfig(user string) (*gossh.ClientConfig, error) {
	sock := os.Getenv("SSH_AUTH_SOCK")
	if sock == "" {
		return nil, errors.New("SSH_AUTH_SOCK environment variable is not set. Verify ssh-agent is running. See https://github.com/coreos/fleet/blob/master/Documentation/remote-access.md for help.")
	}
	conn, err := net.Dial("unix", sock)
	if err != nil {
		return nil, err
	}
	auth := gossh.ClientAuthAgent(gossh.NewAgentClient(conn))
	return &gossh.ClientConfig{
		User: user,
		Auth: []gossh.ClientAuth{auth},
	}, nil
}
// NewSSHClient dials an SSH connection to addr as user, authenticating via
// the local ssh-agent, with the dial bounded by timeoutSSHDial.
func NewSSHClient(user, addr string) (*gossh.ClientConn, error) {
	cfg, err := sshClientConfig(user)
	if err != nil {
		return nil, err
	}
	var conn *gossh.ClientConn
	err = timeoutSSHDial(func(echan chan error) {
		var dialErr error
		conn, dialErr = gossh.Dial("tcp", addr, cfg)
		echan <- dialErr
	})
	return conn, err
}
// NewTunnelledSSHClient returns an SSH client connected to tgtaddr by
// tunnelling through an SSH connection to tunaddr. Both dials are bounded by
// timeoutSSHDial.
func NewTunnelledSSHClient(user, tunaddr, tgtaddr string) (*gossh.ClientConn, error) {
	clientConfig, err := sshClientConfig(user)
	if err != nil {
		return nil, err
	}
	// First hop: SSH connection to the tunnel host.
	var tunnelClient *gossh.ClientConn
	dialFunc := func(echan chan error) {
		var err error
		tunnelClient, err = gossh.Dial("tcp", tunaddr, clientConfig)
		echan <- err
	}
	err = timeoutSSHDial(dialFunc)
	if err != nil {
		return nil, err
	}
	// Second hop: TCP connection to the target, carried over the tunnel.
	var targetConn net.Conn
	dialFunc = func(echan chan error) {
		var err error
		targetConn, err = tunnelClient.Dial("tcp", tgtaddr)
		echan <- err
	}
	err = timeoutSSHDial(dialFunc)
	if err != nil {
		return nil, err
	}
	// SSH handshake with the target over the tunnelled connection.
	targetClient, err := gossh.Client(targetConn, clientConfig)
	if err != nil {
		return nil, err
	}
	return targetClient, nil
}
// timeoutSSHDial runs dial in a goroutine and waits up to ten seconds for it
// to report a result on the error channel.
//
// The channel is buffered so a dial that completes after the timeout can
// still deliver its result and exit; with the previous unbuffered channel the
// goroutine blocked on the send forever, leaking it.
func timeoutSSHDial(dial func(chan error)) error {
	echan := make(chan error, 1)
	go dial(echan)
	select {
	case <-time.After(10 * time.Second):
		return errors.New("Timed out while initiating SSH connection")
	case err := <-echan:
		return err
	}
}
|
package main
import (
"KServer/manage"
"KServer/manage/config"
"KServer/server/lock/services"
"KServer/server/utils"
"KServer/server/utils/msg"
"fmt"
)
// main boots the lock server: Redis-backed storage, Kafka messaging and the
// lock service listening on the lock topic.
func main() {
	// Build the manage configuration for this service.
	mConf := config.NewManageConfig()
	mConf.DB.Redis = true
	mConf.Server.Head = msg.LockTopic
	mConf.Message.Kafka = true
	mConf.Lock.Open = true
	mConf.Lock.Head = msg.LockTopic
	m := manage.NewManage(mConf)
	// Initialize the Redis master/slave connection pools.
	redisConfig := config.NewRedisConfig(utils.RedisConFile)
	redis := m.DB().Redis()
	if !redis.StartMasterPool(redisConfig.GetMasterAddr(), redisConfig.Master.PassWord, redisConfig.Master.MaxIdle, redisConfig.Master.MaxActive) ||
		!redis.StartSlavePool(redisConfig.GetSlaveAddr(), redisConfig.Slave.PassWord, redisConfig.Slave.MaxIdle, redisConfig.Slave.MaxActive) {
		fmt.Println("Redis 开启失败")
		return
	}
	// Open the Kafka message send channel.
	kafkaConf := config.NewKafkaConfig(utils.KafkaConFile)
	err := m.Message().Kafka().Send().Open([]string{kafkaConf.GetAddr()})
	if err != nil {
		fmt.Println("消息通道启动失败")
		return
	}
	// Route unlock messages to the unlock service and start consuming.
	unLockService := services.NewUnLock(m)
	m.Message().Kafka().AddRouter(msg.LockTopic, msg.LockId, unLockService.UnlockHandle)
	m.Message().Kafka().StartListen([]string{kafkaConf.GetAddr()}, msg.LockTopic, utils.NewOffset)
	m.Server().Start()
}
|
package main
import (
"os"
"fmt"
"io"
)
// main copies output.txt to outputCpy.txt in the working directory.
func main() {
	CopyFile()
}
// CopyFile copies output.txt to outputCpy.txt and reports the outcome.
func CopyFile() {
	// The copy's error return was previously discarded, so "copy done!" was
	// printed even when the copy failed.
	if _, err := copyFile("output.txt", "outputCpy.txt"); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println("copy done!")
}
// copyFile copies srcName to dstName (creating the destination with mode 0644
// if needed) and returns the number of bytes copied. Open failures are logged
// to stdout and returned.
func copyFile(srcName, dstName string) (written int64, err error) {
	var src *os.File
	if src, err = os.Open(srcName); err != nil {
		fmt.Println("open src file err:", err)
		return
	}
	defer src.Close()
	var dst *os.File
	if dst, err = os.OpenFile(dstName, os.O_CREATE|os.O_WRONLY, 0644); err != nil {
		fmt.Println("open dst file err:", err)
		return
	}
	defer dst.Close()
	written, err = io.Copy(dst, src)
	return
}
|
package main
import (
"fmt"
"sort"
"golang.org/x/exp/constraints"
)
// Map turns a []T1 to a []T2 using a mapping function.
// This function has two type parameters, T1 and T2.
// This works with slices of any type.
func Map[T1, T2 any](s []T1, f func(T1) T2) []T2 {
	out := make([]T2, len(s))
	for idx := range s {
		out[idx] = f(s[idx])
	}
	return out
}
// Reduce reduces a []T1 to a single value using a reduction function,
// starting from the given initializer.
func Reduce[T1, T2 any](s []T1, initializer T2, f func(T2, T1) T2) T2 {
	acc := initializer
	for _, elem := range s {
		acc = f(acc, elem)
	}
	return acc
}
// Filter filters values from a slice using a filter function.
// It returns a new slice with only the elements of s
// for which f returned true.
func Filter[T any](s []T, f func(T) bool) []T {
	var kept []T
	for _, candidate := range s {
		if !f(candidate) {
			continue
		}
		kept = append(kept, candidate)
	}
	return kept
}
// Merge - receives slices of type T and merges them into a single slice of
// type T, preserving order.
func Merge[T any](slices ...[]T) (mergedSlice []T) {
	for _, part := range slices {
		for _, item := range part {
			mergedSlice = append(mergedSlice, item)
		}
	}
	return
}
// Includes - given a slice of type T and a value of type T,
// determines whether the value is contained by the slice.
func Includes[T comparable](slice []T, value T) bool {
	for i := 0; i < len(slice); i++ {
		if slice[i] == value {
			return true
		}
	}
	return false
}
// Sort - sorts a slice of any orderable type T in place, ascending.
// The constraints.Ordered constraint guarantees the element type supports
// the operators <, <=, >=, >.
func Sort[T constraints.Ordered](s []T) {
	less := func(i, j int) bool { return s[i] < s[j] }
	sort.Slice(s, less)
}
// Keys returns the keys of the map m in a slice.
// The keys will be returned in an unpredictable order.
// This function has two type parameters, K and V.
// Map keys must be comparable, so key has the predeclared
// constraint comparable. Map values can be any type.
func Keys[K comparable, V any](m map[K]V) []K {
	keys := make([]K, 0, len(m))
	for key := range m {
		keys = append(keys, key)
	}
	return keys
}
// Sum sums the values of a map containing integer or float values.
func Sum[K comparable, V constraints.Float | constraints.Integer](m map[K]V) V {
	var total V
	for _, value := range m {
		total += value
	}
	return total
}
// main demonstrates the generic slice and map helpers above.
func main() {
	s := []int{1, 2, 3, 7, 5, 22, 18}
	j := []int{4, 5, 6}
	// Map ints to float64s.
	floats := Map(s, func(i int) float64 { return float64(i) })
	fmt.Println(floats)
	// Sum via Reduce.
	sum := Reduce(s, 0, func(i, j int) int { return i + j })
	fmt.Println(sum)
	evens := Filter(s, func(i int) bool { return i%2 == 0 })
	fmt.Println(evens)
	merged := Merge(s, j)
	fmt.Println(merged)
	i := Includes(s, 22)
	fmt.Println(i)
	// Sort mutates s in place.
	Sort(s)
	fmt.Println(s)
	k := Merge(s, j)
	Sort(k)
	odds := Filter(k, func(i int) bool { return i%2 != 0 })
	fmt.Println(odds)
	l := Keys(map[int]int{1: 2, 2: 4})
	fmt.Println(l)
	t := Sum(map[int]int{1: 2, 2: 4})
	fmt.Println(t)
}
|
package gcalbot
import (
"bytes"
"crypto/hmac"
"encoding/base64"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"google.golang.org/api/googleapi"
"github.com/malware-unicorn/go-keybase-chat-bot/kbchat/types/chat1"
"github.com/malware-unicorn/go-keybase-chat-bot/kbchat"
"github.com/malware-unicorn/managed-bots/base"
"golang.org/x/oauth2"
)
// HTTPSrv serves the gcalbot web endpoints (config UI, OAuth flow, static
// images and the Google Calendar event webhook) on top of the shared base
// HTTP server.
type HTTPSrv struct {
	*base.HTTPSrv
	kbc *kbchat.API
	oauth *oauth2.Config
	db *DB
	handler *Handler
	reminderScheduler ReminderScheduler
}
// NewHTTPSrv builds the gcalbot HTTP server and registers its routes on the
// default mux: config UI, health check, home page, OAuth callback, static
// images and the calendar event webhook.
func NewHTTPSrv(
	stats *base.StatsRegistry,
	kbc *kbchat.API,
	debugConfig *base.ChatDebugOutputConfig,
	db *DB,
	oauthConfig *oauth2.Config,
	reminderScheduler ReminderScheduler,
	handler *Handler,
) *HTTPSrv {
	h := &HTTPSrv{
		kbc: kbc,
		oauth: oauthConfig,
		db: db,
		handler: handler,
		reminderScheduler: reminderScheduler,
	}
	h.HTTPSrv = base.NewHTTPSrv(stats, debugConfig)
	// Routes are registered on http.DefaultServeMux.
	http.HandleFunc("/gcalbot", h.configHandler)
	http.HandleFunc("/gcalbot/healthcheck", h.healthCheckHandler)
	http.HandleFunc("/gcalbot/home", h.homeHandler)
	http.HandleFunc("/gcalbot/oauth", h.oauthHandler)
	http.HandleFunc("/gcalbot/image/logo", h.logoHandler)
	http.HandleFunc("/gcalbot/image/screenshot", h.screenshotHandler)
	http.HandleFunc("/gcalbot/events/webhook", h.handleEventUpdateWebhook)
	return h
}
// reminders lists the reminder offsets (in minutes before the event) offered
// by the config page.
var reminders = []ReminderType{
	{"0", "At time of event"},
	{"1", "1 minute before"},
	{"5", "5 minutes before"},
	{"10", "10 minutes before"},
	{"15", "15 minutes before"},
	{"30", "30 minutes before"},
	{"60", "60 minutes before"},
}

// healthCheckHandler deliberately does nothing: the empty handler lets
// net/http reply with a 200 and no body, which is all a liveness probe needs.
func (h *HTTPSrv) healthCheckHandler(w http.ResponseWriter, r *http.Request) {}
// configHandler renders the gcalbot settings page for a Keybase conversation
// and, when the submitted form carries changed settings, creates or removes
// the corresponding invite and reminder subscriptions.
func (h *HTTPSrv) configHandler(w http.ResponseWriter, r *http.Request) {
	h.Stats.Count("config")
	var err error
	// any error assigned below is logged once and rendered as a generic
	// config error page
	defer func() {
		if err != nil {
			h.Errorf("error in configHandler: %s", err)
			h.showConfigError(w)
		}
	}()
	err = r.ParseForm()
	if err != nil {
		return
	}
	keybaseUsername, keybaseConvID, ok := h.authUser(w, r)
	if !ok {
		h.showLoginInstructions(w)
		return
	}
	keybaseConv, err := h.handler.kbc.GetConversation(keybaseConvID)
	if err != nil {
		return
	}
	isAdmin, err := base.IsAdmin(h.kbc, keybaseUsername, keybaseConv.Channel)
	if err != nil {
		return
	} else if !isAdmin {
		// should only be able to configure notifications if isAdmin
		h.showConfigError(w)
		return
	}
	isPrivate := base.IsDirectPrivateMessage(h.kbc.GetUsername(), keybaseUsername, keybaseConv.Channel)
	// current and previous form selections; the "previous_*" values let us
	// distinguish an account/calendar change from a settings update
	accountNickname := r.Form.Get("account")
	calendarID := r.Form.Get("calendar")
	previousAccountNickname := r.Form.Get("previous_account")
	previousCalendarID := r.Form.Get("previous_calendar")
	reminderInput := r.Form.Get("reminder")
	inviteInput := r.Form.Get("invite")
	accounts, err := h.db.GetAccountListForUsername(keybaseUsername)
	if err != nil {
		return
	}
	if len(accounts) == 0 {
		// no connected Google accounts: show onboarding help instead
		h.servePage(w, "account help", AccountHelpPage{
			Title: "gcalbot | config",
		})
		return
	}
	page := ConfigPage{
		Title:         "gcalbot | config",
		ConvID:        keybaseConvID,
		ConvHelpText:  GetConvHelpText(keybaseConv.Channel, false),
		ConvIsPrivate: isPrivate,
		Account:       accountNickname,
		Accounts:      accounts,
		Reminders:     reminders,
	}
	if accountNickname == "" {
		// nothing selected yet: render the bare account picker
		h.servePage(w, "config", page)
		return
	}
	var selectedAccount *Account
	for _, account := range accounts {
		if account.AccountNickname == accountNickname {
			selectedAccount = account
		}
	}
	if selectedAccount == nil {
		h.showConfigError(w)
		return
	}
	srv, err := GetCalendarService(selectedAccount, h.oauth)
	if err != nil {
		return
	}
	calendarList, err := srv.CalendarList.List().Do()
	if err != nil {
		return
	}
	page.Calendars = calendarList.Items
	if accountNickname != previousAccountNickname {
		// if the account has changed, clear the calendar
		calendarID = ""
		previousCalendarID = ""
	}
	// default to the primary calendar
	if calendarID == "" {
		for _, calendarItem := range calendarList.Items {
			if calendarItem.Primary {
				calendarID = calendarItem.Id
			}
		}
	}
	page.CalendarID = calendarID
	// pre-populate the form state from existing subscriptions
	var subscriptions []*Subscription
	subscriptions, err = h.db.GetSubscriptions(selectedAccount, calendarID, keybaseConvID)
	if err != nil {
		return
	}
	for _, subscription := range subscriptions {
		switch subscription.Type {
		case SubscriptionTypeInvite:
			page.Invite = true
		case SubscriptionTypeReminder:
			page.Reminder = strconv.Itoa(GetMinutesFromDuration(subscription.DurationBefore))
		}
	}
	// if the calendar hasn't changed, update the settings
	if calendarID == previousCalendarID {
		h.Stats.Count("config - update")
		if (!page.Invite && page.Reminder == "") && (inviteInput != "" || reminderInput != "") {
			// this update must open a new webhook channel, do that now and if it errors, fail early
			err = h.handler.createEventChannel(selectedAccount, calendarID)
			if err != nil {
				switch typedErr := err.(type) {
				case *googleapi.Error:
					for _, errorItem := range typedErr.Errors {
						if errorItem.Reason == "pushNotSupportedForRequestedResource" {
							// this calendar cannot deliver push notifications;
							// surface that on the page rather than erroring
							page.PushNotAllowed = true
							h.servePage(w, "config", page)
							err = nil // clear error
							return
						}
					}
				}
				return
			}
		}
		// the conv must be private (direct message) for the user to subscribe to invites
		if isPrivate {
			h.Stats.Count("config - update - direct message")
			inviteSubscription := Subscription{
				CalendarID:    calendarID,
				KeybaseConvID: keybaseConvID,
				Type:          SubscriptionTypeInvite,
			}
			var invite bool
			if inviteInput != "" {
				invite = true
			}
			if page.Invite && !invite {
				// remove invite subscription
				h.Stats.Count("config - update - invite - remove")
				err = h.handler.removeSubscription(selectedAccount, inviteSubscription)
				if err != nil {
					return
				}
			} else if !page.Invite && invite {
				// create invite subscription
				h.Stats.Count("config - update - invite - create")
				_, err = h.handler.createSubscription(selectedAccount, inviteSubscription)
				if err != nil {
					return
				}
			}
			page.Invite = invite
		} else {
			h.Stats.Count("config - update - team")
		}
		if page.Reminder != "" {
			// remove old reminder subscription
			h.Stats.Count("config - update - reminder - remove")
			var oldMinutesBefore int
			oldMinutesBefore, err = strconv.Atoi(page.Reminder)
			if err != nil {
				return
			}
			err = h.handler.removeSubscription(selectedAccount, Subscription{
				CalendarID:     calendarID,
				KeybaseConvID:  keybaseConvID,
				DurationBefore: GetDurationFromMinutes(oldMinutesBefore),
				Type:           SubscriptionTypeReminder,
			})
			if err != nil {
				return
			}
		}
		if reminderInput != "" {
			// create new reminder subscription
			h.Stats.Count("config - update - reminder - create")
			var newMinutesBefore int
			newMinutesBefore, err = strconv.Atoi(reminderInput)
			if err != nil {
				return
			}
			_, err = h.handler.createSubscription(selectedAccount, Subscription{
				CalendarID:     calendarID,
				KeybaseConvID:  keybaseConvID,
				DurationBefore: GetDurationFromMinutes(newMinutesBefore),
				Type:           SubscriptionTypeReminder,
			})
			if err != nil {
				return
			}
		}
		page.Reminder = reminderInput
		page.Updated = true
	}
	h.servePage(w, "config", page)
}
// showConfigError renders the generic error page for the config flow with a
// 500 status code.
func (h *HTTPSrv) showConfigError(w http.ResponseWriter) {
	h.Stats.Count("configError")
	page := ErrorPage{Title: "gcalbot | error"}
	w.WriteHeader(http.StatusInternalServerError)
	h.servePage(w, "error", page)
}
// homeHandler serves the bot's static landing page.
func (h *HTTPSrv) homeHandler(w http.ResponseWriter, r *http.Request) {
	h.Stats.Count("home")
	homePage := `Google Calendar Bot is a <a href="https://keybase.io">Keybase</a> chatbot
which connects with your Google calendar to notify you of invites, upcoming events and more!
<div style="padding-top:25px;">
	<img style="width:300px;" src="/gcalbot/image/screenshot">
</div>
`
	html := base.MakeOAuthHTML("gcalbot", "home", homePage, "/gcalbot/image/logo")
	if _, err := w.Write(html); err != nil {
		h.Errorf("homeHandler: unable to write: %v", err)
	}
}
// logoHandler serves the embedded logo image with a one-day cache header.
func (h *HTTPSrv) logoHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Add("Cache-Control", "max-age=86400")
	dat, err := base64.StdEncoding.DecodeString(base.Images["logo"])
	if err != nil {
		// a decode failure means the embedded asset is corrupt; fail before
		// any body bytes are written so the 500 status can still be sent
		// (the original ignored this error and called WriteHeader only
		// after a partial body write, where it is superfluous)
		h.Errorf("logoHandler: unable to decode logo: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	if _, err := io.Copy(w, bytes.NewReader(dat)); err != nil {
		// body already started; the status can no longer change, just log
		h.Errorf("logoHandler: unable to write: %v", err)
	}
}
// screenshotHandler serves the embedded product screenshot.
func (h *HTTPSrv) screenshotHandler(w http.ResponseWriter, r *http.Request) {
	dat, err := base64.StdEncoding.DecodeString(screenshot)
	if err != nil {
		// fail before writing any body bytes so the 500 status is honored
		// (the original ignored this error and set the status only after a
		// partial body write, where it has no effect)
		h.Errorf("screenshotHandler: unable to decode screenshot: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	if _, err := io.Copy(w, bytes.NewReader(dat)); err != nil {
		// body already started; the status can no longer change, just log
		h.Errorf("screenshotHandler: unable to write: %v", err)
	}
}
// showLoginInstructions renders the login page with a 403 status for
// requests that fail authentication.
func (h *HTTPSrv) showLoginInstructions(w http.ResponseWriter) {
	h.Stats.Count("loginInstructions")
	page := LoginPage{Title: "gcalbot | login"}
	w.WriteHeader(http.StatusForbidden)
	h.servePage(w, "login", page)
}
// authUser authenticates the request either via explicit username/token form
// values or via the "auth" cookie ("username:token"). On success it
// refreshes the auth cookie and returns the username and the target
// conversation ID; on failure it returns ok=false.
func (h *HTTPSrv) authUser(w http.ResponseWriter, r *http.Request) (keybaseUsername string, keybaseConvID chat1.ConvIDStr, ok bool) {
	h.Stats.Count("authUser")
	keybaseUsername = r.Form.Get("username")
	token := r.Form.Get("token")
	keybaseConvID = chat1.ConvIDStr(r.Form.Get("conv_id"))
	if keybaseConvID == "" {
		return "", "", false
	}
	if keybaseUsername == "" || token == "" {
		// fall back to the auth cookie
		cookie, err := r.Cookie("auth")
		if err != nil {
			h.Debug("error getting cookie: %s", err)
			return "", "", false
		}
		if cookie == nil {
			return "", "", false
		}
		auth := cookie.Value
		toks := strings.Split(auth, ":")
		if len(toks) != 2 {
			// fix: Debug is printf-style; the original passed auth as an
			// extra argument with no matching format verb
			h.Debug("malformed auth cookie %q", auth)
			return "", "", false
		}
		keybaseUsername = toks[0]
		token = toks[1]
	}
	realToken := h.handler.LoginToken(keybaseUsername)
	// constant-time compare to avoid leaking token bytes via timing
	if !hmac.Equal([]byte(realToken), []byte(token)) {
		h.Debug("invalid auth token")
		return "", "", false
	}
	// refresh the cookie for ~1 year
	http.SetCookie(w, &http.Cookie{
		Name:    "auth",
		Value:   fmt.Sprintf("%s:%s", keybaseUsername, token),
		Expires: time.Now().Add(8760 * time.Hour),
	})
	return keybaseUsername, keybaseConvID, true
}
|
package aoc2020
import (
"testing"
aoc "github.com/janreggie/aoc/internal"
"github.com/stretchr/testify/assert"
)
// Test_readBoardingPass checks the example boarding passes from the AoC 2020
// day 5 problem statement.
func Test_readBoardingPass(t *testing.T) {
	assert := assert.New(t)
	for _, tc := range []struct {
		input  string
		seat   planeSeat
		seatID int
	}{
		{"FBFBBFFRLR", planeSeat{row: 44, col: 5}, 357},
		{"BFFFBBFRRR", planeSeat{row: 70, col: 7}, 567},
		{"FFFBBBFRRR", planeSeat{row: 14, col: 7}, 119},
		{"BBFFBBFRLL", planeSeat{row: 102, col: 4}, 820},
	} {
		got, err := readBoardingPass(tc.input)
		assert.NoError(err)
		assert.Equal(tc.seat, got)
		assert.Equal(tc.seatID, got.seatID())
	}
}
// TestDay05 runs the full day-5 solver against the author's puzzle input and
// checks both part answers.
func TestDay05(t *testing.T) {
	assert := assert.New(t)
	tc := aoc.TestCase{
		Details: "Y2020D05 my input",
		Input:   day05myInput,
		Result1: "930",
		Result2: "515",
	}
	tc.Test(Day05, assert)
}
|
package datatransformer
import (
"sync"
"time"
"github.com/google/uuid"
)
// DataTransformerManager is a concurrency-safe registry of DataTransformer
// instances keyed by UUID. The embedded Mutex guards the instances map.
// NOTE(review): embedding sync.Mutex exports Lock/Unlock on the manager
// itself — confirm that is intended; an unexported `mu sync.Mutex` field
// would keep locking internal.
type DataTransformerManager struct {
	sync.Mutex
	instances map[uuid.UUID]*DataTransformer
}
// NewTransformer registers a fresh DataTransformer under a new random UUID
// and returns that id.
func (d *DataTransformerManager) NewTransformer() uuid.UUID {
	d.Lock()
	defer d.Unlock()
	// lazily initialize the map so a zero-value manager is usable;
	// writing to a nil map would panic (no constructor is visible that
	// would otherwise allocate it)
	if d.instances == nil {
		d.instances = make(map[uuid.UUID]*DataTransformer)
	}
	id := uuid.New()
	d.instances[id] = &DataTransformer{}
	return id
}
// GetTransformer returns the transformer registered under id, or nil when
// the id is unknown.
func (d *DataTransformerManager) GetTransformer(id uuid.UUID) *DataTransformer {
	d.Lock()
	defer d.Unlock()
	inst := d.instances[id]
	return inst
}
// RemoveTransformer drops the transformer registered under id; removing an
// unknown id is a no-op.
func (d *DataTransformerManager) RemoveTransformer(id uuid.UUID) {
	d.Lock()
	defer d.Unlock()
	delete(d.instances, id)
}
// Value carries one datum in several representations plus a preformatted
// display string. NOTE(review): which field is authoritative for a given
// datum is not visible here — presumably one per value; confirm with users
// of this type.
type Value struct {
	ValueString   string
	ValueInt      int64
	ValueFloat    float64
	ValueDuration time.Duration
	ValueTime     time.Time
	PrintedValue  string
}

// DataTransformer holds a dataset: its points and its X/Y axis definitions.
type DataTransformer struct {
	DataPoints []DataPoint
	XAxises    []Axis
	YAxises    []Axis
}

// DataField is a single value within a data point.
type DataField struct {
	Value
}

// DataPoint is one record: a set of fields keyed by field name.
type DataPoint struct {
	Fields map[FieldName]DataField
}

// FieldName identifies a field within a data point.
// NOTE(review): currently an empty struct, so all keys compare equal and the
// Fields map can hold at most one entry — likely a placeholder.
type FieldName struct {
}

// Axis describes one chart axis: its labels and its name.
type Axis struct {
	Labels Labels
	Name   string
}

// Label is a single axis label value.
type Label struct {
	Value
}

// Labels is the set of labels an axis shows for a given field.
type Labels struct {
	ForField FieldName
	Labels   []Label
}
|
package examples
import (
"encoding/json"
"fmt"
)
// user is the demo payload type for the JSON (de)serialization example.
type user struct {
	Id   int    `json:"id"`
	Name string `json:"name"`
	City city   `json:"city"`
}

// city is the nested part of user.
type city struct {
	Id   int    `json:"id"`
	Name string `json:"name"`
}

// userJohn is the fixture value that encode() serializes.
var userJohn = user{
	Id:   1,
	Name: "John",
	City: city{
		Id:   3,
		Name: "London",
	},
}
// JsonStart demonstrates encoding userJohn to JSON and decoding it back into
// a user struct, printing both results.
func JsonStart() {
	encoded := encode()
	fmt.Printf("JSON строка:\n %s \n", encoded)
	decoded := decode(encoded, &user{}).(*user)
	fmt.Printf("Структура из JSON:\n %+v \n", decoded)
}
// encode marshals userJohn to a JSON string. On a marshal error it prints
// the error and returns an empty string.
func encode() string {
	jsonBytes, err := json.Marshal(userJohn)
	if err != nil {
		// fix: the original called fmt.Errorf and discarded the resulting
		// error (a go vet violation) — actually report the failure instead
		fmt.Println("encode error:", err)
	}
	return string(jsonBytes)
}
// decode unmarshals jsonString into s (expected to be a pointer) and returns
// s. json.Unmarshal follows the interface's pointer, so the caller's value
// is populated in place. On error it prints the error and returns s as-is.
func decode(jsonString string, s interface{}) interface{} {
	result := s
	if err := json.Unmarshal([]byte(jsonString), &result); err != nil {
		// fix: the original called fmt.Errorf and discarded the resulting
		// error (a go vet violation) — actually report the failure instead
		fmt.Println("decode error:", err)
	}
	return result
}
|
package util
import (
"os"
)
// DirExists checks if the path exists and is a directory
func DirExists(path string) (bool, error) {
info, err := os.Stat(path)
if err == nil {
return info.IsDir(), nil
} else if os.IsNotExist(err) {
return false, nil
}
return false, err
}
// MkdirAll creates a directory named path, along with any necessary parents.
func MkdirAll(path string) error {
return os.MkdirAll(path, os.ModePerm)
}
|
package main
import (
"fmt"
"math/rand"
"time"
)
// main seeds the RNG, builds five random digits, prints them, then sorts
// them (sort prints the sorted result itself).
func main() {
	fmt.Println("Hello, world!")
	rand.Seed(time.Now().Unix())
	// pick five random numbers in [0, 10)
	var numbers [5]int
	for i := range numbers {
		numbers[i] = rand.Intn(10)
	}
	fmt.Println(numbers)
	sort(numbers[:])
}
// sort bubble-sorts numbers in place (ascending) and prints the result.
func sort(numbers []int) {
	last := len(numbers) - 1
	for pass := 0; pass < last; pass++ {
		// each pass bubbles the largest remaining value toward the end
		for i := 0; i < last-pass; i++ {
			if numbers[i] > numbers[i+1] {
				numbers[i], numbers[i+1] = numbers[i+1], numbers[i]
			}
		}
	}
	fmt.Println(numbers)
}
|
package ipam
import (
"context"
"encoding/json"
"fmt"
"math/bits"
"math/rand"
"net"
"sort"
"sync"
"time"
g8sv1alpha1 "github.com/giantswarm/apiextensions/pkg/apis/cluster/v1alpha1"
"github.com/giantswarm/apiextensions/pkg/apis/provider/v1alpha1"
"github.com/giantswarm/apiextensions/pkg/clientset/versioned"
"github.com/giantswarm/ipam"
"github.com/giantswarm/microerror"
"github.com/giantswarm/operatorkit/controller/context/reconciliationcanceledcontext"
"golang.org/x/sync/errgroup"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
cmav1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
"sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset"
"github.com/giantswarm/aws-operator/service/controller/legacy/v28/controllercontext"
"github.com/giantswarm/aws-operator/service/controller/legacy/v28/key"
"github.com/giantswarm/aws-operator/service/network"
)
// init seeds the global math/rand source so AZ shuffling in selectRandomAZs
// differs between process runs. (Non-cryptographic randomness is fine here.)
func init() {
	// Seed RNG for AZ shuffling.
	rand.Seed(time.Now().UnixNano())
}
// EnsureCreated allocates guest cluster network segment. It gathers existing
// subnets from existing AWSConfig/Status objects and existing VPCs from AWS.
// EnsureCreated allocates guest cluster network segment. It gathers existing
// subnets from existing AWSConfig/Status objects and existing VPCs from AWS.
// When an allocation happens, the CR status is updated inside the allocator
// callback and reconciliation is canceled so the next loop sees fresh state.
func (r *Resource) EnsureCreated(ctx context.Context, obj interface{}) error {
	var cr v1alpha1.AWSConfig
	{
		// re-fetch the CR so we operate on the latest status, not the
		// possibly stale object the framework handed us
		r.logger.LogCtx(ctx, "level", "debug", "message", "fetching latest version of custom resource")
		oldObj, err := key.ToCustomObject(obj)
		if err != nil {
			return microerror.Mask(err)
		}
		newObj, err := r.g8sClient.ProviderV1alpha1().AWSConfigs(oldObj.GetNamespace()).Get(oldObj.GetName(), metav1.GetOptions{})
		if err != nil {
			return microerror.Mask(err)
		}
		cr = *newObj
		r.logger.LogCtx(ctx, "level", "debug", "message", "fetched latest version of custom resource")
	}
	r.logger.LogCtx(ctx, "level", "debug", "message", "finding out if subnet needs to be allocated for cluster")
	// an empty status CIDR means no subnet has been allocated yet
	if key.StatusNetworkCIDR(cr) == "" {
		var subnetCIDR net.IPNet
		{
			r.logger.LogCtx(ctx, "level", "debug", "message", "allocating cluster subnet CIDR")
			randomAZs, err := r.selectRandomAZs(key.SpecAvailabilityZones(cr))
			if err != nil {
				return microerror.Mask(err)
			}
			// the allocator calls GetReservedNetworks to avoid collisions and
			// PersistAllocatedNetwork to commit the chosen subnet to the CR
			callbacks := network.AllocationCallbacks{
				GetReservedNetworks:     r.getReservedNetworks,
				PersistAllocatedNetwork: r.persistAllocatedNetwork(cr, randomAZs),
			}
			subnetCIDR, err = r.networkAllocator.Allocate(ctx, r.networkRange, r.allocatedSubnetMask, callbacks)
			if err != nil {
				return microerror.Mask(err)
			}
			r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("updated CR status with allocated cluster subnet CIDR %#q", subnetCIDR))
			r.logger.LogCtx(ctx, "level", "debug", "message", "canceling reconciliation")
			reconciliationcanceledcontext.SetCanceled(ctx)
		}
	} else {
		r.logger.LogCtx(ctx, "level", "debug", "message", "found out subnet doesn't need to be allocated for cluster")
	}
	return nil
}
// getReservedNetworks collects every subnet already allocated anywhere —
// tenant VPCs, AWSConfig statuses, and Cluster CRs — querying the three
// sources concurrently, then canonicalizes the result against the
// configured network range.
func (r *Resource) getReservedNetworks(ctx context.Context) ([]net.IPNet, error) {
	var err error
	// mutex guards reservedSubnets against concurrent appends from the
	// three goroutines below
	var mutex sync.Mutex
	var reservedSubnets []net.IPNet
	g := &errgroup.Group{}
	g.Go(func() error {
		r.logger.LogCtx(ctx, "level", "debug", "message", "finding allocated subnets from VPCs")
		subnets, err := getVPCSubnets(ctx)
		if err != nil {
			return microerror.Mask(err)
		}
		mutex.Lock()
		reservedSubnets = append(reservedSubnets, subnets...)
		mutex.Unlock()
		r.logger.LogCtx(ctx, "level", "debug", "message", "found allocated subnets from VPCs")
		return nil
	})
	g.Go(func() error {
		r.logger.LogCtx(ctx, "level", "debug", "message", "finding allocated subnets from AWSConfigs")
		subnets, err := getAWSConfigSubnets(r.g8sClient)
		if err != nil {
			return microerror.Mask(err)
		}
		mutex.Lock()
		reservedSubnets = append(reservedSubnets, subnets...)
		mutex.Unlock()
		r.logger.LogCtx(ctx, "level", "debug", "message", "found allocated subnets from AWSConfigs")
		return nil
	})
	g.Go(func() error {
		r.logger.LogCtx(ctx, "level", "debug", "message", "finding allocated subnets from Cluster CRs")
		subnets, err := getClusterSubnets(r.cmaClient)
		if err != nil {
			return microerror.Mask(err)
		}
		mutex.Lock()
		reservedSubnets = append(reservedSubnets, subnets...)
		mutex.Unlock()
		r.logger.LogCtx(ctx, "level", "debug", "message", "found allocated subnets from Cluster CRs")
		return nil
	})
	// Wait returns the first non-nil error from the group, if any
	err = g.Wait()
	if err != nil {
		return nil, microerror.Mask(err)
	}
	reservedSubnets = ipam.CanonicalizeSubnets(r.networkRange, reservedSubnets)
	return reservedSubnets, nil
}
// persistAllocatedNetwork returns the allocator callback that writes the
// allocated subnet, split across the given AZs, into the CR status.
func (r *Resource) persistAllocatedNetwork(cr v1alpha1.AWSConfig, azs []string) func(ctx context.Context, subnet net.IPNet) error {
	persist := func(ctx context.Context, subnet net.IPNet) error {
		return r.splitAndPersistReservedSubnet(ctx, cr, subnet, azs)
	}
	return persist
}
// splitAndPersistReservedSubnet splits the allocated subnet into per-AZ
// private/public networks and commits both the cluster CIDR and the AZ
// layout to the CR status subresource.
func (r *Resource) splitAndPersistReservedSubnet(ctx context.Context, cr v1alpha1.AWSConfig, subnet net.IPNet, azs []string) error {
	statusAZs, err := splitSubnetToStatusAZs(subnet, azs)
	if err != nil {
		return microerror.Mask(err)
	}
	r.logger.LogCtx(ctx, "level", "debug", "message", "updating CR status to persist network allocation and chosen availability zones")
	cr.Status.Cluster.Network.CIDR = subnet.String()
	cr.Status.AWS.AvailabilityZones = statusAZs
	_, err = r.g8sClient.ProviderV1alpha1().AWSConfigs(cr.Namespace).UpdateStatus(&cr)
	if err != nil {
		return microerror.Mask(err)
	}
	r.logger.LogCtx(ctx, "level", "debug", "message", "updated CR status to persist network allocation and chosen availability zones")
	return nil
}
// selectRandomAZs returns n distinct availability zones picked at random
// from the configured set, sorted alphabetically.
func (r *Resource) selectRandomAZs(n int) ([]string, error) {
	if n > len(r.availabilityZones) {
		// fix: "nubmer" typo in the original error message
		return nil, microerror.Maskf(invalidParameterError, "requested number of AZs %d is bigger than number of available AZs %d", n, len(r.availabilityZones))
	}
	// availabilityZones must be copied so that original slice doesn't get shuffled.
	shuffledAZs := make([]string, len(r.availabilityZones))
	copy(shuffledAZs, r.availabilityZones)
	rand.Shuffle(len(shuffledAZs), func(i, j int) {
		shuffledAZs[i], shuffledAZs[j] = shuffledAZs[j], shuffledAZs[i]
	})
	// keep the first n of the shuffle and sort for deterministic ordering
	shuffledAZs = shuffledAZs[:n]
	sort.Strings(shuffledAZs)
	return shuffledAZs, nil
}
// getAWSConfigSubnets lists every AWSConfig across all namespaces and
// returns the network CIDRs recorded in their statuses. Entries with no
// CIDR yet are skipped.
func getAWSConfigSubnets(g8sClient versioned.Interface) ([]net.IPNet, error) {
	awsConfigList, err := g8sClient.ProviderV1alpha1().AWSConfigs(metav1.NamespaceAll).List(metav1.ListOptions{})
	if err != nil {
		return nil, microerror.Mask(err)
	}
	var results []net.IPNet
	for _, ac := range awsConfigList.Items {
		cidr := key.StatusNetworkCIDR(ac)
		if cidr == "" {
			// not allocated yet
			continue
		}
		_, n, err := net.ParseCIDR(cidr)
		if err != nil {
			return nil, microerror.Mask(err)
		}
		results = append(results, *n)
	}
	return results, nil
}
// getClusterSubnets lists every CMA Cluster CR across all namespaces and
// returns the network CIDRs recorded in their provider statuses. Entries
// with no CIDR yet are skipped.
func getClusterSubnets(cmaClient clientset.Interface) ([]net.IPNet, error) {
	clusterList, err := cmaClient.Cluster().Clusters(metav1.NamespaceAll).List(metav1.ListOptions{})
	if err != nil {
		return nil, microerror.Mask(err)
	}
	var results []net.IPNet
	for _, c := range clusterList.Items {
		cidr := statusClusterNetworkCIDR(c)
		if cidr == "" {
			// not allocated yet
			continue
		}
		_, n, err := net.ParseCIDR(cidr)
		if err != nil {
			return nil, microerror.Mask(err)
		}
		results = append(results, *n)
	}
	return results, nil
}
// getVPCSubnets returns the CIDR blocks of all subnets visible to the tenant
// cluster's EC2 client (nil DescribeSubnets input means "no filters").
func getVPCSubnets(ctx context.Context) ([]net.IPNet, error) {
	cc, err := controllercontext.FromContext(ctx)
	if err != nil {
		return nil, microerror.Mask(err)
	}
	out, err := cc.Client.TenantCluster.AWS.EC2.DescribeSubnets(nil)
	if err != nil {
		return nil, microerror.Mask(err)
	}
	var results []net.IPNet
	for _, subnet := range out.Subnets {
		_, n, err := net.ParseCIDR(*subnet.CidrBlock)
		if err != nil {
			return nil, microerror.Mask(err)
		}
		results = append(results, *n)
	}
	return results, nil
}
// statusClusterNetworkCIDR extracts the allocated network CIDR from a CMA
// Cluster CR's raw provider status; empty string when unset.
func statusClusterNetworkCIDR(cluster cmav1alpha1.Cluster) string {
	return mustG8sClusterStatusFromCMAClusterStatus(cluster.Status.ProviderStatus).Provider.Network.CIDR
}
// mustG8sClusterStatusFromCMAClusterStatus decodes the raw CMA provider
// status into an AWSClusterStatus. A nil or empty raw status yields the zero
// value; malformed JSON panics (hence "must").
func mustG8sClusterStatusFromCMAClusterStatus(cmaStatus *runtime.RawExtension) g8sv1alpha1.AWSClusterStatus {
	var g8sStatus g8sv1alpha1.AWSClusterStatus
	if cmaStatus == nil || len(cmaStatus.Raw) == 0 {
		return g8sStatus
	}
	if err := json.Unmarshal(cmaStatus.Raw, &g8sStatus); err != nil {
		panic(err)
	}
	return g8sStatus
}
// splitSubnetToStatusAZs splits subnet such that each AZ gets private and
// public network. Size of these subnets depends on subnet.Mask and number of
// AZs.
func splitSubnetToStatusAZs(subnet net.IPNet, AZs []string) ([]v1alpha1.AWSConfigStatusAWSAvailabilityZone, error) {
subnets, err := splitNetwork(subnet, uint(len(AZs)*2))
if err != nil {
return nil, microerror.Mask(err)
}
var statusAZs []v1alpha1.AWSConfigStatusAWSAvailabilityZone
subnetIdx := 0
for _, az := range AZs {
private := subnets[subnetIdx]
subnetIdx++
public := subnets[subnetIdx]
subnetIdx++
statusAZ := v1alpha1.AWSConfigStatusAWSAvailabilityZone{
Name: az,
Subnet: v1alpha1.AWSConfigStatusAWSAvailabilityZoneSubnet{
Private: v1alpha1.AWSConfigStatusAWSAvailabilityZoneSubnetPrivate{
CIDR: private.String(),
},
Public: v1alpha1.AWSConfigStatusAWSAvailabilityZoneSubnetPublic{
CIDR: public.String(),
},
},
}
statusAZs = append(statusAZs, statusAZ)
}
return statusAZs, nil
}
// calculateSubnetMask calculates new subnet mask to accommodate n subnets.
func calculateSubnetMask(networkMask net.IPMask, n uint) (net.IPMask, error) {
if n == 0 {
return nil, microerror.Maskf(invalidParameterError, "divide by zero")
}
// Amount of bits needed to accommodate enough subnets for public and
// private subnet in each AZ.
subnetBitsNeeded := bits.Len(n - 1)
maskOnes, maskBits := networkMask.Size()
if subnetBitsNeeded > maskBits-maskOnes {
return nil, microerror.Maskf(invalidParameterError, "no room in network mask %s to accommodate %d subnets", networkMask.String(), n)
}
return net.CIDRMask(maskOnes+subnetBitsNeeded, maskBits), nil
}
// splitNetwork returns n subnets from network.
func splitNetwork(network net.IPNet, n uint) ([]net.IPNet, error) {
mask, err := calculateSubnetMask(network.Mask, n)
if err != nil {
return nil, microerror.Mask(err)
}
var subnets []net.IPNet
for i := uint(0); i < n; i++ {
subnet, err := ipam.Free(network, mask, subnets)
if err != nil {
return nil, microerror.Mask(err)
}
subnets = append(subnets, subnet)
}
return subnets, nil
}
|
package game
import (
"github.com/tanema/amore"
"github.com/tanema/amore/keyboard"
)
// World encapsulates the whole environment
// World encapsulates the whole environment
type World struct {
	size      int       // terrain edge length in cells (used for wrap-around)
	terrain   [][]*Cell // 2D grid of terrain cells
	camera    *Camera   // viewport over the terrain
	timeOfDay float32   // clock advanced each Update, drives the day cycle
	sin       float32   // cached sin(timeOfDay), recomputed each Update
	sky       *Sky
	player    *Voxel
}

const (
	worldSaturation float32 = 0.99 // saturation passed to newVoxel for the player
	baseShine       float32 = 0.4
	// NOTE(review): playerShineRange/sunSize/moonSize are consumed outside
	// this chunk — presumably lighting radius and celestial body sizes.
	playerShineRange float32 = 25
	sunSize          float32 = 60
	moonSize         float32 = 30
)
// NewWorld generates a new world to render
func NewWorld(worldSize, visible, iterations int, smooth bool) *World {
return &World{
size: worldSize,
terrain: generateTerrain(worldSize, iterations, smooth),
camera: newCamera(visible),
sky: newSky(),
player: newVoxel(0, 0, 0, 1, 1, 0, worldSaturation, 0.5, true),
}
}
// getCell returns the cell at (x, y), wrapping coordinates around the world
// edges so any coordinate within one world-length of the grid is valid.
func (world *World) getCell(x, y int) *Cell {
	// adding world.size before the modulo keeps small negative coordinates
	// in range (Go's % can yield negative results)
	col := (x + world.size) % world.size
	row := (y + world.size) % world.size
	return world.terrain[col][row]
}
// Update updates a step in the world
func (world *World) Update(dt float32) {
world.camera.update()
world.updateInput()
world.timeOfDay += dt / 10
world.sin = sin(world.timeOfDay)
world.sky.update(world)
world.camera.forVisible(world, func(cell *Cell, x, y, distX, distY float32) {
cell.update(world, x, y, distX, distY)
})
}
// Draw draws one frame
func (world *World) Draw() {
world.sky.draw(world)
world.camera.forVisible(world, func(cell *Cell, x, y, distX, distY float32) {
cell.draw(world.camera, x, y)
if x == world.player.x && y == world.player.y {
world.player.draw(world.camera, x, y)
}
})
}
// updateInput polls the keyboard: Esc quits, arrow keys move the player
// (with wrap-around), Space/C raise/lower the terrain under the player, and
// V/B grow/shrink the camera's visible range.
func (world *World) updateInput() {
	if keyboard.IsDown(keyboard.KeyEscape) {
		amore.Quit()
	}
	if keyboard.IsDown(keyboard.KeyLeft) {
		world.player.x--
	} else if keyboard.IsDown(keyboard.KeyRight) {
		world.player.x++
	}
	if keyboard.IsDown(keyboard.KeyUp) {
		world.player.y--
	} else if keyboard.IsDown(keyboard.KeyDown) {
		world.player.y++
	}
	// wrap the player position around the world edges
	world.player.x = float32((int(world.player.x) + world.size) % world.size)
	world.player.y = float32((int(world.player.y) + world.size) % world.size)
	world.camera.lookAt(world.player.x, world.player.y)
	cell := world.getCell(int(world.player.x), int(world.player.y))
	if keyboard.IsDown(keyboard.KeySpace) {
		cell.setZ(cell.getZ() + 1)
	} else if keyboard.IsDown(keyboard.KeyC) {
		cell.setZ(cell.getZ() - 1)
	}
	// keep the player standing on the (possibly just modified) terrain
	world.player.z = cell.getZ()
	if keyboard.IsDown(keyboard.KeyV) {
		world.camera.visible++
	} else if keyboard.IsDown(keyboard.KeyB) {
		world.camera.visible--
	}
}
|
package totp
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/binary"
"fmt"
"hash"
"time"
)
const (
	// VERSION is this package's semantic version string.
	VERSION = "1.0.0"
)
// Token holds the shared secret and the TOTP parameters (counter epoch,
// time step, and HMAC hash constructor) used to generate codes.
type Token struct {
	key      []byte        // shared secret for the HMAC
	epoch    time.Time     // counter zero point (Unix epoch via New)
	interval time.Duration // time step (30s via New)
	hash     func() hash.Hash
}
// New returns a Token with defaults: Unix epoch, 30-second interval, and
// SHA-256. NOTE(review): RFC 6238's default HMAC is SHA-1; confirm SHA-256
// matches the systems this must interoperate with (see NewHashToken).
func New(key []byte) Token {
	return Token{
		key:      key,
		epoch:    time.Unix(0, 0).UTC(),
		interval: 30 * time.Second,
		hash:     sha256.New,
	}
}
// NewHashToken returns a Token like New but with the supplied hash
// constructor in place of SHA-256.
func NewHashToken(key []byte, h func() hash.Hash) Token {
	t := New(key)
	t.hash = h
	return t
}
// Generate computes the TOTP code of the given number of digits for time t,
// following the HOTP construction (RFC 4226) with the RFC 6238 time-step
// counter.
func (token Token) Generate(t time.Time, length int) string {
	// number of whole intervals elapsed since the epoch
	c := (t.Unix() - token.epoch.Unix()) / int64(token.interval.Seconds())
	h := hmac.New(token.hash, token.key)
	var b bytes.Buffer
	// error ignored: writing to a bytes.Buffer cannot fail
	binary.Write(&b, binary.BigEndian, c)
	h.Write(b.Bytes())
	v := h.Sum(nil)
	// dynamic truncation: the low nibble of the last byte picks the offset
	o := v[len(v)-1] & 0xf
	// 31-bit big-endian value at the offset, reduced mod 1e9
	val := (int32(v[o]&0x7f)<<24 |
		int32(v[o+1])<<16 |
		int32(v[o+2])<<8 |
		int32(v[o+3])) % 1000000000
	// zero-pad to 10 digits and keep the trailing `length` digits
	return fmt.Sprintf("%010d", val)[10-length : 10]
}
// String implements fmt.Stringer, returning the current 6-digit code.
func (t Token) String() string {
	return t.Generate(time.Now().UTC(), 6)
}
|
package app
import (
"bytes"
"fmt"
"os/exec"
"strings"
)
// Error is a minimal error abstraction.
// NOTE(review): this duplicates the built-in `error` interface exactly;
// consider using `error` directly (kept as-is for caller compatibility).
type Error interface {
	Error() string
}
// ExtractVideoDevices runs `imagesnap -l` and returns the listed camera
// names, dropping the "Video Devices:" header line and blank lines.
func ExtractVideoDevices() ([]string, Error) {
	cmd := exec.Command("imagesnap", "-l")
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		fmt.Println(fmt.Sprint(err) + ": " + stderr.String())
		return []string{}, err
	}
	result := []string{}
	for _, line := range strings.Split(stdout.String(), "\n") {
		if line != "Video Devices:" && line != "" {
			result = append(result, line)
		}
	}
	return result, nil
}
// contains reports whether item is present in slice.
func contains(slice []string, item string) bool {
	// direct linear scan: the original built a throwaway map on every call,
	// which allocates without speeding up a one-shot membership test
	for _, s := range slice {
		if s == item {
			return true
		}
	}
	return false
}
// ExtractPreferableDeviceName picks the preferred camera from the device
// list: the Logitech C920 first, then the built-in FaceTime camera, then the
// first entry. An empty list yields "" (the original panicked on input[0]).
func ExtractPreferableDeviceName(input []string) string {
	if contains(input, "HD Pro Webcam C920") {
		return "HD Pro Webcam C920"
	}
	if contains(input, "FaceTime HD Camera") {
		return "FaceTime HD Camera"
	}
	if len(input) == 0 {
		return ""
	}
	return input[0]
}
|
package main
import (
"context"
"errors"
"fmt"
"io"
"os"
"github.com/werf/werf/pkg/docker"
"github.com/werf/werf/pkg/util"
"github.com/werf/werf/pkg/buildah"
"github.com/werf/werf/pkg/werf"
)
// errUsage describes the expected command-line invocation.
var errUsage = errors.New("./buildah-test {auto|native-rootless|docker-with-fuse} DOCKERFILE_PATH [CONTEXT_PATH]")
// do runs one buildah test build: it resolves the buildah mode (from the
// environment or argv), runs the buildah startup hook, initializes werf (and
// docker when the mode needs it), then builds the given Dockerfile and
// prints the resulting image ID.
func do(ctx context.Context) error {
	var mode buildah.Mode
	if v := os.Getenv("BUILDAH_TEST_MODE"); v != "" {
		// re-exec path: the mode was stashed in the environment below on a
		// previous pass
		mode = buildah.Mode(v)
	} else {
		if len(os.Args) < 2 {
			return errUsage
		}
		mode = buildah.ResolveMode(buildah.Mode(os.Args[1]))
		os.Setenv("BUILDAH_TEST_MODE", string(mode))
	}
	shouldTerminate, err := buildah.ProcessStartupHook(mode)
	if err != nil {
		return fmt.Errorf("buildah process startup hook failed: %s", err)
	}
	if shouldTerminate {
		// the hook fully handled this process (e.g. re-exec); nothing to do
		return nil
	}
	if err := werf.Init("", ""); err != nil {
		return fmt.Errorf("unable to init werf subsystem: %s", err)
	}
	mode = buildah.ResolveMode(mode)
	fmt.Printf("Using buildah mode: %s\n", mode)
	if mode == buildah.ModeDockerWithFuse {
		if err := docker.Init(ctx, "", false, false, ""); err != nil {
			return err
		}
	}
	if len(os.Args) < 3 {
		return errUsage
	}
	var dockerfilePath = os.Args[2]
	// context dir is optional; empty means no build context tar is sent
	var contextDir string
	if len(os.Args) > 3 {
		contextDir = os.Args[3]
	}
	b, err := buildah.NewBuildah(mode, buildah.BuildahOpts{})
	if err != nil {
		return fmt.Errorf("unable to create buildah client: %s", err)
	}
	dockerfileData, err := os.ReadFile(dockerfilePath)
	if err != nil {
		return fmt.Errorf("error reading %q: %s", dockerfilePath, err)
	}
	var contextTar io.Reader
	if contextDir != "" {
		contextTar = util.ReadDirAsTar(contextDir)
	}
	imageId, err := b.BuildFromDockerfile(ctx, dockerfileData, buildah.BuildFromDockerfileOpts{
		ContextTar: contextTar,
		CommonOpts: buildah.CommonOpts{
			LogWriter: os.Stdout,
		},
	})
	if err != nil {
		return fmt.Errorf("BuildFromDockerfile failed: %s", err)
	}
	fmt.Fprintf(os.Stdout, "INFO: built imageId is %s\n", imageId)
	return nil
}
// main runs the test build and exits non-zero on failure.
func main() {
	err := do(context.Background())
	if err == nil {
		return
	}
	fmt.Fprintf(os.Stderr, "Error: %s\n", err)
	os.Exit(1)
}
|
package main
import (
"strconv"
"fmt"
)
// Your Codec object will be instantiated and called as such:
// Codec codec;
// codec.deserialize(codec.serialize(root));
// PlaceHolder marks a nil child in the serialized form; Delimiter separates
// tokens.
const (
	PlaceHolder = "#"
	Delimiter   = "|"
)

// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// serialize encodes the tree in pre-order as "val|" tokens with "#|"
// placeholders for nil children. A nil root serializes to just "#".
func serialize(root *TreeNode) string {
	if root == nil {
		return PlaceHolder
	}
	return serilaizeDe(root)
}

// serilaizeDe recursively serializes a subtree in pre-order.
func serilaizeDe(root *TreeNode) string {
	if root == nil {
		return PlaceHolder + Delimiter
	}
	str := strconv.FormatInt(int64(root.Val), 10) + Delimiter
	return str + serilaizeDe(root.Left) + serilaizeDe(root.Right)
}

// deserialize rebuilds the tree from the pre-order encoding produced by
// serialize.
func deserialize(data string) *TreeNode {
	loc := 0
	if string(data[loc]) == PlaceHolder {
		return nil
	}
	return deserilaizeDe(data, &loc)
}

// deserilaizeDe consumes tokens starting at *loc and rebuilds the subtree,
// advancing *loc past everything it consumed.
func deserilaizeDe(data string, loc *int) *TreeNode {
	start := *loc
	// End of data or a placeholder means a nil child; skip "#|" (two bytes).
	// fix: the bounds check must run BEFORE indexing data[*loc] — the
	// original indexed first and could panic on out-of-range input.
	if *loc >= len(data) || string(data[*loc]) == PlaceHolder {
		*loc += 2
		return nil
	}
	// advance past the digits (and sign) of the value
	for string(data[*loc]) != Delimiter {
		*loc++
	}
	val, _ := strconv.ParseInt(data[start:*loc], 10, 64)
	// step over the delimiter
	*loc++
	node := &TreeNode{Val: int(val)}
	node.Left = deserilaizeDe(data, loc)
	node.Right = deserilaizeDe(data, loc)
	return node
}
// main builds a small tree, serializes it, and demonstrates the round trip
// by printing the encoding of both the original and the decoded tree.
func main() {
	grandchild := TreeNode{Val: 9}
	left := TreeNode{Val: 2, Left: &grandchild}
	right := TreeNode{Val: 5}
	root := TreeNode{Val: 322, Left: &left, Right: &right}
	encoded := serialize(&root)
	fmt.Println(encoded)
	decoded := deserialize(encoded)
	fmt.Println(serialize(decoded))
}
|
// spiralOrder returns the elements of matrix in clockwise spiral order.
// time: O(rows*cols), space: O(rows*cols) for the visited grid.
func spiralOrder(matrix [][]int) []int {
	// fix: the walk below indexes matrix[0][0] unconditionally and would
	// panic on an empty matrix or empty rows
	if len(matrix) == 0 || len(matrix[0]) == 0 {
		return nil
	}
	rows, cols := len(matrix), len(matrix[0])
	visited := make([][]bool, rows)
	for i := range visited {
		visited[i] = make([]bool, cols)
	}
	// directions in clockwise order: right, down, left, up
	dirs := [][]int{{0, 1}, {1, 0}, {0, -1}, {-1, 0}}
	dir := 0
	x, y := 0, 0
	res := make([]int, 0, rows*cols)
	for {
		res = append(res, matrix[x][y])
		visited[x][y] = true
		nx, ny := x+dirs[dir][0], y+dirs[dir][1]
		// turn clockwise when the next cell is off-grid or already visited
		if nx >= rows || nx < 0 || ny >= cols || ny < 0 || visited[nx][ny] {
			dir = (dir + 1) % 4
			nx, ny = x+dirs[dir][0], y+dirs[dir][1]
		}
		// if even the turned direction is blocked, the spiral is complete
		if nx >= rows || nx < 0 || ny >= cols || ny < 0 || visited[nx][ny] {
			break
		}
		x, y = nx, ny
	}
	return res
}
|
package sudoku
import (
"testing"
)
func TestSubsetCellsWithNUniquePossibilities(t *testing.T) {
grid := NewGrid()
grid, err := MutableLoadSDKFromFile(puzzlePath("hiddenpair1_filled.sdk"))
if err != nil {
t.Log("Failed to load hiddenpair1_filled.sdk")
t.Fail()
}
cells, nums := subsetCellsWithNUniquePossibilities(2, grid.Row(4))
if len(cells) != 1 {
t.Log("Didn't get right number of subset cells unique with n possibilities: ", len(cells))
t.FailNow()
}
cellList := cells[0]
numList := nums[0]
if len(cellList) != 2 {
t.Log("Number of subset cells did not match k: ", len(cellList))
t.Fail()
}
if cellList[0].Row != 4 || cellList[0].Col != 7 || cellList[1].Row != 4 || cellList[1].Col != 8 {
t.Log("Subset cells unique came back with wrong cells: ", cellList)
t.Fail()
}
if !numList.SameContentAs(IntSlice([]int{3, 5})) {
t.Error("Subset cells unique came back with wrong numbers: ", numList)
}
}
// TestHiddenPairRow checks the Hidden Pair technique applied to a row: the
// pair {3,5} confined to (4,7)/(4,8) should eliminate {7,8,2} there.
func TestHiddenPairRow(t *testing.T) {
	options := solveTechniqueTestHelperOptions{
		targetCells:  []CellRef{{4, 7}, {4, 8}},
		pointerCells: []CellRef{{4, 7}, {4, 8}},
		targetSame:   _GROUP_ROW,
		targetGroup:  4,
		targetNums:   IntSlice([]int{7, 8, 2}),
		pointerNums:  IntSlice([]int{3, 5}),
		description:  "3 and 5 are only possible in (4,7) and (4,8) within row 4, which means that only those numbers could be in those cells",
	}
	humanSolveTechniqueTestHelper(t, "hiddenpair1_filled.sdk", "Hidden Pair Row", options)
	techniqueVariantsTestHelper(t, "Hidden Pair Row")
}
// TestHiddenPairCol checks the Hidden Pair technique on a column by
// transposing the same fixture used in the row test.
func TestHiddenPairCol(t *testing.T) {
	options := solveTechniqueTestHelperOptions{
		transpose:    true,
		targetCells:  []CellRef{{7, 4}, {8, 4}},
		pointerCells: []CellRef{{7, 4}, {8, 4}},
		targetSame:   _GROUP_COL,
		targetGroup:  4,
		targetNums:   IntSlice([]int{7, 8, 2}),
		pointerNums:  IntSlice([]int{3, 5}),
		description:  "3 and 5 are only possible in (7,4) and (8,4) within column 4, which means that only those numbers could be in those cells",
	}
	humanSolveTechniqueTestHelper(t, "hiddenpair1_filled.sdk", "Hidden Pair Col", options)
	techniqueVariantsTestHelper(t, "Hidden Pair Col")
}
// TestHiddenPairBlock checks the Hidden Pair technique applied to block 5;
// the affected cells happen to also share row 4 in this fixture.
func TestHiddenPairBlock(t *testing.T) {
	options := solveTechniqueTestHelperOptions{
		targetCells:  []CellRef{{4, 7}, {4, 8}},
		pointerCells: []CellRef{{4, 7}, {4, 8}},
		//Yes, in this case we want them to be the same row.
		targetSame:  _GROUP_ROW,
		targetGroup: 4,
		targetNums:  IntSlice([]int{7, 8, 2}),
		pointerNums: IntSlice([]int{3, 5}),
		description: "3 and 5 are only possible in (4,7) and (4,8) within block 5, which means that only those numbers could be in those cells",
	}
	humanSolveTechniqueTestHelper(t, "hiddenpair1_filled.sdk", "Hidden Pair Block", options)
	techniqueVariantsTestHelper(t, "Hidden Pair Block")
}
//TODO: Test HiddenTriple. The file I have on hand doesn't require the technique up front.
//TODO: Test HiddenQuad. The file I ahve on hand doesn't require the technique up front.
|
package api
import (
"encoding/json"
"io/ioutil"
"github.com/enjoy-web/ehttp"
"github.com/enjoy-web/ehttp/examples/restful-demo/model"
"github.com/gin-gonic/gin"
)
// DocPostBook documents the "new a book" endpoint for ehttp's
// generated API docs: JSON in and out, a "version" request header, a
// Book request body, and Book (200) / ErrorMessage (400) responses.
var DocPostBook = &ehttp.APIDocCommon{
	Summary: "new a book",
	Produces: []string{ehttp.Application_Json},
	Consumes: []string{ehttp.Application_Json},
	Parameters: map[string]ehttp.Parameter{
		"version": ehttp.Parameter{InHeader: &ehttp.ValueInfo{Type: "string", Desc: "the version of api"}},
	},
	Request: &ehttp.Request{
		Description: "the book info",
		Model: &model.Book{},
	},
	Responses: map[int]ehttp.Response{
		200: ehttp.Response{
			Description: "successful operation",
			Model: &model.Book{},
		},
		400: ehttp.Response{
			Description: "failed operation",
			Model: &model.ErrorMessage{},
		},
	},
}
// HandlePostBook decodes a posted Book and echoes it back with a 200.
// Any failure — an upstream validation error, an unreadable body, or
// malformed JSON — yields a 400 with a structured error message.
func HandlePostBook(c *gin.Context, err error) {
	badRequest := func(e error) {
		c.JSON(400, model.NewErrorMessage(model.ErrorCodeParameter, e))
	}
	if err != nil {
		badRequest(err)
		return
	}
	payload, readErr := ioutil.ReadAll(c.Request.Body)
	if readErr != nil {
		badRequest(readErr)
		return
	}
	book := &model.Book{}
	if unmarshalErr := json.Unmarshal(payload, book); unmarshalErr != nil {
		badRequest(unmarshalErr)
		return
	}
	c.JSON(200, book)
}
|
package main
import (
"fmt"
"time"
)
/*
reference:
https://docs.studygolang.com/pkg/time/
*/
/*
go 语言 time.go 时间库 常用的一些方法
//1、Now()返回当前本地时间
//2、Local()将时间转成本地时区,但指向同一时间点的Time。
//3、UTC()将时间转成UTC和零时区,但指向同一时间点的Time。
//4、Date()可以根据指定数值,返回一个本地或国际标准的时间格式。
//5、Parse()能将一个格式化的时间字符串解析成它所代表的时间。就是string转time
//6、Format()根据指定的时间格式,将时间格式化成文本。就是time转string
//7、String()将时间格式化成字符串,格式为:"2006-01-02 15:04:05.999999999 -0700 MST"
//8、Unix()将t表示为Unix时间(时间戳,一个int64整数),即从时间点January 1, 1970 UTC到时间点t所经过的时间(单位秒)。
//9、UnixNano()将t表示为Unix时间(时间戳,一个int64整数),即从时间点January 1, 1970 UTC到时间点t所经过的时间(单位纳秒)。
//10、Equal()判断时间是否相等
//11、Before()如果t代表的时间点在u之前,返回真;否则返回假。
//12、After()如果t代表的时间点在u之后,返回真;否则返回假。
//13、Date()返回时间点对应的年、月、日信息
//14、Year()返回时间点对应的年的信息
//15、Month()返回时间点对应的月的信息
//16、Day()返回时间点对应的日的信息
//17、Weekday()返回时间点对应的星期的信息
//18、Clock()返回时间点对应的时、分、秒信息
//19、Hour()返回时间点对应的小时的信息
//20、Minute()返回时间点对应的分的信息
//21、Second()返回时间点对应的秒的信息
//22、Nanosecond()返回时间点对应的纳秒的信息
//23、Sub()返回一个时间段t-u。
//24、Hours()将时间段表示为float64类型的小时数。
//25、Minutes()将时间段表示为float64类型的分钟数。
//26、Seconds()将时间段表示为float64类型的秒数。
//27、Nanoseconds()将时间段表示为int64类型的纳秒数,等价于int64(d)。
//28、String()返回时间段采用"72h3m0.5s"格式的字符串表示。
//29、ParseDuration解析一个时间段字符串。
//30、Add()返回时间点t+d。
//31、AddDate()返回增加了给出的年份、月份和天数的时间点Time。
*/
// main runs the time-package demos and prints how long testTime took.
func main() {
	start := time.Now()
	testTime()
	//time.Sleep(5 * time.Second)
	// time.Since is the idiomatic way to measure elapsed time; prints
	// the elapsed seconds for testTime.
	fmt.Println(time.Since(start).Seconds())
	TestSleep()
}
// expensiveCall simulates slow work by announcing and then sleeping
// for two seconds.
func expensiveCall() {
	fmt.Println("sleep 2 seconds")
	time.Sleep(2 * time.Second)
}
// TestSleep measures and prints how long expensiveCall takes to run.
func TestSleep() {
	t0 := time.Now()
	expensiveCall()
	// time.Since(t0) is the idiomatic equivalent of time.Now().Sub(t0).
	fmt.Printf("The call took %v to run.\n", time.Since(t0))
}
// testTime walks through the most commonly used functions of the time
// package, printing a numbered line for each one.
func testTime() {
	//1. Now() returns the current local time.
	t := time.Now()
	fmt.Println("1、", t)
	//2. Local() converts to the local time zone; same instant in time.
	fmt.Println("2、", t.Local())
	//3. UTC() converts to UTC (zero offset); same instant in time.
	fmt.Println("3、", t.UTC())
	//4. Date() builds a time from explicit components in a location.
	t = time.Date(2018, time.January, 1, 1, 1, 1, 0, time.Local)
	fmt.Printf("4、本地时间%s , 国际统一时间:%s \n", t, t.UTC())
	//5. Parse() turns a formatted time string into a Time (string -> time).
	// Predefined layouts exist too: ANSIC, UnixDate, RFC3339.
	// ANSIC = "Mon Jan _2 15:04:05 2006" — the reference time 1 2 3 4 5 6.
	t, _ = time.Parse("2006-01-02 15:04:05", "2018-07-19 05:47:13")
	fmt.Println("5、", t)
	//6. Format() renders a Time using a layout string (time -> string).
	fmt.Println("6、", time.Now().Format("2006-01-02 15:04:05"))
	//7. String() formats as "2006-01-02 15:04:05.999999999 -0700 MST".
	fmt.Println("7、", time.Now().String())
	//8. Unix() is the number of seconds since January 1, 1970 UTC.
	fmt.Println("8、", time.Now().Unix())
	//9. UnixNano() is the number of nanoseconds since January 1, 1970 UTC.
	fmt.Println("9、", time.Now().UnixNano())
	//10. Equal() reports whether two times are the same instant.
	fmt.Println("10、", t.Equal(time.Now()))
	//11. Before() reports whether t is before the argument.
	fmt.Println("11、", t.Before(time.Now()))
	//12. After() reports whether t is after the argument.
	fmt.Println("12、", t.After(time.Now()))
	//13. Date() (the method) returns the year, month and day.
	year, month, day := time.Now().Date()
	fmt.Println("13、", year, month, day)
	//14. Year() returns the year.
	fmt.Println("14、", time.Now().Year())
	//15. Month() returns the month.
	fmt.Println("15、", time.Now().Month())
	//16. Day() returns the day of the month.
	fmt.Println("16、", time.Now().Day())
	//17. Weekday() returns the day of the week.
	fmt.Println("17、", time.Now().Weekday())
	//18. Clock() returns the hour, minute and second.
	hour, minute, second := time.Now().Clock()
	fmt.Println("18、", hour, minute, second)
	//19. Hour() returns the hour.
	fmt.Println("19、", time.Now().Hour())
	//20. Minute() returns the minute.
	fmt.Println("20、", time.Now().Minute())
	//21. Second() returns the second.
	fmt.Println("21、", time.Now().Second())
	//22. Nanosecond() returns the nanosecond offset within the second.
	fmt.Println("22、", time.Now().Nanosecond())
	//23. Sub() returns the Duration t-u.
	fmt.Println("23、", time.Now().Sub(time.Now()))
	//24. Hours() expresses a Duration as float64 hours.
	fmt.Println("24、", time.Now().Sub(time.Now()).Hours())
	//25. Minutes() expresses a Duration as float64 minutes.
	fmt.Println("25、", time.Now().Sub(time.Now()).Minutes())
	//26. Seconds() expresses a Duration as float64 seconds.
	fmt.Println("26、", time.Now().Sub(time.Now()).Seconds())
	//27. Nanoseconds() expresses a Duration as int64 nanoseconds, i.e. int64(d).
	fmt.Println("27、", time.Now().Sub(time.Now()).Nanoseconds())
	//28. String() renders a Duration in the "72h3m0.5s" style.
	fmt.Println("28、", "时间间距:", t.Sub(time.Now()).String())
	//29. ParseDuration parses a duration string.
	d, _ := time.ParseDuration("1h30m")
	fmt.Println("29、", d)
	//30. Add() returns the time t+d.
	fmt.Println("30、", "交卷时间:", time.Now().Add(d))
	//31. AddDate() returns the time shifted by years, months and days.
	fmt.Println("31、", "一年一个月零一天之后的日期:", time.Now().AddDate(1, 1, 1))
}
|
package model
import (
"github.com/go-xorm/xorm"
)
var (
	// initUserSql inserts a user row with only the address column set;
	// the remaining columns take their schema defaults.
	initUserSql = `INSERT INTO user (address) VALUES (?)`
)
// initUser inserts a new user row with the given address within the
// provided session and returns the auto-generated row id.
func initUser(session *xorm.Session, userAddress string) (int64, error) {
	result, err := session.Exec(initUserSql, userAddress)
	if err != nil {
		return 0, err
	}
	id, err := result.LastInsertId()
	if err != nil {
		return 0, err
	}
	return id, nil
}
|
package main
import (
"fmt"
"github.com/Cloud-Foundations/Dominator/dom/lib"
"github.com/Cloud-Foundations/Dominator/lib/log"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
)
// fetchImageSubcommand handles the fetch-image CLI subcommand: it
// connects to the sub, then asks it to fetch the named image.
func fetchImageSubcommand(args []string, logger log.DebugLogger) error {
	startTime := showStart("getSubClient()")
	srpcClient := getSubClientRetry(logger)
	defer srpcClient.Close()
	showTimeTaken(startTime)
	imageName := args[0]
	if err := fetchImage(srpcClient, imageName); err != nil {
		return fmt.Errorf("error fetching image: %s: %s", imageName, err)
	}
	return nil
}
// fetchImage retrieves imageName from the configured image server and
// drives the sub through the poll/fetch/push cycle for it.
func fetchImage(srpcClient *srpc.Client, imageName string) error {
	imageServerAddress := fmt.Sprintf("%s:%d",
		*imageServerHostname, *imageServerPortNum)
	img, err := getImageRetry(imageServerAddress, imageName, timeoutTime)
	if err != nil {
		// Return the error rather than calling logger.Fatalf: this
		// function already reports errors to its caller, and killing
		// the process here bypassed the caller's error wrapping.
		return fmt.Errorf("error getting image: %s", err)
	}
	subObj := lib.Sub{
		Hostname: *subHostname,
		Client:   srpcClient,
	}
	return pollFetchAndPush(&subObj, img, imageServerAddress, timeoutTime, true,
		logger)
}
|
package BLC
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"fmt"
ripemd1602 "golang.org/x/crypto/ripemd160"
"log"
)
// version is the address version byte prepended to the hashed public
// key before checksumming.
const version = byte(0x00)

// addressCHecksumLen is the number of checksum bytes appended to an
// address payload.
const addressCHecksumLen = 4

// Wallet bundles an ECDSA key pair used to derive payment addresses.
type Wallet struct {
	// PrivateKey is the ECDSA private key.
	PrivateKey ecdsa.PrivateKey
	// PublicKey is the raw public key bytes derived from the private key.
	PublicKey []byte
}
// NewWallet creates a wallet backed by a freshly generated key pair.
func NewWallet() *Wallet {
	priv, pub := newKeyPair()
	return &Wallet{PrivateKey: priv, PublicKey: pub}
}
// IsValidForAddress reports whether a Base58Check address is valid:
// the trailing checksum bytes must equal the checksum recomputed over
// the version+hash payload.
//
// BUG FIX: the original sliced the SAME last-4-bytes range into both
// checkSumBytes and version_ripemd160 and compared them, so every
// address validated as true. The checksum must be compared against
// CheckSum of everything BEFORE the last addressCHecksumLen bytes.
func IsValidForAddress(address []byte) bool {
	decoded := Base58Decode(address)
	// Too short to even contain a checksum: reject.
	if len(decoded) <= addressCHecksumLen {
		return false
	}
	split := len(decoded) - addressCHecksumLen
	payload, gotChecksum := decoded[:split], decoded[split:]
	wantChecksum := CheckSum(payload)
	// Debug output retained from the original implementation.
	fmt.Println(len(gotChecksum))
	fmt.Println(len(wantChecksum))
	return bytes.Compare(gotChecksum, wantChecksum) == 0
}
// GetAddress derives the wallet's Base58Check address:
// Base58(version || RIPEMD160(SHA256(pubkey)) || 4-byte checksum).
func (w *Wallet) GetAddress() []byte {
	versionedPayload := append([]byte{version}, Ripemd160Hash(w.PublicKey)...)
	full := append(versionedPayload, CheckSum(versionedPayload)...)
	return Base58Encode(full)
}
// CheckSum returns the first addressCHecksumLen bytes of the double
// SHA-256 hash of payload.
func CheckSum(payload []byte) []byte {
	first := sha256.Sum256(payload)
	second := sha256.Sum256(first[:])
	return second[:addressCHecksumLen]
}
// Ripemd160Hash hashes the public key with SHA-256 and then
// RIPEMD-160, yielding the 20-byte payload used to build addresses.
func Ripemd160Hash(publicKey []byte) []byte {
	sha := sha256.New()
	sha.Write(publicKey)
	shaDigest := sha.Sum(nil)
	r := ripemd1602.New()
	r.Write(shaDigest)
	return r.Sum(nil)
}
// newKeyPair generates a P-256 ECDSA key pair and returns the private
// key plus the raw concatenation X||Y as the public key bytes.
// NOTE(review): X and Y are not fixed-width padded, so the public key
// length can vary when a coordinate has leading zero bytes — confirm
// downstream consumers tolerate this.
func newKeyPair() (ecdsa.PrivateKey, []byte) {
	curve := elliptic.P256()
	privateKey, err := ecdsa.GenerateKey(curve, rand.Reader)
	if err != nil {
		log.Panic(err)
	}
	publicKey := append(privateKey.PublicKey.X.Bytes(), privateKey.PublicKey.Y.Bytes()...)
	return *privateKey, publicKey
}
|
package main
import (
"fmt"
"strings"
"strconv"
)
// main demonstrates common strings and strconv helpers.
func main() {
	s := "hello world"
	fmt.Println(strings.Contains(s, "hello"), strings.Contains(s, "?"))
	fmt.Println(strings.Index(s, "o"))
	ss := "1#2#345"
	parts := strings.Split(ss, "#")
	fmt.Println(parts)
	fmt.Println(strings.Join(parts, "#"))
	fmt.Println(strings.HasPrefix(s, "he"), strings.HasSuffix(s, "ld"))
	fmt.Println(strconv.Itoa(10))
	fmt.Println(strconv.Atoi("711"))
	fmt.Println(strconv.ParseBool("false"))
	// Floats can be parsed at 32- or 64-bit precision.
	fmt.Println(strconv.ParseFloat("3.14", 32))
	fmt.Println(strconv.ParseFloat("3.14", 64))
	fmt.Println(strconv.FormatBool(true))
	fmt.Println(strconv.FormatInt(123, 10))
	fmt.Println(strconv.FormatInt(123, 2))
	fmt.Println(strconv.FormatInt(20, 16))
}
|
package linkedstack
import (
"sync"
)
//node is a singly linked list node: the unit of storage for
//LinkedStack. value holds the pushed element, next points toward the
//bottom of the stack.
type node struct {
	value interface{}
	next  *node
}
//LinkedStack is a LIFO stack backed by a linked list. length tracks
//the element count, lock guards all fields, and next is the top node
//(nil when empty).
type LinkedStack struct {
	length int
	lock   *sync.RWMutex
	next   *node
}
//NewLinkedStack returns an empty, ready-to-use LinkedStack.
func NewLinkedStack() *LinkedStack {
	return &LinkedStack{length: 0, lock: &sync.RWMutex{}, next: nil}
}
//IsEmpty return true if LinkedStack is empty,else false.
//Note: reads length without taking the lock; Pop relies on calling it
//while already holding the write lock.
func (s *LinkedStack) IsEmpty() bool {
	return s.length == 0
}
//Push places v on top of the stack. Safe for concurrent use.
func (s *LinkedStack) Push(v interface{}) {
	s.lock.Lock()
	defer s.lock.Unlock()
	top := &node{v, s.next}
	s.next = top
	s.length++
}
//Pop removes and returns the top value, or nil when the stack is
//empty. Safe for concurrent use.
func (s *LinkedStack) Pop() interface{} {
	s.lock.Lock()
	defer s.lock.Unlock()
	if s.IsEmpty() {
		return nil
	}
	top := s.next
	s.next = top.next
	s.length--
	return top.value
}
//Peek returns the top value without removing it, or nil when empty.
//Fixed: acquire the read lock so Peek is safe against concurrent
//Push/Pop, matching the locking discipline of the other methods (the
//original read s.next with no lock at all).
func (s *LinkedStack) Peek() interface{} {
	s.lock.RLock()
	defer s.lock.RUnlock()
	if s.IsEmpty() {
		return nil
	}
	return s.next.value
}
|
package orderagregate
//Order is the aggregate root for the order domain: a list of items
//plus a lifecycle status (PENDING/COMPLETE/REJECT/ACCEPT).
type Order struct {
	ID string `json:"id,omitempty" bson:"_id,omitempty"`
	OrderItems []OrderItem `json:"orderItems" bson:"order_items"`
	Status string `json:"status" bson:"status"`
}
//CreateOrder builds a new Order holding the given items in the
//initial PENDING state.
func CreateOrder(orderItem []OrderItem) Order {
	order := Order{Status: "PENDING"}
	order.OrderItems = orderItem
	return order
}
//Complete changes the order status to complete.
func (o *Order) Complete() {
	o.Status = "COMPLETE"
}
//Reject changes the order status to rejected (stored as "REJECT").
func (o *Order) Reject() {
	o.Status = "REJECT"
}
//Accept changes the order status to accepted (stored as "ACCEPT").
func (o *Order) Accept() {
	o.Status = "ACCEPT"
}
|
package leclog
import (
"fmt"
"log"
"time"
)
// timeOnlyLogWriter is an io.Writer that prefixes each write with the
// current wall-clock time (HH:MM:SS) and prints it via fmt.Print.
type timeOnlyLogWriter struct {
}

// Write prints the payload with the time prefix and returns the byte
// count and error reported by fmt.Print.
func (writer timeOnlyLogWriter) Write(bytes []byte) (int, error) {
	line := time.Now().Format("15:04:05") + " " + string(bytes)
	return fmt.Print(line)
}
// Pattern selects a predefined log output format for SetLogPattern.
type Pattern int

const (
	// TimeOnly prefixes each log line with just HH:MM:SS.
	TimeOnly Pattern = iota
)
// SetLogPattern reconfigures the standard logger for the given
// pattern; an unknown pattern is reported via the logger itself.
func SetLogPattern(pattern Pattern) {
	if pattern == TimeOnly {
		// Disable the default flags; the custom writer adds the time.
		log.SetFlags(0)
		log.SetOutput(new(timeOnlyLogWriter))
		return
	}
	log.Printf("Unhandled logFormat: %v\n", pattern)
}
|
package main
import (
"bytes"
"testing"
"github.com/mumoshu/gosh"
"github.com/mumoshu/gosh/goshtest"
"github.com/stretchr/testify/assert"
)
// TestMain drives the gosh-defined shell: it runs the "foo" command
// with two arguments and checks its captured stdout.
func TestMain(t *testing.T) {
	shell := New()
	goshtest.Run(t, shell, func() {
		t.Run("foo", func(t *testing.T) {
			var out bytes.Buffer
			if err := shell.Run(t, "foo", "a", "b", gosh.WriteStdout(&out)); err != nil {
				t.Fatal(err)
			}
			assert.Equal(t, "running setup1\ndir=aa\na b\na b\n", out.String())
		})
	})
}
|
package main
import "fmt"
// main demonstrates singleNumber on a sample slice where every value
// but 99 appears three times.
func main() {
	sample := []int{0, 1, 0, 1, 0, 1, 99}
	fmt.Println(singleNumber(sample))
}
// singleNumber returns the value that appears exactly once in nums
// when every other value appears exactly three times. ones/twos track
// the bits seen once/twice so far; a bit seen a third time is cleared
// from both, leaving ones equal to the unique value at the end.
func singleNumber(nums []int) int {
	ones, twos := 0, 0
	for _, v := range nums {
		ones = ^twos & (ones ^ v)
		twos = ^ones & (twos ^ v)
	}
	return ones
}
|
package usecases
import (
"persons.com/api/domain/person"
)
// PersonUseCases is the application-level port exposing person
// operations to transport adapters.
type PersonUseCases interface {
	FindById(id string) (*person.Person, error)
	GetAll() ([]*person.Person, error)
	Create(person *person.Person) error
}

// PersonsCacheService is the cache port: persons are stored and
// retrieved by string key, individually or as a list.
type PersonsCacheService interface {
	Set(key string, person *person.Person) error
	SetAll(key string, persons []*person.Person) error
	Get(key string) (*person.Person, error)
	GetAll(key string) ([]*person.Person, error)
}

// PersonValidator is the validation port: it returns a non-nil error
// when the given person fails validation.
type PersonValidator func(person *person.Person) error
// PersonUseCase implements PersonUseCases by composing the domain
// service, cache, event, and validator ports.
type PersonUseCase struct {
	personService person.PersonService
	personCache PersonsCacheService
	personEventService person.PersonEventsService
	personValidator PersonValidator
}
// NewPersonUseCases wires the service, event, cache, and validation
// dependencies into a PersonUseCases implementation.
func NewPersonUseCases(personService person.PersonService, personEventService person.PersonEventsService, personCache PersonsCacheService, personValidator PersonValidator) PersonUseCases {
	uc := &PersonUseCase{}
	uc.personService = personService
	uc.personCache = personCache
	uc.personEventService = personEventService
	uc.personValidator = personValidator
	return uc
}
|
// Copyright © 2016 Kim Eik
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package pbapi
import (
"fmt"
"net/url"
"github.com/google/go-querystring/query"
)
//APIURL is the base endpoint of the Pixabay REST API.
const APIURL = "https://pixabay.com/api/"

//Constants for the ImageType attribute.
const (
	ImageTypeAll = "all"
	ImageTypePhoto = "photo"
	ImageTypeIllustration = "illustration"
	ImageTypeVector = "vector"
)

//Constants for the ResponseGroup attribute.
const (
	ResponseGroupImageDetails = "image_details"
	ResponseGroupHighResolution = "high_resolution"
)

//Constants for the Orientation attribute.
const (
	OrientationAll = "all"
	OrientationHorizontal = "horizontal"
	OrientationVertical = "vertical"
)

//Constants for the Category attribute.
const (
	CategoryFashion = "fashion"
	CategoryNature = "nature"
	CategoryBackgrounds = "backgrounds"
	CategoryScience = "science"
	CategoryEducation = "education"
	CategoryPeople = "people"
	CategoryFeelings = "feelings"
	CategoryReligion = "religion"
	CategoryHealth = "health"
	CategoryPlaces = "places"
	CategoryAnimals = "animals"
	CategoryIndustry = "industry"
	CategoryFood = "food"
	CategoryComputer = "computer"
	CategorySports = "sports"
	CategoryTransportation = "transportation"
	CategoryTravel = "travel"
	CategoryBuildings = "buildings"
	CategoryBusiness = "business"
	CategoryMusic = "music"
)

//Constants for the Order attribute.
const (
	OrderPopular = "popular"
	OrderLatest = "latest"
)
//Request payload: the full set of Pixabay search parameters. Each
//field is serialized into a URL query parameter via its `url` tag.
type Request struct {
	Key string `url:"key"`
	ResponseGroup string `url:"response_group"`
	ID string `url:"id"`
	Query string `url:"q"`
	Lang string `url:"lang"`
	ImageType string `url:"image_type"`
	Orientation string `url:"orientation"`
	Category string `url:"category"`
	MinWidth int `url:"min_width"`
	MinHeight int `url:"min_height"`
	EditorsChoice bool `url:"editors_choice"`
	SafeSearch bool `url:"safesearch"`
	Order string `url:"order"`
	Page int `url:"page"`
	PerPage int `url:"per_page"`
	Callback string `url:"callback"`
	Pretty bool `url:"pretty"`
}
//NewRequest returns a Request pre-populated with the default search
//settings for the given API key; numeric and boolean fields not set
//here keep their zero values, matching the original defaults.
func NewRequest(key string) *Request {
	r := &Request{Key: key}
	r.ResponseGroup = ResponseGroupImageDetails
	r.Lang = "en"
	r.ImageType = ImageTypeAll
	r.Orientation = OrientationAll
	r.Order = OrderPopular
	r.Page = 1
	r.PerPage = 20
	return r
}
//GetRequestURI encodes the request fields into query parameters and
//returns the full Pixabay API URL.
func (r *Request) GetRequestURI() (*url.URL, error) {
	queryParams, err := query.Values(r)
	if err != nil {
		return nil, err
	}
	full := fmt.Sprintf("%s?%s", APIURL, queryParams.Encode())
	return url.ParseRequestURI(full)
}
|
/*
Given a unordered array of the vertices of a convex polygon, find its area.
Examples
polygon([[2, 5], [5, 1], [-4, 3]]) ➞ 15.0
polygon([[-1, 1], [1, 1], [-1, -1], [1, -1]]) ➞ 4.0
polygon([[2, 2], [11, 2], [4, 10], [9, 7]]) ➞ 45.5
polygon([[5, 3], [3, 4], [12, 8], [5, 11], [9, 5]]) ➞ 39.0
Notes
A convex polygon has all interior angles less than 180 degrees.
The first example has only 3 vertices so this list is ordered by default.
*/
package main
import (
"image"
"math"
"sort"
)
// main runs the polygon-area checks as a table of cases.
func main() {
	cases := []struct {
		pts  []image.Point
		want float64
	}{
		{[]image.Point{{2, 5}, {5, 1}, {-4, 3}}, 15},
		{[]image.Point{{-1, 1}, {1, 1}, {-1, -1}, {1, -1}}, 4},
		{[]image.Point{{2, 2}, {11, 2}, {4, 10}, {9, 7}}, 45.5},
		{[]image.Point{{-1, 1}, {1, 8}, {1, -3}, {3, 1}, {3, -1}}, 24},
		{[]image.Point{{5, 3}, {3, 4}, {12, 8}, {5, 11}, {9, 5}}, 39},
		{[]image.Point{{77, 10}, {92, 71}, {51, 9}, {62, 84}, {29, 94}}, 3274.5},
		{[]image.Point{{8199, 8629}, {424, 4349}, {5026, 68}, {7682, 3184}, {7561, 9036}, {1813, 8798}, {5360, 92}, {9676, 6684}, {4597, 9802}, {760, 2765}, {5872, 557}, {4446, 423}}, 59253602.0},
	}
	for _, c := range cases {
		test(c.pts, c.want)
	}
}
func test(p []image.Point, r float64) {
assert(math.Abs(polygon(p)-r) < 1e-8)
}
// assert panics when its condition does not hold.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
/*
https://www.geeksforgeeks.org/find-simple-closed-path-for-a-given-set-of-points/
https://en.wikipedia.org/wiki/Shoelace_formula
Generate a simple closed path to sort the points so we can use the shoelace formula
*/
// polygon returns the area of the convex polygon whose (unordered)
// vertices are p. It moves the lowest-leftmost vertex to p[0], sorts
// the remaining vertices by angle around it (breaking ties by
// distance), and applies the shoelace formula. NOTE: p is reordered
// in place.
func polygon(p []image.Point) float64 {
	if len(p) < 3 {
		return 0
	}
	// Pivot: index of the lowest (then leftmost) vertex.
	n := ylb(p)
	p[0], p[n] = p[n], p[0]
	// Sort the rest counterclockwise around the pivot; collinear
	// vertices are ordered nearest-first.
	sort.Slice(p[1:], func(i, j int) bool {
		d := orientation(p[0], p[i+1], p[j+1])
		if d == 0 {
			return dist(p[0], p[i+1]) < dist(p[0], p[j+1])
		}
		return d < 0
	})
	// The signed shoelace area may be negative; take the magnitude.
	return math.Abs(shoelace(p))
}
func ylb(p []image.Point) int {
ym := p[0].Y
n := 0
for i := 1; i < len(p); i++ {
if p[i].Y < ym || (ym == p[i].Y && p[i].X < p[n].X) {
ym = p[i].Y
n = i
}
}
return n
}
func dist(p1, p2 image.Point) int {
return (p1.X-p2.X)*(p1.X-p2.X) + (p1.Y-p2.Y)*(p1.Y-p2.Y)
}
func orientation(p, q, r image.Point) int {
return (q.Y-p.Y)*(r.X-q.X) - (q.X-p.X)*(r.Y-q.Y)
}
func shoelace(p []image.Point) float64 {
n := len(p)
if n < 3 {
return 0
}
s := 0
for i := 0; i < n-1; i++ {
s += (p[i+1].X + p[i].X) * (p[i].Y - p[i+1].Y)
}
s += (p[0].X + p[n-1].X) * (p[n-1].Y - p[0].Y)
return float64(s) * 0.5
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
"strconv"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard"
)
// IndicesOptions type.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/_types/common.ts#L297-L324
// Generated type: fields use *bool so an unset option can be
// distinguished from an explicit false when serializing.
type IndicesOptions struct {
	// AllowNoIndices If false, the request returns an error if any wildcard expression, index
	// alias, or `_all` value targets only
	// missing or closed indices. This behavior applies even if the request targets
	// other open indices. For example,
	// a request targeting `foo*,bar*` returns an error if an index starts with
	// `foo` but no index starts with `bar`.
	AllowNoIndices *bool `json:"allow_no_indices,omitempty"`
	// ExpandWildcards Type of index that wildcard patterns can match. If the request can target
	// data streams, this argument
	// determines whether wildcard expressions match hidden data streams. Supports
	// comma-separated values,
	// such as `open,hidden`.
	ExpandWildcards []expandwildcard.ExpandWildcard `json:"expand_wildcards,omitempty"`
	// IgnoreThrottled If true, concrete, expanded or aliased indices are ignored when frozen.
	IgnoreThrottled *bool `json:"ignore_throttled,omitempty"`
	// IgnoreUnavailable If true, missing or closed indices are not included in the response.
	IgnoreUnavailable *bool `json:"ignore_unavailable,omitempty"`
}
// UnmarshalJSON decodes IndicesOptions from a JSON object, accepting
// booleans either as native bools or as strings ("true"/"false"),
// and expand_wildcards either as a single value or as an array.
// NOTE(review): generated code — the dec.Decode errors inside the
// cases are silently ignored; fix the generator/spec, not this file.
func (s *IndicesOptions) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}
		switch t {
		case "allow_no_indices":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseBool(v)
				if err != nil {
					return err
				}
				s.AllowNoIndices = &value
			case bool:
				s.AllowNoIndices = &v
			}
		case "expand_wildcards":
			rawMsg := json.RawMessage{}
			dec.Decode(&rawMsg)
			// A leading '[' means a list; otherwise a single value is
			// appended to the slice.
			if !bytes.HasPrefix(rawMsg, []byte("[")) {
				o := &expandwildcard.ExpandWildcard{}
				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
					return err
				}
				s.ExpandWildcards = append(s.ExpandWildcards, *o)
			} else {
				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.ExpandWildcards); err != nil {
					return err
				}
			}
		case "ignore_throttled":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseBool(v)
				if err != nil {
					return err
				}
				s.IgnoreThrottled = &value
			case bool:
				s.IgnoreThrottled = &v
			}
		case "ignore_unavailable":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseBool(v)
				if err != nil {
					return err
				}
				s.IgnoreUnavailable = &value
			case bool:
				s.IgnoreUnavailable = &v
			}
		}
	}
	return nil
}
// NewIndicesOptions returns an empty, ready-to-use IndicesOptions.
func NewIndicesOptions() *IndicesOptions {
	return &IndicesOptions{}
}
|
package main
import (
"fmt"
)
// test demonstrates a named return value: b is declared in the
// signature, assigned in the body, and returned. (Go has no default
// parameter values; multiple returns are allowed.)
func test(a int) (b int) {
	b = a
	return b
}
// test2 shows that consecutive parameters of the same type may share
// one type declaration; it returns both arguments unchanged.
func test2(a, b int) (int, int) {
	return a, b
}
// test3 demonstrates variadic parameters: y is received as a slice,
// may be empty, and must be the final parameter.
func test3(x string, y ...int) {
	fmt.Println(x)
	fmt.Println(y)
}
// test6 shows that arrays are passed by value in Go: mutating the
// parameter changes only the callee's copy, which is returned.
func test6(a [2]int) [2]int {
	a[0] = 666
	return a
}
// main exercises the demo functions above.
func main() {
	got := test(1)
	fmt.Println(got)
	fmt.Println(test2(1, 2))
	test3("后面y的参数也可以不传:", 2, 3, 9)
	arr := [2]int{1, 2}
	// Arrays are copied into the callee, so arr itself is untouched.
	mutatedCopy := test6(arr)
	fmt.Println(mutatedCopy)
	fmt.Println(arr)
}
|
package cmd
import (
"os"
"github.com/spf13/cobra"
"github.com/Zenika/marcel/config"
"github.com/Zenika/marcel/frontend"
)
// init registers the "frontend" subcommand on the root Marcel command
// and binds its flags (port, basePath, apiURI) to the matching config
// keys. NOTE(review): relies on init-time side effects and the
// package-level Marcel command; confirm ordering with other init()s.
func init() {
	var cfg = config.New()
	var cmd = &cobra.Command{
		Use: "frontend",
		Short: "Starts Marcel's Frontend server",
		Args: cobra.NoArgs,
		PreRunE: preRunForServer(cfg),
		Run: func(_ *cobra.Command, _ []string) {
			// The process exits with the module's return code.
			os.Exit(frontend.Module().Run())
		},
	}
	var flags = cmd.Flags()
	if _, err := cfg.FlagUintP(flags, "port", "p", cfg.HTTP().Port(), "Listening port", "http.port"); err != nil {
		panic(err)
	}
	if _, err := cfg.FlagString(flags, "basePath", cfg.Frontend().BasePath(), "Base path", "frontend.basePath"); err != nil {
		panic(err)
	}
	if _, err := cfg.FlagString(flags, "apiURI", cfg.API().BasePath(), "API URI", "api.basePath"); err != nil {
		panic(err)
	}
	Marcel.AddCommand(cmd)
}
|
package main
import "net"
// c and e are kept for the commented-out echo-client demo below; they
// are unused while main is disabled.
var c net.Conn
var e error
// func main() {
// c, e = net.Dial("tcp", "localhost:5555")
// if e != nil {
// panic(e)
// }
// i := 0
// for {
// _, err := c.Write([]byte("hi" + strconv.Itoa(i) + "\n"))
// if err != nil {
// println(err)
// }
//
// time.Sleep(1e9)
// buf := make([]byte, 512)
// nr, err := c.Read(buf)
//
// data := buf[0:nr]
// fmt.Printf("echoed: %v", string(data))
// i++
// }
// }
|
package main
// totalNQueens counts the distinct solutions to the n-queens puzzle
// by backtracking row by row, tracking occupied columns and both
// diagonal directions.
func totalNQueens(n int) int {
	// Guard: for n <= 0 the diagonal slices below would have a
	// negative length and make() would panic.
	if n <= 0 {
		return 0
	}
	res := 0
	// col[i]: column i already holds a queen.
	col := make([]bool, n)
	// dia1/dia2: whether each of the 2n-1 "/" and "\" diagonals
	// already holds a queen.
	dia1 := make([]bool, 2*n-1)
	dia2 := make([]bool, 2*n-1)
	var putQueen func(row int)
	putQueen = func(row int) {
		if row == n {
			// All n rows filled: one complete solution. Fixed: return
			// here — the original fell through into the column loop
			// (harmless only because every column was occupied).
			res++
			return
		}
		for i := 0; i < n; i++ { // try every column in this row
			if !col[i] && !dia1[row+i] && !dia2[row-i+n-1] {
				col[i] = true
				dia1[row+i] = true
				dia2[row-i+n-1] = true
				putQueen(row + 1)
				// Undo the placement; only the count matters.
				col[i] = false
				dia1[row+i] = false
				dia2[row-i+n-1] = false
			}
		}
	}
	putQueen(0)
	return res
}
|
package main
import (
"context"
"flag"
"fmt"
"log"
"strings"
multiaddr "gx/ipfs/QmNTCey11oxhb1AxDnQBRHtdhap6Ctud872NjAYPYYXPuc/go-multiaddr"
quic "gx/ipfs/QmPxDT1mJcdVbPSGRrActszXdptSgcj9gtyMuhrPavXFCN/go-libp2p-quic-transport"
relay "gx/ipfs/QmQG8wJtY6KfsTH2tjhaThFPeYVJGm7cmRMxen73ipA4Z5/go-libp2p-circuit"
libp2p "gx/ipfs/QmSgtf5vHyugoxcwMbyNy6bZ9qPDDTJSYEED2GkWjLwitZ/go-libp2p"
identify "gx/ipfs/QmSgtf5vHyugoxcwMbyNy6bZ9qPDDTJSYEED2GkWjLwitZ/go-libp2p/p2p/protocol/identify"
p2pd "gx/ipfs/QmTSBjpm7866oJKqcbuuicx6v3ypjgzoZ47y6xDNnWbB3o/go-libp2p-daemon"
ps "gx/ipfs/QmWL6MKfes1HuSiRUNzGmwy9YyQDwcZF9V1NaA2keYKhtE/go-libp2p-pubsub"
connmgr "gx/ipfs/Qmb5KqwKh3iqcf91oLunTUXfV9PotzvCAdyrahhPq1uZyy/go-libp2p-connmgr"
)
// main parses the daemon's flags, assembles the libp2p host options
// (identity, transports, relay, NAT, connection manager), starts the
// p2p daemon on the control socket, and optionally enables AutoNAT,
// pubsub and DHT bootstrapping before blocking forever.
func main() {
	identify.ClientVersion = "p2pd/0.1"
	// --- command-line flags ---
	maddrString := flag.String("listen", "/unix/tmp/p2pd.sock", "daemon control listen multiaddr")
	quiet := flag.Bool("q", false, "be quiet")
	id := flag.String("id", "", "peer identity; private key file")
	bootstrap := flag.Bool("b", false, "connects to bootstrap peers and bootstraps the dht if enabled")
	bootstrapPeers := flag.String("bootstrapPeers", "", "comma separated list of bootstrap peers; defaults to the IPFS DHT peers")
	dht := flag.Bool("dht", false, "Enables the DHT in full node mode")
	dhtClient := flag.Bool("dhtClient", false, "Enables the DHT in client mode")
	connMgr := flag.Bool("connManager", false, "Enables the Connection Manager")
	connMgrLo := flag.Int("connLo", 256, "Connection Manager Low Water mark")
	connMgrHi := flag.Int("connHi", 512, "Connection Manager High Water mark")
	// NOTE(review): flag.Duration defaults are in nanoseconds, so the
	// default here is 120ns, not the 120s the help text suggests —
	// confirm intended behavior.
	connMgrGrace := flag.Duration("connGrace", 120, "Connection Manager grace period (in seconds)")
	QUIC := flag.Bool("quic", false, "Enables the QUIC transport")
	natPortMap := flag.Bool("natPortMap", false, "Enables NAT port mapping")
	pubsub := flag.Bool("pubsub", false, "Enables pubsub")
	pubsubRouter := flag.String("pubsubRouter", "gossipsub", "Specifies the pubsub router implementation")
	pubsubSign := flag.Bool("pubsubSign", true, "Enables pubsub message signing")
	pubsubSignStrict := flag.Bool("pubsubSignStrict", false, "Enables pubsub strict signature verification")
	gossipsubHeartbeatInterval := flag.Duration("gossipsubHeartbeatInterval", 0, "Specifies the gossipsub heartbeat interval")
	gossipsubHeartbeatInitialDelay := flag.Duration("gossipsubHeartbeatInitialDelay", 0, "Specifies the gossipsub initial heartbeat delay")
	relayEnabled := flag.Bool("relay", true, "Enables circuit relay")
	relayActive := flag.Bool("relayActive", false, "Enables active mode for relay")
	relayHop := flag.Bool("relayHop", false, "Enables hop for relay")
	relayDiscovery := flag.Bool("relayDiscovery", false, "Enables passive discovery for relay")
	autoRelay := flag.Bool("autoRelay", false, "Enables autorelay")
	autonat := flag.Bool("autonat", false, "Enables the AutoNAT service")
	hostAddrs := flag.String("hostAddrs", "", "comma separated list of multiaddrs the host should listen on")
	flag.Parse()
	// --- assemble libp2p host options from the flags ---
	var opts []libp2p.Option
	maddr, err := multiaddr.NewMultiaddr(*maddrString)
	if err != nil {
		log.Fatal(err)
	}
	if *id != "" {
		key, err := p2pd.ReadIdentity(*id)
		if err != nil {
			log.Fatal(err)
		}
		opts = append(opts, libp2p.Identity(key))
	}
	if *hostAddrs != "" {
		addrs := strings.Split(*hostAddrs, ",")
		opts = append(opts, libp2p.ListenAddrStrings(addrs...))
	}
	if *connMgr {
		cm := connmgr.NewConnManager(*connMgrLo, *connMgrHi, *connMgrGrace)
		opts = append(opts, libp2p.ConnectionManager(cm))
	}
	if *QUIC {
		opts = append(opts,
			libp2p.DefaultTransports,
			libp2p.Transport(quic.NewTransport),
		)
		// if we explicitly specify a transport, we must also explicitly specify the listen addrs
		if *hostAddrs == "" {
			opts = append(opts,
				libp2p.ListenAddrStrings(
					"/ip4/0.0.0.0/tcp/0",
					"/ip4/0.0.0.0/udp/0/quic",
					"/ip6/::1/tcp/0",
					"/ip6/::1/udp/0/quic",
				))
		}
	}
	if *natPortMap {
		opts = append(opts, libp2p.NATPortMap())
	}
	if *relayEnabled {
		var relayOpts []relay.RelayOpt
		if *relayActive {
			relayOpts = append(relayOpts, relay.OptActive)
		}
		if *relayHop {
			relayOpts = append(relayOpts, relay.OptHop)
		}
		if *relayDiscovery {
			relayOpts = append(relayOpts, relay.OptDiscovery)
		}
		opts = append(opts, libp2p.EnableRelay(relayOpts...))
	}
	// Autorelay requires both the DHT (for discovery) and relay.
	if *autoRelay {
		if !(*dht || *dhtClient) {
			log.Fatal("DHT must be enabled in order to enable autorelay")
		}
		if !*relayEnabled {
			log.Fatal("Relay must be enabled to enable autorelay")
		}
		opts = append(opts, libp2p.EnableAutoRelay())
	}
	// --- start the daemon ---
	d, err := p2pd.NewDaemon(context.Background(), maddr, *dht, *dhtClient, opts...)
	if err != nil {
		log.Fatal(err)
	}
	if *autonat {
		var opts []libp2p.Option
		// allow the AutoNAT service to dial back quic addrs.
		if *QUIC {
			opts = append(opts,
				libp2p.DefaultTransports,
				libp2p.Transport(quic.NewTransport),
			)
		}
		err := d.EnableAutoNAT(opts...)
		if err != nil {
			log.Fatal(err)
		}
	}
	if *pubsub {
		if *gossipsubHeartbeatInterval > 0 {
			ps.GossipSubHeartbeatInterval = *gossipsubHeartbeatInterval
		}
		if *gossipsubHeartbeatInitialDelay > 0 {
			ps.GossipSubHeartbeatInitialDelay = *gossipsubHeartbeatInitialDelay
		}
		err = d.EnablePubsub(*pubsubRouter, *pubsubSign, *pubsubSignStrict)
		if err != nil {
			log.Fatal(err)
		}
	}
	if *bootstrapPeers != "" {
		for _, s := range strings.Split(*bootstrapPeers, ",") {
			ma, err := multiaddr.NewMultiaddr(s)
			if err != nil {
				log.Fatalf("error parsing bootstrap peer %q: %v", s, err)
			}
			p2pd.BootstrapPeers = append(p2pd.BootstrapPeers, ma)
		}
	}
	if *bootstrap {
		err = d.Bootstrap()
		if err != nil {
			log.Fatal(err)
		}
	}
	// --- startup report ---
	if !*quiet {
		fmt.Printf("Control socket: %s\n", maddr.String())
		fmt.Printf("Peer ID: %s\n", d.ID().Pretty())
		fmt.Printf("Peer Addrs:\n")
		for _, addr := range d.Addrs() {
			fmt.Printf("%s\n", addr.String())
		}
		if *bootstrap && *bootstrapPeers != "" {
			fmt.Printf("Bootstrap peers:\n")
			for _, p := range p2pd.BootstrapPeers {
				fmt.Printf("%s\n", p)
			}
		}
	}
	// Block forever; the daemon runs in background goroutines.
	select {}
}
|
package main
import (
"fmt"
"sort"
)
//给你一个含 n 个整数的数组 nums ,其中 nums[i] 在区间 [1, n] 内。
//请你找出所有在 [1, n] 范围内但没有出现在 nums 中的数字,并以数组的形式返回结果。
//输入:nums = [4,3,2,7,8,2,3,1]
//输出:[5,6]
// main demonstrates findDisappearedNumbers on the sample input from
// the problem statement (expected output: [5 6]).
func main() {
	sample := []int{4, 3, 2, 7, 8, 2, 3, 1}
	fmt.Println(findDisappearedNumbers(sample))
}
// findDisappearedNumbers returns, in ascending order, every value in
// [1, len(nums)] that does not appear in nums (whose elements are all
// in that range).
//
// BUG FIX: the original appended nums[i] whenever the sorted value
// didn't equal the running counter, which yields the duplicated
// PRESENT values (e.g. [2 3 3 4] for the sample) instead of the
// missing ones ([5 6]). This version scans a sorted copy and emits
// the gaps; it also no longer mutates the caller's slice.
func findDisappearedNumbers(nums []int) []int {
	n := len(nums)
	sorted := make([]int, n)
	copy(sorted, nums)
	sort.Ints(sorted)
	ans := make([]int, 0)
	expect := 1
	for _, v := range sorted {
		// Everything between expect and v is missing.
		for expect < v {
			ans = append(ans, expect)
			expect++
		}
		if v == expect { // duplicates of earlier values fall through
			expect++
		}
	}
	// Values above the largest element are missing too.
	for ; expect <= n; expect++ {
		ans = append(ans, expect)
	}
	return ans
}
// findDisappearedNumbers2 finds the missing values of [1, len(nums)]
// by counting occurrences in a map and collecting the absent keys.
func findDisappearedNumbers2(nums []int) []int {
	seen := make(map[int]int)
	for _, v := range nums {
		seen[v]++
	}
	missing := make([]int, 0)
	for candidate := 1; candidate <= len(nums); candidate++ {
		if _, present := seen[candidate]; !present {
			missing = append(missing, candidate)
		}
	}
	return missing
}
|
// Package queue defines queue constants.
package queue
import (
"encoding/json"
"fmt"
"strings"
)
type Queue int
// UnmarshalJSON decodes a Queue from JSON. It accepts either the numeric
// queue ID or one of the known queue-name strings (matched
// case-insensitively); unknown strings yield an error.
func (q *Queue) UnmarshalJSON(b []byte) error {
	var (
		s string
		i int
	)
	// First see if it is stored as native int.
	err := json.Unmarshal(b, &i)
	if err == nil {
		*q = Queue(i)
		return nil
	}
	// Must be a string.
	err = json.Unmarshal(b, &s)
	if err != nil {
		return err
	}
	switch strings.ToUpper(s) {
	case "RANKED_SOLO_5X5":
		*q = RankedSolo5x5
	case "RANKED_FLEX_SR":
		*q = RankedFlexSR
	case "RANKED_FLEX_TT":
		*q = RankedFlexTT
	case "ARAM_GAMES_5X5":
		// BUG FIX: the input is upper-cased above, so the previous
		// mixed-case literal "ARAM_games_5x5" could never match and
		// every ARAM value was rejected as invalid.
		*q = ARAM_games_5x5
	default:
		return fmt.Errorf("invalid queue %q", s)
	}
	return nil
}
// String returns the canonical API name of the queue. It panics when
// called on a Queue value it has no name for.
func (q Queue) String() string {
	switch q {
	case RankedSolo5x5:
		return "RANKED_SOLO_5x5"
	case RankedFlexSR:
		return "RANKED_FLEX_SR"
	case RankedFlexTT:
		return "RANKED_FLEX_TT"
	case ARAM_games_5x5:
		// BUG FIX: dropped the stray trailing space ("ARAM_games_5x5 "),
		// which broke string comparisons and round-tripping through
		// UnmarshalJSON.
		return "ARAM_games_5x5"
	default:
		panic(fmt.Sprintf("invalid Queue %d", q))
	}
}
// Queue IDs for the various match queue types. Several numeric values are
// deliberately reused by historical aliases (e.g. 9 is both
// RANKED_PREMADE_3x3 and RANKED_FLEX_TT, and 420 is both RankedSolo5x5 and
// TEAM_BUILDER_RANKED_SOLO) — NOTE(review): confirm these duplicates are
// intentional against the upstream queue reference.
const (
	RankedSolo5x5 Queue = 420 // Ranked Solo 5v5 (Team Builder matchmaking)
	RankedFlexSR  Queue = 440 // Ranked Flex Summoner's Rift
	RankedFlexTT  Queue = 470 // Ranked Flex Twisted Treeline
	CUSTOM Queue = 0 // Custom games
	NORMAL_3x3 Queue = 8 // Normal 3v3 games
	NORMAL_5x5_BLIND Queue = 2 // Normal 5v5 Blind Pick games
	NORMAL_5x5_DRAFT Queue = 14 // Normal 5v5 Draft Pick games
	RANKED_SOLO_5x5 Queue = 4 // Ranked Solo 5v5 games
	RANKED_PREMADE_5x5 Queue = 6 // Ranked Premade 5v5 games (Deprecated)
	RANKED_PREMADE_3x3 Queue = 9 // Historical Ranked Premade 3v3 games (Deprecated)
	RANKED_FLEX_TT Queue = 9 // Ranked Flex Twisted Treeline games
	RANKED_TEAM_3x3 Queue = 41 // Ranked Team 3v3 games (Deprecated)
	RANKED_TEAM_5x5 Queue = 42 // Ranked Team 5v5 games
	ODIN_5x5_BLIND Queue = 16 // Dominion 5v5 Blind Pick games
	ODIN_5x5_DRAFT Queue = 17 // Dominion 5v5 Draft Pick games
	BOT_5x5 Queue = 7 // Historical Summoner's Rift Coop vs AI games (Deprecated)
	BOT_ODIN_5x5 Queue = 25 // Dominion Coop vs AI games
	BOT_5x5_INTRO Queue = 31 // Summoner's Rift Coop vs AI Intro Bot games
	BOT_5x5_BEGINNER Queue = 32 // Summoner's Rift Coop vs AI Beginner Bot games
	BOT_5x5_INTERMEDIATE Queue = 33 // Historical Summoner's Rift Coop vs AI Intermediate Bot games
	BOT_TT_3x3 Queue = 52 // Twisted Treeline Coop vs AI games
	GROUP_FINDER_5x5 Queue = 61 // Team Builder games
	ARAM_5x5 Queue = 65 // ARAM games
	ONEFORALL_5x5 Queue = 70 // One for All games
	FIRSTBLOOD_1x1 Queue = 72 // Snowdown Showdown 1v1 games
	FIRSTBLOOD_2x2 Queue = 73 // Snowdown Showdown 2v2 games
	SR_6x6 Queue = 75 // Summoner's Rift 6x6 Hexakill games
	URF_5x5 Queue = 76 // Ultra Rapid Fire games
	ONEFORALL_MIRRORMODE_5x5 Queue = 78 // One for All (Mirror mode)
	BOT_URF_5x5 Queue = 83 // Ultra Rapid Fire games played against AI games
	NIGHTMARE_BOT_5x5_RANK1 Queue = 91 // Doom Bots Rank 1 games
	NIGHTMARE_BOT_5x5_RANK2 Queue = 92 // Doom Bots Rank 2 games
	NIGHTMARE_BOT_5x5_RANK5 Queue = 93 // Doom Bots Rank 5 games
	ASCENSION_5x5 Queue = 96 // Ascension games
	HEXAKILL Queue = 98 // Twisted Treeline 6x6 Hexakill games
	BILGEWATER_ARAM_5x5 Queue = 100 // Butcher's Bridge games
	KING_PORO_5x5 Queue = 300 // King Poro games
	COUNTER_PICK Queue = 310 // Nemesis games
	BILGEWATER_5x5 Queue = 313 // Black Market Brawlers games
	SIEGE Queue = 315 // Nexus Siege games
	DEFINITELY_NOT_DOMINION_5x5 Queue = 317 // Definitely Not Dominion games
	ARURF_5X5 Queue = 318 // All Random URF games
	ARSR_5x5 Queue = 325 // All Random Summoner's Rift games
	TEAM_BUILDER_DRAFT_UNRANKED_5x5 Queue = 400 // Normal 5v5 Draft Pick games
	TEAM_BUILDER_DRAFT_RANKED_5x5 Queue = 410 // Ranked 5v5 Draft Pick games (Deprecated)
	TEAM_BUILDER_RANKED_SOLO Queue = 420 // Ranked Solo games from current season that use Team Builder matchmaking
	RANKED_FLEX_SR Queue = 440 // Ranked Flex Summoner's Rift games
	ASSASSINATE_5x5 Queue = 600 // Blood Hunt Assassin games
	DARKSTAR_3x3 Queue = 610 // Darkstar games
	ARAM_games_5x5 Queue = 450 // ARAM games 5x5, 65 deprecated
)
|
package random
import (
"math/rand"
)
// New returns a new random string with a number of characters defined by
// the function parameter `length` and, eventually, some special chars
// and digits, depending on whether `specialChars` and/or `digits` are `true`
// New returns a new random string with a number of characters defined by
// the function parameter `length` and, eventually, some special chars
// and digits, depending on whether `specialChars` and/or `digits` are `true`.
// When a category is enabled, at least one character from it is guaranteed
// to appear; the buffer is shuffled at the end so its position is random.
//
// NOTE(review): as in the original, length must be at least the number of
// enabled guarantees (up to 2) or the index below panics — confirm callers
// never pass a smaller length.
func New(length int, specialChars, digits bool) string {
	pool := alphabet
	buf := make([]byte, length)
	reserved := 0
	if specialChars {
		pool += specials
		buf[reserved] = specials[rand.Intn(len(specials))]
		reserved++
	}
	if digits {
		pool += numbers
		buf[reserved] = numbers[rand.Intn(len(numbers))]
		reserved++
	}
	for i := reserved; i < length; i++ {
		buf[i] = pool[rand.Intn(len(pool))]
	}
	rand.Shuffle(len(buf), func(i, j int) {
		buf[i], buf[j] = buf[j], buf[i]
	})
	return string(buf)
}
|
package leetcode
// LongestValidParentheses returns the length of the longest well-formed
// "()" substring of s. It runs the classic two-directional counting scan
// (left-to-right and right-to-left) in a single loop: a balanced count
// marks a candidate, and a surplus of closers (resp. openers) resets the
// corresponding scan.
func LongestValidParentheses(s string) int {
	var (
		openL, closeL int // '(' / ')' counts for the left-to-right scan
		openR, closeR int // '(' / ')' counts for the right-to-left scan
		best          int
	)
	last := len(s) - 1
	for i := 0; i <= last; i++ {
		switch s[i] {
		case '(':
			openL++
		case ')':
			closeL++
		}
		switch s[last-i] {
		case '(':
			openR++
		case ')':
			closeR++
		}
		if closeL > openL {
			openL, closeL = 0, 0
		} else if closeL == openL && closeL > best {
			best = closeL
		}
		if openR > closeR {
			openR, closeR = 0, 0
		} else if openR == closeR && openR > best {
			best = openR
		}
	}
	return best * 2
}
|
package main
import (
"testing"
)
// TestSingleNumber is an empty placeholder.
// TODO: add assertions exercising the singleNumber implementation.
func TestSingleNumber(t *testing.T) {
}
|
package models
import (
"github.com/astaxie/beego/orm"
"time"
)
// HashrateOrderTransactionQueryParam carries the filter parameters for
// querying hashrate-order transactions.
type HashrateOrderTransactionQueryParam struct {
	BaseQueryParam
	Name      string `json:"name"`
	StartTime int64  `json:"startTime"` // start of the time window — presumably a unix timestamp; confirm with callers
	EndTime   int64  `json:"endTime"`   // end of the time window — presumably a unix timestamp; confirm with callers
	Status    string `json:"status"`    // status filter
}
// TableName tells beego's ORM which database table backs
// HashrateOrderTransaction.
func (a *HashrateOrderTransaction) TableName() string {
	return HashrateOrderTransactionTBName()
}
// HashrateOrderTransaction is one payment transaction linked to a hashrate
// contract order (original comment: 算力合约关联表).
//
// FIX: the struct-tag key:"value" pairs are now space-separated as the
// reflect.StructTag convention requires (go vet's structtag check flags the
// previous run-together form).
type HashrateOrderTransaction struct {
	// Primary key.
	KeyId int `orm:"pk;column(key_id)" json:"keyId" form:"keyId"`
	// ID of the order this transaction belongs to.
	OrderId string `orm:"column(order_id)" json:"orderId" form:"orderId"`
	// Payment method / currency.
	PayType string `orm:"column(pay_type)" json:"payType" form:"payType"`
	// Transaction amount.
	TransactionMoney float64 `orm:"column(transaction_money)" json:"transactionMoney" form:"transactionMoney"`
	// Creation timestamp, filled automatically on insert (auto_now_add).
	CreateTime time.Time `orm:"auto_now_add;type(datetime);column(create_time)" json:"createTime" form:"createTime"`
}
// HashrateOrderTransactionsByIds loads every transaction whose order id is
// in ids and groups them as orderId -> payType -> transaction. An empty id
// list (or a query failure) yields an empty, non-nil map.
func HashrateOrderTransactionsByIds(ids []string) map[string]map[string]*HashrateOrderTransaction {
	mapp := make(map[string]map[string]*HashrateOrderTransaction)
	if len(ids) == 0 {
		return mapp
	}
	data := make([]*HashrateOrderTransaction, 0)
	query := orm.NewOrm().QueryTable(HashrateOrderTransactionTBName())
	query = query.Filter("order_id__in", ids)
	// FIX: the error from All was silently discarded; on failure we now
	// return the empty map explicitly instead of accidentally treating a
	// failed query as an empty result set.
	if _, err := query.All(&data); err != nil {
		return mapp
	}
	for _, obj := range data {
		if _, found := mapp[obj.OrderId]; !found {
			mapp[obj.OrderId] = make(map[string]*HashrateOrderTransaction)
		}
		mapp[obj.OrderId][obj.PayType] = obj
	}
	return mapp
}
|
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aggfuncs_test
import (
"testing"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/executor/aggfuncs"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
)
// TestMergePartialResult4JsonArrayagg checks that merging two partial
// json_arrayagg results produces the concatenation of their entries, for a
// range of argument types.
func TestMergePartialResult4JsonArrayagg(t *testing.T) {
	typeList := []byte{mysql.TypeLonglong, mysql.TypeDouble, mysql.TypeFloat, mysql.TypeString, mysql.TypeJSON, mysql.TypeDate, mysql.TypeDuration}
	tests := make([]aggTest, 0, len(typeList))
	numRows := 5
	for _, argType := range typeList {
		// entries1 and entries2 are the two partial aggregates (rows 0..4
		// and rows 2..4 respectively); entries3 is their expected merge.
		entries1 := make([]interface{}, 0)
		entries2 := make([]interface{}, 0)
		entries3 := make([]interface{}, 0)
		argFieldType := types.NewFieldType(argType)
		genFunc := getDataGenFunc(argFieldType)
		for m := 0; m < numRows; m++ {
			arg := genFunc(m)
			entries1 = append(entries1, getJSONValue(arg, argFieldType))
		}
		// to adapt the `genSrcChk` Chunk format
		entries1 = append(entries1, nil)
		for m := 2; m < numRows; m++ {
			arg := genFunc(m)
			entries2 = append(entries2, getJSONValue(arg, argFieldType))
		}
		// to adapt the `genSrcChk` Chunk format
		entries2 = append(entries2, nil)
		entries3 = append(entries3, entries1...)
		entries3 = append(entries3, entries2...)
		tests = append(tests, buildAggTester(ast.AggFuncJsonArrayagg, argType, numRows, types.CreateBinaryJSON(entries1), types.CreateBinaryJSON(entries2), types.CreateBinaryJSON(entries3)))
	}
	for _, test := range tests {
		testMergePartialResult(t, test)
	}
}
// TestJsonArrayagg checks the non-distinct json_arrayagg aggregation over
// five generated rows (plus one NULL row) for a range of argument types.
func TestJsonArrayagg(t *testing.T) {
	argTypes := []byte{mysql.TypeLonglong, mysql.TypeDouble, mysql.TypeFloat, mysql.TypeString, mysql.TypeJSON, mysql.TypeDate, mysql.TypeDuration}
	const rowCount = 5
	cases := make([]aggTest, 0, len(argTypes))
	for _, tp := range argTypes {
		fieldType := types.NewFieldType(tp)
		gen := getDataGenFunc(fieldType)
		values := make([]interface{}, 0, rowCount+1)
		for row := 0; row < rowCount; row++ {
			values = append(values, getJSONValue(gen(row), fieldType))
		}
		// Trailing nil mirrors the NULL row that genSrcChk appends.
		values = append(values, nil)
		cases = append(cases, buildAggTester(ast.AggFuncJsonArrayagg, tp, rowCount, nil, types.CreateBinaryJSON(values)))
	}
	for _, c := range cases {
		testAggFuncWithoutDistinct(t, c)
	}
}
// jsonArrayaggMemDeltaGens returns, for each row of srcChk, the memory
// delta json_arrayagg is expected to record when that row is appended: the
// size of the boxed interface entry plus a type-dependent payload size.
// Unsupported argument types yield an error.
//
// NOTE(review): TypeFloat and TypeNewDecimal both map to DefFloat64Size —
// presumably because the aggregated value is stored as a float64; confirm
// against the aggfuncs implementation.
func jsonArrayaggMemDeltaGens(srcChk *chunk.Chunk, dataType *types.FieldType) (memDeltas []int64, err error) {
	memDeltas = make([]int64, 0)
	for i := 0; i < srcChk.NumRows(); i++ {
		row := srcChk.GetRow(i)
		if row.IsNull(0) {
			// NULL rows still append a nil interface entry.
			memDeltas = append(memDeltas, aggfuncs.DefInterfaceSize)
			continue
		}
		memDelta := int64(0)
		memDelta += aggfuncs.DefInterfaceSize
		switch dataType.GetType() {
		case mysql.TypeLonglong:
			memDelta += aggfuncs.DefUint64Size
		case mysql.TypeFloat:
			memDelta += aggfuncs.DefFloat64Size
		case mysql.TypeDouble:
			memDelta += aggfuncs.DefFloat64Size
		case mysql.TypeString:
			val := row.GetString(0)
			memDelta += int64(len(val))
		case mysql.TypeJSON:
			val := row.GetJSON(0)
			// +1 for the memory usage of the JSONTypeCode of json
			memDelta += int64(len(val.Value) + 1)
		case mysql.TypeDuration:
			memDelta += aggfuncs.DefDurationSize
		case mysql.TypeDate, mysql.TypeDatetime:
			memDelta += aggfuncs.DefTimeSize
		case mysql.TypeNewDecimal:
			memDelta += aggfuncs.DefFloat64Size
		default:
			return memDeltas, errors.Errorf("unsupported type - %v", dataType.GetType())
		}
		memDeltas = append(memDeltas, memDelta)
	}
	return memDeltas, nil
}
// TestMemJsonArrayagg checks json_arrayagg's memory-usage tracking using
// the per-row deltas produced by jsonArrayaggMemDeltaGens.
func TestMemJsonArrayagg(t *testing.T) {
	argTypes := []byte{mysql.TypeLonglong, mysql.TypeDouble, mysql.TypeString, mysql.TypeJSON, mysql.TypeDuration, mysql.TypeNewDecimal, mysql.TypeDate}
	const rowCount = 5
	cases := make([]aggMemTest, 0, len(argTypes))
	for _, tp := range argTypes {
		cases = append(cases, buildAggMemTester(ast.AggFuncJsonArrayagg, tp, rowCount, aggfuncs.DefPartialResult4JsonArrayagg+aggfuncs.DefSliceSize, jsonArrayaggMemDeltaGens, false))
	}
	for _, c := range cases {
		testAggMemFunc(t, c)
	}
}
|
package views
import (
"net/http"
"strings"
)
// HTTPHandler routes requests mounted under /http_handler to the todo and
// token sub-handlers.
func HTTPHandler(w http.ResponseWriter, r *http.Request) {
	// Strip the mount prefix so the sub-handlers see paths relative to it.
	r.URL.Path = strings.TrimPrefix(r.URL.Path, "/http_handler")
	if strings.HasPrefix(r.URL.Path, "/todo") {
		TodoHandler(w, r)
		return
	}
	// NOTE(review): this branch matches the path *suffix* while the one
	// above matches the prefix — confirm the asymmetry is intentional.
	if strings.HasSuffix(r.URL.Path, "/tokens") {
		TokensHandler(w, r)
		return
	}
	// NOTE(review): unmatched paths fall through and produce an empty 200
	// response — consider responding with http.NotFound.
}
|
package priorityqueue
import (
"LimitGo/limit/collection"
"testing"
)
// precede is the priority predicate used by the test queues: a Student
// with a smaller Id has higher priority. Both arguments must hold Student
// values; the type assertions panic otherwise.
var precede = func(p1 *collection.Object, p2 *collection.Object) bool {
	s1 := (*p1).(Student)
	s2 := (*p2).(Student)
	return s1.Id < s2.Id
}
// Student is the element type stored in the queues under test; Id doubles
// as the priority key (see precede).
type Student struct {
	Id   int
	Name string
}
// TestArrayListAll runs every PriorityQueue test in sequence.
//
// NOTE(review): `go test` already runs each Test* function on its own, so
// calling them here executes every test a second time — confirm this
// aggregate runner is intentional.
func TestArrayListAll(t *testing.T) {
	TestNew(t)
	TestPriorityQueue_Clear(t)
	TestPriorityQueue_Contains(t)
	TestPriorityQueue_Empty(t)
	TestPriorityQueue_First(t)
	TestPriorityQueue_GetIterator(t)
	TestPriorityQueue_Poll(t)
	TestPriorityQueue_String(t)
}
// TestNew verifies that a freshly constructed queue is empty.
func TestNew(t *testing.T) {
	if got := New(precede).Size(); got != 0 {
		t.Error("Create PriorityQueue fail!")
	}
}
// TestPriorityQueue_Clear verifies that Clear empties a populated queue.
func TestPriorityQueue_Clear(t *testing.T) {
	pq := New(precede)
	members := []collection.Object{
		Student{1, "Alice"},
		Student{2, "Bob"},
		Student{3, "Mark"},
	}
	for i := range members {
		pq.Add(&members[i])
	}
	pq.Clear()
	if pq.Size() != 0 {
		t.Error("Clear operation fail!")
	}
}
// TestPriorityQueue_Contains checks membership for an element that was
// added and for one that was not.
func TestPriorityQueue_Contains(t *testing.T) {
	pq := New(precede)
	members := []collection.Object{
		Student{1, "Alice"},
		Student{2, "Bob"},
		Student{3, "Mark"},
	}
	var outsider collection.Object = Student{4, "Jessie"}
	for i := range members {
		pq.Add(&members[i])
	}
	if !pq.Contains(&members[0]) {
		t.Error("Contains operation fail!")
	}
	if pq.Contains(&outsider) {
		t.Error("Contains operation fail!")
	}
}
// TestPriorityQueue_Empty checks Empty before and after elements are added.
func TestPriorityQueue_Empty(t *testing.T) {
	pq := New(precede)
	if !pq.Empty() {
		t.Error("Empty operation fail!")
	}
	members := []collection.Object{
		Student{1, "Alice"},
		Student{2, "Bob"},
	}
	for i := range members {
		pq.Add(&members[i])
	}
	if pq.Empty() {
		t.Error("Empty operation fail!")
	}
}
// TestPriorityQueue_First verifies that First always reports the element
// with the smallest Id as elements arrive in several batches; the checks
// compare pointer identity with the expected element.
func TestPriorityQueue_First(t *testing.T) {
	q := New(precede)
	var a collection.Object = Student{1, "Alice"}
	var b collection.Object = Student{2, "Bob"}
	var c collection.Object = Student{3, "Mark"}
	var d collection.Object = Student{4, "Jessie"}
	var e collection.Object = Student{5, "Alex"}
	var f collection.Object = Student{6, "Ellen"}
	var g collection.Object = Student{7, "August"}
	var h collection.Object = Student{8, "Jeff"}
	var i collection.Object = Student{9, "Jerry"}
	q.Add(&i)
	q.Add(&h)
	// Smallest Id so far is Jeff (8).
	if q.First() != &h {
		t.Error("First operation fail!")
	}
	q.Add(&e)
	q.Add(&f)
	q.Add(&g)
	// Smallest Id so far is Alex (5).
	if q.First() != &e {
		t.Error("First operation fail!")
	}
	q.Add(&b)
	q.Add(&c)
	q.Add(&a)
	q.Add(&d)
	// Alice (1) now has the smallest Id of all nine elements.
	if q.First() != &a {
		t.Error("First operation fail!")
	}
}
// TestPriorityQueue_GetIterator iterates the queue, removing every element
// after the first two, and checks that exactly two remain.
//
// NOTE(review): the iterator method is spelled HashNext in the collection
// package — presumably a typo for HasNext.
func TestPriorityQueue_GetIterator(t *testing.T) {
	q := New(precede)
	var a collection.Object = Student{1, "Alice"}
	var b collection.Object = Student{2, "Bob"}
	var c collection.Object = Student{3, "Mark"}
	var d collection.Object = Student{4, "Jessie"}
	q.Add(&a)
	q.Add(&b)
	q.Add(&c)
	q.Add(&d)
	it := q.GetIterator()
	for i := 0; it.HashNext(); i++ {
		// Keep the first two elements, remove the rest mid-iteration.
		if i >= 2 {
			it.Remove()
		}
		it.Next()
	}
	if q.Size() != 2 {
		t.Error("Iterator operation fail!")
	}
}
// TestPriorityQueue_Poll checks that Poll returns the highest-priority
// element (smallest Id, compared by pointer identity) and shrinks the
// queue by one.
func TestPriorityQueue_Poll(t *testing.T) {
	pq := New(precede)
	// Added in the same order as the original test: 6, 4, 5, 2, 3, 1.
	members := []collection.Object{
		Student{6, "Ellen"},
		Student{4, "Jessie"},
		Student{5, "Alex"},
		Student{2, "Bob"},
		Student{3, "Mark"},
		Student{1, "Alice"},
	}
	for i := range members {
		pq.Add(&members[i])
	}
	if pq.Poll() != &members[5] {
		t.Error("Poll operation fail!")
	}
	if pq.Size() != 5 {
		t.Error("Poll operation fail!")
	}
}
// TestPriorityQueue_String checks that String renders the elements in
// priority order as a brace-delimited list of JSON objects.
func TestPriorityQueue_String(t *testing.T) {
	pq := New(precede)
	var alice collection.Object = Student{1, "Alice"}
	var bob collection.Object = Student{2, "Bob"}
	pq.Add(&bob)
	pq.Add(&alice)
	want := `{{"Id":1,"Name":"Alice"},{"Id":2,"Name":"Bob"}}`
	if pq.String() != want {
		t.Error("String operation fail!")
	}
}
|
package backend
// OpenSentencer is implemented by backends that can fetch an opening
// sentence.
type OpenSentencer interface {
	// OpenSentence returns the opening sentence, or an error if the fetch
	// fails.
	OpenSentence() (string, error)
}
|
/*
* @lc app=leetcode id=13 lang=golang
*
* [13] Roman to Integer
*/
package main
// romanToInt converts the Roman numeral s to its integer value. A lookup
// table maps each symbol byte to its value; when a symbol is immediately
// followed by a larger one (e.g. "IV", "CM"), the pair is consumed at once
// as the subtractive combination.
func romanToInt(s string) int {
	var values [128]int
	values['I'], values['V'], values['X'], values['L'] = 1, 5, 10, 50
	values['C'], values['D'], values['M'] = 100, 500, 1000
	total := 0
	i := 0
	for i < len(s) {
		cur := values[s[i]]
		if i+1 < len(s) && cur < values[s[i+1]] {
			// Subtractive pair: add (next - current) and skip both.
			total += values[s[i+1]] - cur
			i += 2
		} else {
			total += cur
			i++
		}
	}
	return total
}
|
package compute
import (
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
)
// IPAddressList represents an IP address list as returned by the API.
type IPAddressList struct {
	ID          string               `json:"id"`
	Name        string               `json:"name"`
	Description string               `json:"description"`
	IPVersion   string               `json:"ipVersion"`
	State       string               `json:"state"`
	CreateTime  string               `json:"createTime"`
	Addresses   []IPAddressListEntry `json:"ipAddress"`
	ChildLists  []EntityReference    `json:"childIpAddressList"` // nested lists referenced by this one
}
// BuildEditRequest creates an EditIPAddressList pre-populated with the
// list's current addresses and child-list references, ready for the caller
// to customise before submitting.
func (addressList *IPAddressList) BuildEditRequest() EditIPAddressList {
	childIDs := make([]string, len(addressList.ChildLists))
	for index, childList := range addressList.ChildLists {
		childIDs[index] = childList.ID
	}
	return EditIPAddressList{
		ID:           addressList.ID,
		Description:  addressList.Description,
		Addresses:    addressList.Addresses,
		ChildListIDs: childIDs,
	}
}
// IPAddressListEntry represents an entry in an IP address list: a single
// address (Begin only), an explicit range (Begin..End), or a CIDR block
// (Begin plus PrefixSize).
type IPAddressListEntry struct {
	Begin      string  `json:"begin"`
	End        *string `json:"end,omitempty"`
	PrefixSize *int    `json:"prefixSize,omitempty"`
}
// IPAddressLists represents a page of IPAddressList results, with paging
// metadata provided by the embedded PagedResult.
type IPAddressLists struct {
	AddressLists []IPAddressList `json:"ipAddressList"`
	PagedResult
}
// createIPAddressList is the request body for creating an IP address list.
type createIPAddressList struct {
	Name            string               `json:"name"`
	Description     string               `json:"description"`
	IPVersion       string               `json:"ipVersion"`
	NetworkDomainID string               `json:"networkDomainId"`
	Addresses       []IPAddressListEntry `json:"ipAddress"`
	ChildListIDs    []string             `json:"childIpAddressListId"`
}
// EditIPAddressList represents the request body for editing an IP address
// list. The edit is a full replacement, not a delta (see
// Client.EditIPAddressList).
type EditIPAddressList struct {
	ID           string               `json:"id"`
	Description  string               `json:"description"`
	Addresses    []IPAddressListEntry `json:"ipAddress"`
	ChildListIDs []string             `json:"childIpAddressListId"`
}
// deleteIPAddressList is the request body for deleting an IP address list.
type deleteIPAddressList struct {
	ID string `json:"id"`
}
// GetIPAddressList retrieves the IP address list with the specified Id.
// id is the Id of the IP address list to retrieve.
// Returns nil if no addressList is found with the specified Id.
func (client *Client) GetIPAddressList(id string) (addressList *IPAddressList, err error) {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return nil, err
	}
	requestURI := fmt.Sprintf("%s/network/ipAddressList/%s",
		url.QueryEscape(organizationID),
		url.QueryEscape(id),
	)
	request, err := client.newRequestV22(requestURI, http.MethodGet, nil)
	if err != nil {
		return nil, err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return nil, err
	}
	if statusCode != http.StatusOK {
		var apiResponse *APIResponseV2
		apiResponse, err = readAPIResponseAsJSON(responseBody, statusCode)
		if err != nil {
			return nil, err
		}
		// A missing list is reported as (nil, nil), not as an error.
		if apiResponse.ResponseCode == ResponseCodeResourceNotFound {
			return nil, nil // Not an error, but was not found.
		}
		return nil, apiResponse.ToError("Request to retrieve IP address list failed with status code %d (%s): %s", statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	addressList = &IPAddressList{}
	err = json.Unmarshal(responseBody, addressList)
	return addressList, err
}
// ListIPAddressLists retrieves all IP address lists associated with the specified network domain.
func (client *Client) ListIPAddressLists(networkDomainID string) (addressLists *IPAddressLists, err error) {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return nil, err
	}
	requestURI := fmt.Sprintf("%s/network/ipAddressList?networkDomainId=%s",
		url.QueryEscape(organizationID),
		url.QueryEscape(networkDomainID),
	)
	request, err := client.newRequestV22(requestURI, http.MethodGet, nil)
	if err != nil {
		return nil, err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return nil, err
	}
	// Any non-200 status is surfaced as an error built from the API body.
	if statusCode != http.StatusOK {
		var apiResponse *APIResponseV2
		apiResponse, err = readAPIResponseAsJSON(responseBody, statusCode)
		if err != nil {
			return nil, err
		}
		return nil, apiResponse.ToError("Request to list IP address lists failed with status code %d (%s): %s", statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	addressLists = &IPAddressLists{}
	err = json.Unmarshal(responseBody, addressLists)
	return addressLists, err
}
// CreateIPAddressList creates a new IP address list.
// Returns the Id of the new IP address list.
//
// This operation is synchronous.
func (client *Client) CreateIPAddressList(name string, description string, ipVersion string, networkDomainID string, addresses []IPAddressListEntry, childListIDs []string) (addressListID string, err error) {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return "", err
	}
	requestURI := fmt.Sprintf("%s/network/createIpAddressList",
		url.QueryEscape(organizationID),
	)
	request, err := client.newRequestV22(requestURI, http.MethodPost, &createIPAddressList{
		Name:            name,
		Description:     description,
		IPVersion:       ipVersion,
		Addresses:       addresses,
		ChildListIDs:    childListIDs,
		NetworkDomainID: networkDomainID,
	})
	// BUG FIX: the error from newRequestV22 was ignored here (unlike the
	// sibling Get/List functions), so a nil request could be passed to
	// executeRequest.
	if err != nil {
		return "", err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return "", err
	}
	apiResponse, err := readAPIResponseAsJSON(responseBody, statusCode)
	if err != nil {
		return "", err
	}
	if apiResponse.ResponseCode != ResponseCodeOK {
		return "", apiResponse.ToError("Request to create IP address list '%s' failed with status code %d (%s): %s", name, statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	// Expected: "info" { "name": "ipAddressListId", "value": "the-Id-of-the-new-IP-address-list" }
	ipAddressListIDMessage := apiResponse.GetFieldMessage("ipAddressListId")
	if ipAddressListIDMessage == nil {
		return "", apiResponse.ToError("Received an unexpected response (missing 'ipAddressListId') with status code %d (%s): %s", statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	return *ipAddressListIDMessage, nil
}
// EditIPAddressList updates the configuration for a IP address list.
//
// Note that this operation is not additive; it *replaces* the configuration for the IP address list.
// You can IPAddressList.BuildEditRequest() to create an EditIPAddressList request that copies the current state of the IPAddressList (and then apply customisations).
//
// This operation is synchronous.
func (client *Client) EditIPAddressList(edit EditIPAddressList) error {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return err
	}
	requestURI := fmt.Sprintf("%s/network/editIpAddressList",
		url.QueryEscape(organizationID),
	)
	request, err := client.newRequestV22(requestURI, http.MethodPost, edit)
	// BUG FIX: the error from newRequestV22 was previously ignored, so a
	// nil request could be passed to executeRequest.
	if err != nil {
		return err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return err
	}
	apiResponse, err := readAPIResponseAsJSON(responseBody, statusCode)
	if err != nil {
		return err
	}
	// NOTE(review): this looks like leftover debug logging; kept so the
	// observable behaviour (and the file's log import) is unchanged.
	log.Printf("response code: %s", apiResponse.ResponseCode)
	if apiResponse.ResponseCode != ResponseCodeOK {
		return apiResponse.ToError("Request to edit IP address list failed with unexpected status code %d (%s): %s", statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	return nil
}
// DeleteIPAddressList deletes an existing IP address list.
// Returns an error if the operation was not successful.
//
// This operation is synchronous.
func (client *Client) DeleteIPAddressList(id string) (err error) {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return err
	}
	requestURI := fmt.Sprintf("%s/network/deleteIpAddressList",
		url.QueryEscape(organizationID),
	)
	request, err := client.newRequestV22(requestURI, http.MethodPost, &deleteIPAddressList{id})
	// BUG FIX: the error from newRequestV22 was previously ignored, so a
	// nil request could be passed to executeRequest.
	if err != nil {
		return err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return err
	}
	apiResponse, err := readAPIResponseAsJSON(responseBody, statusCode)
	if err != nil {
		return err
	}
	if apiResponse.ResponseCode != ResponseCodeOK {
		return apiResponse.ToError("Request to delete IP address list failed with unexpected status code %d (%s): %s", statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	return nil
}
// GetIPAddressListByName retrieves the IP address list with the specified
// name within the given network domain.
// Returns nil if no addressList is found with the specified name, and an
// error if more than one list matches.
func (client *Client) GetIPAddressListByName(name string, networkDomainID string) (addressList *IPAddressList, err error) {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return nil, err
	}
	requestURI := fmt.Sprintf("%s/network/ipAddressList?networkDomainId=%s&name=%s",
		url.QueryEscape(organizationID),
		url.QueryEscape(networkDomainID),
		url.QueryEscape(name),
	)
	request, err := client.newRequestV22(requestURI, http.MethodGet, nil)
	if err != nil {
		return nil, err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return nil, err
	}
	if statusCode != http.StatusOK {
		var apiResponse *APIResponseV2
		apiResponse, err = readAPIResponseAsJSON(responseBody, statusCode)
		if err != nil {
			return nil, err
		}
		if apiResponse.ResponseCode == ResponseCodeResourceNotFound {
			return nil, nil // Not an error, but was not found.
		}
		return nil, apiResponse.ToError("Request to retrieve IP address list failed with status code %d (%s): %s", statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	// The name filter returns a page of results; exactly one match is
	// expected.
	addressLists := &IPAddressLists{}
	err = json.Unmarshal(responseBody, addressLists)
	if err != nil {
		return nil, err
	}
	if addressLists.IsEmpty() {
		return nil, nil // No matching addresslist was found.
	}
	if len(addressLists.AddressLists) != 1 {
		return nil, fmt.Errorf("found multiple addresslist (%d) named '%s'",
			len(addressLists.AddressLists), name)
	}
	return &addressLists.AddressLists[0], err
}
|
package 一维数组
// firstMissingPositive returns the smallest positive integer missing from
// nums, in O(n) time and O(1) extra space. The slice is rearranged in
// place so that value v (1 <= v <= len(nums)) ends up at index v-1; the
// first index that does not hold its own value reveals the answer.
func firstMissingPositive(nums []int) int {
	n := len(nums)
	// Phase 1: cycle each in-range value into its home slot nums[v-1],
	// stopping when the slot already holds v (handles duplicates).
	for i := range nums {
		for {
			v := nums[i]
			if v < 1 || v > n || nums[v-1] == v {
				break
			}
			nums[i], nums[v-1] = nums[v-1], v
		}
	}
	// Phase 2: the first slot not holding its own value is the answer.
	for i := range nums {
		if nums[i] != i+1 {
			return i + 1
		}
	}
	// Phase 3: every slot is occupied, so 1..n are all present.
	return n + 1
}
|
package processor
import (
"github.com/bitmaelum/bitmaelum-suite/internal/message"
"github.com/sirupsen/logrus"
"time"
)
const (
	// MaxRetries defines how many retries we can do for sending a message
	// before it is dropped from the retry queue entirely (see
	// ProcessRetryQueue).
	MaxRetries int = 30
)
// ProcessRetryQueue will process all mails found in the retry queue or removes them when they are expired
func ProcessRetryQueue(forceRetry bool) {
	// Get retry info from all messages found in the retry queue
	retryQueue, err := message.GetRetryInfoFromQueue()
	if err != nil {
		return
	}
	for _, info := range retryQueue {
		if info.Retries > MaxRetries {
			// @TODO: We should send a message back to the user?
			// Message has exceeded MaxRetries. It's not gonna happen.
			logrus.Errorf("Message %s stuck in retry queue for too long. Giving up.", info.MsgID)
			// NOTE(review): the message is removed from SectionProcessing
			// although it sits in the retry queue — confirm the section.
			err := message.RemoveMessage(message.SectionProcessing, info.MsgID)
			if err != nil {
				logrus.Warnf("Cannot remove message %s from the process queue.", info.MsgID)
			}
			// BUG FIX: previously only the error path continued, so a
			// successfully removed message fell through and could be moved
			// back into the processing queue below.
			continue
		}
		if forceRetry || canRetryNow(info) {
			err := message.MoveMessage(message.SectionRetry, message.SectionProcessing, info.MsgID)
			if err != nil {
				continue
			}
			go ProcessMessage(info.MsgID)
		}
	}
}
// MoveToRetryQueue moves a message (back) to retry queue and update retry info
func MoveToRetryQueue(msgID string) {
	// Create or update retry information for this message
	info, err := message.GetRetryInfo(message.SectionProcessing, msgID)
	if err == nil {
		// Existing info: bump the attempt count and schedule the next try
		// according to the backoff table in getNextRetryDuration.
		info.Retries++
		info.LastRetriedAt = time.Now()
		info.RetryAt = time.Now().Add(getNextRetryDuration(info.Retries))
	} else {
		// First failure: start with fresh retry info.
		info = message.NewRetryInfo(msgID)
	}
	err = message.StoreRetryInfo(message.SectionProcessing, msgID, *info)
	if err != nil {
		// Best effort: the move below still proceeds without stored info.
		logrus.Warnf("Cannot store retry information for message %s.", msgID)
	}
	// Move the message to the retry queue
	err = message.MoveMessage(message.SectionProcessing, message.SectionRetry, info.MsgID)
	if err != nil {
		// can't move the message?
		logrus.Warnf("Cannot move message %s from processing to retry queue.", msgID)
	}
}
// canRetryNow reports whether the message's scheduled retry moment has
// already passed, compared at second granularity via unix timestamps.
func canRetryNow(info message.RetryInfo) bool {
	return info.RetryAt.Unix() < time.Now().Unix()
}
// getNextRetryDuration returns how long to wait before the given retry
// attempt: 1 minute for the first few attempts, backing off to 5, 30 and
// 60 minutes, and 0 once the retry count reaches 30.
/* @TODO: These duration should be configurable:
 *
 * config:
 *   retries: [
 *      { count: 5, hold: 1 },
 *      { count: 17, hold: 5 },
 *      { count: 25, hold: 30 },
 *      { count: 30, hold: 60 }
 *   ]
 */
func getNextRetryDuration(retries int) time.Duration {
	backoff := []struct {
		below int
		hold  time.Duration
	}{
		{5, 1 * time.Minute},
		{17, 5 * time.Minute},
		{25, 30 * time.Minute},
		{30, 60 * time.Minute},
	}
	for _, step := range backoff {
		if retries < step.below {
			return step.hold
		}
	}
	return 0
}
|
package main
import (
"net"
"log"
"github.com/gobwas/ws"
)
// main listens on localhost:9999 and upgrades incoming TCP connections to
// WebSocket, logging any non-websocket request headers it sees.
func main() {
	ln, err := net.Listen("tcp", "localhost:9999")
	if err != nil {
		log.Fatal(err)
	}
	u := ws.Upgrader{
		OnHeader: func(key, value []byte) (err error) {
			log.Printf("non-websocket header: %q=%q", key, value)
			return
		},
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			// BUG FIX: the error was silently ignored and a nil conn would
			// have been passed to Upgrade.
			log.Printf("accept: %v", err)
			continue
		}
		if _, err = u.Upgrade(conn); err != nil {
			// BUG FIX: log the failure and close the connection instead of
			// leaking it.
			log.Printf("upgrade %s: %v", conn.RemoteAddr(), err)
			conn.Close()
		}
	}
}
|
package rest
import (
"encoding/json"
"fmt"
"log"
"net/http"
)
// RespOk writes a 200 JSON response wrapping message and data in the
// standard Response envelope. If the envelope cannot be marshalled, a
// minimal {"status":200} body is written instead.
func RespOk(w http.ResponseWriter, message string, data map[string]interface{}) {
	w.Header().Add("Content-Type", "application/json; charset=UTF-8")
	w.WriteHeader(200)
	payload := Response{
		Status:  200,
		Message: message,
		Data:    data,
	}
	encoded, err := json.Marshal(payload)
	if err != nil {
		w.Write([]byte(`{"status":200}`))
		return
	}
	if _, err = w.Write(encoded); err != nil {
		log.Printf("Error writing ok to response: %v", err)
	}
}
// RespNotOk writes an error JSON response with the given HTTP status code,
// wrapping message and data in the standard Response envelope. If the
// envelope cannot be marshalled, a minimal {"status":code} body is written.
//
// BUG FIX: the Content-Type header is now set *before* WriteHeader;
// net/http silently ignores headers added after WriteHeader is called, so
// the previous order never sent the Content-Type.
func RespNotOk(code int, w http.ResponseWriter, message string, data map[string]interface{}) {
	w.Header().Add("Content-Type", "application/json")
	w.WriteHeader(code)
	resp := Response{
		Status:  code,
		Message: message,
		Data:    data,
	}
	body, err := json.Marshal(resp)
	if err != nil {
		w.Write([]byte(fmt.Sprintf(`{"status":%d}`, code)))
		return
	}
	if _, err = w.Write(body); err != nil {
		log.Printf("Error writing not ok to response: %v", err)
	}
}
|
package main
import "fmt"
// Node is a singly linked list node holding an int payload.
type Node struct {
	data int   // payload
	next *Node // nil at the tail
}
// NewNode allocates a list node holding v with no successor.
func NewNode(v int) *Node {
	return &Node{data: v}
}
// PrintList writes each node's value to stdout, one per line, walking from
// n to the end of the list.
func PrintList(n *Node) {
	for node := n; node != nil; node = node.next {
		fmt.Println(node.data)
	}
}
// Reverse reverses the list in place and returns the new head; the
// original head n becomes the tail.
func Reverse(n *Node) *Node {
	var reversed *Node
	for node := n; node != nil; {
		following := node.next
		node.next = reversed
		reversed = node
		node = following
	}
	return reversed
}
// main builds the list 1 -> 2 -> 3, prints it, reverses it in place, and
// prints the reversed list 3 -> 2 -> 1.
func main() {
	n := NewNode(1)
	n.next = NewNode(2)
	n.next.next = NewNode(3)
	PrintList(n)
	fmt.Println("-----------------")
	PrintList(Reverse(n))
}
|
package main
import "fmt"
// main reads an integer from stdin and prints the length of the longest
// run of consecutive 1 bits in its binary representation.
//
// NOTE(review): the Scanf error is still unchecked, as in the original —
// on malformed input n stays 0 and the program prints 0.
func main() {
	var n int32
	fmt.Scanf("%d", &n)
	fmt.Println(longestRunOfOnes(n))
}

// longestRunOfOnes returns the length of the longest run of '1' characters
// in fmt.Sprintf("%b", n) — i.e. the longest streak of consecutive 1 bits.
// (FIX: replaced the roundabout `fmt.Sprintf("%s", string(r)) == "1"`
// comparison with the direct rune comparison r == '1'.)
func longestRunOfOnes(n int32) int {
	maxRun, run := 0, 0
	for _, r := range fmt.Sprintf("%b", n) {
		if r == '1' {
			run++
		} else {
			run = 0
		}
		if run > maxRun {
			maxRun = run
		}
	}
	return maxRun
}
|
// +build qml
package detail
import (
"github.com/therecipe/qt/quick"
"github.com/therecipe/qt/internal/examples/sql/masterdetail_qml/controller"
)
// init registers DetailController with the QML type system so QML code can
// `import Detail 1.0` and instantiate it.
func init() {
	// detailController_QmlRegisterType2 is generated by the qt binding
	// tooling — presumably qtmoc; confirm against the generated sources.
	detailController_QmlRegisterType2("Detail", 1, 0, "DetailController")
}
// detailController is the QQuickItem exposed to QML that relays the Go
// controller's signals into the view.
type detailController struct {
	quick.QQuickItem
	// init is invoked by the binding generator after construction.
	_ func() `constructor:"init"`
	//<-controller
	// Signals forwarded from the Go controller into QML.
	_ func() `signal:"showImageLabel"`
	_ func(profileLabelText string) `signal:"showArtistProfile"`
	_ func(title string, elements []string) `signal:"showTitleAndAlbumDetails"`
}
// init wires the shared controller's signals through to this QML item's
// own signals so the view updates whenever the controller emits.
func (d *detailController) init() {
	//<-controller
	controller.Instance.ConnectShowImageLabel(d.ShowImageLabel)
	controller.Instance.ConnectShowArtistProfile(d.ShowArtistProfile)
	controller.Instance.ConnectShowTitleAndAlbumDetails(d.ShowTitleAndAlbumDetails)
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/gomodule/redigo/redis"
"log"
"net/http"
"strings"
"time"
)
// ReturnResult is the JSON envelope returned by every invoice endpoint.
type ReturnResult struct {
	Success bool   // whether the request was accepted and processed
	Content string // result payload, or an error description on failure
}
// sayHello logs every form key/value of the request and responds "hello".
func sayHello(w http.ResponseWriter, r *http.Request) {
	// FIX: the ParseForm error was silently ignored; log it so malformed
	// requests are visible. Ranging over a nil r.Form is safe, so the
	// handler still responds either way.
	if err := r.ParseForm(); err != nil {
		log.Println("sayHello: parse form:", err)
	}
	for k, v := range r.Form {
		fmt.Println("key", k)
		fmt.Println("value", strings.Join(v, ""))
	}
	fmt.Fprintf(w, "hello")
}
func ServerFuncSingleInvoiceCheck(w http.ResponseWriter, r *http.Request){
r.ParseMultipartForm(32<<20)
files := r.MultipartForm.File["file"]
InvoiceFiles := CopyHttpfilesToLocalFiles(files)
returnResult :=ReturnResult{}
if len(InvoiceFiles) == 1{
result := FlowSingleInvoiceCheckThroughRedis(InvoiceFiles[0])
returnResult.Success=true
returnResult.Content=result
}else{
log.Println("SingleInvoiceCheck must upload 1 file")
returnResult.Success=false
returnResult.Content="输入不合法"
}
returnResultByte,_ :=json.Marshal(returnResult)
fmt.Fprintf(w,string(returnResultByte))
}
func ServerFuncMultiInvoiceCheck(w http.ResponseWriter, r *http.Request){
r.ParseMultipartForm(32<<20)
files := r.MultipartForm.File["file"]
InvoiceFiles := CopyHttpfilesToLocalFiles(files)
returnResult :=ReturnResult{}
if len(InvoiceFiles)>1{
result := FlowMultiInvoiceCheckThroughRedis(InvoiceFiles)
returnResult.Success=true
returnResult.Content=result
}else{
log.Println("MultiInvoiceCheck must upload more than 1 file")
returnResult.Success=false
returnResult.Content="输入不合法"
}
returnResultByte,_ :=json.Marshal(returnResult)
fmt.Fprintf(w,string(returnResultByte))
}
// ServerFuncMultiInvoiceResultQuery looks up the result of a previously
// submitted batch check by its "PchNumber" form value and replies with a
// JSON ReturnResult.
func ServerFuncMultiInvoiceResultQuery(w http.ResponseWriter, r *http.Request) {
	returnResult := ReturnResult{}
	// Fix: the original ignored this error; on failure r.MultipartForm is
	// nil and the next line panicked.
	if err := r.ParseMultipartForm(32 << 20); err != nil {
		log.Println("MultiInvoiceResultQuery: parsing multipart form:", err)
		returnResult.Content = "输入不合法"
	} else if PchNumbers := r.MultipartForm.Value["PchNumber"]; len(PchNumbers) == 1 {
		res := FlowMultiResultQueryThroughRedis(PchNumbers[0])
		returnResult.Success = true
		returnResult.Content = res
	} else {
		log.Println("Can not found the PchNumber ")
		returnResult.Success = false
		returnResult.Content = "输入不合法"
	}
	returnResultByte, err := json.Marshal(returnResult)
	if err != nil {
		log.Println("MultiInvoiceResultQuery: encoding response:", err)
		return
	}
	// Fix: write bytes directly instead of through Fprintf's format string.
	w.Write(returnResultByte)
}
// main wires the Redis connection pool, registers the HTTP handlers, and
// serves on :9090.
func main() {
	pool = &redis.Pool{
		MaxIdle:     10,
		IdleTimeout: 240 * time.Second,
		Dial: func() (redis.Conn, error) {
			return redis.Dial("tcp", "localhost:6379")
		},
	}
	http.HandleFunc("/hello", sayHello)
	http.HandleFunc("/SingleInvoiceCheck", ServerFuncSingleInvoiceCheck)
	http.HandleFunc("/MultiInvoiceCheck", ServerFuncMultiInvoiceCheck)
	http.HandleFunc("/MultiInvoiceResultQuery", ServerFuncMultiInvoiceResultQuery)
	// Bug fix: the original logged the literal string "err" and dropped
	// the actual error value from ListenAndServe.
	if err := http.ListenAndServe(":9090", nil); err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
|
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
package upgrader
import (
"log"
"github.com/hashicorp/go-multierror"
corev1 "k8s.io/api/core/v1"
)
// ConcurrentPolicy upgrades nodes in parallel, running at most
// Concurrency upgrades at a time.
type ConcurrentPolicy struct {
	Upgrader
	Concurrency int
}

// Job pairs a node with the version it should be upgraded to.
type Job struct {
	node corev1.Node
	version string
}

// Result reports the outcome of one Job; err is nil on success.
type Result struct {
	job Job
	err error
}
// Run fans the node list out to Concurrency workers, collects one Result
// per node, and returns the accumulated upgrade errors (nil when every
// node upgraded cleanly).
func (policy ConcurrentPolicy) Run(nodes corev1.NodeList, version string) error {
	jobs := make(chan Job, policy.Concurrency)
	results := make(chan Result, len(nodes.Items))
	for i := 0; i < policy.Concurrency; i++ {
		go policy.worker(i, jobs, results)
	}
	for _, n := range nodes.Items {
		jobs <- Job{n, version}
	}
	close(jobs)
	var errs *multierror.Error
	for range nodes.Items {
		if res := <-results; res.err != nil {
			errs = multierror.Append(errs, res.err)
		}
	}
	return errs.ErrorOrNil()
}
// worker consumes jobs until the channel is closed, upgrading each node
// and reporting exactly one Result per job.
//
// Bug fix: the original sent two Results for a failed job (one with the
// error and one with nil), which skews Run's one-result-per-node
// accounting and can overflow the results buffer.
func (policy ConcurrentPolicy) worker(id int, jobs <-chan Job, results chan<- Result) {
	for j := range jobs {
		log.Println("concurrent policy worker", id, "assigned to node", j.node.Name)
		if err := policy.Upgrade(j.node, j.version); err != nil {
			results <- Result{j, err}
			continue
		}
		results <- Result{j, nil}
	}
}
|
package mytls
import (
"crypto/tls"
"crypto/x509"
"errors"
"google.golang.org/grpc/credentials"
"io/ioutil"
"log"
"strings"
)
// GetTLSCreds builds gRPC transport credentials from a certificate/key
// pair plus a directory of CA certificates (non-recursive; only *.pem
// and *.crt files are loaded). For servers it enforces mutual TLS —
// client certificates are required and verified against the CA pool; for
// clients the pool verifies the server. Peer chains are additionally
// required to be exactly one or two certificates long.
func GetTLSCreds(certFile, keyFile, caDir string, isServer bool) (credentials.TransportCredentials, error) {
	if !strings.HasSuffix(caDir, "/") {
		caDir += "/"
	}
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	caCertPool := x509.NewCertPool()
	caCertPoolMemCnt := 0
	caDirFiles, err := ioutil.ReadDir(caDir)
	if err != nil {
		return nil, err
	}
	for _, entry := range caDirFiles {
		if entry.IsDir() || !(strings.HasSuffix(entry.Name(), ".pem") ||
			strings.HasSuffix(entry.Name(), ".crt")) {
			continue
		}
		caFilePath := caDir + entry.Name()
		caFile, err := ioutil.ReadFile(caFilePath)
		if err != nil {
			return nil, err
		}
		// Best effort: a malformed file is logged and skipped, not fatal.
		if !caCertPool.AppendCertsFromPEM(caFile) {
			log.Printf("Warning: Certificate %v not added.\n", caFilePath)
		} else {
			caCertPoolMemCnt++
		}
	}
	if caCertPoolMemCnt == 0 {
		return nil, errors.New("no CA certificate")
	}
	tlsConfig := &tls.Config{
		// Accept only verified chains of length 1 or 2; anything longer
		// (or an empty chain) is rejected.
		VerifyPeerCertificate: func(_ [][]byte, verifiedChains [][]*x509.Certificate) error {
			for _, chain := range verifiedChains {
				if len(chain) > 2 || len(chain) == 0 {
					continue
				}
				return nil
			}
			return errors.New("invalid certificate chains")
		},
		Certificates: []tls.Certificate{cert},
	}
	// Cleanup: the original set ClientCAs unconditionally in the literal
	// and then again in the server branch; ClientCAs is server-side only,
	// so it now lives in exactly one place.
	if isServer {
		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
		tlsConfig.ClientCAs = caCertPool
	} else {
		tlsConfig.RootCAs = caCertPool
	}
	return credentials.NewTLS(tlsConfig), nil
}
|
package registerpeserta
import (
"context"
"github.com/mirzaakhena/danarisan/domain/entity"
"github.com/mirzaakhena/danarisan/domain/service"
"github.com/mirzaakhena/danarisan/usecase/registerpeserta/port"
"strings"
)
//go:generate mockery --dir port/ --name RegisterPesertaOutport -output mocks/
// registerPesertaInteractor implements port.RegisterPesertaInport; all
// persistence goes through the injected outport.
type registerPesertaInteractor struct {
	outport port.RegisterPesertaOutport
}
// NewUsecase ...
func NewUsecase(outputPort port.RegisterPesertaOutport) port.RegisterPesertaInport {
return ®isterPesertaInteractor{
outport: outputPort,
}
}
// Execute registers a new peserta (participant). Inside a single
// transaction it constructs the entity and persists it through the
// outport; any failure aborts the transaction and is returned.
func (r *registerPesertaInteractor) Execute(ctx context.Context, req port.RegisterPesertaRequest) (*port.RegisterPesertaResponse, error) {
	res := &port.RegisterPesertaResponse{}
	err := service.WithTransaction(ctx, r.outport, func(ctx context.Context) error {
		pesertaObj, err := entity.NewPeserta(entity.PesertaRequest{
			// The lower-cased PesertaID doubles as the entity ID.
			GenerateID: func() string {
				return strings.ToLower(req.PesertaID)
			},
			// NOTE(review): Nama is populated from PesertaID — it looks
			// like it should come from a dedicated name field; confirm
			// against the request DTO.
			Nama: req.PesertaID,
		})
		if err != nil {
			return err
		}
		err = r.outport.SavePeserta(ctx, pesertaObj)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return res, nil
}
|
package 字符串
import (
"bytes"
"sort"
)
// -------------- Versions that use no extra data structure --------------
// isUnique reports whether every byte of astr occurs at most once, via a
// brute-force O(n²) pairwise comparison with no extra storage.
// NOTE(review): this file declares isUnique several times (one per
// solution variant), so it cannot compile as-is — presumably kept as
// study notes; confirm before building.
func isUnique(astr string) bool {
	for i := 0; i < len(astr); i++ {
		for t := i + 1; t < len(astr); t++ {
			if astr[i] == astr[t] {
				return false
			}
		}
	}
	return true
}
// isUnique variant: a byte is duplicated exactly when its first and last
// occurrence indices differ, so compare bytes.Index with bytes.LastIndex
// for every byte.
func isUnique(astr string) bool {
	for i := 0; i < len(astr); i++ {
		if bytes.LastIndex([]byte(astr), []byte{astr[i]}) != bytes.Index([]byte(astr), []byte{astr[i]}) {
			return false
		}
	}
	return true
}
// isUnique variant: sort the bytes, then duplicates must be adjacent.
// NOTE(review): the local variable `bytes` shadows the bytes package
// inside this function — legal, but worth renaming.
func isUnique(astr string) bool {
	bytes := []byte(astr)
	sort.Slice(bytes, func(i, j int) bool {
		return bytes[i] < bytes[j]
	})
	for i := 1; i < len(bytes); i++ {
		if bytes[i] == bytes[i-1] {
			return false
		}
	}
	return true
}
// isUnique variant: a bitmap-style hash — bit (c - 'a') of hashArray
// marks that byte as seen.
// NOTE(review): correct only for inputs restricted to 'a'..'z'; other
// bytes produce shift counts outside 0..25 — confirm the expected input.
func isUnique(astr string) bool {
	// Effectively a hash set stored as bits of a single integer.
	hashArray := 0
	for i := 0; i < len(astr); i++ {
		bitValueOfChar := 1 << (astr[i] - 'a')
		if hashArray&bitValueOfChar != 0 {
			return false
		}
		hashArray |= bitValueOfChar
	}
	return true
}
// -------------- Version using a hash map --------------
// isUnique variant: count occurrences of each byte in a map, then check
// that no count exceeds one.
func isUnique(astr string) bool {
	countOfChar := make(map[byte]int)
	for i := 0; i < len(astr); i++ {
		countOfChar[astr[i]]++
	}
	for _, count := range countOfChar {
		if count > 1 {
			return false
		}
	}
	return true
}
/*
Problem link: https://leetcode-cn.com/problems/is-unique-lcci/submissions/
Summary:
1. Without extra data structures this can also be solved with index/find functions or by sorting.
2. The official solution also uses bit operations as the underlying hash array, which likewise needs no data structure (effectively a bitmap).
*/
|
package main
import "fmt"
// main prints a whole number of light-years derived from the constants
// below using untyped-constant integer division.
func main() {
	// lightSpeed matches the speed of light in km/s, so distance is
	// presumably in kilometers — TODO confirm the intended units.
	const distance = 236000000000000000
	const lightSpeed = 299792
	const secondsPerDay = 86400
	const daysPerYear = 365
	// Integer division throughout: the result is truncated, not rounded.
	lightYears := distance / lightSpeed / secondsPerDay / daysPerYear
	fmt.Println(lightYears)
}
|
package compiler
import (
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
)
// fsLoader populates a Repository with grammars found under abspath on
// the local filesystem.
type fsLoader struct {
	*Repository
	abspath string
}
// findGrammars walks the loader's directory tree and collects every path
// that isValidGrammar accepts.
//
// NOTE(review): the walk callback always returns nil, so per-entry walk
// errors are silently skipped rather than aborting the walk; only an
// error from filepath.Walk itself is propagated. Confirm this
// best-effort behavior is intended.
func (l *fsLoader) findGrammars() (files []string, err error) {
	err = filepath.Walk(l.abspath,
		func(path string, info os.FileInfo, err error) error {
			if err == nil && isValidGrammar(path, info) {
				files = append(files, path)
			}
			return nil
		})
	return
}
// load reads every grammar file found under abspath, converts it with
// ConvertProto, and registers it on the repository keyed by its path
// relative to abspath. Per-file failures are recorded via Fail and do
// not stop the remaining files; duplicate scope names keep the first
// file seen.
func (l *fsLoader) load() {
	grammars, err := l.findGrammars()
	if err != nil {
		l.Fail(err)
		return
	}
	for _, path := range grammars {
		data, err := ioutil.ReadFile(path)
		if err != nil {
			l.Fail(err)
			continue
		}
		// Prefer repository-relative paths; fall back to the absolute one.
		if rel, err := filepath.Rel(l.abspath, path); err == nil {
			path = rel
		}
		rule, unknown, err := ConvertProto(filepath.Ext(path), data)
		if err != nil {
			l.Fail(&ConversionError{path, err})
			continue
		}
		// First file for a scope wins; later duplicates are dropped.
		if _, ok := l.Files[rule.ScopeName]; ok {
			continue
		}
		l.AddFile(path, rule, unknown)
	}
}
// gitRemoteName returns the URL of the "origin" remote of the git
// repository at path, with surrounding whitespace trimmed.
func gitRemoteName(path string) (string, error) {
	out, err := exec.Command("git", "-C", path, "remote", "get-url", "origin").Output()
	if err != nil {
		return "", err
	}
	url := strings.TrimSpace(string(out))
	return url, nil
}
// LoadFromFilesystem builds a Repository from the grammars under
// root/src, tagging it with the git "origin" URL when one is available.
//
// NOTE(review): path.Join (slash paths) is used on a filesystem path
// here while the rest of the file uses path/filepath — works on POSIX,
// wrong on Windows; confirm whether filepath.Join was intended.
func LoadFromFilesystem(root, src string) *Repository {
	loader := fsLoader{
		Repository: newRepository(src),
		abspath: path.Join(root, src),
	}
	loader.load()
	// Best effort: a missing git remote just leaves Upstream empty.
	if ups, err := gitRemoteName(loader.abspath); err == nil {
		loader.Repository.Upstream = ups
	}
	return loader.Repository
}
|
package intermediate
// All of the formats specified above are available here. It is expected that implementations use this wherever
// possible to allow for changes
const (
	// Sentinel introduces every intermediate formatting code.
	Sentinel = '$'
	Bold = 'b'
	Italic = 'i'
	Underline = 'u'
	Strikethrough = 's'
	Reset = 'r'
	Colour = 'c'
)
// String representations of intermediate runes
const (
	SentinelString = string(Sentinel)
	BoldString = string(Bold)
	ItalicString = string(Italic)
	UnderlineString = string(Underline)
	StrikethroughString = string(Strikethrough)
	ResetString = string(Reset)
	ColourString = string(Colour)
)
// String representations of intermediate runes with a prefixed sentinel,
// i.e. the full escape sequence as it appears in formatted text.
const (
	SSentinelString = SentinelString + SentinelString
	SBoldString = SentinelString + BoldString
	SItalicString = SentinelString + ItalicString
	SUnderlineString = SentinelString + UnderlineString
	SStrikethroughString = SentinelString + StrikethroughString
	SResetString = SentinelString + ResetString
	SColourString = SentinelString + ColourString
)
|
package main
import (
"github.com/kaleido-io/kaleido-sdk-go/cmd"
)
// main delegates straight to the kaleido CLI's root command.
func main() {
	cmd.Execute()
}
|
package main
import (
"fmt"
"short-url/server"
)
// main starts the URL-shortener HTTP server with the default in-memory
// store and blocks until the server exits.
func main() {
	// uncomment the following lines for a persistent data store (BoltDB).
	// Note: I'm setting a global variable which is very bad. Again, just a quick and dirty implementation.
	// There is a known bug with the `Visits` and the persistent store. It always returns 0.
	//d, err := bolt.Open("shortner.db", 0600, nil)
	//if err != nil {
	//	panic(err)
	//}
	//defer d.Close()
	//server.MainDB = db.NewPersistent(d)
	fmt.Println("listening on", server.BaseURL)
	if e := server.ListenAndServe(); e != nil {
		fmt.Println("ERROR:", e)
	}
}
|
package hcledit_test
import (
"fmt"
"strings"
"go.mercari.io/hcledit"
)
// Example demonstrates reading HCL and then creating, updating and
// deleting attributes/blocks across every matching resource via wildcard
// ("*") query paths. The trailing Output comment is asserted by go test,
// so it must track the editor's exact rendering.
func Example() {
	src := `
resource "google_container_node_pool" "nodes1" {
name = "nodes1"
node_config {
preemptible = false
machine_type = "e2-medium"
}
timeouts {
create = "30m"
}
}
resource "google_container_node_pool" "nodes2" {
name = "nodes2"
node_config {
preemptible = false
machine_type = "e2-medium"
}
timeouts {
create = "30m"
}
}
`
	// Read HCL contents.
	editor, _ := hcledit.Read(strings.NewReader(src), "")
	// Create new attribute on the existing block.
	editor.Create("resource.google_container_node_pool.*.node_config.disk_size_gb", "200")
	// Create new block and add some attributes.
	editor.Create("resource.google_container_node_pool.*.master_auth", hcledit.BlockVal())
	editor.Create("resource.google_container_node_pool.*.master_auth.username", "")
	editor.Create("resource.google_container_node_pool.*.master_auth.password", "")
	// Update existing attributes.
	editor.Update("resource.google_container_node_pool.*.node_config.machine_type", "COS")
	editor.Update("resource.google_container_node_pool.*.node_config.preemptible", true)
	// Delete existing attribute and blocks
	editor.Delete("resource.google_container_node_pool.*.timeouts")
	fmt.Printf("%s", editor.Bytes())
	// Output:
	// resource "google_container_node_pool" "nodes1" {
	// name = "nodes1"
	//
	// node_config {
	// preemptible = true
	// machine_type = "COS"
	// disk_size_gb = "200"
	// }
	//
	// master_auth {
	// username = ""
	// password = ""
	// }
	// }
	//
	// resource "google_container_node_pool" "nodes2" {
	// name = "nodes2"
	//
	// node_config {
	// preemptible = true
	// machine_type = "COS"
	// disk_size_gb = "200"
	// }
	//
	// master_auth {
	// username = ""
	// password = ""
	// }
	// }
}
|
package webhook
import (
"bytes"
"crypto/hmac"
"crypto/sha1"
"encoding/json"
"fmt"
"hooksim/config"
"hooksim/types"
"io/ioutil"
"log"
"net/http"
"github.com/satori/go.uuid"
)
var (
	// repoFields lists repository JSON keys in the same order github
	// emits them, so getRepoContent can re-serialize a parsed repository
	// object with identical field ordering.
	repoFields = [...]string{"id", "name", "full_name", "owner", "private", "html_url", "description", "fork", "url", "forks_url",
		"keys_url", "collaborators_url", "teams_url", "hooks_url", "issue_events_url", "events_url", "assignees_url",
		"branches_url", "tags_url", "blobs_url", "git_tags_url", "git_refs_url", "trees_url", "statuses_url", "languages_url",
		"stargazers_url", "contributors_url", "subscribers_url", "subscription_url", "commits_url", "git_commits_url", "comments_url",
		"issue_comment_url", "contents_url", "compare_url", "merges_url", "archive_url", "downloads_url", "issues_url", "pulls_url",
		"milestones_url", "notifications_url", "labels_url", "releases_url", "created_at", "updated_at", "pushed_at", "git_url", "ssh_url",
		"clone_url", "svn_url", "homepage", "size", "stargazers_count", "watchers_count", "language", "has_issues", "has_downloads",
		"has_wiki", "has_pages", "forks_count", "mirror_url", "open_issues_count", "forks", "open_issues", "watchers", "default_branch"}
)
// getRepoContent fetches the repository object of owner/repo from the
// GitHub API and re-serializes it with fields in repoFields order
// (matching github's own output). On any failure it logs and returns the
// empty object "{}".
func getRepoContent(owner, repo string, client *http.Client) string {
	resp, err := client.Get(fmt.Sprintf("%s/repos/%s/%s", config.GithubAPIURL, owner, repo))
	if err != nil {
		log.Printf("Error in getting repo content: %v\n", err)
		return "{}"
	}
	// Fix: close the body on every path — the original leaked it when
	// ReadAll failed.
	defer resp.Body.Close()
	content, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Printf("Error in reading repo content: %v\n", err)
		return "{}"
	}
	var repoMap map[string]json.RawMessage
	// Fix: the original tested json.Unmarshal's result but logged the
	// stale err from ReadAll (always nil at that point).
	if err := json.Unmarshal(content, &repoMap); err != nil {
		log.Printf("Error in parsing repo content: %v\n", err)
		return "{}"
	}
	// Build with a buffer instead of quadratic string concatenation.
	var out bytes.Buffer
	out.WriteByte('{')
	for i, field := range repoFields {
		if i > 0 {
			out.WriteByte(',')
		}
		fmt.Fprintf(&out, "\"%s\":%s", field, string(repoMap[field]))
	}
	out.WriteByte('}')
	return out.String()
}
// TriggerIssueRenamedWebHook POSTs a github-style "issues" event to
// every hook registered for owner/repo, one request per renamed issue.
// Payloads are HMAC-SHA1 signed when the hook has a secret. Delivery is
// best effort: failures are logged and the loop continues.
func TriggerIssueRenamedWebHook(owner, repo string, renamedIssues []types.IssueActorPair, queryClient *http.Client) {
	pairs := getWebHookURL(owner, repo, "issues")
	if len(pairs) == 0 {
		return
	}
	// One client for all deliveries (the original allocated a new client
	// on every loop iteration).
	client := &http.Client{Transport: &http.Transport{DisableCompression: true}}
	for _, pair := range pairs {
		for _, renamedIssue := range renamedIssues {
			payload := fmt.Sprintf("{\"action\":\"updated\",\"issue\":%s,\"repository\":%s,\"sender\":%s}",
				string(renamedIssue.Issue),
				getRepoContent(owner, repo, queryClient),
				string(renamedIssue.Actor))
			req, err := http.NewRequest("POST", pair.URL, bytes.NewReader([]byte(payload)))
			if err != nil {
				// Fix: the original fell through and called client.Do(nil).
				log.Printf("Error in creating POST request: %v\n", err)
				continue
			}
			req.Header.Add("User-Agent", "hooksim")
			req.Header.Add("Content-Type", "application/json")
			req.Header.Add("Accept", "*/*")
			req.Header.Add("X-Github-Event", "issues")
			req.Header.Add("X-Github-Delivery", uuid.NewV4().String())
			if pair.Secret != "" {
				mac := hmac.New(sha1.New, []byte(pair.Secret))
				mac.Write([]byte(payload))
				req.Header.Add("X-Hub-Signature", fmt.Sprintf("sha1=%x", mac.Sum(nil)))
			}
			resp, err := client.Do(req)
			if err != nil {
				// Fix: the original dereferenced resp.Body after a failed
				// Do, where resp is nil — a guaranteed panic on error.
				log.Printf("Error in making webhook call: %v\n", err)
				continue
			}
			if resp.Body != nil {
				resp.Body.Close()
			}
		}
	}
}
|
// Package binarybalancetree implements a self-balancing binary search tree (AVL tree).
package binarybalancetree
import (
"container/list"
"reflect"
)
// Item is the element type accepted by arrayToBinTree; an entry whose
// dynamic type is nil marks an absent child in the level-order encoding.
type Item interface{}
// TreeNode is one AVL-tree node; Height caches the subtree height used
// by the rebalancing rotations.
type TreeNode struct {
	Val int
	Height int
	Left *TreeNode
	Right *TreeNode
}
// LevelOrder is one element of a level-order traversal snapshot: a
// node's value together with its cached height.
type LevelOrder struct {
	Val int
	Height int
}
// arrayToBinTree decodes a level-order array into a binary tree; entries
// with a nil dynamic type denote missing children. Heights are left at 0
// — callers rebalance/update separately. Returns nil for an empty array
// or a nil root entry.
func arrayToBinTree(nums []Item) *TreeNode {
	// Bug fix: the original indexed nums[0] unconditionally and panicked
	// on an empty slice.
	if len(nums) == 0 || reflect.TypeOf(nums[0]) == nil {
		return nil
	}
	queue := list.New()
	root := &TreeNode{nums[0].(int), 0, nil, nil}
	queue.PushBack(root)
	for i := 1; queue.Len() != 0; {
		node := queue.Remove(queue.Front()).(*TreeNode)
		if i < len(nums) {
			if reflect.TypeOf(nums[i]) != nil {
				node.Left = &TreeNode{nums[i].(int), 0, nil, nil}
				queue.PushBack(node.Left)
			}
			i++
		}
		if i < len(nums) {
			if reflect.TypeOf(nums[i]) != nil {
				node.Right = &TreeNode{nums[i].(int), 0, nil, nil}
				queue.PushBack(node.Right)
			}
			i++
		}
	}
	return root
}
// updateHeight recomputes the cached Height of every node in the
// subtree; thin wrapper over update that discards the returned height.
func updateHeight(root *TreeNode) {
	update(root)
}
// update recursively refreshes Height for every node in the subtree and
// returns the subtree's height (0 for an empty tree; leaves get 1).
func update(root *TreeNode) int {
	if root == nil {
		return 0
	}
	h := max(update(root.Left), update(root.Right)) + 1
	root.Height = h
	return h
}
// levelorderTraversal returns a breadth-first snapshot of the tree as
// (value, height) pairs; the slice is empty for an empty tree.
func levelorderTraversal(root *TreeNode) []LevelOrder {
	out := []LevelOrder{}
	if root == nil {
		return out
	}
	pending := list.New()
	pending.PushBack(root)
	for pending.Len() > 0 {
		node := pending.Remove(pending.Front()).(*TreeNode)
		out = append(out, LevelOrder{node.Val, node.Height})
		for _, child := range []*TreeNode{node.Left, node.Right} {
			if child != nil {
				pending.PushBack(child)
			}
		}
	}
	return out
}
// findMin returns the leftmost (smallest) node of the subtree, or nil
// for an empty subtree.
func findMin(root *TreeNode) *TreeNode {
	if root == nil {
		return nil
	}
	node := root
	for node.Left != nil {
		node = node.Left
	}
	return node
}
// insertIntoAVL inserts val into the AVL tree rooted at root and returns
// the (possibly rotated) new subtree root. Duplicate values are silently
// ignored — neither branch of the comparison fires for equality.
func insertIntoAVL(root *TreeNode, val int) *TreeNode {
	if root == nil {
		root = &TreeNode{val, 0, nil, nil}
	} else {
		if val > root.Val {
			root.Right = insertIntoAVL(root.Right, val)
		} else if val < root.Val {
			root.Left = insertIntoAVL(root.Left, val)
		}
	}
	// Refresh cached heights before checking the balance factor.
	updateHeight(root)
	return ajust(root)
}
// deleteNode removes key from the AVL tree rooted at root and returns
// the (possibly rotated) new subtree root. A node with two children is
// replaced by its in-order successor (minimum of the right subtree),
// which is then deleted from that subtree.
func deleteNode(root *TreeNode, key int) *TreeNode {
	if root == nil {
		return nil
	}
	if key < root.Val {
		root.Left = deleteNode(root.Left, key)
	} else if key > root.Val {
		root.Right = deleteNode(root.Right, key)
	} else {
		if root.Left != nil && root.Right != nil {
			// Two children: copy successor's value, delete it below.
			root.Val = findMin(root.Right).Val
			root.Right = deleteNode(root.Right, root.Val)
		} else {
			// Zero or one child: splice the child (or nil) into place.
			if root.Left == nil {
				root = root.Right
			} else if root.Right == nil {
				root = root.Left
			}
		}
	}
	updateHeight(root)
	return ajust(root)
}
// leftRotate rotates the subtree left around root (root's right child
// becomes the new root) and refreshes both cached heights.
func leftRotate(root *TreeNode) *TreeNode {
	tmp := root.Right
	root.Right = tmp.Left
	tmp.Left = root
	root.Height = max(getHeight(root.Left), getHeight(root.Right)) + 1
	tmp.Height = max(getHeight(tmp.Left), getHeight(tmp.Right)) + 1
	return tmp
}
// rightThenLeftRotate handles the right-left imbalance: first rotate the
// right child rightward, then rotate root leftward.
func rightThenLeftRotate(root *TreeNode) *TreeNode {
	tmp := rightRotate(root.Right)
	root.Right = tmp
	return leftRotate(root)
}
// rightRotate rotates the subtree right around root (root's left child
// becomes the new root) and refreshes both cached heights.
func rightRotate(root *TreeNode) *TreeNode {
	tmp := root.Left
	root.Left = tmp.Right
	tmp.Right = root
	root.Height = max(getHeight(root.Left), getHeight(root.Right)) + 1
	tmp.Height = max(getHeight(tmp.Left), getHeight(tmp.Right)) + 1
	return tmp
}
// leftThenRightRotate handles the left-right imbalance: first rotate the
// left child leftward, then rotate root rightward.
func leftThenRightRotate(root *TreeNode) *TreeNode {
	tmp := leftRotate(root.Left)
	root.Left = tmp
	return rightRotate(root)
}
// ajust (sic — "adjust"; the misspelling is kept because callers in this
// file use it) rebalances root when its balance factor reaches ±2,
// choosing the single or double rotation from the taller grandchild.
func ajust(root *TreeNode) *TreeNode {
	if root == nil {
		return nil
	}
	// Balance factor: positive means right-heavy.
	compare := getHeight(root.Right) - getHeight(root.Left)
	if compare == 2 {
		if getHeight(root.Right.Right) > getHeight(root.Right.Left) {
			root = leftRotate(root)
		} else {
			root = rightThenLeftRotate(root)
		}
	} else if compare == -2 {
		if getHeight(root.Left.Left) > getHeight(root.Left.Right) {
			root = rightRotate(root)
		} else {
			root = leftThenRightRotate(root)
		}
	}
	return root
}
// getHeight returns the cached height of root, treating nil as 0.
func getHeight(root *TreeNode) int {
	if root != nil {
		return root.Height
	}
	return 0
}
// max returns the larger of a and b.
func max(a int, b int) int {
	if a < b {
		return b
	}
	return a
}
|
package main
import "fmt"
// 1. To satisfy an interface you have to use methods not func
// 2. Create methods with exact same signature as of in interface
// 3. Create all methods in struct as specified in interface. Or your struct should satisfy all of the methods of an interface

// Shape is satisfied by any type providing an area and a perimeter.
type Shape interface {
	Area() float64
	Perimeter() float64
}
// Rect is an axis-aligned rectangle; it satisfies Shape via its Area and
// Perimeter methods below.
type Rect struct {
	width float64
	height float64
}
// Area returns the rectangle's area (width × height).
func (r Rect) Area() float64 {
	return r.width * r.height
}
// Perimeter returns the rectangle's perimeter, 2·(width + height).
func (r Rect) Perimeter() float64 {
	return 2*r.width + 2*r.height
}
// Hello is an extra method beyond the Shape interface, showing that a
// type may have methods the interface does not mention.
func (r Rect) Hello() {
	fmt.Println("Hello")
}
// main assigns a Rect to a Shape variable and exercises the interface
// methods plus %T to show the dynamic type.
func main() {
	var s Shape = Rect{
		width: 10,
		height: 10,
	}
	fmt.Println("Area=", s.Area())
	fmt.Println("Perm=", s.Perimeter())
	fmt.Printf("%T", s)
}
|
package nv4
import (
"context"
address "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
states0 "github.com/filecoin-project/specs-actors/actors/states"
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
cid "github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
"golang.org/x/xerrors"
power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power"
adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing"
)
// powerMigrator migrates the v0 power actor state to the v2 schema,
// folding in the accumulated powerUpdates (cron events and claims) and
// consulting the input state tree to resolve miner seal proof types.
type powerMigrator struct {
	actorsIn *states0.Tree
	powerUpdates *PowerUpdates
}
// MigrateState loads the v0 power state at head, applies the pending
// power updates to the cron event queue and claims, re-encodes both
// collections into the v2 HAMT layout, copies the remaining scalar
// fields across, and stores the resulting v2 state. The result carries
// the new state root and a zero balance transfer.
func (m powerMigrator) MigrateState(ctx context.Context, store cbor.IpldStore, head cid.Cid, info MigrationInfo) (*StateMigrationResult, error) {
	var inState power0.State
	if err := store.Get(ctx, head, &inState); err != nil {
		return nil, err
	}
	// Patch in queued cron events first, then re-encode the queue.
	cronEventsRoot, err := m.updateCronEvents(ctx, store, inState.CronEventQueue, m.powerUpdates)
	if err != nil {
		return nil, xerrors.Errorf("could not update cron events: %w", err)
	}
	cronEventsRoot, err = m.migrateCronEvents(ctx, store, cronEventsRoot)
	if err != nil {
		return nil, xerrors.Errorf("cron events: %w", err)
	}
	// Same two-step pattern for claims: update, then migrate.
	claimsRoot, err := m.updateClaims(ctx, store, inState.Claims, m.powerUpdates)
	if err != nil {
		return nil, xerrors.Errorf("claims: %w", err)
	}
	claimsRoot, err = m.migrateClaims(ctx, store, claimsRoot)
	if err != nil {
		return nil, xerrors.Errorf("claims: %w", err)
	}
	outState := power2.State{
		TotalRawBytePower: inState.TotalRawBytePower,
		TotalBytesCommitted: inState.TotalBytesCommitted,
		TotalQualityAdjPower: inState.TotalQualityAdjPower,
		TotalQABytesCommitted: inState.TotalQABytesCommitted,
		TotalPledgeCollateral: inState.TotalPledgeCollateral,
		ThisEpochRawBytePower: inState.ThisEpochRawBytePower,
		ThisEpochQualityAdjPower: inState.ThisEpochQualityAdjPower,
		ThisEpochPledgeCollateral: inState.ThisEpochPledgeCollateral,
		ThisEpochQAPowerSmoothed: smoothing2.FilterEstimate(*inState.ThisEpochQAPowerSmoothed),
		MinerCount: inState.MinerCount,
		MinerAboveMinPowerCount: inState.MinerAboveMinPowerCount,
		CronEventQueue: cronEventsRoot,
		FirstCronEpoch: inState.FirstCronEpoch,
		Claims: claimsRoot,
		ProofValidationBatch: nil, // Set nil at the end of every epoch in cron handler
	}
	newHead, err := store.Put(ctx, &outState)
	return &StateMigrationResult{
		NewHead: newHead,
		Transfer: big.Zero(),
	}, err
}
// updateCronEvents appends every queued cron event from powerUpdates to
// the (still v0-encoded) cron multimap, keyed by epoch, and returns the
// updated root.
func (m *powerMigrator) updateCronEvents(ctx context.Context, store cbor.IpldStore, cronRoot cid.Cid, powerUpdates *PowerUpdates) (cid.Cid, error) {
	crons, err := adt0.AsMultimap(adt0.WrapStore(ctx, store), cronRoot)
	if err != nil {
		return cid.Undef, err
	}
	for epoch, cronEvents := range powerUpdates.crons { // nolint:nomaprange
		for _, event := range cronEvents {
			if err := crons.Add(abi.IntKey(int64(epoch)), &event); err != nil {
				return cid.Undef, err
			}
		}
	}
	return crons.Root()
}
// migrateCronEvents re-encodes the cron event queue into the v2 HAMT
// layout without touching the values.
func (m *powerMigrator) migrateCronEvents(ctx context.Context, store cbor.IpldStore, root cid.Cid) (cid.Cid, error) {
	// The HAMT has changed, but the value (an AMT[CronEvent] root) is identical.
	// The AMT queues may contain miner0.CronEventWorkerKeyChange, but these will be ignored by the miner
	// actor so are safe to leave behind.
	// Compile-time check that the v0 and v2 CronEvent layouts coincide.
	var _ = power0.CronEvent(power2.CronEvent{})
	return migrateHAMTRaw(ctx, store, root)
}
// updateClaims writes every pending claim from updates into the (still
// v0-encoded) claims map, keyed by miner address, and returns the
// updated root.
//
// NOTE(review): &claim aliases the range variable; this is safe only if
// Put serializes the value before the next iteration — confirm that Put
// does not retain the pointer.
func (m *powerMigrator) updateClaims(ctx context.Context, store cbor.IpldStore, root cid.Cid, updates *PowerUpdates) (cid.Cid, error) {
	claims, err := adt0.AsMap(adt0.WrapStore(ctx, store), root)
	if err != nil {
		return cid.Undef, err
	}
	for addr, claim := range updates.claims { // nolint:nomaprange
		if err := claims.Put(abi.AddrKey(addr), &claim); err != nil {
			return cid.Undef, err
		}
	}
	return claims.Root()
}
// migrateClaims rewrites every v0 claim into a v2 claim in a fresh v2
// map. v2 claims carry SealProofType explicitly, so each miner's proof
// type is looked up from its actor state in the input tree; a claim
// whose miner is missing from the tree is an error.
func (m *powerMigrator) migrateClaims(ctx context.Context, store cbor.IpldStore, root cid.Cid) (cid.Cid, error) {
	inMap, err := adt0.AsMap(adt0.WrapStore(ctx, store), root)
	if err != nil {
		return cid.Undef, err
	}
	outMap := adt2.MakeEmptyMap(adt2.WrapStore(ctx, store))
	var inClaim power0.Claim
	if err = inMap.ForEach(&inClaim, func(key string) error {
		// look up seal proof type from miner actor
		a, err := address.NewFromBytes([]byte(key))
		if err != nil {
			return err
		}
		minerActor, found, err := m.actorsIn.GetActor(address.Address(a))
		if err != nil {
			return err
		}
		if !found {
			return xerrors.Errorf("claim exists for miner %s but miner not in state tree", a)
		}
		var minerState miner0.State
		if err := store.Get(ctx, minerActor.Head, &minerState); err != nil {
			return err
		}
		info, err := minerState.GetInfo(adt0.WrapStore(ctx, store))
		if err != nil {
			return err
		}
		outClaim := power2.Claim{
			SealProofType: info.SealProofType,
			RawBytePower: inClaim.RawBytePower,
			QualityAdjPower: inClaim.QualityAdjPower,
		}
		return outMap.Put(StringKey(key), &outClaim)
	}); err != nil {
		return cid.Undef, err
	}
	return outMap.Root()
}
|
package constants
import "path/filepath"
const (
	// OcBinaryName is the OpenShift client executable name.
	OcBinaryName = "oc"
	// TrayBinaryName is the macOS tray application bundle name.
	TrayBinaryName = "CodeReady Containers.app"
)
var (
	// TrayBinaryPath locates the tray bundle under CrcBinDir (declared
	// elsewhere in this package).
	TrayBinaryPath = filepath.Join(CrcBinDir, TrayBinaryName)
)
|
package main
import (
"strconv"
"github.com/gin-gonic/gin"
"github.com/rossifedericoe/bootcamp/apirest/dto"
"github.com/rossifedericoe/bootcamp/apirest/services/movieService"
)
// main wires a gin router exposing CRUD-ish movie endpoints backed by
// movieService, then serves on the default address (localhost:8080).
func main() {
	engine := gin.Default()
	// List all movies.
	engine.GET("/movies", func(context *gin.Context) {
		context.JSON(200, movieService.ListarMovies())
	})
	// List popular movies only.
	engine.GET("/movies/populares", func(context *gin.Context) {
		context.JSON(200, movieService.ListarMoviesPopulares())
	})
	// Create a movie from the posted JSON body.
	engine.POST("/movies", func(context *gin.Context) {
		var movieDTO dto.MovieDTO
		bindErr := context.BindJSON(&movieDTO)
		if bindErr != nil {
			context.JSON(400, bindErr.Error())
			return
		}
		movieCreada, crearErr := movieService.Crear(movieDTO.Title, movieDTO.Language, movieDTO.Budget, movieDTO.Revenue, movieDTO.IMDB)
		if crearErr != nil {
			context.JSON(400, crearErr.Error())
			return
		}
		context.JSON(200, movieCreada)
	})
	// Delete a movie by numeric id; non-numeric ids parse to 0.
	engine.DELETE("/movies/:id", func(context *gin.Context) {
		id, _ := strconv.Atoi(context.Param("id"))
		eliminarErr := movieService.EliminarMovie(id)
		if eliminarErr != nil {
			context.JSON(404, eliminarErr.Error())
			return
		}
		context.JSON(200, "pudo borrarse")
	})
	// Serves on localhost:8080.
	engine.Run()
}
|
package mytest
import (
"fmt"
"reflect"
"strconv"
"testing"
"time"
)
// PaymentInfoResponse is a small fixture struct used by the reflection
// and pointer-semantics experiments below.
type PaymentInfoResponse struct {
	MsgID string `json:"msg_id"`
	CardNumber string `json:"card_number"`
}
// TestFiledName (sic — "Field"; kept so the test name is stable)
// demonstrates enumerating struct fields via reflection and reading
// their json tags.
func TestFiledName(t *testing.T) {
	p := &PaymentInfoResponse{}
	typ := reflect.TypeOf(p)
	// Elem() dereferences the pointer type to reach the struct type.
	elem := typ.Elem()
	totalFields := elem.NumField()
	for index := 0; index < totalFields; index++ {
		field := elem.Field(index)
		// extract any with sql-col
		tagDbColumn := field.Tag.Get("json")
		if tagDbColumn == "" {
			continue
		}
		fmt.Println(tagDbColumn, " - ", field.Name)
	}
	// Indirect reaches the struct value through the pointer.
	val := reflect.Indirect(reflect.ValueOf(p))
	fmt.Println(val.Type().Field(0).Name)
}
// TestNil shows that an empty map literal is non-nil.
func TestNil(t *testing.T) {
	m := map[string]string{}
	fmt.Println(m == nil)
}
// TestAssert shows a failed type assertion on a nil interface: ok is
// false and the value is the zero string.
func TestAssert(t *testing.T) {
	var boxed interface{}
	str, ok := boxed.(string)
	fmt.Println(ok)
	fmt.Println(str)
}
// TestNilBytes shows that converting a nil []byte yields the empty
// string.
func TestNilBytes(t *testing.T) {
	var raw []byte
	fmt.Println(raw == nil)
	converted := string(raw)
	fmt.Println(converted)
	fmt.Println(converted == "")
	fmt.Println(len(converted))
}
// TestPointer demonstrates pass-by-value into a goroutine: keepPrint
// receives a copy (*p), so mutations to p.CardNumber made here are not
// visible in the goroutine's output.
func TestPointer(t *testing.T) {
	p := &PaymentInfoResponse{
		MsgID: "123",
		CardNumber: "345",
	}
	fmt.Println(fmt.Sprintf("intial value: %+v", p))
	stopChan := make(chan bool)
	// Dereference: the goroutine gets a snapshot, not the pointer.
	go keepPrint(stopChan, *p)
	for i := 0; i < 10; i++ {
		p.CardNumber = strconv.Itoa(i)
		time.Sleep(10 * time.Millisecond)
	}
	stopChan <- true
}
// keepPrint echoes its copy of val every 10ms until signaled on
// stopChan. val is passed by value, so caller-side mutations after the
// goroutine starts are never observed here.
func keepPrint(stopChan chan bool, val PaymentInfoResponse) {
	for {
		select {
		case <-stopChan:
			fmt.Println("stopped")
			return
		default:
			fmt.Println(fmt.Sprintf("echo: %+v", val))
			time.Sleep(10 * time.Millisecond)
		}
	}
}
|
package sessions_test
import (
"crypto/rand"
"encoding/hex"
"net/http"
"net/http/httptest"
"sync"
"testing"
"time"
"github.com/go-http-utils/cookie"
"github.com/go-http-utils/cookie-session"
"github.com/stretchr/testify/assert"
)
var (
	// Fixture values shared across the subtests. "useage" (sic) is kept
	// as-is to match its references throughout this file.
	username = "mushroom"
	useage int64 = 99
	secondUserName = "mushroomnew"
	secondUsage int64 = 100
	// store is the package-wide MemoryStore reused by the default-option
	// subtests below.
	store = sessions.NewMemoryStore()
)
// TestMemoryStore exercises the memory-backed session store end to end:
// create/reuse/destroy with default options, two differently-named
// sessions on one request, cookie attribute propagation from custom
// Options, idempotent re-saves, and concurrent session creation with
// MaxAge-driven expiry. Cookies are carried between simulated requests
// via migrateCookies (defined elsewhere in this file).
func TestMemoryStore(t *testing.T) {
	SessionName := "teambition"
	NewSessionName := "teambition-new"
	SessionKeys := []string{"keyxxx"}
	// Round 1: create, then reuse+destroy, then confirm it is gone.
	t.Run("Sessions use default options that should be", func(t *testing.T) {
		assert := assert.New(t)
		req, err := http.NewRequest("GET", "/", nil)
		recorder := httptest.NewRecorder()
		handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			session := &Session{Meta: &sessions.Meta{}}
			store.Load(SessionName, session, cookie.New(w, r, SessionKeys...))
			session.Name = username
			session.Age = useage
			err = session.Save()
			assert.Nil(err)
			assert.True(session.IsNew())
			assert.True(session.GetSID() == "")
		})
		handler.ServeHTTP(recorder, req)
		//====== reuse session =====
		req, err = http.NewRequest("GET", "/", nil)
		migrateCookies(recorder, req)
		handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			session := &Session{Meta: &sessions.Meta{}}
			store.Load(SessionName, session, cookie.New(w, r, SessionKeys...))
			assert.Equal(username, session.Name)
			assert.Equal(int64(useage), session.Age)
			assert.False(session.IsNew())
			assert.True(session.GetSID() != "")
			assert.Nil(session.Destroy())
		})
		recorder = httptest.NewRecorder()
		handler.ServeHTTP(recorder, req)
		//====== destroy session=====
		req, err = http.NewRequest("GET", "/", nil)
		migrateCookies(recorder, req)
		handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			session := &Session{Meta: &sessions.Meta{}}
			err := store.Load(SessionName, session, cookie.New(w, r, SessionKeys...))
			assert.NotNil(err)
			assert.True(session.IsNew())
		})
		recorder = httptest.NewRecorder()
		handler.ServeHTTP(recorder, req)
	})
	// Round 2: two sessions with different names coexist on one request.
	t.Run("Sessions with sign session that should be", func(t *testing.T) {
		assert := assert.New(t)
		recorder := httptest.NewRecorder()
		req, _ := http.NewRequest("GET", "/", nil)
		handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			session := &Session{Meta: &sessions.Meta{}}
			store.Load(SessionName, session, cookie.New(w, r, SessionKeys...))
			session.Name = username
			session.Age = useage
			session.Save()
			session = &Session{Meta: &sessions.Meta{}}
			store.Load(NewSessionName, session, cookie.New(w, r, SessionKeys...))
			session.Name = secondUserName
			session.Age = secondUsage
			session.Save()
		})
		handler.ServeHTTP(recorder, req)
		//====== reuse session =====
		req, _ = http.NewRequest("GET", "/", nil)
		migrateCookies(recorder, req)
		handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			session := &Session{Meta: &sessions.Meta{}}
			store.Load(SessionName, session, cookie.New(w, r, SessionKeys...))
			assert.Equal(username, session.Name)
			assert.Equal(useage, session.Age)
			session = &Session{Meta: &sessions.Meta{}}
			store.Load(NewSessionName, session, cookie.New(w, r, SessionKeys...))
			assert.Equal(secondUserName, session.Name)
			assert.Equal(secondUsage, session.Age)
		})
		recorder = httptest.NewRecorder()
		handler.ServeHTTP(recorder, req)
	})
	// Round 3: custom Options propagate onto the emitted cookie.
	t.Run("Sessions with Name() and Store() that should be", func(t *testing.T) {
		assert := assert.New(t)
		recorder := httptest.NewRecorder()
		req, _ := http.NewRequest("GET", "/", nil)
		store := sessions.NewMemoryStore(&sessions.Options{
			Path: "xxx.com",
			HTTPOnly: true,
			MaxAge: 64,
			Domain: "ttt.com",
			Secure: true,
		})
		handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			session := &Session{Meta: &sessions.Meta{}}
			store.Load(SessionName, session, cookie.New(w, r, SessionKeys...))
			session.Name = username
			session.Age = useage
			session.Save()
			assert.Equal(SessionName, session.GetName())
			assert.NotNil(session.GetStore())
		})
		handler.ServeHTTP(recorder, req)
		cookies, _ := getCookie(SessionName, recorder)
		assert.Equal("ttt.com", cookies.Domain)
		assert.Equal("xxx.com", cookies.Path)
		assert.Equal(true, cookies.HttpOnly)
		assert.Equal(64, cookies.MaxAge)
		assert.Equal(true, cookies.Secure)
	})
	// Round 4: saving identical values again must not rewrite the session.
	t.Run("Sessions donn't override old value when seting same value that should be", func(t *testing.T) {
		assert := assert.New(t)
		req, err := http.NewRequest("GET", "/", nil)
		assert.Nil(err)
		recorder := httptest.NewRecorder()
		handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			session := &Session{Meta: &sessions.Meta{}}
			store.Load(SessionName, session, cookie.New(w, r, SessionKeys...))
			session.Name = username
			session.Age = useage
			session.Save()
		})
		handler.ServeHTTP(recorder, req)
		//====== reuse session =====
		req, err = http.NewRequest("GET", "/", nil)
		migrateCookies(recorder, req)
		handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			session := &Session{Meta: &sessions.Meta{}}
			store.Load(SessionName, session, cookie.New(w, r, SessionKeys...))
			session.Name = username
			session.Age = useage
			session.Save()
		})
		handler.ServeHTTP(recorder, req)
	})
	// Round 5: 10k concurrent sessions, all expired after MaxAge seconds.
	t.Run("Sessions with high goroutine should be", func(t *testing.T) {
		assert := assert.New(t)
		req, err := http.NewRequest("GET", "/", nil)
		assert.Nil(err)
		recorder := httptest.NewRecorder()
		store := sessions.NewMemoryStore(&sessions.Options{
			Path: "xxx.com",
			HTTPOnly: true,
			MaxAge: 2,
			Domain: "ttt.com",
			Secure: true,
		})
		handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			session := &Session{Meta: &sessions.Meta{}}
			store.Load(SessionName, session, cookie.New(w, r, SessionKeys...))
			session.Name = username
			session.Age = useage
			session.Save()
			var wg sync.WaitGroup
			wg.Add(10000)
			for i := 0; i < 10000; i++ {
				go func() {
					newid := genID()
					sess := &Session{Meta: &sessions.Meta{}}
					store.Load(newid, sess, cookie.New(w, r, SessionKeys...))
					sess.Name = username
					sess.Age = useage
					sess.Save()
					wg.Done()
				}()
			}
			wg.Wait()
		})
		handler.ServeHTTP(recorder, req)
		// Wait past MaxAge (2s) so every stored session has expired.
		time.Sleep(time.Second * 3)
		assert.Equal(0, store.Len())
		//====== reuse session =====
		req, err = http.NewRequest("GET", "/", nil)
		migrateCookies(recorder, req)
		handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			session := &Session{Meta: &sessions.Meta{}}
			store.Load(SessionName, session, cookie.New(w, r, SessionKeys...))
			assert.Equal("", session.Name)
			assert.Equal(int64(0), session.Age)
		})
		handler.ServeHTTP(recorder, req)
		store.Close()
	})
}
// genID returns a freshly generated session identifier: 12 random
// bytes rendered as a 24-character lowercase hex string. It panics if
// the random source fails, which is acceptable in test helpers.
func genID() string {
	raw := make([]byte, 12)
	if _, err := rand.Read(raw); err != nil {
		panic(err)
	}
	return hex.EncodeToString(raw)
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
cli "github.com/jawher/mow.cli"
"github.com/karantin2020/gitcomm"
"github.com/karantin2020/gitcomm/version"
)
// main wires up the gitcomm command-line application and hands control
// to it. Flag registration order and all user-visible strings are part
// of the CLI contract and are kept exactly as before.
func main() {
	app := cli.App("gitcomm", "Automate git commit messaging\n"+
		"\nSource https://github.com/karantin2020/gitcomm")
	app.Version("V version", version.BuildDetails())
	app.Spec = "[-v] [-Ast] | [-u]"

	// Boolean option flags; undo (-u) is mutually exclusive with the rest
	// per the Spec above.
	addAll := app.BoolOpt("A addAll", false, "Adds, modifies, and removes index entries "+
		"to match the working tree. Evals `git add -A`")
	logOn := app.BoolOpt("v verbose", false, "Switch log output")
	showLast := app.BoolOpt("s show", false, "Show last commit or not. "+
		"Evals `git show -s` in the end of execution")
	makeTag := app.BoolOpt("t tag", false, "Create an annonated tag for the next logical version")
	revert := app.BoolOpt("u undo", false, "Revert last commit")

	// Action runs after flags are parsed successfully.
	app.Action = func() {
		// Silence the default logger unless verbose output was requested.
		if !*logOn {
			log.SetFlags(0)
			log.SetOutput(ioutil.Discard)
		}
		if !gitcomm.CheckIsGitDir() {
			fmt.Println("Current directory is not inside git worktree")
			os.Exit(1)
		}
		// Undo mode: optionally revert the last commit, then stop.
		if *revert {
			if gitcomm.PromptConfirm("Revert last commit?") {
				gitcomm.UndoLastCommit()
			}
			os.Exit(0)
		}
		if gitcomm.CheckForUncommited() {
			log.Printf("there are new changes in working directory\n")
			msg := gitcomm.Prompt()
			gitcomm.GitExec(*addAll, *showLast, msg)
		} else {
			log.Printf("nothing to commit, working tree clean\n")
		}
		// Optionally create an annotated tag for the next version.
		if *makeTag {
			gitcomm.AutoTag(gitcomm.TagPrompt())
		}
	}
	// Parse os.Args and dispatch to Action.
	app.Run(os.Args)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.