text
stringlengths 11
4.05M
|
|---|
/* main function */
/* file name: webcmd.go */
/* link: */
/* */
/* update: 20181122 */
package webdata
import (
"encoding/hex"
"fmt"
"log"
"loranet20181205/database"
"loranet20181205/exception"
"net"
_ "github.com/go-sql-driver/mysql"
)
// WebCmd builds an 11-byte command frame and sends one copy over UDP to the
// LoRa server for every end device whose status matches activeStatus.
//
// Frame layout: [0]=0xFD header, [1]=0xC0, [2..5]=device address derived from
// the DevEUI, [6..8]=fixed command bytes, [9]=delay in seconds, [10]=checksum.
func WebCmd(activeStatus int) {
	webcmddata := make([]byte, 11)
	webcmddata[0] = 0xFD // frame header
	webcmddata[1] = 0xC0
	webcmddata[6] = 0x11
	webcmddata[7] = 0x01
	webcmddata[8] = 0x02

	// Create the UDP connection to the configured LoRa server.
	LoraServerIP, LoraServerPort := database.DbGetServerIP()
	IPstring := fmt.Sprintf("%s:%d", LoraServerIP, LoraServerPort)
	conn, err := net.Dial("udp", IPstring)
	// Check the dial error BEFORE deferring Close: the previous order
	// deferred Close on a possibly-nil conn, which panics on return.
	exception.CheckError(err)
	defer conn.Close()

	// Query the end devices with the requested active status.
	edInfo := database.ActiveEdInfo(activeStatus)

	// Initial delay of 29 seconds (26 + the first per-device 3 added below).
	webcmddata[9] = 26
	for r := range edInfo {
		log.Print(edInfo[r])
		decoded, err := hex.DecodeString(edInfo[r].DevEUI)
		exception.CheckError(err)
		// Device address taken from the decoded DevEUI bytes.
		// NOTE(review): the sequence [4],[3],[2],[0] skips decoded[1];
		// confirm this is intentional and not a typo for decoded[1].
		webcmddata[2] = decoded[4]
		webcmddata[3] = decoded[3]
		webcmddata[4] = decoded[2]
		webcmddata[5] = decoded[0]
		// Communication speed allows roughly 3 seconds per command.
		webcmddata[9] += 3
		// Checksum: header value plus bytes 1..9.
		webcmddata[10] = 0xFD
		for i := 1; i < 10; i++ {
			webcmddata[10] += webcmddata[i]
		}
		// Schedule the command on the server (send errors are ignored,
		// matching the original best-effort behavior).
		conn.Write(webcmddata)
	}
}
|
package main
import (
"github.com/tiagorlampert/CHAOS/client/app"
"github.com/tiagorlampert/CHAOS/client/app/environment"
"github.com/tiagorlampert/CHAOS/client/app/ui"
)
// Build-time metadata and connection settings, intended to be injected at
// link time via -ldflags "-X main.Version=..." etc.
var (
	Version       = "dev"
	Port          = ""
	ServerAddress = ""
	Token         = ""
)
// main shows the startup menu and then runs the client application with the
// environment built from the package-level connection settings.
func main() {
	ui.ShowMenu(Version, ServerAddress, Port)

	env := environment.Load(ServerAddress, Port, Token)
	app.New(env).Run()
}
|
package tests
import "testing"
// TestDirectoryListing runs the default builder against each
// directory-listing fixture path.
func TestDirectoryListing(t *testing.T) {
	paths := []string{
		"/directory-listing",
		"/directory-listing/Files",
	}
	for _, p := range paths {
		testBuilder(t, defaultBuilder, p)
	}
}
|
package main
import (
"context"
"encoding/json"
"flag"
"log"
"github.com/zaynjarvis/fyp/config/api"
"google.golang.org/grpc"
)
// Config is the payload pushed to the config center; it is JSON-encoded in
// main before being sent.
type Config struct {
	Name    string // service name
	Version int    // configuration version number
	Desc    string // human-readable description
}
// main marshals a sample Config and stores it in the config center over gRPC.
func main() {
	var (
		cfg            = Config{Name: "service-name", Version: 1, Desc: "hello world"}
		name           = "default"
		version uint32 = 1
		addr           = flag.String("addr", "localhost:3700", "address of config center")
	)
	// Parse the command line; previously flag.Parse was never called, so
	// -addr silently always kept its default value.
	flag.Parse()
	conn, err := grpc.Dial(*addr, grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	// Close the connection on exit; it was previously leaked.
	defer conn.Close()
	client := api.NewConfigCenterClient(conn)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	data, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	res, err := client.Set(ctx, &api.ServiceConfig{Name: name, Version: version, Config: data})
	if err != nil {
		panic(err)
	}
	log.Println(res)
}
|
package mapreduce
import (
"fmt"
)
// handleTask executes one task on the given worker via RPC. On success the
// worker is reported on successWorkerChan; on failure the task is requeued on
// taskChan and the worker is reported on failWorkerChan.
func handleTask(worker string, args DoTaskArgs, successWorkerChan chan string, failWorkerChan chan string, taskChan chan DoTaskArgs) {
	if ok := call(worker, "Worker.DoTask", &args, nil); ok {
		successWorkerChan <- worker
		return
	}
	fmt.Println(worker, "executing task", args.TaskNumber, "has failed")
	taskChan <- args
	failWorkerChan <- worker
}
//
// schedule() starts and waits for all tasks in the given phase (mapPhase
// or reducePhase). the mapFiles argument holds the names of the files that
// are the inputs to the map phase, one per map task. nReduce is the
// number of reduce tasks. the registerChan argument yields a stream
// of registered workers; each item is the worker's RPC address,
// suitable for passing to call(). registerChan will yield all
// existing registered workers (if any) and new ones as they register.
//
func schedule(jobName string, mapFiles []string, nReduce int, phase jobPhase, registerChan chan string) {
	var ntasks int
	var n_other int // number of inputs (for reduce) or outputs (for map)
	switch phase {
	case mapPhase:
		ntasks = len(mapFiles)
		n_other = nReduce
	case reducePhase:
		ntasks = nReduce
		n_other = len(mapFiles)
	}
	fmt.Printf("Schedule: %v %v tasks (%d I/Os)\n", ntasks, phase, n_other)
	// All ntasks tasks have to be scheduled on workers. Once all tasks
	// have completed successfully, schedule() should return.
	// scheduling policy: every worker's goroutine has a channel; when a worker can be assigned work, a new goroutine is created to schedule the work, wait for a response, and then write to the worker's channel that it is done
	// select on:
	//  registerChan (new worker):
	//   add worker to workers list
	//   assign work to worker
	//  workerChannels (each worker):
	// taskChan is buffered to hold every task; a producer goroutine fills it
	// so the scheduling loop below can pull tasks on demand. Failed tasks are
	// pushed back onto taskChan by handleTask.
	taskChan := make(chan DoTaskArgs, ntasks)
	go func() {
		for i := 0; i < ntasks; i++ {
			var args DoTaskArgs
			args.JobName = jobName
			if phase == mapPhase {
				// only map tasks have an input file
				args.File = mapFiles[i]
			}
			args.Phase = phase
			args.TaskNumber = i
			args.NumOtherPhase = n_other
			taskChan <- args
		}
	}()
	successWorkerChan := make(chan string, 11)
	failWorkerChan := make(chan string, 10)
	tasksRemaining := ntasks    // tasks not yet completed successfully
	tasksOutstanding := 0       // tasks currently assigned to a worker
	workerFree := make(map[string]bool)
	// assumption: once a worker dies, don't want to assign any more work to that worker
	for tasksRemaining > 0 {
		// fmt.Println(tasksRemaining, "tasks remaining")
		select {
		case newWorker := <-registerChan:
			// newly registered worker: record it and assign work if any
			// unassigned tasks remain
			workerFree[newWorker] = true
			if tasksRemaining-tasksOutstanding > 0 {
				nextTask := <-taskChan
				tasksOutstanding = tasksOutstanding + 1
				workerFree[newWorker] = false
				go handleTask(newWorker, nextTask, successWorkerChan, failWorkerChan, taskChan)
			}
		case nextWorker := <-successWorkerChan:
			// a task finished: either hand the worker the next task or mark
			// it free
			tasksOutstanding = tasksOutstanding - 1
			tasksRemaining = tasksRemaining - 1
			// even if there are tasks remaining, they may have already been scheduled
			if tasksRemaining-tasksOutstanding > 0 {
				nextTask := <-taskChan
				tasksOutstanding = tasksOutstanding + 1
				go handleTask(nextWorker, nextTask, successWorkerChan, failWorkerChan, taskChan)
			} else {
				workerFree[nextWorker] = true
			}
		case failedWorker := <-failWorkerChan:
			// a worker failed: drop it permanently and reassign its task
			// (already requeued on taskChan by handleTask) to a free worker
			delete(workerFree, failedWorker)
			tasksOutstanding = tasksOutstanding - 1
			// if no outstanding tasks, then need to assign to worker; if no outstanding workers, wait for success from existing worker or for new worker
			for worker, isFree := range workerFree {
				if isFree {
					workerFree[worker] = false
					nextTask := <-taskChan
					tasksOutstanding = tasksOutstanding + 1
					go handleTask(worker, nextTask, successWorkerChan, failWorkerChan, taskChan)
					break
				}
			}
		}
	}
	fmt.Printf("Schedule: %v done\n", phase)
}
|
package main
//p173
import (
"fmt"
"math/rand"
"time"
)
// main fills a 10-element array with random values shifted by one, prints
// them, and reports the index of the first element equal to 55, if any.
func main() {
	// Seed once up front. The previous version reseeded inside the loop with
	// the current time and slept 100ms per iteration purely so consecutive
	// seeds differed — seeding a single time removes the need for both.
	rand.Seed(time.Now().UnixNano())
	var arr [10]int
	for i := 0; i < 10; i++ {
		num := rand.Intn(100) // random value in [0, 100)
		fmt.Println(num)
		arr[i] = num + 1 // stored values are in [1, 100]
	}
	fmt.Println(arr)
	// Report the first occurrence of 55, if present.
	for index, value := range arr {
		if value == 55 {
			fmt.Println("yes", index)
			break
		}
	}
}
// fmt.Println(shuzu)
// for i := len(shuzu)-1 ; i >= 0;i-- {
// fmt.Println(shuzu[i])
//for k:=len(arr)-1;k>0 ;k-- {
// for j:=0;j<len(arr)-1;j++{
// if (arr)[j] < (arr)[j+1]{
// var t int
// t = (arr)[j+1]
// (arr)[j+1] = (arr)[j]
// (arr)[j] = t
// }
// }
//}
//p197
|
package main
import (
"fmt"
)
// main prints a message matching the current tiredness level.
func main() {
	tamanhodocansaço := 2
	switch tamanhodocansaço {
	case 0:
		fmt.Println("que malandragem")
	case 1:
		fmt.Println("uma gelada ia bem")
	case 2:
		fmt.Println("ih já era, só nascendo denovo")
	}
}
|
package _334_Increasing_Triplet_Subsequence
import (
"testing"
)
// testCase pairs an input sequence with the expected result of
// increasingTriplet for that sequence.
type testCase struct {
	input  []int // sequence under test
	output bool  // expected result: whether an increasing triplet exists
}
// TestIncreasingTriplet verifies increasingTriplet against a table of known
// inputs and expected results.
func TestIncreasingTriplet(t *testing.T) {
	cases := []testCase{
		{input: []int{1, 2, 3, 4, 5}, output: true},
		{input: []int{5, 4, 3, 2, 1}, output: false},
		{input: []int{2, 1, 5, 0, 4, 6}, output: true},
	}
	for _, tc := range cases {
		got := increasingTriplet(tc.input)
		if got != tc.output {
			t.Errorf("output should be \"%v\" instead of \"%v\" with input=\"%v\"", tc.output, got, tc.input)
		}
	}
}
|
package config
// Log holds logging configuration loaded from YAML.
type Log struct {
	File      string `yaml:"file,omitempty"`      // log file path
	Level     string `yaml:"level,omitempty"`     // log level name
	Formatter string `yaml:"formatter,omitempty"` // output formatter name
}
|
package etcd
import (
"context"
"fmt"
"github.com/coreos/etcd/clientv3"
"mall_server/store"
"os"
"os/signal"
"strings"
"syscall"
"time"
)
// Wiper wraps an etcd v3 client used to register this service's key and
// keep it alive.
type Wiper struct {
	client *clientv3.Client
}
// NewClient dials etcd using the comma-separated endpoint list from the
// configuration and wraps the resulting client in a Wiper.
func NewClient() (*Wiper, error) {
	endpoints := strings.Split(conf.Get().Etcd.Addr, ",")
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: 15 * time.Second,
	})
	if err != nil {
		return nil, err
	}
	w := &Wiper{client: cli}
	return w, nil
}
var log = new(store.Log)
var conf = new(store.Config)

// v is the etcd key under which this service registers itself.
// NOTE(review): v is computed once at package init; if the configuration is
// loaded after package init, the schema/name/key parts will be their zero
// values — confirm the load order.
var v = fmt.Sprintf("/%s/%s/%s", conf.Get().Etcd.Schema, conf.Get().Etcd.Name, conf.Get().Etcd.Key)
// Register connects to etcd, starts the periodic keep-alive loop, and
// installs a signal handler that removes this service's key before exiting.
func Register() {
	client, err := NewClient()
	if err != nil {
		log.Get().Error("connect to etcd err:%v", err)
		return
	}
	client.KeepAlive()
	ch := make(chan os.Signal, 1)
	// NOTE(review): SIGKILL cannot be caught; listing it here is a no-op, so
	// on kill -9 the key is only removed once its lease expires.
	signal.Notify(ch, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL, syscall.SIGHUP, syscall.SIGQUIT)
	go func() {
		sig := <-ch
		// Best-effort removal of the registration before terminating.
		client.UnRegister(conf.Get().Etcd.Name, conf.Get().Etcd.Key)
		if i, ok := sig.(syscall.Signal); ok {
			// exit with the numeric signal value
			os.Exit(int(i))
		} else {
			os.Exit(0)
		}
	}()
	log.Get().Info("etcd connect success!!!")
}
// GetClient exposes the underlying etcd client.
func (s *Wiper) GetClient() *clientv3.Client {
	return s.client
}
// KeepAlive periodically (every Ttl seconds) ensures this service's key
// exists in etcd, re-creating it under a fresh lease whenever it has expired.
// (Original note, translated: registers the address into the etcd component.)
func (s *Wiper) KeepAlive() {
	ticker := time.NewTicker(time.Second * time.Duration(conf.Get().Etcd.Ttl))
	go func() {
		for {
			s.ensureRegistered()
			// Always wait for the next tick. The previous version used
			// `continue` on error paths, which skipped this wait and
			// busy-looped against etcd while it was unreachable.
			<-ticker.C
		}
	}()
}

// ensureRegistered checks for the service key and, if absent, grants a lease,
// writes the key under it, and starts the lease keep-alive stream.
func (s *Wiper) ensureRegistered() {
	getResp, err := s.client.Get(context.Background(), v)
	if err != nil {
		log.Get().Error("cli getResp err:%+v\n %v", getResp, err)
		return
	}
	if getResp.Count != 0 {
		// key still present; nothing to do
		return
	}
	leaseResp, err := s.client.Grant(context.Background(), conf.Get().Etcd.Ttl)
	if err != nil {
		log.Get().Error("etcd client.Grant error:%v", err)
		return
	}
	_, err = s.client.Put(context.Background(), v, conf.Get().Etcd.Key, clientv3.WithLease(leaseResp.ID))
	if err != nil {
		log.Get().Error("etcd s.client.Put error:%v", err)
		return
	}
	ch, err := s.client.KeepAlive(context.Background(), leaseResp.ID)
	if err != nil {
		log.Get().Error("etcd s.client.KeepAlive err:%v", err)
		return
	}
	// Drain the keep-alive responses so the channel does not fill up. Using
	// range (instead of the previous infinite `<-ch` loop) lets the goroutine
	// exit when the channel is closed.
	go func() {
		for range ch {
		}
	}()
}
// UnRegister remove service from etcd.
// NOTE(review): the name and addr parameters are currently ignored — the
// package-level key v is always deleted — and the Delete error is discarded;
// confirm both are intended.
func (s *Wiper) UnRegister(name string, addr string) {
	s.client.Delete(context.Background(), v)
}
|
package opconf
// Default operation configuration: one line per opcode, in the form
// "<hex opcode> <MNEMONIC> [operand kinds...]". The opcodes appear to follow
// the IJVM instruction set, with extra pseudo-ops (HALT/ERR/OUT/IN) at the
// top of the unused range — confirm against this package's parser.
const defaultConfig = `0x10 BIPUSH byte
0x59 DUP
0xA7 GOTO label
0x60 IADD
0x7E IAND
0x99 IFEQ label
0x9B IFLT label
0x9F IF_ICMPEQ label
0x84 IINC var byte
0x15 ILOAD var
0xB6 INVOKEVIRTUAL method
0xB0 IOR
0xAC IRETURN
0x36 ISTORE var
0x64 ISUB
0x13 LDC_W constant
0x00 NOP
0x57 POP
0x5F SWAP
0xC4 WIDE
0xFF HALT
0xFE ERR
0xFD OUT
0xFC IN`
|
package main
import (
"encoding/json"
"errors"
"strconv"
"github.com/clearmatics/autonity/rlp"
"github.com/ethereum/go-ethereum/common"
"github.com/hyperledger/fabric/core/chaincode/shim"
)
// unmarshallState decodes a JSON-encoded state blob into a Shares value.
func unmarshallState(state []byte) (Shares, error) {
	var shares Shares
	if err := json.Unmarshal(state, &shares); err != nil {
		return shares, errors.New("Failed unmarshall state")
	}
	return shares, nil
}
// marshallState encodes the shares state as indented JSON.
func marshallState(shares Shares) ([]byte, error) {
	return json.MarshalIndent(shares, "", " ")
}
// unmarshallOrderBook decodes a JSON-encoded state blob into an OrderStruct.
func unmarshallOrderBook(state []byte) (OrderStruct, error) {
	var orders OrderStruct
	if err := json.Unmarshal(state, &orders); err != nil {
		return orders, errors.New("Failed unmarshall state")
	}
	return orders, nil
}
// marshallOrderBook encodes the order book as indented JSON.
func marshallOrderBook(orders OrderStruct) ([]byte, error) {
	return json.MarshalIndent(orders, "", " ")
}
// marshallTrades encodes the trades state as indented JSON.
func marshallTrades(trades Trades) ([]byte, error) {
	return json.MarshalIndent(trades, "", " ")
}
// newOrder builds an OpenOrder from the chaincode argument list:
// args[1]=organisation, args[2]="Buy"|"Sell", args[3]=amount, args[4]=price,
// args[5]=address (hex), args[6]=reference.
func newOrder(args []string) (OpenOrder, error) {
	var order OpenOrder
	// Guard against short argument lists: indexing blindly (as before) would
	// panic the chaincode on a malformed invocation.
	if len(args) < 7 {
		return order, errors.New("Incorrect number of arguments")
	}
	order.Organisation = args[1]
	// identify the order type
	switch args[2] {
	case "Buy", "Sell":
		order.BuySell = args[2]
		order.Address = common.HexToAddress(args[5])
	default:
		return order, errors.New("Incorrect Buy/Sell Flag")
	}
	// Retrieve amount
	amount, err := strconv.Atoi(args[3])
	if err != nil {
		return order, errors.New("Amount is not integer")
	}
	order.Amount = uint(amount)
	// Retrieve price (previously reused the amount variable)
	price, err := strconv.Atoi(args[4])
	if err != nil {
		return order, errors.New("Price is not integer")
	}
	order.Price = uint(price)
	// Retrieve reference
	order.Ref = args[6]
	return order, nil
}
// matchOrder scans the open orders for the first one whose details are
// compatible with newOrder; the zero OpenOrder is returned when none match.
func matchOrder(orderBook OrderStruct, newOrder OpenOrder) OpenOrder {
	for _, open := range orderBook.Open {
		if checkDetails(open, newOrder) {
			return open
		}
	}
	var none OpenOrder
	return none
}
// orderMatched records the match between matchedOrder and newOrder in the
// order book's matched section and removes the matched open order.
func orderMatched(orderBook OrderStruct, matchedOrder OpenOrder, newOrder OpenOrder) OrderStruct {
	// Add matched order to the matched section.
	var newMatched MatchedOrder
	newMatched.Organisation = matchedOrder.Organisation
	newMatched.Amount = matchedOrder.Amount
	newMatched.Price = matchedOrder.Price
	newMatched.Ref = matchedOrder.Ref
	// Determine seller and buyer from the side of the incoming order.
	if newOrder.BuySell == "Buy" {
		newMatched.Recv = newOrder.Address
		newMatched.Send = matchedOrder.Address
	} else {
		newMatched.Send = newOrder.Address
		newMatched.Recv = matchedOrder.Address
	}
	orderBook.Matched = append(orderBook.Matched, newMatched)
	// Remove the original order from the book. Break after the first removal:
	// the previous version kept iterating over a slice it had just shortened,
	// which could index past the new length and panic, and would also remove
	// any duplicate entries.
	for i := range orderBook.Open {
		if orderBook.Open[i] == matchedOrder {
			orderBook.Open = append(orderBook.Open[:i], orderBook.Open[i+1:]...)
			break
		}
	}
	return orderBook
}
// checkDetails reports whether two orders are compatible counterparties:
// opposite buy/sell sides and identical in every field other than side,
// address and reference.
func checkDetails(orderA OpenOrder, orderB OpenOrder) bool {
	// Orders on the same side can never match.
	if orderA.BuySell == orderB.BuySell {
		return false
	}
	// Neutralize the fields we do not wish to compare, then compare the rest
	// via struct equality.
	probe := orderA
	probe.BuySell = orderB.BuySell
	probe.Address = orderB.Address
	probe.Ref = orderB.Ref
	return probe == orderB
}
// retrieveOrderBook loads the "orders" entry from the ledger and RLP-decodes
// it into an OrderStruct.
func retrieveOrderBook(stub shim.ChaincodeStubInterface) (OrderStruct, error) {
	var orderBook OrderStruct
	state, err := stub.GetState("orders")
	if err != nil {
		return orderBook, errors.New("Failed to get state")
	}
	if err := rlp.DecodeBytes(state, &orderBook); err != nil {
		return orderBook, errors.New("Failed decode rlp bytes")
	}
	return orderBook, nil
}
// retrieveShareListings loads the "shares" entry from the ledger and
// RLP-decodes it into a Shares value.
func retrieveShareListings(stub shim.ChaincodeStubInterface) (Shares, error) {
	var listings Shares
	// Retrieve share listings
	state, err := stub.GetState("shares")
	if err != nil {
		return listings, errors.New("Failed to get state")
	}
	err = rlp.DecodeBytes(state, &listings)
	if err != nil {
		// The decode failure previously reported "Failed to get state",
		// masking which step failed; use the same message as the sibling
		// retrieveOrderBook for consistency.
		return listings, errors.New("Failed decode rlp bytes")
	}
	return listings, nil
}
|
/*
Copyright (c) 2020 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"fmt"
"os"
"strconv"
"strings"
"time"
cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
"github.com/spf13/cobra"
"github.com/openshift/rosa/cmd/upgrade/roles"
"github.com/openshift/rosa/pkg/aws"
"github.com/openshift/rosa/pkg/interactive"
"github.com/openshift/rosa/pkg/interactive/confirm"
"github.com/openshift/rosa/pkg/ocm"
"github.com/openshift/rosa/pkg/rosa"
)
// args holds the raw CLI flag values for `rosa upgrade cluster`.
var args struct {
	version              string // target OpenShift version
	scheduleDate         string // upgrade date, 'yyyy-mm-dd' (UTC)
	scheduleTime         string // upgrade time, 'HH:mm' (UTC)
	nodeDrainGracePeriod string // human-readable period, one of nodeDrainOptions
}

// nodeDrainOptions are the only accepted values for --node-drain-grace-period.
var nodeDrainOptions = []string{
	"15 minutes",
	"30 minutes",
	"45 minutes",
	"1 hour",
	"2 hours",
	"4 hours",
	"8 hours",
}
// Cmd is the `rosa upgrade cluster` cobra command.
var Cmd = &cobra.Command{
	Use:   "cluster",
	Short: "Upgrade cluster",
	Long:  "Upgrade cluster to a new available version",
	Example: `  # Interactively schedule an upgrade on the cluster named "mycluster"
  rosa upgrade cluster --cluster=mycluster --interactive

  # Schedule a cluster upgrade within the hour
  rosa upgrade cluster -c mycluster --version 4.5.20`,
	Run: run,
}
// init wires the command's flags: cluster/mode selectors plus the upgrade
// version, schedule date/time and node-drain grace period.
func init() {
	flags := Cmd.Flags()
	// keep flags in declaration order in --help output
	flags.SortFlags = false
	ocm.AddClusterFlag(Cmd)
	aws.AddModeFlag(Cmd)
	flags.StringVar(
		&args.version,
		"version",
		"",
		"Version of OpenShift that the cluster will be upgraded to",
	)
	flags.StringVar(
		&args.scheduleDate,
		"schedule-date",
		"",
		"Next date the upgrade should run at the specified UTC time. Format should be 'yyyy-mm-dd'",
	)
	flags.StringVar(
		&args.scheduleTime,
		"schedule-time",
		"",
		"Next UTC time that the upgrade should run on the specified date. Format should be 'HH:mm'",
	)
	flags.StringVar(
		&args.nodeDrainGracePeriod,
		"node-drain-grace-period",
		"1 hour",
		fmt.Sprintf("You may set a grace period for how long Pod Disruption Budget-protected workloads will be "+
			"respected during upgrades.\nAfter this grace period, any workloads protected by Pod Disruption "+
			"Budgets that have not been successfully drained from a node will be forcibly evicted.\nValid "+
			"options are ['%s']", strings.Join(nodeDrainOptions, "','")),
	)
	confirm.AddFlag(flags)
}
// run implements `rosa upgrade cluster`: it validates the cluster state,
// resolves the target version (interactively if needed), upgrades STS role
// policies when required, acknowledges version gates, and finally schedules
// the upgrade policy with the requested date/time and node-drain grace
// period.
func run(cmd *cobra.Command, _ []string) {
	r := rosa.NewRuntime().WithAWS().WithOCM()
	defer r.Cleanup()
	clusterKey := r.GetClusterKey()
	mode, err := aws.GetMode()
	if err != nil {
		r.Reporter.Errorf("%s", err)
		os.Exit(1)
	}
	// The cluster must be fully provisioned before it can be upgraded.
	cluster := r.FetchCluster()
	if cluster.State() != cmv1.ClusterStateReady {
		r.Reporter.Errorf("Cluster '%s' is not yet ready", clusterKey)
		os.Exit(1)
	}
	// The presence of an STS role ARN distinguishes STS clusters; --mode only
	// applies to those.
	_, isSTS := cluster.AWS().STS().GetRoleARN()
	if !isSTS && mode != "" {
		r.Reporter.Errorf("The 'mode' option is only supported for STS clusters")
		os.Exit(1)
	}
	// Only one upgrade may be scheduled at a time.
	scheduledUpgrade, upgradeState, err := r.OCMClient.GetScheduledUpgrade(cluster.ID())
	if err != nil {
		r.Reporter.Errorf("Failed to get scheduled upgrades for cluster '%s': %v", clusterKey, err)
		os.Exit(1)
	}
	if scheduledUpgrade != nil {
		r.Reporter.Warnf("There is already a %s upgrade to version %s on %s",
			upgradeState.Value(),
			scheduledUpgrade.Version(),
			scheduledUpgrade.NextRun().Format("2006-01-02 15:04 MST"),
		)
		os.Exit(0)
	}
	version := args.version
	scheduleDate := args.scheduleDate
	scheduleTime := args.scheduleTime
	// Resolve the target version from the available upgrade list.
	availableUpgrades, err := r.OCMClient.GetAvailableUpgrades(ocm.GetVersionID(cluster))
	if err != nil {
		r.Reporter.Errorf("Failed to find available upgrades: %v", err)
		os.Exit(1)
	}
	if len(availableUpgrades) == 0 {
		r.Reporter.Warnf("There are no available upgrades")
		os.Exit(0)
	}
	if version == "" || interactive.Enabled() {
		// Default to the newest available upgrade, then let the user pick.
		if version == "" {
			version = availableUpgrades[0]
		}
		version, err = interactive.GetOption(interactive.Input{
			Question: "Version",
			Help:     cmd.Flags().Lookup("version").Usage,
			Options:  availableUpgrades,
			Default:  version,
			Required: true,
		})
		if err != nil {
			r.Reporter.Errorf("Expected a valid version to upgrade to: %s", err)
			os.Exit(1)
		}
	}
	err = r.OCMClient.CheckUpgradeClusterVersion(availableUpgrades, version, cluster)
	if err != nil {
		r.Reporter.Errorf("%v", err)
		os.Exit(1)
	}
	// Without a full schedule on the command line, fall back to interactive
	// prompting for the missing pieces.
	if scheduleDate == "" || scheduleTime == "" {
		interactive.Enable()
	}
	if isSTS && mode == "" {
		mode, err = interactive.GetOption(interactive.Input{
			Question: "IAM Roles/Policies upgrade mode",
			Help:     cmd.Flags().Lookup("mode").Usage,
			Default:  aws.ModeAuto,
			Options:  aws.Modes,
			Required: true,
		})
		if err != nil {
			r.Reporter.Errorf("Expected a valid role upgrade mode: %s", err)
			os.Exit(1)
		}
		aws.SetModeKey(mode)
	}
	// if cluster is sts validate roles are compatible with upgrade version
	if isSTS {
		r.Reporter.Infof("Ensuring account and operator role policies for cluster '%s'"+
			" are compatible with upgrade.", cluster.ID())
		err = roles.Cmd.RunE(roles.Cmd, []string{mode, cluster.ID(), version, cluster.Version().ChannelGroup()})
		if err != nil {
			// Roles must be upgraded first; print the exact commands to run.
			rolesStr := fmt.Sprintf("rosa upgrade roles -c %s --cluster-version=%s --mode=%s", clusterKey, version, mode)
			upgradeClusterStr := fmt.Sprintf("rosa upgrade cluster -c %s", clusterKey)
			r.Reporter.Infof("Account/Operator Role policies are not valid with upgrade version %s. "+
				"Run the following command(s) to upgrade the roles and run the upgrade command again:\n\n"+
				"\t%s\n"+
				"\t%s\n", version, rolesStr, upgradeClusterStr)
			os.Exit(0)
		}
		r.Reporter.Infof("Account and operator roles for cluster '%s' are compatible with upgrade", clusterKey)
	}
	version, err = ocm.CheckAndParseVersion(availableUpgrades, version)
	if err != nil {
		r.Reporter.Errorf("Error parsing version to upgrade to")
		os.Exit(1)
	}
	if !confirm.Confirm("upgrade cluster to version '%s'", version) {
		os.Exit(0)
	}
	upgradePolicyBuilder := cmv1.NewUpgradePolicy().
		ScheduleType("manual").
		Version(version)
	upgradePolicy, err := upgradePolicyBuilder.Build()
	if err != nil {
		r.Reporter.Errorf("Failed to schedule upgrade for cluster '%s': %v", clusterKey, err)
		os.Exit(1)
	}
	// Version gates must be acknowledged before the upgrade can be scheduled.
	err = checkAndAckMissingAgreements(r, cluster, upgradePolicy, clusterKey)
	if err != nil {
		r.Reporter.Errorf("%v", err)
		os.Exit(1)
	}
	// Set the default next run within the next 10 minutes
	now := time.Now().UTC().Add(time.Minute * 10)
	if scheduleDate == "" {
		scheduleDate = now.Format("2006-01-02")
	}
	if scheduleTime == "" {
		scheduleTime = now.Format("15:04")
	}
	if interactive.Enabled() {
		// If datetimes are set, use them in the interactive form, otherwise fallback to 'now'
		scheduleParsed, err := time.Parse("2006-01-02 15:04", fmt.Sprintf("%s %s", scheduleDate, scheduleTime))
		if err != nil {
			r.Reporter.Errorf("Schedule date should use the format 'yyyy-mm-dd'\n" +
				"   Schedule time should use the format 'HH:mm'")
			os.Exit(1)
		}
		if scheduleParsed.IsZero() {
			scheduleParsed = now
		}
		scheduleDate = scheduleParsed.Format("2006-01-02")
		scheduleTime = scheduleParsed.Format("15:04")
		// Prompt for (and re-validate) the date and time separately.
		scheduleDate, err = interactive.GetString(interactive.Input{
			Question: "Please input desired date in format yyyy-mm-dd",
			Help:     cmd.Flags().Lookup("schedule-date").Usage,
			Default:  scheduleDate,
			Required: true,
		})
		if err != nil {
			r.Reporter.Errorf("Expected a valid date: %s", err)
			os.Exit(1)
		}
		_, err = time.Parse("2006-01-02", scheduleDate)
		if err != nil {
			r.Reporter.Errorf("Date format '%s' invalid", scheduleDate)
			os.Exit(1)
		}
		scheduleTime, err = interactive.GetString(interactive.Input{
			Question: "Please input desired UTC time in format HH:mm",
			Help:     cmd.Flags().Lookup("schedule-time").Usage,
			Default:  scheduleTime,
			Required: true,
		})
		if err != nil {
			r.Reporter.Errorf("Expected a valid time: %s", err)
			os.Exit(1)
		}
		_, err = time.Parse("15:04", scheduleTime)
		if err != nil {
			r.Reporter.Errorf("Time format '%s' invalid", scheduleTime)
			os.Exit(1)
		}
	}
	// Parse next run to time.Time
	nextRun, err := time.Parse("2006-01-02 15:04", fmt.Sprintf("%s %s", scheduleDate, scheduleTime))
	if err != nil {
		r.Reporter.Errorf("Schedule date should use the format 'yyyy-mm-dd'\n" +
			"   Schedule time should use the format 'HH:mm'")
		os.Exit(1)
	}
	upgradePolicyBuilder = upgradePolicyBuilder.NextRun(nextRun)
	nodeDrainGracePeriod := ""
	// Determine if the cluster already has a node drain grace period set and use that as the default
	nd := cluster.NodeDrainGracePeriod()
	if _, ok := nd.GetValue(); ok {
		// Convert larger times to hours, since the API only stores minutes
		val := int(nd.Value())
		unit := nd.Unit()
		if val >= 60 {
			val = val / 60
			if val == 1 {
				unit = "hour"
			} else {
				unit = "hours"
			}
		}
		nodeDrainGracePeriod = fmt.Sprintf("%d %s", val, unit)
	}
	// If node drain grace period is not set, or the user sent it as a CLI argument, use that instead
	if nodeDrainGracePeriod == "" || cmd.Flags().Changed("node-drain-grace-period") {
		nodeDrainGracePeriod = args.nodeDrainGracePeriod
	}
	if interactive.Enabled() {
		nodeDrainGracePeriod, err = interactive.GetOption(interactive.Input{
			Question: "Node draining",
			Help:     cmd.Flags().Lookup("node-drain-grace-period").Usage,
			Options:  nodeDrainOptions,
			Default:  nodeDrainGracePeriod,
			Required: true,
		})
		if err != nil {
			r.Reporter.Errorf("Expected a valid node drain grace period: %s", err)
			os.Exit(1)
		}
	}
	// Validate the grace period against the allowed option list.
	isValidNodeDrainGracePeriod := false
	for _, nodeDrainOption := range nodeDrainOptions {
		if nodeDrainGracePeriod == nodeDrainOption {
			isValidNodeDrainGracePeriod = true
			break
		}
	}
	if !isValidNodeDrainGracePeriod {
		r.Reporter.Errorf("Expected a valid node drain grace period. Options are [%s]",
			strings.Join(nodeDrainOptions, ", "))
		os.Exit(1)
	}
	// Convert "<value> <unit>" to minutes for the API.
	nodeDrainParsed := strings.Split(nodeDrainGracePeriod, " ")
	nodeDrainValue, err := strconv.ParseFloat(nodeDrainParsed[0], 64)
	if err != nil {
		r.Reporter.Errorf("Expected a valid node drain grace period: %s", err)
		os.Exit(1)
	}
	if nodeDrainParsed[1] == "hours" || nodeDrainParsed[1] == "hour" {
		nodeDrainValue = nodeDrainValue * 60
	}
	clusterSpec := ocm.Spec{
		NodeDrainGracePeriodInMinutes: nodeDrainValue,
	}
	// Rebuild the policy now that NextRun has been set.
	upgradePolicy, err = upgradePolicyBuilder.Build()
	if err != nil {
		r.Reporter.Errorf("Failed to schedule upgrade for cluster '%s': %v", clusterKey, err)
		os.Exit(1)
	}
	err = r.OCMClient.ScheduleUpgrade(cluster.ID(), upgradePolicy)
	if err != nil {
		r.Reporter.Errorf("Failed to schedule upgrade for cluster '%s': %v", clusterKey, err)
		os.Exit(1)
	}
	err = r.OCMClient.UpdateCluster(cluster.ID(), r.Creator, clusterSpec)
	if err != nil {
		r.Reporter.Errorf("Failed to update cluster '%s': %v", clusterKey, err)
		os.Exit(1)
	}
	r.Reporter.Infof("Upgrade successfully scheduled for cluster '%s'", clusterKey)
}
// checkAndAckMissingAgreements fetches the version gates that are still
// unacknowledged for this upgrade and acknowledges each one, prompting the
// user to confirm non-STS-only gates first. It returns an error when any OCM
// call fails and exits the process if the user declines a gate.
func checkAndAckMissingAgreements(r *rosa.Runtime, cluster *cmv1.Cluster, upgradePolicy *cmv1.UpgradePolicy,
	clusterKey string) error {
	// check if the cluster upgrade requires gate agreements
	gates, err := r.OCMClient.GetMissingGateAgreements(cluster.ID(), upgradePolicy)
	if err != nil {
		return fmt.Errorf("failed to check for missing gate agreements upgrade for "+
			"cluster '%s': %v", clusterKey, err)
	}
	isWarningDisplayed := false
	for _, gate := range gates {
		if !gate.STSOnly() {
			// Show the warning banner once, before the first prompted gate.
			if !isWarningDisplayed {
				r.Reporter.Warnf("Missing required acknowledgements to schedule upgrade. \n")
				isWarningDisplayed = true
			}
			str := fmt.Sprintf("Description: %s\n", gate.Description())
			if gate.WarningMessage() != "" {
				str = fmt.Sprintf("%s"+
					" Warning:     %s\n", str, gate.WarningMessage())
			}
			str = fmt.Sprintf("%s"+
				" URL:         %s\n", str, gate.DocumentationURL())
			err = interactive.PrintHelp(interactive.Help{
				Message: "Read the below description and acknowledge to proceed with upgrade",
				Steps:   []string{str},
			})
			if err != nil {
				return fmt.Errorf("failed to get version gate '%s' for cluster '%s': %v",
					gate.ID(), clusterKey, err)
			}
			// for non sts gates we require user agreement
			if !confirm.Prompt(true, "I acknowledge") {
				os.Exit(0)
			}
		}
		err = r.OCMClient.AckVersionGate(cluster.ID(), gate.ID())
		if err != nil {
			return fmt.Errorf("failed to acknowledge version gate '%s' for cluster '%s': %v",
				gate.ID(), clusterKey, err)
		}
	}
	// All gates acknowledged. Return nil explicitly: the previous `return err`
	// relied on err necessarily being nil here, which was correct but obscure.
	return nil
}
|
package repositories
import (
"blog/app/models"
"blog/database"
"github.com/jinzhu/gorm"
"github.com/mlogclub/simple"
"time"
)
// UserRepository provides database access for user records.
type UserRepository struct {
	db *gorm.DB // shared gorm handle, set by NewUserRepository
}
// NewUserRepository builds a repository bound to the package database handle.
func NewUserRepository() *UserRepository {
	repo := &UserRepository{db: database.DB()}
	return repo
}
// List returns one page of users and fills paging.Total with the overall
// user count.
func (this *UserRepository) List(paging *simple.Paging) []*models.User {
	// Allocate capacity only. The previous make([]*models.User, paging.Limit)
	// created Limit nil entries up front, which could surface as nil elements
	// in the returned slice if the query yields fewer rows — presizing with
	// length 0 avoids that while keeping the allocation.
	users := make([]*models.User, 0, paging.Limit)
	this.db.Offset(paging.Offset()).Limit(paging.Limit).Find(&users)
	this.db.Model(&models.User{}).Count(&paging.Total)
	return users
}
// GetById fetches a user by primary key.
// NOTE(review): &user here passes a **models.User to gorm; passing user
// directly would be the conventional form — confirm gorm tolerates this.
func (this *UserRepository) GetById(id uint) *models.User {
	var user = &models.User{}
	this.db.First(&user, id)
	return user
}
// GetByEmail looks a user up by email address.
func (this *UserRepository) GetByEmail(email string) *models.User {
	user := &models.User{}
	this.db.Take(user, "email = ?", email)
	return user
}
// GetByUsername looks a user up by username.
func (this *UserRepository) GetByUsername(username string) *models.User {
	user := &models.User{}
	this.db.Take(user, "username = ?", username)
	return user
}
// LastLoginTimeById updates the user's last_login_time column to the current
// time. (Original comment, translated: "update latest time in the database".)
// NOTE(review): despite the "ById" name this takes a *models.User, and &user
// passes a **models.User to gorm — confirm both are intended.
func (this *UserRepository) LastLoginTimeById(user *models.User) {
	this.db.Model(&user).Update("last_login_time", time.Now())
}
|
// Copyright 2015 Simon HEGE. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package geodesic
import (
"bufio"
"compress/gzip"
"flag"
"fmt"
"math"
"os"
"testing"
"github.com/xeonx/geographic"
)
// Tolerances for comparing computed geodesic results against the reference
// data; each can be overridden on the test command line.
var (
	deltaAz  = flag.Float64("deltaAz", 1e-2, "Maximum delta allowed on azimuths (degree)")
	deltaS   = flag.Float64("deltaS", 1e-7, "Maximum delta allowed on distances (m)")
	deltaPos = flag.Float64("deltaPos", 1e-8, "Maximum delta allowed on latitudes or longitudes (degree)")
)
// approxEq reports whether a and b differ by strictly less than delta.
func approxEq(a, b float64, delta float64) bool {
	diff := math.Abs(a - b)
	return diff < delta
}
// d is one reference test case: a geodesic between point 1 and point 2 with
// its azimuths and measures, as read from the GeodTest data file.
type d struct {
	lat1 float64 // latitude of point 1 (degrees)
	lon1 float64 // longitude of point 1 (degrees)
	azi1 float64 // azimuth at point 1 (degrees)
	lat2 float64 // latitude of point 2 (degrees)
	lon2 float64 // longitude of point 2 (degrees)
	azi2 float64 // azimuth at point 2 (degrees)
	s12  float64 // distance between the points (meters)
	a12  float64 // arc length (degrees) — per the data file column order
	m12  float64 // reduced length — per the data file column order
	S12  float64 // area term — per the data file column order
}

// testData holds every case loaded by TestMain.
var testData = []d{}
// TestMain loads the gzip-compressed geodesic reference dataset (one test
// case per line) into testData before running the suite. With -short the
// smaller dataset is used.
func TestMain(m *testing.M) {
	flag.Parse()
	filepath := "testdata/GeodTest.dat.gz"
	if testing.Short() {
		filepath = "testdata/GeodTest-short.dat.gz"
	}
	// Load test data file
	file, err := os.Open(filepath)
	if err != nil {
		panic(err)
	}
	defer file.Close()
	unziper, err := gzip.NewReader(file)
	if err != nil {
		panic(err)
	}
	defer unziper.Close()
	// Scan the gzip stream directly; the extra bufio.Reader layer was
	// redundant since Scanner performs its own buffering.
	scanner := bufio.NewScanner(unziper)
	for scanner.Scan() {
		var d d
		_, err := fmt.Sscan(scanner.Text(), &d.lat1, &d.lon1, &d.azi1, &d.lat2, &d.lon2, &d.azi2, &d.s12, &d.a12, &d.m12, &d.S12)
		if err != nil {
			panic(err)
		}
		testData = append(testData, d)
	}
	// A scanner error (e.g. a truncated gzip stream) was previously ignored,
	// which would silently run the suite on partial data.
	if err := scanner.Err(); err != nil {
		panic(err)
	}
	// Run test suite
	os.Exit(m.Run())
}
// TestDirect checks WGS84.Direct against every reference case: the computed
// destination latitude/longitude and arrival azimuth must match the dataset
// within the configured tolerances.
func TestDirect(t *testing.T) {
	for i, tt := range testData {
		outPt2, outAz2 := WGS84.Direct(geographic.Point{tt.lat1, tt.lon1}, tt.azi1, tt.s12)
		if !approxEq(outPt2.LatitudeDeg, tt.lat2, *deltaPos) {
			t.Errorf("%d: Direct(%v, %v, %v).LatitudeDeg => %v, want %v", i, tt.lat1, tt.lon1, tt.azi1, outPt2.LatitudeDeg, tt.lat2)
		}
		if !approxEq(outPt2.LongitudeDeg, tt.lon2, *deltaPos) {
			t.Errorf("%d: Direct(%v, %v, %v).LongitudeDeg => %v, want %v", i, tt.lat1, tt.lon1, tt.azi1, outPt2.LongitudeDeg, tt.lon2)
		}
		if !approxEq(outAz2, tt.azi2, *deltaAz) {
			t.Errorf("%d: Direct(%v, %v, %v).Azimuth => %v, want %v", i, tt.lat1, tt.lon1, tt.azi1, outAz2, tt.azi2)
		}
	}
}
// BenchmarkDirect measures WGS84.Direct, cycling through the loaded
// reference cases.
func BenchmarkDirect(b *testing.B) {
	for i := 0; i < b.N; i++ {
		tt := testData[i%len(testData)]
		WGS84.Direct(geographic.Point{tt.lat1, tt.lon1}, tt.azi1, tt.s12)
	}
}
// TestInverse checks WGS84.Inverse against every reference case: the
// computed distance and both azimuths must match the dataset within the
// configured tolerances.
func TestInverse(t *testing.T) {
	for i, tt := range testData {
		outS12, outAz1, outAz2 := WGS84.Inverse(geographic.Point{tt.lat1, tt.lon1}, geographic.Point{tt.lat2, tt.lon2})
		if !approxEq(outS12, tt.s12, *deltaS) {
			t.Errorf("%d: Inverse(%v, %v, %v, %v).s12 => %v, want %v", i, tt.lat1, tt.lon1, tt.lat2, tt.lon2, outS12, tt.s12)
		}
		if !approxEq(outAz1, tt.azi1, *deltaAz) {
			t.Errorf("%d: Inverse(%v, %v, %v, %v).azi1 => %v, want %v", i, tt.lat1, tt.lon1, tt.lat2, tt.lon2, outAz1, tt.azi1)
		}
		if !approxEq(outAz2, tt.azi2, *deltaAz) {
			t.Errorf("%d: Inverse(%v, %v, %v, %v).azi2 => %v, want %v", i, tt.lat1, tt.lon1, tt.lat2, tt.lon2, outAz2, tt.azi2)
		}
	}
}
// BenchmarkInverse measures WGS84.Inverse, cycling through the loaded
// reference cases.
func BenchmarkInverse(b *testing.B) {
	for i := 0; i < b.N; i++ {
		tt := testData[i%len(testData)]
		WGS84.Inverse(geographic.Point{tt.lat1, tt.lon1}, geographic.Point{tt.lat2, tt.lon2})
	}
}
|
package qingstor
import (
"context"
"net/http"
"github.com/sirupsen/logrus"
"github.com/yunify/qingstor-sdk-go/v3/config"
"github.com/yunify/qingstor-sdk-go/v3/service"
"gopkg.in/yaml.v2"
"github.com/yunify/qscamel/constants"
"github.com/yunify/qscamel/model"
)
// Client is the client to visit QingStor service.
// The yaml-tagged fields are populated from the endpoint's options map;
// New fills in defaults afterwards (see New for the exact fallbacks).
type Client struct {
	Protocol string `yaml:"protocol"` // "http" or "https"; defaults to "https"
	Host string `yaml:"host"` // service host; defaults to "qingstor.com"
	Port int `yaml:"port"` // defaults to 443 (https) or 80 (http)
	Zone string `yaml:"zone"` // auto-detected via GetZone when empty
	BucketName string `yaml:"bucket_name"` // required
	AccessKeyID string `yaml:"access_key_id"` // required
	SecretAccessKey string `yaml:"secret_access_key"` // required
	StorageClass string `yaml:"storage_class"` // one of the StorageClass* constants; defaults to StorageClassStandard
	DisableURICleaning bool `yaml:"disable_uri_cleaning"`
	// Path is the working prefix inside the bucket, copied from the
	// endpoint config (not from the yaml options).
	Path string
	// client is the underlying bucket handle, initialized by New.
	client *service.Bucket
}
// New will create a new QingStor client.
//
// It reads the task's source or destination endpoint (selected by et),
// unmarshals the endpoint options into the Client, applies defaults for
// protocol/host/port/storage class, validates the mandatory fields, and
// initializes the underlying bucket handle.
//
// Fix over the original: the errors returned by config.New, service.Init
// and qs.Bucket were silently discarded; they are now propagated.
func New(ctx context.Context, et uint8, hc *http.Client) (c *Client, err error) {
	t, err := model.GetTask(ctx)
	if err != nil {
		return
	}
	c = &Client{}
	// Pick the endpoint this client represents.
	e := t.Src
	if et == constants.DestinationEndpoint {
		e = t.Dst
	}
	// Round-trip the options map through YAML into the typed struct.
	content, err := yaml.Marshal(e.Options)
	if err != nil {
		return
	}
	err = yaml.Unmarshal(content, c)
	if err != nil {
		return
	}
	// Set protocol.
	if c.Protocol == "" {
		c.Protocol = "https"
	}
	// Set host.
	if c.Host == "" {
		c.Host = "qingstor.com"
	}
	// Set port.
	if c.Port == 0 {
		if c.Protocol == "https" {
			c.Port = 443
		} else {
			c.Port = 80
		}
	}
	// Set bucket name.
	if c.BucketName == "" {
		logrus.Error("QingStor's bucket name can't be empty.")
		err = constants.ErrEndpointInvalid
		return
	}
	// Set access key.
	if c.AccessKeyID == "" {
		logrus.Error("QingStor's access key id can't be empty.")
		err = constants.ErrEndpointInvalid
		return
	}
	// Set secret key.
	if c.SecretAccessKey == "" {
		logrus.Error("QingStor's secret access key can't be empty.")
		err = constants.ErrEndpointInvalid
		return
	}
	// Set storage class.
	if c.StorageClass == "" {
		c.StorageClass = StorageClassStandard
	}
	if c.StorageClass != StorageClassStandard &&
		c.StorageClass != StorageClassStandardIA {
		logrus.Errorf("QingStor's storage class can't be %s.", c.StorageClass)
		err = constants.ErrEndpointInvalid
		return
	}
	// Set path.
	c.Path = e.Path
	// Set qingstor config.
	qc, err := config.New(c.AccessKeyID, c.SecretAccessKey)
	if err != nil {
		return
	}
	qc.Protocol = c.Protocol
	qc.Host = c.Host
	qc.Port = c.Port
	qc.Connection = hc
	qc.AdditionalUserAgent = "qscamel " + constants.Version
	qc.DisableURICleaning = c.DisableURICleaning
	// Set qingstor service.
	qs, err := service.Init(qc)
	if err != nil {
		return
	}
	// Detect the zone when not configured explicitly.
	if c.Zone == "" {
		c.Zone, err = c.GetZone()
		if err != nil {
			return
		}
	}
	c.client, err = qs.Bucket(c.BucketName, c.Zone)
	if err != nil {
		return
	}
	return
}
|
package cache
import (
"blog/database"
"github.com/vmihailenco/msgpack/v4"
"github.com/go-redis/cache/v7"
)
// cached holds the lazily-initialized codec shared by the package.
var cached *cache.Codec
// GetCache returns the shared cache codec, creating it on first use.
// NOTE(review): the nil check is not goroutine-safe — concurrent first
// calls may each run initCache and race on `cached`. Confirm callers are
// single-threaded at startup, or guard the init with sync.Once.
func GetCache() *cache.Codec {
	if cached == nil {
		cached = initCache()
	}
	return cached
}
// initCache builds the cache codec backed by the shared Redis connection,
// using msgpack for serialization in both directions.
func initCache() *cache.Codec {
	codec := &cache.Codec{Redis: database.GetRedis()}
	codec.Marshal = msgpack.Marshal
	codec.Unmarshal = msgpack.Unmarshal
	return codec
}
|
package main
import (
"bytes"
"crypto/tls"
"encoding/hex"
"encoding/json"
"errors"
"flag"
"fmt"
"net/http"
"os"
"strconv"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcutil/hdkeychain"
"github.com/luno/moonbeam/address"
"github.com/luno/moonbeam/channels"
"github.com/luno/moonbeam/client"
"github.com/luno/moonbeam/models"
"github.com/luno/moonbeam/resolver"
)
var testnet = flag.Bool("testnet", true, "Use testnet")
var tlsSkipVerify = flag.Bool("tls_skip_verify", false, "Whether to validate the server's TLS cert")
// getNet reports which Bitcoin network parameters are active, based on
// the -testnet flag.
func getNet() *chaincfg.Params {
	if !*testnet {
		return &chaincfg.MainNetParams
	}
	return &chaincfg.TestNet3Params
}
// loadkey derives child key n from the wallet's extended private key and
// returns both the EC private key and its compressed public-key address.
// It fails if the stored extended key belongs to a different network than
// the one selected by -testnet.
func loadkey(s *State, n int) (*btcec.PrivateKey, *btcutil.AddressPubKey, error) {
	net := getNet()
	ek, err := hdkeychain.NewKeyFromString(s.XPrivKey)
	if err != nil {
		return nil, nil, err
	}
	// Guard against mixing mainnet and testnet key material.
	if !ek.IsForNet(net) {
		return nil, nil, errors.New("wrong network")
	}
	// Derive the n-th child key; each channel uses its own key path.
	ek, err = ek.Child(uint32(n))
	if err != nil {
		return nil, nil, err
	}
	privKey, err := ek.ECPrivKey()
	if err != nil {
		return nil, nil, err
	}
	pk := (*btcec.PublicKey)(&privKey.PublicKey)
	pubkey, err := btcutil.NewAddressPubKey(pk.SerializeCompressed(), net)
	if err != nil {
		return nil, nil, err
	}
	return privKey, pubkey, nil
}
// getHttpClient returns the HTTP client used for all server requests.
// With -tls_skip_verify set it disables certificate verification (for
// testing against self-signed certs); otherwise the default client is
// shared. Restructured with an early return instead of else-after-return.
func getHttpClient() *http.Client {
	if !*tlsSkipVerify {
		return http.DefaultClient
	}
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		},
	}
}
// getClient builds an API client for the channel with the given local id.
// Fix: the original indexed globalState.Channels without checking the id
// existed, silently producing a client with an empty host for unknown
// ids; now an explicit error is returned.
func getClient(id string) (*client.Client, error) {
	ch, ok := globalState.Channels[id]
	if !ok {
		return nil, errors.New("unknown channel id: " + id)
	}
	c := getHttpClient()
	return client.NewClient(c, ch.Host)
}
// getResolver builds a domain resolver that shares the command's HTTP
// client; on testnet the default resolver port is overridden.
func getResolver() *resolver.Resolver {
	res := resolver.NewResolver()
	res.Client = getHttpClient()
	if *testnet {
		res.DefaultPort = 3211
	}
	return res
}
// create opens a new (not yet funded) payment channel to the server
// behind a domain and records it in the global state under a fresh
// numeric id, which is printed last.
// args: [0] domain, [1] output (refund) address.
func create(args []string) error {
	domain := args[0]
	outputAddr := args[1]
	// Resolve the domain to the server's channel API URL.
	r := getResolver()
	hostURL, err := r.Resolve(domain)
	if err != nil {
		return err
	}
	host := hostURL.String()
	// Each channel gets its own derived key; n is also used as the id.
	n := globalState.NextKey()
	privkey, _, err := loadkey(globalState, n)
	if err != nil {
		return err
	}
	config := getConfig()
	s, err := channels.NewSender(config, privkey)
	if err != nil {
		return err
	}
	req, err := s.GetCreateRequest(outputAddr)
	if err != nil {
		return err
	}
	httpClient := getHttpClient()
	c, err := client.NewClient(httpClient, host)
	if err != nil {
		return err
	}
	resp, err := c.Create(*req)
	if err != nil {
		return err
	}
	err = s.GotCreateResponse(resp)
	if err != nil {
		return err
	}
	_, addr, err := s.State.GetFundingScript()
	if err != nil {
		return err
	}
	// Sanity check to make sure client and server both agree on the state.
	if addr != resp.FundingAddress {
		return errors.New("state discrepancy")
	}
	fmt.Printf("Funding address: %s\n", addr)
	fmt.Printf("Fee: %d\n", s.State.Fee)
	fmt.Printf("Timeout: %d\n", s.State.Timeout)
	// Persist the new channel locally; it still needs funding (see fund).
	id := strconv.Itoa(n)
	globalState.Channels[id] = Channel{
		Domain:       domain,
		Host:         host,
		State:        s.State,
		KeyPath:      n,
		ReceiverData: resp.ReceiverData,
	}
	fmt.Printf("%s\n", id)
	return nil
}
// fund transitions a created channel to open once its funding transaction
// has confirmed on-chain, exchanging open requests with the server and
// storing the returned auth token.
// args: [0] channel id, [1] funding txid, [2] vout, [3] amount (satoshi).
func fund(args []string) error {
	id := args[0]
	txid := args[1]
	vout, err := strconv.Atoi(args[2])
	if err != nil {
		return errors.New("invalid vout")
	}
	amount, err := strconv.ParseInt(args[3], 10, 64)
	if err != nil {
		return errors.New("invalid amount")
	}
	ch, sender, err := getChannel(id)
	if err != nil {
		return err
	}
	req, err := sender.GetOpenRequest(txid, uint32(vout), amount)
	if err != nil {
		return err
	}
	// Echo back the opaque server-side data received at create time.
	req.ReceiverData = ch.ReceiverData
	c, err := getClient(id)
	if err != nil {
		return err
	}
	resp, err := c.Open(*req)
	if err != nil {
		return err
	}
	if err := sender.GotOpenResponse(resp); err != nil {
		return err
	}
	// The auth token authenticates all later requests on this channel.
	if err := storeAuthToken(id, resp.AuthToken); err != nil {
		return err
	}
	return storeChannel(id, sender.State)
}
// send pays `amount` satoshi to the moonbeam address `target`.
// args: [0] target address, [1] amount, [2] optional channel id; when no
// id is given the first open channel to the target's domain is used.
// The payment is validated by the server, persisted locally as pending,
// and then pushed via flush (so a crash between the two is recoverable).
func send(args []string) error {
	target := args[0]
	amount, err := strconv.ParseInt(args[1], 10, 64)
	if err != nil {
		return errors.New("invalid amount")
	}
	id := ""
	if len(args) > 2 {
		id = args[2]
	}
	if id == "" {
		// No channel given: pick one by the domain encoded in the address.
		_, domain, valid := address.Decode(target)
		if !valid {
			return errors.New("invalid address")
		}
		ids := findForDomain(domain)
		if len(ids) == 0 {
			return errors.New("no open channels to domain")
		}
		id = ids[0]
	}
	p := models.Payment{
		Amount: amount,
		Target: target,
	}
	payment, err := json.Marshal(p)
	if err != nil {
		return err
	}
	ch, sender, err := getChannel(id)
	if err != nil {
		return err
	}
	// Dry-run locally to make sure the channel can carry the payment.
	if _, err := sender.GetSendRequest(p.Amount, payment); err != nil {
		return err
	}
	// Only one in-flight payment per channel is supported.
	if ch.PendingPayment != nil {
		return errors.New("there is already a pending payment")
	}
	c, err := getClient(id)
	if err != nil {
		return err
	}
	req := models.ValidateRequest{
		TxID:    ch.State.FundingTxID,
		Vout:    ch.State.FundingVout,
		Payment: payment,
	}
	// Ask the server whether it would accept this payment before
	// committing anything locally.
	resp, err := c.Validate(req, ch.AuthToken)
	if err != nil {
		return err
	}
	if !resp.Valid {
		return errors.New("payment rejected by server")
	}
	// Persist the pending payment before sending so it can be retried.
	if err := storePendingPayment(id, sender.State, payment); err != nil {
		return err
	}
	if err := save(getNet(), globalState); err != nil {
		return err
	}
	return flush(id)
}
// flush pushes the channel's pending payment to the server, handling the
// case where a previous attempt may or may not have reached it: the
// server's balance tells us whether the payment already reflects.
func flush(id string) error {
	ch, sender, err := getChannel(id)
	if err != nil {
		return err
	}
	payment := ch.PendingPayment
	if payment == nil {
		fmt.Println("No pending payment to flush.")
		return nil
	}
	var p models.Payment
	if err := json.Unmarshal(payment, &p); err != nil {
		return err
	}
	// Rebuild the signed send request from the stored payment bytes.
	sendReq, err := sender.GetSendRequest(p.Amount, payment)
	if err != nil {
		return err
	}
	// Either the payment has been sent or it hasn't. Find out which one.
	c, err := getClient(id)
	if err != nil {
		return err
	}
	req := models.StatusRequest{
		TxID: ch.State.FundingTxID,
		Vout: ch.State.FundingVout,
	}
	resp, err := c.Status(req, ch.AuthToken)
	if err != nil {
		return err
	}
	serverBal := resp.Balance
	if serverBal == sender.State.Balance {
		// Pending payment doesn't reflect yet. We have to retry.
		if _, err := c.Send(*sendReq, ch.AuthToken); err != nil {
			return err
		}
		if err := sender.GotSendResponse(p.Amount, payment, nil); err != nil {
			return err
		}
		// Clear the pending payment now that both sides agree.
		return storePendingPayment(id, sender.State, nil)
	} else if serverBal == sender.State.Balance+p.Amount {
		// Pending payment reflects. Finalize our side.
		if err := sender.GotSendResponse(p.Amount, payment, nil); err != nil {
			return err
		}
		return storePendingPayment(id, sender.State, nil)
	} else {
		// Any other balance means the two sides have diverged in a way
		// this client cannot reconcile automatically.
		return errors.New("unexpected remote channel balance")
	}
}
// flushAction is the CLI wrapper around flush; args: [0] channel id.
func flushAction(args []string) error {
	id := args[0]
	return flush(id)
}
// closeAction cooperatively closes a channel with the server and prints
// the hex-encoded closing transaction. args: [0] channel id.
func closeAction(args []string) error {
	id := args[0]
	ch, sender, err := getChannel(id)
	if err != nil {
		return err
	}
	req, err := sender.GetCloseRequest()
	if err != nil {
		return err
	}
	c, err := getClient(id)
	if err != nil {
		return err
	}
	resp, err := c.Close(*req, ch.AuthToken)
	if err != nil {
		return err
	}
	if err := sender.GotCloseResponse(resp); err != nil {
		return err
	}
	// Print the raw close transaction so the user can broadcast it.
	fmt.Printf("%s\n", hex.EncodeToString(resp.CloseTx))
	return storeChannel(id, sender.State)
}
// isClosing reports whether the channel status is a terminal state
// (closing or already closed).
func isClosing(s channels.Status) bool {
	switch s {
	case channels.StatusClosing, channels.StatusClosed:
		return true
	}
	return false
}
// status queries the server's view of a channel, prints it, warns about
// any divergence from the local state, and — if the server has started
// closing while we still consider the channel open — moves the local
// state into closing as well. args: [0] channel id.
func status(args []string) error {
	id := args[0]
	ch, sender, err := getChannel(id)
	if err != nil {
		return err
	}
	c, err := getClient(id)
	if err != nil {
		return err
	}
	req := models.StatusRequest{
		TxID: ch.State.FundingTxID,
		Vout: ch.State.FundingVout,
	}
	resp, err := c.Status(req, ch.AuthToken)
	if err != nil {
		return err
	}
	buf, _ := json.MarshalIndent(resp, "", " ")
	fmt.Printf("%s\n", string(buf))
	// Compare the server's view against our local copy field by field.
	serverStatus := channels.Status(resp.Status)
	if sender.State.Status != serverStatus {
		fmt.Printf("Warning: Status differs\n")
	}
	if sender.State.Balance != resp.Balance {
		fmt.Printf("Warning: Balance differs\n")
	}
	if !bytes.Equal(sender.State.PaymentsHash[:], resp.PaymentsHash) {
		fmt.Printf("Warning: PaymentsHash differs\n")
	}
	// Follow the server into the closing state if it got there first.
	if isClosing(serverStatus) && !isClosing(sender.State.Status) {
		if _, err := sender.GetCloseRequest(); err != nil {
			return err
		}
		return storeChannel(id, sender.State)
	}
	return nil
}
// refund prints the hex-encoded refund transaction for a channel.
// For a channel that never reached the open state, the funding outpoint
// must be supplied so the open request can be reconstructed first.
// args: [0] channel id, and when not open: [1] txid, [2] vout, [3] amount.
func refund(args []string) error {
	id := args[0]
	_, sender, err := getChannel(id)
	if err != nil {
		return err
	}
	if sender.State.Status != channels.StatusOpen {
		// Channel never opened locally: rebuild the open request from the
		// user-supplied funding details before deriving the refund.
		txid := args[1]
		vout, err := strconv.Atoi(args[2])
		if err != nil {
			return errors.New("invalid vout")
		}
		amount, err := strconv.ParseInt(args[3], 10, 64)
		if err != nil {
			return errors.New("invalid amount")
		}
		_, err = sender.GetOpenRequest(txid, uint32(vout), amount)
		if err != nil {
			return err
		}
		if err := storeChannel(id, sender.State); err != nil {
			return err
		}
	}
	rawTx, err := sender.Refund()
	if err != nil {
		return err
	}
	// Print the raw transaction so the user can broadcast it after the
	// channel timeout.
	fmt.Printf("%s\n", hex.EncodeToString(rawTx))
	return nil
}
// list prints a tab-separated table of channels. Only open channels are
// shown unless -a is passed as the first argument.
func list(args []string) error {
	showAll := len(args) > 0 && args[0] == "-a"
	fmt.Printf("ID\tDomain\tStatus\tCapacity\tBalance\n")
	for id, ch := range globalState.Channels {
		if !showAll && ch.State.Status != channels.StatusOpen {
			continue
		}
		capBTC := float64(ch.State.Capacity) / 1e8
		balBTC := float64(ch.State.Balance) / 1e8
		fmt.Printf("%s\t%s\t%s\t%.8f\t%.8f\n",
			id, ch.Domain, ch.State.Status, capBTC, balBTC)
	}
	return nil
}
// show dumps a single channel's stored record as indented JSON.
// args: [0] channel id.
func show(args []string) error {
	ch, ok := globalState.Channels[args[0]]
	if !ok {
		return errors.New("not found")
	}
	out, _ := json.MarshalIndent(ch, "", " ")
	fmt.Printf("%s\n", string(out))
	return nil
}
// help prints each registered command with its one-line description.
// (Map iteration order is random, so the listing order varies per run —
// same as the original.) Fixed the non-idiomatic `for action, _ := range`.
func help(args []string) error {
	fmt.Printf("Available commands:\n")
	for action := range commands {
		h := helps[action]
		fmt.Printf("%10s %s\n", action, h)
	}
	return nil
}
// outputError prints the message and terminates the process with a
// non-zero exit status.
func outputError(msg string) {
	fmt.Printf("%v\n", msg)
	os.Exit(1)
}
// commands maps a CLI action name to its handler. The "help" entry is
// injected in main to avoid an initialization cycle between help and
// this map.
var commands = map[string]func(args []string) error{
	"create": create,
	"fund":   fund,
	"send":   send,
	"close":  closeAction,
	"refund": refund,
	"list":   list,
	"show":   show,
	"status": status,
	"flush":  flushAction,
}
// helps holds the one-line description printed by the help command for
// each action in the commands map.
var helps = map[string]string{
	"create": "Create a channel to a remote server",
	"fund":   "Open a created channel after funding transaction is confirmed",
	"send":   "Send a payment",
	"close":  "Close a channel",
	"refund": "Show the refund transaction for a channel",
	"list":   "List channels",
	"show":   "Show info about a channel",
	"status": "Get status from server",
	"flush":  "Flush any pending payment",
	"help":   "Show help",
}
// main parses flags, dispatches the first positional argument to its
// command handler, and persists the (possibly modified) global state on
// success.
func main() {
	flag.Parse()
	args := flag.Args()
	action := ""
	if len(args) == 0 {
		action = "help"
	} else {
		action = args[0]
		args = args[1:]
	}
	// Registered here (not in the map literal) to break the
	// help -> commands -> help initialization cycle.
	commands["help"] = help
	f, ok := commands[action]
	if !ok {
		outputError("unknown command")
		return
	}
	net := getNet()
	s, err := load(net)
	if err != nil {
		outputError(err.Error())
	}
	globalState = s
	err = f(args)
	// Only persist state when the command succeeded.
	if err == nil {
		err = save(net, globalState)
	}
	if err != nil {
		outputError(err.Error())
	}
}
|
package autonat
import (
pb "gx/ipfs/QmZgrJk2k14P3zHUAz4hdk1TnU57iaTWEk8fGmFkrafEMX/go-libp2p-autonat/pb"
ma "gx/ipfs/QmNTCey11oxhb1AxDnQBRHtdhap6Ctud872NjAYPYYXPuc/go-multiaddr"
logging "gx/ipfs/QmcuXC5cxs79ro2cUuHs4HQ2bkDLJUYokwL8aivcX6HW3C/go-log"
)
// log is the logger for the autonat service package.
var log = logging.Logger("autonat-svc")
// newDialResponseOK builds a successful dial response carrying the
// address that was successfully dialed.
func newDialResponseOK(addr ma.Multiaddr) *pb.Message_DialResponse {
	return &pb.Message_DialResponse{
		Status: pb.Message_OK.Enum(),
		Addr:   addr.Bytes(),
	}
}
// newDialResponseError builds a failed dial response with the given
// status code and human-readable status text.
func newDialResponseError(status pb.Message_ResponseStatus, text string) *pb.Message_DialResponse {
	return &pb.Message_DialResponse{
		Status:     status.Enum(),
		StatusText: &text,
	}
}
|
package web
import (
"testing"
"github.com/gin-gonic/gin"
"github.com/iGoogle-ink/gotil/xlog"
)
// TestInitServer is a manual smoke test for the gin server wiring.
// To run it, uncomment the code below (it starts a real listener on
// port 2233 and blocks until a termination signal arrives).
func TestInitServer(t *testing.T) {
	//c := &Config{
	//	Port: ":2233",
	//	Limit: &limit.Config{
	//		Rate:       0, // a rate of 0 disables rate limiting
	//		BucketSize: 100,
	//	},
	//}
	//
	//g := InitGin(c)
	//g.Gin.Use(g.CORS())
	//g.Gin.Use(g.Recovery())
	//
	//initRoute(g.Gin)
	//
	//g.Start()
	//
	//ch := make(chan os.Signal)
	//signal.Notify(ch, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT)
	//for {
	//	si := <-ch
	//	switch si {
	//	case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT:
	//		xlog.Warnf("get a signal %s, stop the process", si.String())
	//		// todo something
	//		return
	//	case syscall.SIGHUP:
	//	default:
	//		return
	//	}
	//}
}
// initRoute registers the demo routes on the gin engine: /a/:abc echoes
// its path parameter, while /b, /c and /d return their own names.
func initRoute(g *gin.Engine) {
	g.GET("/a/:abc", func(c *gin.Context) {
		xlog.Debug(c.Param("abc"))
		xlog.Debug(c.Request.RequestURI)
		rsp := &struct {
			Param string `json:"param"`
			Path  string `json:"path"`
		}{Param: c.Param("abc"), Path: c.Request.RequestURI}
		JSON(c, rsp, nil)
	})
	for _, name := range []string{"b", "c", "d"} {
		name := name // capture per iteration
		g.GET("/"+name, func(c *gin.Context) {
			JSON(c, name, nil)
		})
	}
}
|
package dynamic_programming
import "math"
// minDistance computes the Levenshtein edit distance between word1 and
// word2 using an m x n table where dp[i][j] is the distance between
// word1[0..i] and word2[0..j] (inclusive prefixes). Empty inputs are
// handled up front because the table has no row/column for them.
func minDistance(word1 string, word2 string) int {
	m := len(word1)
	n := len(word2)
	if m == 0 {
		return n
	}
	if n == 0 {
		return m
	}
	dp := make([][]int, m)
	for i := range dp {
		dp[i] = make([]int, n)
	}
	// Initialize the first row: distance from word1[0] to word2[0..i].
	// On a match the best cost is deleting the other i characters.
	for i := 0; i < n; i++ {
		if word1[0] == word2[i] {
			dp[0][i] = i
		} else if i > 0 {
			dp[0][i] = dp[0][i-1] + 1
		} else {
			dp[0][i] = 1
		}
	}
	// Initialize the first column symmetrically.
	for i := 0; i < m; i++ {
		if word2[0] == word1[i] {
			dp[i][0] = i
		} else if i > 0 {
			dp[i][0] = dp[i-1][0] + 1
		} else {
			dp[i][0] = 1
		}
	}
	// Fill the table: delete, insert, or replace/keep.
	for i := 1; i < m; i++ {
		for j := 1; j < n; j++ {
			if word1[i] == word2[j] {
				dp[i][j] = min(dp[i-1][j]+1, dp[i][j-1]+1, dp[i-1][j-1])
			} else {
				dp[i][j] = min(dp[i-1][j]+1, dp[i][j-1]+1, dp[i-1][j-1]+1)
			}
		}
	}
	return dp[m-1][n-1]
}
// min returns the smallest of three ints (values are assumed to be below
// math.MaxInt32, as in the DP tables above).
func min(x, y, z int) int {
	best := math.MaxInt32
	for _, v := range []int{x, y, z} {
		if v < best {
			best = v
		}
	}
	return best
}
// minDistance2 computes the Levenshtein edit distance with an
// (m+1) x (n+1) table, so dp[i][j] is the distance between the first i
// characters of word1 and the first j characters of word2; row/column 0
// represent the empty prefix, which removes all special-casing.
func minDistance2(word1 string, word2 string) int {
	rows, cols := len(word1)+1, len(word2)+1
	dp := make([][]int, rows)
	for i := range dp {
		dp[i] = make([]int, cols)
		dp[i][0] = i // delete i characters of word1
	}
	for j := 0; j < cols; j++ {
		dp[0][j] = j // insert j characters of word2
	}
	for i := 1; i < rows; i++ {
		for j := 1; j < cols; j++ {
			// dp indices are offset by one from the string indices, hence i-1/j-1.
			if word1[i-1] == word2[j-1] {
				dp[i][j] = dp[i-1][j-1]
				continue
			}
			dp[i][j] = min(dp[i-1][j]+1, dp[i][j-1]+1, dp[i-1][j-1]+1)
		}
	}
	return dp[rows-1][cols-1]
}
|
package sql
/*
SelectBuilder stages SQL fragments in queues and joins them
first-in-first-out to assemble the final statement.
*/
type SelectBuilder struct {
	// Select holds the queued select-column fragments.
	Select *Select
	// From holds the queued table fragments.
	From *From
	// Where holds the queued condition fragments.
	Where *Where
}
// ConditionFunc reports whether a value satisfies a filter condition.
type ConditionFunc func(interface{}) bool
// NewSelectBuilder returns a SelectBuilder with all three fragment
// queues initialized and empty.
func NewSelectBuilder() *SelectBuilder {
	return &SelectBuilder{
		Select: NewSelect(),
		From:   NewFrom(),
		Where:  NewWhere(),
	}
}
// ToSQl renders the accumulated statement by concatenating the select,
// from and where fragments. It panics when no table was added.
// NOTE(review): the name looks like a typo for ToSQL, but renaming would
// break existing callers — confirm usage before changing it.
func (sb *SelectBuilder) ToSQl() string {
	if sb.From.IsNull() {
		panic("查询表不能为空")
	}
	return sb.Select.ToString() + sb.From.ToString() + sb.Where.ToString()
}
|
package main
import (
"path"
"regexp"
"strings"
)
// featureExtractors lists every feature function applied to a tweet;
// each one maps the tweet text to a single named Feature value.
var featureExtractors = []func(Tweet) Feature{
	ExclamationMarks,
	QuestionMarks,
	DotMarks,
	WordCount,
	LetterCount,
	BadWordCount,
	GoodWordCount,
	HappyEmoticon,
	AngryEmoticon,
	DCSList,
	PositiveListCount,
	NegativeListCount,
	Posemo,
	Negemo,
}
// ExclamationMarks counts '!' characters in the tweet text.
func ExclamationMarks(t Tweet) Feature {
	n := strings.Count(t.Corpus, "!")
	return Feature{Name: "exclamation_marks", Type: Numeric, Value: n}
}
// QuestionMarks counts '?' characters in the tweet text.
func QuestionMarks(t Tweet) Feature {
	n := strings.Count(t.Corpus, "?")
	return Feature{Name: "question_marks", Type: Numeric, Value: n}
}
// DotMarks counts '.' characters in the tweet text.
func DotMarks(t Tweet) Feature {
	n := strings.Count(t.Corpus, ".")
	return Feature{Name: "dot_marks", Type: Numeric, Value: n}
}
// wordCountRE matches word tokens; compiled once at package init instead
// of on every call (the original recompiled the regex per tweet).
var wordCountRE = regexp.MustCompile(`\w+`)

// WordCount counts word tokens (`\w+` runs) in the tweet text.
func WordCount(t Tweet) Feature {
	return Feature{
		Name:  "word_count",
		Type:  Numeric,
		Value: len(wordCountRE.FindAllString(t.Corpus, -1)),
	}
}
// LetterCount reports the length of the tweet text in bytes.
func LetterCount(t Tweet) Feature {
	return Feature{Name: "letter_count", Type: Numeric, Value: len(t.Corpus)}
}
// BadWordCount sums the occurrences of every bad-word list entry in the
// tweet text (substring matches, as in strings.Count).
func BadWordCount(t Tweet) Feature {
	n := 0
	for _, w := range badWordList {
		n += strings.Count(t.Corpus, w)
	}
	return Feature{Name: "bad_word_count", Type: Numeric, Value: n}
}
// GoodWordCount sums the occurrences of every good-word list entry in
// the tweet text (substring matches, as in strings.Count).
func GoodWordCount(t Tweet) Feature {
	n := 0
	for _, w := range goodWordList {
		n += strings.Count(t.Corpus, w)
	}
	return Feature{Name: "good_word_count", Type: Numeric, Value: n}
}
// HappyEmoticon sums the occurrences of every happy emoji in the tweet.
func HappyEmoticon(t Tweet) Feature {
	n := 0
	for _, e := range happyEmojies {
		n += strings.Count(t.Corpus, e)
	}
	return Feature{Name: "happy_emoji", Type: Numeric, Value: n}
}
// AngryEmoticon sums the occurrences of every angry emoji in the tweet.
func AngryEmoticon(t Tweet) Feature {
	n := 0
	for _, e := range angryEmojies {
		n += strings.Count(t.Corpus, e)
	}
	return Feature{Name: "angry_emoji", Type: Numeric, Value: n}
}
// dcsListRE matches word tokens; compiled once at package init instead
// of on every call (the original recompiled the regex per tweet).
var dcsListRE = regexp.MustCompile(`\w+`)

// DCSList sums the DCS-list sentiment scores of all words in the tweet.
func DCSList(t Tweet) Feature {
	score := 0.0
	for _, word := range dcsListRE.FindAllString(t.Corpus, -1) {
		if v, ok := dsclist[strings.ToLower(strings.TrimSpace(word))]; ok {
			score += v
		}
	}
	return Feature{
		Name:  "dcs_list_score",
		Type:  NumericFloat,
		Value: score,
	}
}
// positiveListRE matches word tokens; compiled once at package init
// instead of on every call (the original recompiled the regex per tweet).
var positiveListRE = regexp.MustCompile(`\w+`)

// PositiveListCount counts tweet words present in the positive word set.
func PositiveListCount(t Tweet) Feature {
	count := 0
	for _, word := range positiveListRE.FindAllString(t.Corpus, -1) {
		if _, ok := positive[strings.ToLower(strings.TrimSpace(word))]; ok {
			count++
		}
	}
	return Feature{
		Name:  "positive_list",
		Type:  Numeric,
		Value: count,
	}
}
// negativeListRE matches word tokens; compiled once at package init
// instead of on every call (the original recompiled the regex per tweet).
var negativeListRE = regexp.MustCompile(`\w+`)

// NegativeListCount counts tweet words present in the negative word set.
func NegativeListCount(t Tweet) Feature {
	count := 0
	for _, word := range negativeListRE.FindAllString(t.Corpus, -1) {
		if _, ok := negative[strings.ToLower(strings.TrimSpace(word))]; ok {
			count++
		}
	}
	return Feature{
		Name:  "negative_list",
		Type:  Numeric,
		Value: count,
	}
}
// posemoRE matches word tokens; compiled once at package init instead of
// on every call (the original recompiled the regex per tweet).
var posemoRE = regexp.MustCompile(`\w+`)

// Posemo counts tweet words matching any positive-emotion wildcard
// pattern (path.Match-style globs).
func Posemo(t Tweet) Feature {
	count := 0
	for _, raw := range posemoRE.FindAllString(t.Corpus, -1) {
		word := strings.ToLower(strings.TrimSpace(raw))
		for _, wildcard := range posemoWildcard {
			if ok, _ := path.Match(wildcard, word); ok {
				count++
			}
		}
	}
	return Feature{
		Name:  "posemo_list",
		Type:  Numeric,
		Value: count,
	}
}
// negemoRE matches word tokens; compiled once at package init instead of
// on every call (the original recompiled the regex per tweet).
var negemoRE = regexp.MustCompile(`\w+`)

// Negemo counts tweet words matching any negative-emotion wildcard
// pattern (path.Match-style globs).
//
// Fix: the original iterated posemoWildcard (copy-paste from Posemo), so
// the "negemo_list" feature actually measured positive-emotion words.
func Negemo(t Tweet) Feature {
	count := 0
	for _, raw := range negemoRE.FindAllString(t.Corpus, -1) {
		word := strings.ToLower(strings.TrimSpace(raw))
		for _, wildcard := range negemoWildcard {
			if ok, _ := path.Match(wildcard, word); ok {
				count++
			}
		}
	}
	return Feature{
		Name:  "negemo_list",
		Type:  Numeric,
		Value: count,
	}
}
|
/*
Copyright 2021 CodeNotary, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package store
import (
"crypto/sha256"
"encoding/binary"
"os"
"testing"
"github.com/stretchr/testify/require"
)
// TestVerifyLinearProofEdgeCases covers degenerate linear-proof inputs:
// nil and empty proofs must be rejected, a trivial one-term proof where
// source == target must verify, and a one-term proof claiming to span
// two transactions must not.
func TestVerifyLinearProofEdgeCases(t *testing.T) {
	require.False(t, VerifyLinearProof(nil, 0, 0, sha256.Sum256(nil), sha256.Sum256(nil)))
	require.False(t, VerifyLinearProof(&LinearProof{}, 0, 0, sha256.Sum256(nil), sha256.Sum256(nil)))
	// Single term, source == target: verifies against the matching hash.
	require.True(t,
		VerifyLinearProof(
			&LinearProof{Terms: [][sha256.Size]byte{sha256.Sum256(nil)}, SourceTxID: 1, TargetTxID: 1},
			1,
			1,
			sha256.Sum256(nil),
			sha256.Sum256(nil),
		),
	)
	// Single term cannot prove a range of two transactions.
	require.False(t,
		VerifyLinearProof(
			&LinearProof{Terms: [][sha256.Size]byte{sha256.Sum256(nil)}, SourceTxID: 1, TargetTxID: 2},
			1,
			2,
			sha256.Sum256(nil),
			sha256.Sum256(nil),
		),
	)
}
// TestVerifyDualProofEdgeCases rejects nil/empty dual proofs, then
// commits a small set of transactions and checks, for every
// (source, target) pair with target fixed at the last tx, that a valid
// dual proof verifies and that tampering with either tx metadata makes
// verification fail.
func TestVerifyDualProofEdgeCases(t *testing.T) {
	require.False(t, VerifyDualProof(nil, 0, 0, sha256.Sum256(nil), sha256.Sum256(nil)))
	require.False(t, VerifyDualProof(&DualProof{}, 0, 0, sha256.Sum256(nil), sha256.Sum256(nil)))
	opts := DefaultOptions().WithSynced(false).WithMaxLinearProofLen(0).WithMaxConcurrency(1)
	immuStore, err := Open("data_dualproof_edge_cases", opts)
	require.NoError(t, err)
	defer os.RemoveAll("data_dualproof_edge_cases")
	require.NotNil(t, immuStore)
	// Commit txCount transactions of eCount synthetic key/value pairs.
	txCount := 10
	eCount := 4
	for i := 0; i < txCount; i++ {
		kvs := make([]*KV, eCount)
		for j := 0; j < eCount; j++ {
			k := make([]byte, 8)
			binary.BigEndian.PutUint64(k, uint64(i<<4+j))
			v := make([]byte, 8)
			binary.BigEndian.PutUint64(v, uint64(i<<4+(eCount-j)))
			kvs[j] = &KV{Key: k, Value: v}
		}
		txMetadata, err := immuStore.Commit(kvs, false)
		require.NoError(t, err)
		require.Equal(t, uint64(i+1), txMetadata.ID)
	}
	sourceTx := immuStore.NewTx()
	targetTx := immuStore.NewTx()
	// The target is always the last committed transaction.
	targetTxID := uint64(txCount)
	err = immuStore.ReadTx(targetTxID, targetTx)
	require.NoError(t, err)
	require.Equal(t, uint64(txCount), targetTx.ID)
	for i := 0; i < txCount-1; i++ {
		sourceTxID := uint64(i + 1)
		err := immuStore.ReadTx(sourceTxID, sourceTx)
		require.NoError(t, err)
		require.Equal(t, uint64(i+1), sourceTx.ID)
		dproof, err := immuStore.DualProof(sourceTx, targetTx)
		require.NoError(t, err)
		verifies := VerifyDualProof(dproof, sourceTxID, targetTxID, sourceTx.Alh, targetTx.Alh)
		require.True(t, verifies)
		// Alter proof
		dproof.SourceTxMetadata.BlTxID++
		verifies = VerifyDualProof(dproof, sourceTxID, targetTxID, sourceTx.Alh, targetTx.Alh)
		require.False(t, verifies)
		// Restore proof
		dproof.SourceTxMetadata.BlTxID--
		// Alter proof
		dproof.TargetTxMetadata.BlTxID++
		verifies = VerifyDualProof(dproof, sourceTxID, targetTxID, sourceTx.Alh, targetTx.Alh)
		require.False(t, verifies)
	}
	err = immuStore.Close()
	require.NoError(t, err)
}
|
package controllers
import (
"github.com/astaxie/beego"
"io/ioutil"
"os/exec"
"fmt"
"os"
"bytes"
"strings"
"github.com/bitly/go-simplejson"
)
// Operations about Ipfs
// IpfsController exposes upload/download endpoints backed by the local
// `ipfs` command-line tool and a shared tmp/output.json scratch file.
type IpfsController struct {
	beego.Controller
}
// NOTE(review): cmdOut and err are package-level and shared by every
// request handler — concurrent requests race on them (and on the shared
// tmp/output.json file). Prefer function-local variables; confirm the
// deployment is single-request before relying on this.
var (
	cmdOut []byte
	err error
)
// @Title upload
// @Description upload json to IPFS
// @Param body body models.Ipfs true "body for user content"
// @Success 200 {int} models.Ipfs.IpfsId
// @Failure 403 body is empty
// @router / [post]
// Uplaod writes the JSON request body to tmp/output.json, adds it to
// IPFS via the `ipfs add` CLI, and responds with the content hash.
// The method name keeps its original (misspelled) form because beego's
// @router annotation and generated routes are bound to it.
// Fix: errors are now held in function-local variables instead of the
// shared package-level `err`, which raced between concurrent requests.
func (i *IpfsController) Uplaod() {
	if err := ioutil.WriteFile("tmp/output.json", i.Ctx.Input.RequestBody, 0666); err != nil {
		fmt.Println(err)
	}
	// Add the file to IPFS; on success ipfs prints "added <hash> <name>".
	args := []string{"add", "tmp/output.json"}
	var out bytes.Buffer
	var stderr bytes.Buffer
	cmd := exec.Command("ipfs", args...)
	cmd.Stdout = &out
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		fmt.Println(fmt.Sprint(err) + ": " + stderr.String())
		i.Data["json"] = fmt.Sprint(err) + ": " + stderr.String()
	} else {
		// The content hash is the second whitespace-separated field.
		i.Data["json"] = strings.Split(out.String(), " ")[1]
	}
	i.ServeJSON()
}
// @Title download
// @Description download json from IPFS
// @Param ipfsId path string true "ipfsId for json"
// @Success 200 {object} models.Ipfs
// @Failure 403 ipfsId is error
// @router /:ipfsId [get]
// Download fetches the content identified by :ipfsId from IPFS via the
// `ipfs get` CLI into tmp/output.json, parses it as JSON, and returns it.
// Fixes: uses function-local cmdOut/err instead of the shared package
// variables (race between concurrent requests); the error message now
// names the ipfs command instead of "git rev-parse"; the redundant
// []byte conversion of data was removed.
func (i *IpfsController) Download() {
	ipfsId := i.Ctx.Input.Param(":ipfsId")
	// Download the JSON document from IPFS into the scratch file.
	args := []string{"get", ipfsId, "-o", "tmp/output.json"}
	cmdOut, err := exec.Command("ipfs", args...).Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, "There was an error running ipfs get command: ", err)
	}
	fmt.Println(string(cmdOut))
	// Parse the downloaded JSON.
	data, err := ioutil.ReadFile("tmp/output.json")
	if err != nil {
		fmt.Println("ReadFile: ", err.Error())
	}
	js, err := simplejson.NewJson(data)
	if err != nil {
		fmt.Println("newJson: ", err.Error())
	}
	i.Data["json"] = js
	i.ServeJSON()
}
|
package main
import f "fmt"
// main demonstrates a buffered channel: with a capacity of 2, both sends
// complete without any receiver being ready, and the values are then
// received in FIFO order. Fixed the "beffered" typo in the printed label.
func main() {
	f.Println("buffered")
	messages := make(chan string, 2)
	messages <- "wisoft"
	messages <- "lab"
	f.Println(<-messages)
	f.Println(<-messages)
}
|
package user
import (
"github.com/jinzhu/gorm"
"time"
"github.com/satori/go.uuid"
)
// User type levels, from most to least privileged.
const (
	SUPER_ADMIN = 0
	ADMIN = 1
	STAFF = 2
	RESIDENT = 3
)
// Origin of a password-reset request.
const (
	RESET_PASSWORD_FROM_WEB = 1
	RESET_PASSWORD_FROM_MOBILE = 2
)
// OAuth scope strings corresponding to the user types above.
// NOTE(review): "supper_admin" looks like a typo for "super_admin", but
// it is a runtime scope value — changing it would invalidate existing
// tokens; confirm all consumers before fixing.
const (
	ScopesSuperAdmin = "supper_admin"
	ScopesAdmin = "admin"
	ScopesStaff = "staff"
	ScopesResident = "resident"
)
// Generic record status values (used by Company.Status).
const (
	PENDING = 0
	ENABLE = 1
	DISABLE = 2
	DELETED = 3
)
// User-specific status values (no pending state).
const (
	USER_ENABLE = 1
	USER_DISABLE = 2
	USER_DELETED = 3
)
// LinkSendEmailResetPassword is the base URL embedded in password-reset
// emails; the confirmation token is appended to it.
const LinkSendEmailResetPassword = "wewumbo.tpptechnology.com/user/confirmNewPassword/"
// User is the gorm model for an account of any type (admin, staff,
// resident, ...). Fix: the WumboToken and WumboId struct tags had
// `; index` placed OUTSIDE the gorm:"..." value (e.g.
// `gorm:"type:varchar(255)"; index`), which is not valid
// key:"value" tag syntax, so the index directive was silently ignored;
// the index option is now inside the gorm tag where it belongs.
type User struct {
	ID           uuid.UUID `gorm:"type:char(36); primary_key"`
	Name         string    `gorm:"type:varchar(64); not null;index"`
	Avatar       string    `gorm:"type:varchar(255);"`
	Splash       string    `gorm:"type:text"`
	Phone        string    `gorm:"type:varchar(32);"`
	Email        string    `gorm:"type:varchar(255);not null;unique_index"`
	Address      string    `gorm:"type:text"`
	Descriptions string    `gorm:"type:text"`
	UserName     string    `gorm:"type:varchar(32); not null;unique_index"`
	Password     string    `gorm:"type:varchar(255); not null"`
	Token        string    `gorm:"type:text"`
	UserType     int32     `gorm:"type:int"` // one of SUPER_ADMIN/ADMIN/STAFF/RESIDENT
	CreatedAt    time.Time
	UpdatedAt    time.Time
	Deleted      bool
	Active       bool
	WumboToken       string  `gorm:"type:varchar(255);index"`
	WumboTokenExpire int32   `gorm:"type:int"`
	WumboId          int     `gorm:"type:int;index"`
	Staff            []Staff `gorm:"foreignkey:fk_user"`
}
// Duplicate of service.Category for bypass cycling import
type Category struct {
ID int32 `gorm:"AUTO_INCREMENT; primary_key"`
Name string `gorm:"type:varchar(64); not null; index"`
Deleted bool `gorm:"type:tinyint"`
CreatedAt time.Time
UpdatedAt time.Time
}
// Company is the gorm model for a service-provider company, including
// its contact, banking and moderation-status details.
// Fixes: the Operations tag joined gorm and json with a comma
// (`gorm:"type:text",json:"operations"`), which is not valid struct-tag
// syntax (keys must be space-separated), so the json tag was silently
// ignored; and the Email tag contained "not nul" instead of "not null".
type Company struct {
	ID                 uuid.UUID `gorm:"type:char(36); primary_key"`
	Name               string    `gorm:"type:varchar(64); not null;index"`
	Descriptions       string    `gorm:"type:text"`
	Tos                string    `gorm:"type:text"`
	Logo               string    `gorm:"type:varchar(255)"`
	Banner             string    `gorm:"type:varchar(255)"`
	Certification      string    `gorm:"type:text"`
	ServiceIndustry    int32     `gorm:"type:int;"`
	ServiceIndustryObj Category  `gorm:"foreignkey:service_industry"`
	Rating             float32   `gorm:"type:float"`
	UserID             uuid.UUID `gorm:"type:char(36); column:fk_user"`
	Building           string    `gorm:"type:varchar(64)"`
	OpenHours          string    `gorm:"type:varchar(64);"`
	OpenDays           string    `gorm:"type:varchar(64);"`
	Operations         string    `gorm:"type:text" json:"operations"`
	Phone              string    `gorm:"type:varchar(32);"`
	Email              string    `gorm:"type:varchar(64);not null"`
	Address            string    `gorm:"type:text;"`
	Website            string    `gorm:"type:varchar(64);"`
	Hotline            string    `gorm:"type:varchar(32);"`
	ACN                string    `gorm:"type:varchar(32);"`
	BankName           string    `gorm:"type:varchar(128);"`
	BSBNo              string    `gorm:"type:varchar(64);"`
	AccountName        string    `gorm:"type:varchar(128);"`
	AccountNumber      string    `gorm:"type:varchar(64);"`
	BicSwiftCode       string    `gorm:"type:varchar(64);"`
	AccountType        string    `gorm:"type:varchar(64);"`
	BankCountry        string    `gorm:"type:varchar(64);"`
	HolderType         string    `gorm:"type:varchar(64);"`
	BranchAddress      string    `gorm:"type:varchar(255);"`
	Status             int32     `gorm:"type:int"` // one of PENDING/ENABLE/DISABLE/DELETED
	CreatedAt          time.Time
	UpdatedAt          time.Time
	Deleted            bool
	User               User    `gorm:"foreignkey:fk_user"`
	Staff              []Staff `gorm:"foreignkey:fk_company"`
}
// Staff links a user to a company; the (user, company) pair is unique.
type Staff struct {
	UserID uuid.UUID `gorm:"type:char(36); not null; unique_index:idx_user_company; column:fk_user"`
	User User `gorm:"foreignkey:UserID"`
	CompanyID uuid.UUID `gorm:"type:char(36); not null; unique_index:idx_user_company; column:fk_company"`
	Company Company `gorm:"foreignkey:CompanyID"`
	Enable bool
	CreatedAt time.Time
	UpdatedAt time.Time
}
// SendEmail records an outbound account email (credentials delivery)
// and whether it was sent successfully.
type SendEmail struct {
	ID uuid.UUID `gorm:"type:varchar(36);"`
	UserID uuid.UUID `gorm:"type:varchar(36);"`
	Username string `gorm:"type:varchar(32);"`
	Password string `gorm:"type:varchar(255);"`
	Email string `gorm:"type:varchar(32);"`
	CreatedAt time.Time
	// Status reports whether the email was sent successfully.
	Status bool
}
// FileUploadReturn is the JSON response payload for a file upload:
// the stored file name and its public link.
type FileUploadReturn struct {
	Name string `json:"name"`
	Link string `json:"link"`
}
// BeforeCreate is a gorm hook that assigns a fresh UUID primary key
// before the user row is inserted.
func (d *User) BeforeCreate(scope *gorm.Scope) error {
	return scope.SetColumn("ID", uuid.NewV4())
}
// BeforeCreate is a gorm hook that assigns a fresh UUID primary key
// before the company row is inserted.
func (d *Company) BeforeCreate(scope *gorm.Scope) error {
	return scope.SetColumn("ID", uuid.NewV4())
}
// GetScope maps the user's type to its OAuth scope string; any unknown
// type falls back to the resident scope.
func (u *User) GetScope() string {
	if u.UserType == SUPER_ADMIN {
		return ScopesSuperAdmin
	}
	if u.UserType == ADMIN {
		return ScopesAdmin
	}
	if u.UserType == STAFF {
		return ScopesStaff
	}
	return ScopesResident
}
|
// Copyright 2019 John Papandriopoulos. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package zydis
// CPUFlagAction is an enum of CPU action flags.
// It describes how an instruction interacts with a given CPU flag.
type CPUFlagAction int
// CPUFlagAction enum values.
const (
	// The CPU flag is not touched by the instruction.
	CPUFlagActionNone CPUFlagAction = iota
	// The CPU flag is tested (read).
	CPUFlagActionTested
	// The CPU flag is tested and modified afterwards (read-write).
	CPUFlagActionTestedModified
	// The CPU flag is modified (write).
	CPUFlagActionModified
	// The CPU flag is set to 0 (write).
	CPUFlagActionSet0
	// The CPU flag is set to 1 (write).
	CPUFlagActionSet1
	// The CPU flag is undefined (write).
	CPUFlagActionUndefined
)
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/8/20 9:04 上午
# @File : lt_17_电话号码组合.go
# @Description :
# @Attention :
*/
package offer
// phoneMap maps a phone keypad digit (2-9) to the letters on that key.
var phoneMap map[byte]string = map[byte]string{
	2: "abc",
	3: "def",
	4: "ghi",
	5: "jkl",
	6: "mno",
	7: "pqrs",
	8: "tuv",
	9: "wxyz",
}

// ret accumulates the combinations produced by the backtracking walk.
var ret []string

// letterCombinations returns every letter string the digit string could
// spell on a phone keypad (LeetCode 17), generated by backtracking.
func letterCombinations(digits string) []string {
	if len(digits) == 0 {
		return nil
	}
	ret = nil
	letterCombinationsBackTrack(digits, 0, "")
	return ret
}

// letterCombinationsBackTrack extends the partial string res with each
// letter mapped to digits[index], recursing until all digits are used.
func letterCombinationsBackTrack(digits string, index int, res string) {
	if index == len(digits) {
		ret = append(ret, res)
		return
	}
	for _, letter := range phoneMap[digits[index]-'0'] {
		letterCombinationsBackTrack(digits, index+1, res+string(letter))
	}
}
|
package main
import (
"fmt"
"net/http"
"net/http/httputil"
)
// main fetches a URL while logging every redirect hop via the client's
// CheckRedirect callback, then dumps the final response.
// Fix: the error returned by http.NewRequest was silently overwritten by
// the later client.Do assignment; a failed NewRequest would have passed
// a nil request to Do. It is now checked immediately.
func main() {
	request, err := http.NewRequest(http.MethodGet, "https://www.indiegogo.com/projects/cubo-ai-world-s-smartest-baby-monitor?secret_perk_token=d15cf19e#/", nil)
	if err != nil {
		panic(err)
	}
	// request.Header.Add("User-Agent", "Mozilla/5.0 (iPhone)")
	// res, err := http.Get("https://www.youtube.com")
	client := http.Client{
		// CheckRedirect is invoked for each redirect; returning nil lets
		// the default policy (follow up to 10 redirects) apply.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			fmt.Println("Redirect: ", req)
			return nil
		},
	}
	// res, err := http.DefaultClient.Do(request)
	res, err := client.Do(request)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	s, err := httputil.DumpResponse(res, true)
	if err != nil {
		panic(err)
	}
	fmt.Printf("dump respones: %s\n", s)
}
|
package main
import "fmt"
// main demonstrates that main's defer stack is independent from sum's:
// sum's deferred calls all run before sum returns, and only then do
// main's deferred calls run (in LIFO order) when main itself returns.
func main() {
	// main's defer stack is separate from the one inside sum.
	defer fmt.Println("main defer1") // runs 5th (last)
	defer fmt.Println("main defer2") // runs 4th
	fmt.Println("main res", sum(1, 2))
}
// sum adds a and b while demonstrating defer semantics: deferred calls are
// pushed onto a per-function stack with their arguments evaluated (copied)
// at the defer statement, and run LIFO after the return value is set.
// Because res is NOT a named return, the deferred *res += 100 mutates the
// local only — the caller still receives 3.
func sum(a int, b int) int {
	var res int
	// A defer statement does not run immediately: the call (with its
	// argument values copied now) is pushed onto this function's defer
	// stack and executed LIFO after return.
	defer fmt.Println("a=", a) // runs 4th; prints a = 1 — value copied at defer time, later a++ has no effect here
	defer fmt.Println("b=", b) // runs 3rd; prints b = 2 — value copied at defer time, later b++ has no effect here
	defer func(res *int) { // runs 2nd; pointer lets it see/mutate the final local value
		*res += 100
		fmt.Println("defer res", *res) // prints 103 (3 + 100); return value was already copied
	}(&res)
	res = a + b
	a++
	b++
	fmt.Println("res=", res) // runs 1st; prints 3
	return res
	// the defer stack unwinds here
}
//defer
//Its main value: resources created by a function can be released promptly once the function finishes.
//1) When go reaches a defer, it does not execute the deferred statement immediately; it pushes the call onto a per-function stack (the "defer stack") and continues with the next statement.
//2) After the function finishes, the deferred calls are popped from that stack and executed last-in, first-out.
//3) When a call is pushed onto the defer stack, the argument values it uses are copied and pushed along with it.
package main
import (
"os"
"got/internal/cmd"
)
//var g = got.NewGot(disk.NewObjects(), file.ReadFromFile())
//var sum string
// main runs the got root command and exits non-zero on failure.
// (Cobra-style commands report their own errors; main only sets the
// process exit status.)
func main() {
	err := cmd.GotCmd.Execute()
	if err != nil {
		os.Exit(1)
	}
	/*//fmt.Println(g.HashObject([]byte("test content"), true, objects.TypeBlob))
	//fmt.Printf("[Objects]:\n%v\n", internal.Objects)
	//fmt.Printf("Index:\n%v\n", internal.Index.String())
	fmt.Println("\nCreating test.txt...")
	ioutil.WriteFile("test.txt", []byte("version 1"), os.ModePerm)
	sum = g.HashFile("test.txt", true)
	fmt.Println(sum)
	g.AddToIndex(sum, "test.txt")
	//fmt.Printf("[Objects]:\n%v\n", internal.Objects)
	//fmt.Printf("Index:\n%v\n", internal.Index)
	tree := g.WriteTree()
	//fmt.Printf("[Objects]:\n%v\n", internal.Objects)
	commit := g.CommitTree("first commit", tree, "")
	//fmt.Printf("[Objects]:\n%v\n", internal.Objects)
	prev := commit
	fmt.Println("\nUpdating test.txt...")
	ioutil.WriteFile("test.txt", []byte("version 2"), os.ModePerm)
	sum = g.HashFile("test.txt", true)
	fmt.Println(sum)
	g.AddToIndex(sum, "test.txt")
	//fmt.Printf("[Objects]:\n%v\n", internal.Objects)
	//fmt.Printf("Index:\n%v\n", internal.Index)
	tree = g.WriteTree()
	//fmt.Printf("[Objects]:\n%v\n", internal.Objects)
	commit = g.CommitTree("second commit", tree, prev)
	//fmt.Printf("[Objects]:\n%v\n", internal.Objects)
	prev = commit
	fmt.Println("\nCreating new.txt...")
	ioutil.WriteFile("new.txt", []byte("new file"), os.ModePerm)
	sum = g.HashFile("new.txt", true)
	fmt.Println(sum)
	g.AddToIndex(sum, "test.txt")
	//fmt.Printf("[Objects]:\n%v\n", internal.Objects)
	//fmt.Printf("Index:\n%v\n", internal.Index)
	tree = g.WriteTree()
	//fmt.Printf("[Objects]:\n%v\n", internal.Objects)
	commit = g.CommitTree("third commit", tree, prev)
	//fmt.Printf("[Objects]:\n%v\n", internal.Objects)
	prev = commit
	*/
}
|
package parametrs
import (
"reflect"
)
// IParamers is implemented by parameter structs that can serialize
// themselves into a flat string map.
// NOTE(review): the name looks like a typo for "IParameters"; renaming
// would break external callers, so it is only flagged here.
type IParamers interface {
	ToMap() map[string]string
}
// TypeToMap converts a struct (or pointer to a struct) into a map keyed by
// each field's `tag_name` struct tag. Only exported string fields with a
// non-empty value are included; everything else is skipped.
//
// Fixes vs. the original:
//   - reflect.Indirect instead of .Elem(): plain struct values no longer
//     panic (pointers behave exactly as before — backward compatible).
//   - unexported fields are skipped instead of panicking in Interface().
//   - removed the redundant string(valueField) conversion.
func TypeToMap(t interface{}) map[string]string {
	retval := map[string]string{}
	val := reflect.Indirect(reflect.ValueOf(t))
	for i := 0; i < val.NumField(); i++ {
		field := val.Field(i)
		// Interface() panics on unexported fields; guard first.
		if !field.CanInterface() {
			continue
		}
		valueField, ok := field.Interface().(string)
		if !ok || len(valueField) == 0 {
			continue
		}
		typeField := val.Type().Field(i)
		tag := typeField.Tag.Get("tag_name")
		retval[tag] = valueField
	}
	return retval
}
|
package timewheel
import (
"fmt"
"testing"
"time"
"github.com/lioneagle/goutil/src/test"
)
// record pairs the timestamp delivered on a ticker channel (t1) with the
// time it was actually observed by the test (t2).
type record struct {
	t1 time.Time
	t2 time.Time
}
// TestTimeWheelAddOk verifies that Add places a timer on the expected
// wheel and slot for a range of second/minute/hour offsets, and that the
// stat counters record exactly one successful add each time.
func TestTimeWheelAddOk(t *testing.T) {
	testdata := []struct {
		second int64 // renamed from "sceond" (typo); field is test-local
		minute int64
		hour   int64
		wheel  int32
		slot   int32
	}{
		{1, 0, 0, 0, 1},
		{59, 0, 0, 0, 59},
		{0, 1, 0, 1, 1},
		{0, 0, 1, 2, 1},
		{0, 10, 1, 2, 1},
		{32, 11, 1, 2, 1},
		{59, 59, 23, 2, 23},
	}
	statWanted := &TimeWheelStat{
		Add:           1,
		AddOk:         1,
		InternalAdd:   1,
		InternalAddOk: 1,
	}
	tick := int64(10)
	tw := NewTimeWheel(3, []int{60, 60, 24}, tick, 0, 1000)
	for i, v := range testdata {
		v := v // capture per-iteration value for the subtest closure
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			//t.Parallel()
			tw.RemoveAll()
			tw.stat.Clear()
			// Decimal wheel: 60 s/min, 60 min/h.
			interval := v.second + v.minute*60 + v.hour*3600
			interval *= tick
			ret := tw.Add(interval, nil, nil)
			test.ASSERT_TRUE(t, ret >= 0, "")
			test.EXPECT_EQ(t, tw.allocator.Chunks[ret].data.wheel, v.wheel, "")
			test.EXPECT_EQ(t, tw.allocator.Chunks[ret].data.slot, v.slot, "")
			test.EXPECT_EQ(t, tw.stat, *statWanted, "")
		})
	}
}
// TestTimeWheelBinaryAddOk is the power-of-two variant of
// TestTimeWheelAddOk: wheels sized 2^6, 2^6 and 2^4, so a "minute" is 64
// ticks and an "hour" is 64*64 ticks.
func TestTimeWheelBinaryAddOk(t *testing.T) {
	testdata := []struct {
		second int64 // renamed from "sceond" (typo); field is test-local
		minute int64
		hour   int64
		wheel  int32
		slot   int32
	}{
		{1, 0, 0, 0, 1},
		{63, 0, 0, 0, 63},
		{0, 1, 0, 1, 1},
		{0, 0, 1, 2, 1},
		{0, 10, 1, 2, 1},
		{32, 11, 1, 2, 1},
		{63, 63, 15, 2, 15},
	}
	statWanted := &TimeWheelStat{
		Add:           1,
		AddOk:         1,
		InternalAdd:   1,
		InternalAddOk: 1,
	}
	tick := int64(10)
	tw := NewTimeWheelBinaryBits(3, []int{6, 6, 4}, tick, 0, 1000)
	for i, v := range testdata {
		v := v // capture per-iteration value for the subtest closure
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			//t.Parallel()
			tw.RemoveAll()
			tw.stat.Clear()
			// Binary wheel: 64 "seconds" per "minute", 64*64 per "hour".
			interval := v.second + v.minute*64 + v.hour*64*64
			interval *= tick
			ret := tw.Add(interval, nil, nil)
			test.ASSERT_TRUE(t, ret >= 0, "")
			test.EXPECT_EQ(t, tw.allocator.Chunks[ret].data.wheel, v.wheel, "")
			test.EXPECT_EQ(t, tw.allocator.Chunks[ret].data.slot, v.slot, "")
			test.EXPECT_EQ(t, tw.stat, *statWanted, "")
		})
	}
}
// TestTimeWheelAddNOk checks both Add failure modes: an interval that has
// already expired returns -2, and one beyond the wheel's range returns -1.
func TestTimeWheelAddNOk(t *testing.T) {
	tw := NewTimeWheel(3, []int{60, 60, 24}, 1, 0, 1000)
	ret := tw.Add(0, nil, nil)
	test.EXPECT_EQ(t, ret, int32(-2), "")
	statWanted1 := &TimeWheelStat{Add: 1, Expire: 1, ExpireBeforeAdd: 1}
	test.EXPECT_EQ(t, tw.stat, *statWanted1, "")
	tw.stat.Clear()
	// 60*60*24 is exactly one past the representable range.
	ret = tw.Add(60*60*24, nil, nil)
	test.EXPECT_EQ(t, ret, int32(-1), "")
	statWanted2 := &TimeWheelStat{Add: 1}
	test.EXPECT_EQ(t, tw.stat, *statWanted2, "")
	tw.RemoveAll()
}
// TestTimeWheelBinaryAddNOk mirrors TestTimeWheelAddNOk for the binary
// wheel; the post callback makes the immediate expiry also bump Post.
func TestTimeWheelBinaryAddNOk(t *testing.T) {
	delta := int64(10)
	tw := NewTimeWheelBinaryBits(3, []int{6, 6, 4}, delta, 0, 1000)
	ret := tw.Add(0, nil, func(interface{}) {})
	test.EXPECT_EQ(t, ret, int32(-2), "")
	statWanted1 := &TimeWheelStat{
		Add:             1,
		Expire:          1,
		ExpireBeforeAdd: 1,
		Post:            1,
	}
	test.EXPECT_EQ(t, tw.stat, *statWanted1, "")
	tw.stat.Clear()
	// 2^(6+6+4) ticks is one past the representable range.
	ret = tw.Add((1<<16)*delta, nil, nil)
	test.EXPECT_EQ(t, ret, int32(-1), "")
	statWanted2 := &TimeWheelStat{Add: 1}
	test.EXPECT_EQ(t, tw.stat, *statWanted2, "")
	tw.RemoveAll()
}
// TestTimeWheelBinaryRemoveOk1: removing the only timer succeeds.
func TestTimeWheelBinaryRemoveOk1(t *testing.T) {
	tick := int64(10)
	tw := NewTimeWheelBinaryBits(3, []int{6, 6, 4}, tick, 0, 1000)
	tm := tw.Add(10*tick, nil, nil)
	test.EXPECT_TRUE(t, tm >= 0, "")
	ret := tw.Remove(tm)
	test.EXPECT_TRUE(t, ret, "")
	statWanted1 := &TimeWheelStat{
		Add:              1,
		AddOk:            1,
		InternalAdd:      1,
		InternalAddOk:    1,
		Remove:           1,
		RemoveOk:         1,
		InternalRemove:   1,
		InternalRemoveOk: 1,
	}
	test.EXPECT_EQ(t, tw.stat, *statWanted1, "")
	tw.RemoveAll()
}
// TestTimeWheelBinaryRemoveOk2: removing one of two timers in DIFFERENT
// slots succeeds and leaves the other intact.
func TestTimeWheelBinaryRemoveOk2(t *testing.T) {
	tick := int64(10)
	tw := NewTimeWheelBinaryBits(3, []int{6, 6, 4}, tick, 0, 1000)
	tm1 := tw.Add(10*tick, nil, nil)
	test.EXPECT_TRUE(t, tm1 >= 0, "")
	tm2 := tw.Add(20*tick, nil, nil)
	test.EXPECT_TRUE(t, tm2 >= 0, "")
	ret := tw.Remove(tm1)
	test.EXPECT_TRUE(t, ret, "")
	statWanted1 := &TimeWheelStat{
		Add:              2,
		AddOk:            2,
		InternalAdd:      2,
		InternalAddOk:    2,
		Remove:           1,
		RemoveOk:         1,
		InternalRemove:   1,
		InternalRemoveOk: 1,
	}
	test.EXPECT_EQ(t, tw.stat, *statWanted1, "")
	tw.RemoveAll()
}
// TestTimeWheelBinaryRemoveOk3: removing the FIRST of two timers sharing a
// slot succeeds.
func TestTimeWheelBinaryRemoveOk3(t *testing.T) {
	tick := int64(10)
	tw := NewTimeWheelBinaryBits(3, []int{6, 6, 4}, tick, 0, 1000)
	tm1 := tw.Add(10*tick, nil, nil)
	test.EXPECT_TRUE(t, tm1 >= 0, "")
	tm2 := tw.Add(10*tick, nil, nil)
	test.EXPECT_TRUE(t, tm2 >= 0, "")
	ret := tw.Remove(tm1)
	test.EXPECT_TRUE(t, ret, "")
	statWanted1 := &TimeWheelStat{
		Add:              2,
		AddOk:            2,
		InternalAdd:      2,
		InternalAddOk:    2,
		Remove:           1,
		RemoveOk:         1,
		InternalRemove:   1,
		InternalRemoveOk: 1,
	}
	test.EXPECT_EQ(t, tw.stat, *statWanted1, "")
	tw.RemoveAll()
}
// TestTimeWheelBinaryRemoveOk4: removing the SECOND of two timers sharing a
// slot succeeds.
func TestTimeWheelBinaryRemoveOk4(t *testing.T) {
	tick := int64(10)
	tw := NewTimeWheelBinaryBits(3, []int{6, 6, 4}, tick, 0, 1000)
	tm1 := tw.Add(10*tick, nil, nil)
	test.EXPECT_TRUE(t, tm1 >= 0, "")
	tm2 := tw.Add(10*tick, nil, nil)
	test.EXPECT_TRUE(t, tm2 >= 0, "")
	ret := tw.Remove(tm2)
	test.EXPECT_TRUE(t, ret, "")
	statWanted1 := &TimeWheelStat{
		Add:              2,
		AddOk:            2,
		InternalAdd:      2,
		InternalAddOk:    2,
		Remove:           1,
		RemoveOk:         1,
		InternalRemove:   1,
		InternalRemoveOk: 1,
	}
	test.EXPECT_EQ(t, tw.stat, *statWanted1, "")
	tw.RemoveAll()
}
// TestTimeWheelStep1: a single Step past two same-deadline timers expires
// both of them (non-zero start time exercised too).
func TestTimeWheelStep1(t *testing.T) {
	tick := int64(10)
	start := int64(234)
	tw := NewTimeWheel(3, []int{60, 60, 24}, tick, start, 1000)
	tm1 := tw.Add(1*tick, nil, nil)
	test.EXPECT_TRUE(t, tm1 >= 0, "")
	tm2 := tw.Add(1*tick, nil, nil)
	test.EXPECT_TRUE(t, tm2 >= 0, "")
	tw.Step(start + tick)
	statWanted1 := &TimeWheelStat{
		Add:              2,
		AddOk:            2,
		InternalAdd:      2,
		InternalAddOk:    2,
		InternalRemove:   2,
		InternalRemoveOk: 2,
		Expire:           2,
		Step:             1,
	}
	test.EXPECT_EQ(t, tw.stat, *statWanted1, "")
	tw.RemoveAll()
}
// TestTimeWheelStep2: interleaved adds and steps, including a long timer
// that must cascade across wheels (MoveWheels/MoveSlot) before expiring.
// Each timer carries a post callback, hence Post == Expire.
func TestTimeWheelStep2(t *testing.T) {
	tick := int64(10)
	tw := NewTimeWheel(3, []int{100, 10, 3}, tick, 0, 1000)
	tm1 := tw.Add(5*tick, nil, func(interface{}) {})
	test.EXPECT_TRUE(t, tm1 >= 0, "")
	tw.Step(3 * tick)
	tm2 := tw.Add(3*tick, nil, func(interface{}) {})
	test.EXPECT_TRUE(t, tm2 >= 0, "")
	tw.Step(4 * tick)
	tm3 := tw.Add(250*tick, nil, func(interface{}) {})
	test.EXPECT_TRUE(t, tm3 >= 0, "")
	tw.Step(100 * tick)
	tw.Step(254 * tick)
	//fmt.Println("tw =", tw)
	statWanted1 := &TimeWheelStat{
		Add:              3,
		AddOk:            3,
		InternalAdd:      4,
		InternalAddOk:    4,
		InternalRemove:   4,
		InternalRemoveOk: 4,
		Expire:           3,
		Post:             3,
		Step:             4,
		MoveWheels:       2,
		MoveSlot:         1,
	}
	test.EXPECT_EQ(t, tw.stat, *statWanted1, "")
	tw.RemoveAll()
	test.EXPECT_TRUE(t, tw.size >= 0, "")
}
// TestGoTimer1 samples ten ticks of a 1ms stdlib ticker, recording the
// delivered tick time alongside the observation time. The comparison
// print-out is left commented for manual inspection.
func TestGoTimer1(t *testing.T) {
	//fmt.Println("time.Now() =", time.Now())
	//fmt.Println("time.Second =", time.Second)
	ticker := time.NewTicker(1000000 * 1) // 1,000,000 ns = 1ms
	times := make([]*record, 0)
	for i := 0; i < 10; i++ {
		t1 := <-ticker.C
		//fmt.Println(time.String())
		times = append(times, &record{t1, time.Now()})
	}
	ticker.Stop()
	/*for i, v := range times {
		fmt.Printf("[%d]: t1 = %s, t2 = %s\n", i, v.t1.String(), v.t2.String())
	}*/
}
// TestGoTimer2 is a disabled micro-measurement of time.Now overhead; the
// body is intentionally commented out so regular runs stay fast.
func TestGoTimer2(t *testing.T) {
	/*start := time.Now()
	for i := 0; i < 100000000; i++ {
		time.Now()
	}
	end := time.Now()
	fmt.Printf("start: %s\n", start.String())
	fmt.Printf("end: %s\n", end.String())
	fmt.Printf("ns/op: %s\n", end.Sub(start).String())*/
}
// BenchmarkGoTimer1 measures stdlib ticker creation + Stop per iteration
// (the interval is long enough that no tick ever fires).
func BenchmarkGoTimer1(b *testing.B) {
	b.StopTimer()
	b.ReportAllocs()
	b.SetBytes(2)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		ticker := time.NewTicker(100000000000)
		ticker.Stop()
	}
}
// BenchmarkGoTimer2 measures receiving from a shared 2ms ticker.
func BenchmarkGoTimer2(b *testing.B) {
	ticker := time.NewTicker(1000000 * 2)
	b.ResetTimer()
	b.ReportAllocs()
	b.SetBytes(2)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		<-ticker.C
	}
	b.StopTimer()
	ticker.Stop()
}
// BenchmarkGoTimer3 measures receiving from a 1ns ticker (always ready).
func BenchmarkGoTimer3(b *testing.B) {
	ticker := time.NewTicker(1)
	b.ResetTimer()
	b.ReportAllocs()
	b.SetBytes(2)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		<-ticker.C
	}
	b.StopTimer()
	ticker.Stop()
}
// BenchmarkGoTimer4 measures the cost of a bare time.Now call.
func BenchmarkGoTimer4(b *testing.B) {
	b.ResetTimer()
	b.ReportAllocs()
	b.SetBytes(2)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		time.Now()
	}
	b.StopTimer()
}
// BenchmarkTimeWheelAddRemove1: add/remove a long-interval timer on the
// decimal time wheel (exercises multi-wheel placement).
func BenchmarkTimeWheelAddRemove1(b *testing.B) {
	b.StopTimer()
	tw := NewTimeWheel(5, []int{256, 64, 64, 64, 64}, 1, 0, 10000)
	//tw := NewTimeWheel(8, []int{64, 64, 64, 64, 64, 64, 64, 64}, 1, 10000)
	//tw := NewTimeWheel(4, []int{256, 256, 256, 256}, 1, 10000)
	//tw := NewTimeWheel(3, []int{1 << 11, 1 << 11, 1 << 10}, 1, 10000)
	b.ReportAllocs()
	b.SetBytes(2)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		e := tw.Add(20000000, 100, nil)
		//e := tw.Add(1, 100, nil)
		tw.Remove(e)
	}
}
// BenchmarkTimeWheelAddRemove2: add/remove a 1-tick timer (first wheel only).
func BenchmarkTimeWheelAddRemove2(b *testing.B) {
	b.StopTimer()
	tw := NewTimeWheel(5, []int{256, 64, 64, 64, 64}, 1, 0, 10000)
	//tw := NewTimeWheel(8, []int{64, 64, 64, 64, 64, 64, 64, 64}, 1, 10000)
	//tw := NewTimeWheel(4, []int{256, 256, 256, 256}, 1, 10000)
	//tw := NewTimeWheel(3, []int{1 << 11, 1 << 11, 1 << 10}, 1, 10000)
	b.ReportAllocs()
	b.SetBytes(2)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		e := tw.Add(1, 100, nil)
		tw.Remove(e)
	}
}
// BenchmarkTimeWheelBinaryAddRemove1: long-interval add/remove on the
// power-of-two wheel.
func BenchmarkTimeWheelBinaryAddRemove1(b *testing.B) {
	b.StopTimer()
	tw := NewTimeWheelBinaryBits(5, []int{8, 6, 6, 6, 6}, 1, 0, 10000)
	//tw := NewTimeWheel(8, []int{4, 4, 4, 4, 4, 4, 4, 4}, 1, 10000)
	//tw := NewTimeWheel(4, []int{8, 8, 8, 8}, 1, 10000)
	//tw := NewTimeWheel(3, []int{11, 11, 10}, 1, 10000)
	b.ReportAllocs()
	b.SetBytes(2)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		e := tw.Add(20000000, nil, nil)
		//e := tw.Add(1, 100, nil)
		tw.Remove(e)
	}
}
// BenchmarkTimeWheelBinaryAddRemove2: 1-tick add/remove on the
// power-of-two wheel.
func BenchmarkTimeWheelBinaryAddRemove2(b *testing.B) {
	b.StopTimer()
	tw := NewTimeWheelBinaryBits(5, []int{8, 6, 6, 6, 6}, 1, 0, 10000)
	//tw := NewTimeWheel(8, []int{4, 4, 4, 4, 4, 4, 4, 4}, 1, 10000)
	//tw := NewTimeWheel(4, []int{8, 8, 8, 8}, 1, 10000)
	//tw := NewTimeWheel(3, []int{11, 11, 10}, 1, 10000)
	b.ReportAllocs()
	b.SetBytes(2)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		e := tw.Add(1, nil, nil)
		tw.Remove(e)
	}
}
|
package goobj
import (
"bufio"
"errors"
"fmt"
"os"
"reflect"
)
const supportedGoObjVersion = 1
var magicHeader = []byte("\x00\x00go19ld")
var magicFooter = []byte("\xffgo19ld")
// File represents a parsed go object file ("go19ld" format).
type File struct {
	Symbols []Symbol
	SymbolReferences []SymbolReference
	DataBlock []byte
	// the data block starts at this position of the object file
	DataBlockPosition int64
}
// SymbolReference represents a symbol's name and its version.
type SymbolReference struct {
	Name string
	Version int64
}
// Symbol describes metadata associated with data block.
type Symbol struct {
	IDIndex int64 // index into File.SymbolReferences
	Kind SymKind
	Size int64
	DupOK bool // flag bit 0
	Local bool // flag bit 1
	Typelink bool // flag bit 2
	GoTypeIndex int64
	DataAddr DataAddr // region of File.DataBlock holding this symbol's data
	Relocations []Relocation
	// STEXT type has additional fields
	stextFields *StextFields
}
// SymKind represents a type of symbol
type SymKind uint8
// taken from go1.10 cmd/internal/objabi
const (
	// An otherwise invalid zero value for the type
	Sxxx SymKind = iota
	// Executable instructions
	STEXT
	// Read only static data
	SRODATA
	// Static data that does not contain any pointers
	SNOPTRDATA
	// Static data
	SDATA
	// Static data that is initially all 0s
	SBSS
	// Static data that is initially all 0s and does not contain pointers
	SNOPTRBSS
	// Thread-local data that is initially all 0s
	STLSBSS
	// Debugging data
	SDWARFINFO
	SDWARFRANGE
	SDWARFLOC
)
// String returns the human-readable name of the symbol kind, or "UNKNOWN"
// for values outside the declared range.
func (kind SymKind) String() string {
	// Names indexed by the contiguous constant values Sxxx..SDWARFLOC.
	names := [...]string{
		Sxxx:        "INVALID",
		STEXT:       "STEXT",
		SRODATA:     "SRODATA",
		SNOPTRDATA:  "SNOPTRDATA",
		SDATA:       "SDATA",
		SBSS:        "SBSS",
		SNOPTRBSS:   "SNOPTRBSS",
		STLSBSS:     "STLSBSS",
		SDWARFINFO:  "SDWARFINFO",
		SDWARFRANGE: "SDWARFRANGE",
		SDWARFLOC:   "SDWARFLOC",
	}
	// SymKind is unsigned, so only the upper bound needs checking.
	if int(kind) < len(names) {
		return names[kind]
	}
	return "UNKNOWN"
}
// Relocation represents a symbol to be relocated and how to relocate it.
type Relocation struct {
	Offset int64 // byte offset within the containing symbol's data
	Size int64 // size in bytes of the rewritten field
	Type RelocType
	Add int64 // addend
	IDIndex int64 // index into File.SymbolReferences of the target symbol
}
// RelocType describes a way to relocate a symbol.
type RelocType int32
// taken from go1.10 cmd/internal/objabi
const (
	R_ADDR RelocType = 1 + iota
	// R_ADDRPOWER relocates a pair of "D-form" instructions (instructions with 16-bit
	// immediates in the low half of the instruction word), usually addis followed by
	// another add or a load, inserting the "high adjusted" 16 bits of the address of
	// the referenced symbol into the immediate field of the first instruction and the
	// low 16 bits into that of the second instruction.
	R_ADDRPOWER
	// R_ADDRARM64 relocates an adrp, add pair to compute the address of the
	// referenced symbol.
	R_ADDRARM64
	// R_ADDRMIPS (only used on mips/mips64) resolves to the low 16 bits of an external
	// address, by encoding it into the instruction.
	R_ADDRMIPS
	// R_ADDROFF resolves to a 32-bit offset from the beginning of the section
	// holding the data being relocated to the referenced symbol.
	R_ADDROFF // 5
	// R_WEAKADDROFF resolves just like R_ADDROFF but is a weak relocation.
	// A weak relocation does not make the symbol it refers to reachable,
	// and is only honored by the linker if the symbol is in some other way
	// reachable.
	R_WEAKADDROFF
	R_SIZE
	R_CALL // 8
	R_CALLARM
	R_CALLARM64
	R_CALLIND // 11
	R_CALLPOWER
	// R_CALLMIPS (only used on mips64) resolves to non-PC-relative target address
	// of a CALL (JAL) instruction, by encoding the address into the instruction.
	R_CALLMIPS
	R_CONST
	R_PCREL // 15
	// R_TLS_LE, used on 386, amd64, and ARM, resolves to the offset of the
	// thread-local symbol from the thread local base and is used to implement the
	// "local exec" model for tls access (r.Sym is not set on intel platforms but is
	// set to a TLS symbol -- runtime.tlsg -- in the linker when externally linking).
	R_TLS_LE
	// R_TLS_IE, used on 386, amd64, and ARM, resolves to the PC-relative offset to a GOT
	// slot containing the offset from the thread-local symbol from the thread local
	// base and is used to implement the "initial exec" model for tls access (r.Sym
	// is not set on intel platforms but is set to a TLS symbol -- runtime.tlsg -- in
	// the linker when externally linking).
	R_TLS_IE
	R_GOTOFF
	R_PLT0
	R_PLT1
	R_PLT2
	R_USEFIELD
	// R_USETYPE resolves to an *rtype, but no relocation is created. The
	// linker uses this as a signal that the pointed-to type information
	// should be linked into the final binary, even if there are no other
	// direct references. (This is used for types reachable by reflection.)
	R_USETYPE
	// R_METHODOFF resolves to a 32-bit offset from the beginning of the section
	// holding the data being relocated to the referenced symbol.
	// It is a variant of R_ADDROFF used when linking from the uncommonType of a
	// *rtype, and may be set to zero by the linker if it determines the method
	// text is unreachable by the linked program.
	R_METHODOFF // 24
	R_POWER_TOC
	R_GOTPCREL
	// R_JMPMIPS (only used on mips64) resolves to non-PC-relative target address
	// of a JMP instruction, by encoding the address into the instruction.
	// The stack nosplit check ignores this since it is not a function call.
	R_JMPMIPS
	// R_DWARFSECREF resolves to the offset of the symbol from its section.
	// Target of relocation must be size 4 (in current implementation).
	R_DWARFSECREF // 28
	// R_DWARFFILEREF resolves to an index into the DWARF .debug_line
	// file table for the specified file symbol. Must be applied to an
	// attribute of form DW_FORM_data4.
	R_DWARFFILEREF // 29
	// Platform dependent relocations. Architectures with fixed width instructions
	// have the inherent issue that a 32-bit (or 64-bit!) displacement cannot be
	// stuffed into a 32-bit instruction, so an address needs to be spread across
	// several instructions, and in turn this requires a sequence of relocations, each
	// updating a part of an instruction. This leads to relocation codes that are
	// inherently processor specific.
	// Arm64.
	// Set a MOV[NZ] immediate field to bits [15:0] of the offset from the thread
	// local base to the thread local variable defined by the referenced (thread
	// local) symbol. Error if the offset does not fit into 16 bits.
	R_ARM64_TLS_LE
	// Relocates an ADRP; LD64 instruction sequence to load the offset between
	// the thread local base and the thread local variable defined by the
	// referenced (thread local) symbol from the GOT.
	R_ARM64_TLS_IE
	// R_ARM64_GOTPCREL relocates an adrp, ld64 pair to compute the address of the GOT
	// slot of the referenced symbol.
	R_ARM64_GOTPCREL
	// PPC64.
	// R_POWER_TLS_LE is used to implement the "local exec" model for tls
	// access. It resolves to the offset of the thread-local symbol from the
	// thread pointer (R13) and inserts this value into the low 16 bits of an
	// instruction word.
	R_POWER_TLS_LE
	// R_POWER_TLS_IE is used to implement the "initial exec" model for tls access. It
	// relocates a D-form, DS-form instruction sequence like R_ADDRPOWER_DS. It
	// inserts to the offset of GOT slot for the thread-local symbol from the TOC (the
	// GOT slot is filled by the dynamic linker with the offset of the thread-local
	// symbol from the thread pointer (R13)).
	R_POWER_TLS_IE
	// R_POWER_TLS marks an X-form instruction such as "MOVD 0(R13)(R31*1), g" as
	// accessing a particular thread-local symbol. It does not affect code generation
	// but is used by the system linker when relaxing "initial exec" model code to
	// "local exec" model code.
	R_POWER_TLS
	// R_ADDRPOWER_DS is similar to R_ADDRPOWER above, but assumes the second
	// instruction is a "DS-form" instruction, which has an immediate field occupying
	// bits [15:2] of the instruction word. Bits [15:2] of the address of the
	// relocated symbol are inserted into this field; it is an error if the last two
	// bits of the address are not 0.
	R_ADDRPOWER_DS
	// R_ADDRPOWER_GOT relocates a D-form, DS-form instruction sequence like
	// R_ADDRPOWER_DS but inserts the offset of the GOT slot for the referenced symbol
	// from the TOC rather than the symbol's address.
	R_ADDRPOWER_GOT
	// R_ADDRPOWER_PCREL relocates two D-form instructions like R_ADDRPOWER, but
	// inserts the displacement from the place being relocated to the address of
	// the relocated symbol instead of just its address.
	R_ADDRPOWER_PCREL
	// R_ADDRPOWER_TOCREL relocates two D-form instructions like R_ADDRPOWER, but
	// inserts the offset from the TOC to the address of the relocated symbol
	// rather than the symbol's address.
	R_ADDRPOWER_TOCREL
	// R_ADDRPOWER_TOCREL_DS relocates a D-form, DS-form instruction sequence like
	// R_ADDRPOWER_DS but inserts the offset from the TOC to the address of the
	// relocated symbol rather than the symbol's address.
	R_ADDRPOWER_TOCREL_DS
	// R_PCRELDBL relocates s390x 2-byte aligned PC-relative addresses.
	// TODO(mundaym): remove once variants can be serialized - see issue 14218.
	R_PCRELDBL
	// R_ADDRMIPSU (only used on mips/mips64) resolves to the sign-adjusted "upper" 16
	// bits (bit 16-31) of an external address, by encoding it into the instruction.
	R_ADDRMIPSU
	// R_ADDRMIPSTLS (only used on mips64) resolves to the low 16 bits of a TLS
	// address (offset from thread pointer), by encoding it into the instruction.
	R_ADDRMIPSTLS
	// R_ADDRCUOFF resolves to a pointer-sized offset from the start of the
	// symbol's DWARF compile unit.
	R_ADDRCUOFF // 44
)
// String returns the name of the relocation type, or "Unknown" for values
// outside the declared range.
func (relocType RelocType) String() string {
	// Names in declaration order; the constants are contiguous, starting
	// at R_ADDR == 1 and ending at R_ADDRCUOFF == 44, so the name for a
	// value v lives at index v-1.
	names := []string{
		"R_ADDR",
		"R_ADDRPOWER",
		"R_ADDRARM64",
		"R_ADDRMIPS",
		"R_ADDROFF",
		"R_WEAKADDROFF",
		"R_SIZE",
		"R_CALL",
		"R_CALLARM",
		"R_CALLARM64",
		"R_CALLIND",
		"R_CALLPOWER",
		"R_CALLMIPS",
		"R_CONST",
		"R_PCREL",
		"R_TLS_LE",
		"R_TLS_IE",
		"R_GOTOFF",
		"R_PLT0",
		"R_PLT1",
		"R_PLT2",
		"R_USEFIELD",
		"R_USETYPE",
		"R_METHODOFF",
		"R_POWER_TOC",
		"R_GOTPCREL",
		"R_JMPMIPS",
		"R_DWARFSECREF",
		"R_DWARFFILEREF",
		"R_ARM64_TLS_LE",
		"R_ARM64_TLS_IE",
		"R_ARM64_GOTPCREL",
		"R_POWER_TLS_LE",
		"R_POWER_TLS_IE",
		"R_POWER_TLS",
		"R_ADDRPOWER_DS",
		"R_ADDRPOWER_GOT",
		"R_ADDRPOWER_PCREL",
		"R_ADDRPOWER_TOCREL",
		"R_ADDRPOWER_TOCREL_DS",
		"R_PCRELDBL",
		"R_ADDRMIPSU",
		"R_ADDRMIPSTLS",
		"R_ADDRCUOFF",
	}
	if i := int(relocType) - 1; i >= 0 && i < len(names) {
		return names[i]
	}
	return "Unknown"
}
// StextFields represents additional metadata STEXT-type symbol have.
type StextFields struct {
	Args int64
	Frame int64
	Leaf bool
	CFunc bool
	TypeMethod bool
	SharedFunc bool
	NoSplit bool
	Local []Local
	// pcln table
	PCSP DataAddr
	PCFile DataAddr
	PCLine DataAddr
	PCInline DataAddr
	PCData []DataAddr
	FuncDataIndex []int64
	FuncDataOffset []int64
	FileIndex []int64
}
// Local represents a local variable including input args and output.
type Local struct {
	AsymIndex int64
	Offset int64
	Type int64
	GotypeIndex int64
}
// DataAddr represents a location of data block.
type DataAddr struct {
	Size, Offset int64
}
// Parse parses a given go object file. The sections are consumed strictly
// in on-disk order: header, version, dependencies, symbol references,
// data block, symbols, footer. The first failing step aborts the parse.
func Parse(f *os.File) (*File, error) {
	p := newParser(bufio.NewReader(f))
	steps := []func() error{
		p.skipHeader,
		p.checkVersion,
		p.skipDependencies,
		p.parseReferences,
		p.parseData,
		p.parseSymbols,
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return nil, err
		}
	}
	return &p.File, p.skipFooter()
}
// parser accumulates a File while consuming the object-file byte stream.
type parser struct {
	reader readerWithCounter
	// As a list of symbols are parsed, a symbol is associated with some region of the data block.
	// associatedDataSize is the total size of those regions.
	associatedDataSize int64
	File
}
// newParser wraps the buffered reader in a counting reader and returns a
// parser with an empty File.
func newParser(raw *bufio.Reader) *parser {
	return &parser{reader: readerWithCounter{raw: raw}}
}
// skipHeader scans forward until the magic header byte sequence is found,
// consuming everything before and including it. A sliding window of
// len(magicHeader) bytes is advanced one byte at a time.
func (p *parser) skipHeader() error {
	buff := make([]byte, len(magicHeader))
	_ = p.reader.read(buff)
	if p.reader.err != nil {
		return p.reader.err
	}
	for !reflect.DeepEqual(buff, magicHeader) {
		b := p.reader.readByte()
		if p.reader.err != nil {
			// EOF before the magic sequence appeared.
			return errors.New("magic header not found")
		}
		// Slide the window: drop the oldest byte, append the new one.
		buff = append(buff[1:], b)
	}
	return nil
}
// checkVersion reads the one-byte format version that follows the header
// and rejects anything other than version 1.
// NOTE(review): the literal 1 duplicates supportedGoObjVersion declared at
// the top of the file; they should probably be kept in sync.
func (p *parser) checkVersion() error {
	version := p.reader.readByte()
	if p.reader.err != nil {
		return p.reader.err
	}
	if version != 1 {
		return fmt.Errorf("unexpected version: %d", version)
	}
	return nil
}
// skipDependencies consumes the NUL-terminated dependency list without
// recording it; the terminating zero byte is consumed too.
func (p *parser) skipDependencies() error {
	for {
		b := p.reader.readByte()
		if p.reader.err != nil {
			return p.reader.err
		}
		if b == 0 {
			return nil
		}
	}
}
// parseReferences reads the symbol-reference list. Each entry is preceded
// by a 0xfe marker byte; 0xff terminates the list.
func (p *parser) parseReferences() error {
	// the 1st reference is always empty.
	p.SymbolReferences = append(p.SymbolReferences, SymbolReference{})
	for {
		b := p.reader.readByte()
		if p.reader.err != nil {
			return p.reader.err
		}
		if b == 0xff {
			// End-of-list sentinel.
			return nil
		} else if b != 0xfe {
			return fmt.Errorf("sanity check failed: %#x ", b)
		}
		if err := p.parseReference(); err != nil {
			return err
		}
	}
}
// parseReference reads a single symbol reference: a length-prefixed name
// followed by a varint version, and appends it to SymbolReferences.
func (p *parser) parseReference() error {
	symbolName := p.reader.readString()
	if p.reader.err != nil {
		return p.reader.err
	}
	symbolVersion := p.reader.readVarint()
	if p.reader.err != nil {
		return p.reader.err
	}
	p.SymbolReferences = append(p.SymbolReferences, SymbolReference{symbolName, symbolVersion})
	return nil
}
// parseData reads the data-block length plus five size fields that are not
// needed here (their regions are walked later via the symbol metadata),
// records where the data block starts, then reads the whole block.
func (p *parser) parseData() error {
	dataLength := p.reader.readVarint()
	if p.reader.err != nil {
		return p.reader.err
	}
	_ = p.reader.readVarint() // reloc
	_ = p.reader.readVarint() // pcdata
	_ = p.reader.readVarint() // automatics
	_ = p.reader.readVarint() // funcdata
	_ = p.reader.readVarint() // files
	// Everything read so far tells us exactly where the block begins.
	p.DataBlockPosition = p.reader.numReadBytes
	p.DataBlock = make([]byte, dataLength)
	numRead := 0
	// Read may return short counts; loop until the block is complete.
	for numRead != int(dataLength) {
		n := p.reader.read(p.DataBlock[numRead:])
		if p.reader.err != nil {
			return p.reader.err
		}
		numRead += n
	}
	return nil
}
// parseSymbols reads the symbol list, which uses the same framing as the
// reference list: 0xfe precedes each entry, 0xff terminates.
func (p *parser) parseSymbols() error {
	for {
		b := p.reader.readByte()
		if p.reader.err != nil {
			return p.reader.err
		}
		if b == 0xff {
			// End-of-list sentinel.
			return nil
		} else if b != 0xfe {
			return fmt.Errorf("sanity check failed: %#x ", b)
		}
		if err := p.parseSymbol(); err != nil {
			return err
		}
	}
}
// parseSymbol reads one symbol record in its fixed on-disk field order and
// appends it to Symbols. The symbol's slice of the shared data block is
// derived from the running associatedDataSize offset. Per-read error
// checks are deferred to the reader's sticky err field.
func (p *parser) parseSymbol() error {
	symbol := Symbol{}
	symbol.Kind = SymKind(p.reader.readByte())
	symbol.IDIndex = p.reader.readVarint()
	flags := p.reader.readVarint()
	// Flag bits: 0 = dupok, 1 = local, 2 = typelink.
	symbol.DupOK = flags&0x1 != 0
	symbol.Local = (flags>>1)&0x1 != 0
	symbol.Typelink = (flags>>2)&0x1 != 0
	symbol.Size = p.reader.readVarint()
	symbol.GoTypeIndex = p.reader.readVarint()
	dataSize := p.reader.readVarint()
	// This symbol's data sits right after all previously claimed regions.
	symbol.DataAddr = DataAddr{Size: dataSize, Offset: p.associatedDataSize}
	p.associatedDataSize += dataSize
	numRelocs := p.reader.readVarint()
	for i := 0; i < int(numRelocs); i++ {
		reloc := Relocation{}
		reloc.Offset = p.reader.readVarint()
		reloc.Size = p.reader.readVarint()
		reloc.Type = RelocType(p.reader.readVarint())
		reloc.Add = p.reader.readVarint()
		reloc.IDIndex = p.reader.readVarint()
		symbol.Relocations = append(symbol.Relocations, reloc)
	}
	if symbol.Kind == STEXT {
		// Function symbols carry extra pcln metadata that must be
		// consumed to stay aligned with the stream.
		if err := p.skipSTEXTFields(); err != nil {
			return err
		}
	}
	p.Symbols = append(p.Symbols, symbol)
	return p.reader.err
}
// skipSTEXTFields consumes the extra metadata attached to STEXT symbols
// without storing it. The pcln table sizes must still be accumulated into
// associatedDataSize, because those tables occupy regions of the shared
// data block just like symbol data does.
func (p *parser) skipSTEXTFields() error {
	_ = p.reader.readVarint() // Args
	_ = p.reader.readVarint() // Frame
	_ = p.reader.readVarint() // Flags
	_ = p.reader.readVarint() // NoSplit
	numLocals := p.reader.readVarint()
	for i := 0; i < int(numLocals); i++ {
		_ = p.reader.readVarint() // sym
		_ = p.reader.readVarint() // offset
		_ = p.reader.readVarint() // type
		_ = p.reader.readVarint() // go type
	}
	pcspSize := p.reader.readVarint()
	p.associatedDataSize += pcspSize
	pcFileSize := p.reader.readVarint()
	p.associatedDataSize += pcFileSize
	pcLineSize := p.reader.readVarint()
	p.associatedDataSize += pcLineSize
	pcInlineSize := p.reader.readVarint()
	p.associatedDataSize += pcInlineSize
	numPCData := p.reader.readVarint()
	for i := 0; i < int(numPCData); i++ {
		pcDataSize := p.reader.readVarint()
		p.associatedDataSize += pcDataSize
	}
	numFuncData := p.reader.readVarint()
	for i := 0; i < int(numFuncData); i++ {
		_ = p.reader.readVarint() // func data index
	}
	for i := 0; i < int(numFuncData); i++ {
		_ = p.reader.readVarint() // func offset
	}
	numFiles := p.reader.readVarint()
	for i := 0; i < int(numFiles); i++ {
		_ = p.reader.readVarint() // file index
	}
	numInlineTrees := p.reader.readVarint()
	for i := 0; i < int(numInlineTrees); i++ {
		_ = p.reader.readVarint() // parent
		_ = p.reader.readVarint() // file
		_ = p.reader.readVarint() // line
		_ = p.reader.readVarint() // func
	}
	return p.reader.err
}
// skipFooter reads the trailing magic bytes and verifies they match
// magicFooter exactly (unlike the header, no scanning is done).
func (p *parser) skipFooter() error {
	buff := make([]byte, len(magicFooter))
	_ = p.reader.read(buff)
	if p.reader.err != nil {
		return p.reader.err
	}
	if !reflect.DeepEqual(buff, magicFooter) {
		return fmt.Errorf("invalid footer: %#x", buff)
	}
	return nil
}
// readerWithCounter is bufio.Reader which records the number of read bytes.
// When an error happens, it updates an error field rather than returning it, so that
// the error handling can be delayed. No read operation will be taken if the error field is not nil.
type readerWithCounter struct {
	raw *bufio.Reader
	numReadBytes int64
	err error // sticky: once set, all subsequent reads are no-ops
}
// readVarint decodes a base-128 varint (7 data bits per byte, high bit set
// on continuation bytes) and zigzag-decodes the result to a signed value.
// On error it returns 0 with the reader's err field set.
func (r *readerWithCounter) readVarint() int64 {
	var value uint64
	var shift uint64
	for {
		b := r.readByte()
		if r.err != nil {
			return 0
		}
		value += uint64(b&0x7f) << shift
		if (b>>7)&0x1 == 0 {
			// High bit clear: this was the last byte.
			break
		}
		shift += 7
	}
	return zigzagDecode(value)
}
// readString reads a varint length prefix followed by that many bytes and
// returns them as a string. On error it returns "" with the reader's err
// field set.
//
// Fix: the local variable was named "len", shadowing the builtin; renamed
// to "length" (behavior unchanged).
func (r *readerWithCounter) readString() string {
	length := r.readVarint()
	if r.err != nil {
		return ""
	}
	buff := make([]byte, length)
	numRead := 0
	// Read may return short counts; loop until the buffer is full.
	for numRead != int(length) {
		n := r.read(buff[numRead:])
		if r.err != nil {
			return ""
		}
		numRead += n
	}
	return string(buff)
}
// readByte reads a single byte, returning the zero byte if the reader is
// already (or becomes) in an error state.
//
// Fix: the byte counter was incremented even when ReadByte failed, so a
// failed read inflated numReadBytes; count only successful reads, matching
// read(), which adds the actual byte count n.
func (r *readerWithCounter) readByte() (b byte) {
	if r.err != nil {
		return
	}
	b, r.err = r.raw.ReadByte()
	if r.err == nil {
		r.numReadBytes++
	}
	return
}
// read fills p (possibly partially — bufio.Reader may return short reads)
// and adds the number of bytes actually read to the counter. It is a no-op
// returning 0 once the reader is in an error state.
func (r *readerWithCounter) read(p []byte) (n int) {
	if r.err != nil {
		return
	}
	n, r.err = r.raw.Read(p)
	r.numReadBytes += int64(n)
	return
}
|
package config
import (
"net"
"net/url"
"strconv"
)
// getURLs builds the tg:// deep link, the https://t.me share link, and a
// QR-code image URL for each, for the proxy at addr:port with the given
// secret.
func getURLs(addr net.IP, port uint16, secret string) (urls URLs) {
	values := url.Values{}
	values.Set("server", addr.String())
	values.Set("port", strconv.Itoa(int(port)))
	values.Set("secret", secret)
	urls.TG = makeTGURL(values)
	urls.TMe = makeTMeURL(values)
	urls.TGQRCode = makeQRCodeURL(urls.TG)
	// Fix: the t.me QR code must encode the t.me link; previously it
	// encoded urls.TG (copy-paste bug), producing two identical QR codes.
	urls.TMeQRCode = makeQRCodeURL(urls.TMe)
	return
}
func makeTGURL(values url.Values) string {
tgURL := url.URL{
Scheme: "tg",
Host: "proxy",
RawQuery: values.Encode(),
}
return tgURL.String()
}
func makeTMeURL(values url.Values) string {
tMeURL := url.URL{
Scheme: "https",
Host: "t.me",
Path: "proxy",
RawQuery: values.Encode(),
}
return tMeURL.String()
}
// makeQRCodeURL returns an api.qrserver.com URL that renders data as an
// SVG QR code (quiet zone 4).
func makeQRCodeURL(data string) string {
	params := url.Values{
		"qzone":  {"4"},
		"format": {"svg"},
		"data":   {data},
	}
	endpoint := url.URL{
		Scheme:   "https",
		Host:     "api.qrserver.com",
		Path:     "v1/create-qr-code",
		RawQuery: params.Encode(),
	}
	return endpoint.String()
}
|
package day03
import (
"fmt"
)
// board is a row-major grid of cells, true where a tree ('#') stands.
// stride is the width of one row; columns wrap around (the pattern
// repeats infinitely to the right).
type board struct {
	terrain []bool
	stride int
}
// render prints the board to stdout, one row per line: '#' for a tree,
// '.' for open ground.
func (b *board) render() {
	for i, cell := range b.terrain {
		glyph := "."
		if cell {
			glyph = "#"
		}
		fmt.Print(glyph)
		// End the line after every stride-th cell.
		if (i+1)%b.stride == 0 {
			fmt.Println()
		}
	}
}
// getCell reports whether the cell at (x, y) holds a tree. The x
// coordinate wraps around the board width; a y below the last row
// yields an error.
func (b *board) getCell(x, y int) (bool, error) {
	idx := y*b.stride + x%b.stride
	if idx >= len(b.terrain) {
		return false, fmt.Errorf("position out of range")
	}
	return b.terrain[idx], nil
}
// walk counts the trees hit while repeatedly stepping right xoff and down
// yoff from the origin, stopping once the step passes the bottom of the
// board.
func (b *board) walk(xoff, yoff int) int {
	trees := 0
	for x, y := xoff, yoff; ; x, y = x+xoff, y+yoff {
		hit, err := b.getCell(x, y)
		if err != nil {
			// Walked off the bottom edge.
			return trees
		}
		if hit {
			trees++
		}
	}
}
// Run solves both parts of the puzzle: it parses the terrain map from the
// input lines, walks a fixed set of slopes, prints the tree count for the
// (3, 1) slope as part 1 and the product of all slope counts as part 2.
func Run(lines []string) error {
	board := parseBoard(lines)
	// Try all the walks
	allWalks := []int{
		board.walk(1, 1),
		board.walk(3, 1),
		board.walk(5, 1),
		board.walk(7, 1),
		board.walk(1, 2),
	}
	// allWalks[1] is the (3, 1) slope, which is what part 1 asks for.
	fmt.Println("Part 1:", allWalks[1], "trees")
	product := 1
	for _, count := range allWalks {
		product *= count
	}
	fmt.Println("Part 2:", product, "trees")
	return nil
}
// parseBoard converts the input lines of '#'/'.' characters into a board.
// The width of the first line becomes the stride.
func parseBoard(lines []string) board {
	width := len(lines[0])
	cells := make([]bool, 0, width*len(lines))
	for _, row := range lines {
		for _, c := range row {
			cells = append(cells, c == '#')
		}
	}
	return board{terrain: cells, stride: width}
}
|
package bmlog
import (
"github.com/sirupsen/logrus"
"os"
"testing"
)
// TestLogrus exercises logrus at every level (Fatal/Panic are commented out
// because they would abort the test run); it is a smoke test with no
// assertions.
func TestLogrus(t *testing.T) {
	logrus.SetLevel(logrus.TraceLevel)
	logrus.Trace("Trace msg")
	logrus.Debug("Debug msg")
	logrus.Info("Info msg")
	logrus.Warn("Warn msg")
	logrus.Error("Error msg")
	//logrus.Fatal("Fatal msg")
	//logrus.Panic("Panic msg")
}

// TestStandardLogger smoke-tests StandardLogger with debug enabled via
// environment variables. NOTE(review): env vars are process-global and not
// restored afterwards, so this can leak into other tests.
func TestStandardLogger(t *testing.T) {
	os.Setenv("LOGGER_USER", "debugger")
	os.Setenv("LOGGER_DEBUG", "true")
	StandardLogger().Info("Test Info")
}

// TestLog2File smoke-tests StandardLogger writing to the file named by
// LOG_PATH (hard-coded, machine-specific path — presumably a developer
// scratch location; TODO confirm before running in CI).
func TestLog2File(t *testing.T) {
	os.Setenv("LOGGER_USER", "blackmirror")
	os.Setenv("LOGGER_DEBUG", "false")
	os.Setenv("LOG_PATH", "/home/jeorch/work/test/temp/go.log")
	StandardLogger().Info("TestLog2File")
}
|
package models
import (
//"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
"tokensky_bg_admin/conf"
)
// init registers every model with beego's orm at package load time so the
// ORM can map structs to their tables before any query runs.
func init() {
	// admin / back-office models
	orm.RegisterModel(new(AdminBackendUser), new(AdminResource), new(AdminRole), new(AdminRoleResourceRel), new(AdminRoleBackendUserRel))
	orm.RegisterModel(new(AdminModelRecord))
	orm.RegisterModel(new(TokenskyMessage), new(TokenskyUser), new(TokenskyUserBalance), new(TokenskyAccountBank))
	orm.RegisterModel(new(TokenskyRealAuth), new(TokenskyTransactionRecord), new(TokenskyUserElectricityBalance))
	orm.RegisterModel(new(TokenskyTibiConfig), new(TokenskyTibiConfigBak), new(TokenskyChongbiConfig), new(TokenskyChongbiConfigBak))
	orm.RegisterModel(new(TokenskyUserTibi), new(TokenskyUserAddress), new(TokenskyUserDeposit), new(TokenskyOrderIds), new(TokenskyJiguangRegistrationid))
	orm.RegisterModel(new(TokenskyUserBalancesRecord),new(TokenskyUserBalanceHash),new(TokenskyUserBalanceCoin))
	// OTC (over-the-counter trading) models
	orm.RegisterModel(new(OtcConf), new(OtcConfBak), new(OtcEntrustOrder), new(OtcOrder), new(OtcAppeal))
	orm.RegisterModel(new(OtcUserFrozenBalance), new(OtcEntrustAutoCancelRecord))
	// operations models
	orm.RegisterModel(new(OperationBanner))
	// role management
	orm.RegisterModel(new(RoleBlackList))
	// hashrate (mining power) models
	orm.RegisterModel(new(HashrateCategory), new(HashrateOrder), new(HashrateOrderTransaction), new(HashrateTreaty), new(HashrateOrderProfit))
	orm.RegisterModel(new(HashrateSendBalanceRecord))
	// wealth-management (financial) models
	orm.RegisterModel(new(FinancialProduct),new(FinancialCategory),new(FinancialProductHistoricalRecord))
	orm.RegisterModel(new(FinancialLiveUserBalance),new(FinancialOrder),new(FinancialProfit))
	orm.RegisterModel(new(FinancialOrderWithdrawal))
	// lending models
	orm.RegisterModel(new(BorrowConf),new(BorrowOrder),new(BorrowUseFinancialOrder),new(BorrowLimiting),new(BorrowOrdeLog))
	// miscellaneous
	orm.RegisterModel(new(TokenskyUserInvite))
	// crawler (spider) models
	orm.RegisterModel(new(SpiderCoinMarket))
}
/* Admin-related table names */
// AdminBackendUserTBName returns the table name for AdminBackendUser.
func AdminBackendUserTBName() string {
	return conf.DB_ADMIN_DT_PREFIX + "backend_user"
}

// AdminResourceTBName returns the table name for AdminResource.
func AdminResourceTBName() string {
	return conf.DB_ADMIN_DT_PREFIX + "resource"
}

// AdminRoleTBName returns the table name for AdminRole.
func AdminRoleTBName() string {
	return conf.DB_ADMIN_DT_PREFIX + "role"
}

// AdminRoleResourceRelTBName returns the many-to-many relation table
// between roles and resources.
func AdminRoleResourceRelTBName() string {
	return conf.DB_ADMIN_DT_PREFIX + "role_resource_rel"
}

// AdminRoleBackendUserRelTBName returns the many-to-many relation table
// between roles and backend users.
func AdminRoleBackendUserRelTBName() string {
	return conf.DB_ADMIN_DT_PREFIX + "role_backenduser_rel"
}

// AdminModelRecordTBName returns the table recording user model changes.
func AdminModelRecordTBName()string{
	return conf.DB_ADMIN_DT_PREFIX + "model_record"
}
/* Tokensky-related table names */
// TokenskyMessageTBName returns the table name for TokenskyMessage.
func TokenskyMessageTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "message"
}

// TokenskyUserTBName returns the table name for TokenskyUser.
func TokenskyUserTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "user"
}

// TokenskyUserBalanceCoinTBName returns the per-coin user balance table.
func TokenskyUserBalanceCoinTBName()string {
	return conf.DB_TOKENSKY_DT_PREFIX + "user_balance_coin"
}

// TokenskyUserBalanceTBName returns the user asset (balance) table.
func TokenskyUserBalanceTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "user_balance"
}

// TokenskyUserBalancesRecordTBName returns the user balance change
// record table.
func TokenskyUserBalancesRecordTBName()string{
	return conf.DB_TOKENSKY_DT_PREFIX + "user_balance_record"
}

// TokenskyTransactionRecordTBName returns the transaction detail table.
func TokenskyTransactionRecordTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "transaction_record"
}

// TokenskyAccountBankTBName returns the user payment-account settings table.
func TokenskyAccountBankTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "account_bank"
}

// TokenskyTibiConfigTBName returns the withdrawal (tibi) config table.
func TokenskyTibiConfigTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "tibi_config"
}

// TokenskyTibiConfigBakTBName returns the withdrawal config backup table.
func TokenskyTibiConfigBakTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "tibi_config_bak"
}

// TokenskyChongbiConfigTBName returns the deposit (chongbi) config table.
func TokenskyChongbiConfigTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "chongbi_config"
}

// TokenskyChongbiConfigBakTBName returns the deposit config backup table.
func TokenskyChongbiConfigBakTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "chongbi_config_bak"
}

// TokenskyUserTibiTBName returns the withdrawal review/audit table.
func TokenskyUserTibiTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "user_tibi"
}

// TokenskyUserAddressTBName returns the user deposit address table.
func TokenskyUserAddressTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "user_address"
}

// TokenskyUserDepositTBName returns the user deposit record table.
func TokenskyUserDepositTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "user_deposit"
}

// TokenskyUserBalanceHashTBName returns the user balance hash table.
func TokenskyUserBalanceHashTBName()string{
	return conf.DB_TOKENSKY_DT_PREFIX + "user_balance_hash"
}

// TokenskyUserElectricityBalanceTBName returns the user electricity
// balance table.
func TokenskyUserElectricityBalanceTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "user_electricity_balance"
}

// TokenskyOrderIdsTBName returns the order-ID table.
func TokenskyOrderIdsTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "order_ids"
}

// TokenskyJiguangRegistrationidTBName returns the JPush (Jiguang)
// registration-ID table.
func TokenskyJiguangRegistrationidTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "jiguang_registrationid"
}

// TokenskyUserInviteTBName returns the user invitation table.
func TokenskyUserInviteTBName()string{
	return conf.DB_TOKENSKY_DT_PREFIX + "user_invite"
}
/* OTC (over-the-counter trading) table names */
// OtcConfTBName returns the table name for OtcConf.
func OtcConfTBName() string {
	return conf.DB_OTC_DT_PREFIX + "conf"
}

// OtcConfBakTBName returns the OtcConf backup record table.
func OtcConfBakTBName() string {
	return conf.DB_OTC_DT_PREFIX + "conf_bak"
}

// OtcEntrustOrderTBName returns the entrust (delegated) order table.
func OtcEntrustOrderTBName() string {
	return conf.DB_OTC_DT_PREFIX + "entrust_order"
}

// OtcOrderTBName returns the order management table.
func OtcOrderTBName() string {
	return conf.DB_OTC_DT_PREFIX + "order"
}

// OtcAppealTBName returns the order appeal table.
func OtcAppealTBName() string {
	return conf.DB_OTC_DT_PREFIX + "appeal"
}

// OtcUserFrozenBalanceTBName returns the user frozen-balance table
// (original comment said "sell entrust order table" — presumably balances
// frozen for sell entrust orders; confirm against callers).
func OtcUserFrozenBalanceTBName() string {
	return conf.DB_OTC_DT_PREFIX + "user_frozen_balance"
}

// OtcEntrustAutoCancelRecordTBName returns the auto-cancel record table
// for entrust orders.
func OtcEntrustAutoCancelRecordTBName() string {
	return conf.DB_OTC_DT_PREFIX + "entrust_auto_cancel_record"
}
/* Hashrate (mining power) table names */
// HashrateCategoryTBName returns the hashrate category table.
func HashrateCategoryTBName() string {
	return conf.DB_HASHRATE_DT_PREFIX + "category"
}

// HashrateOrderTransactionTBName returns the order-transaction relation table.
func HashrateOrderTransactionTBName() string {
	return conf.DB_HASHRATE_DT_PREFIX + "order_transaction"
}

// HashrateOrderTBName returns the hashrate order table.
func HashrateOrderTBName() string {
	return conf.DB_HASHRATE_DT_PREFIX + "order"
}

// HashrateTreatyTBName returns the hashrate contract (treaty) table.
func HashrateTreatyTBName() string {
	return conf.DB_HASHRATE_DT_PREFIX + "treaty"
}

// HashrateOrderProfitTBName returns the order profit table.
func HashrateOrderProfitTBName() string {
	return conf.DB_HASHRATE_DT_PREFIX + "order_profit"
}

// HashrateSendBalanceRecordTBName returns the reward distribution record table.
func HashrateSendBalanceRecordTBName() string {
	return conf.DB_HASHRATE_DT_PREFIX + "send_balance_record"
}
/* Operations-related table names */
// OperationBannerTBName returns the banner table.
func OperationBannerTBName() string {
	return conf.DB_OPERATION_DT_PREFIX + "banner"
}

/* User-management-related table names */
// RoleBlackListTBName returns the blacklist table.
func RoleBlackListTBName() string {
	return conf.DB_ROLE_DT_PREFIX + "black_list"
}

// TokenskyRealAuthTBName returns the real-name identity verification table.
func TokenskyRealAuthTBName() string {
	return conf.DB_TOKENSKY_DT_PREFIX + "real_auth"
}
/* Wealth-management (financial) table names */
// FinancialCategoryTBName returns the financial category table.
func FinancialCategoryTBName() string {
	return conf.DB_FINANCIAL_DT_PREFIX + "category"
}

// FinancialProductTBName returns the financial product config table.
func FinancialProductTBName() string {
	return conf.DB_FINANCIAL_DT_PREFIX + "product"
}

// FinancialProfitTBName returns the financial profit table.
func FinancialProfitTBName() string {
	return conf.DB_FINANCIAL_DT_PREFIX + "profit"
}

// FinancialProductHistoricalRecordTBName returns the product config
// history table.
func FinancialProductHistoricalRecordTBName() string {
	return conf.DB_FINANCIAL_DT_PREFIX + "product_historical_record"
}

// FinancialOrderTBName returns the financial order table.
func FinancialOrderTBName() string {
	return conf.DB_FINANCIAL_DT_PREFIX + "order"
}

// FinancialOrderWithdrawalTBName returns the order withdrawal table.
func FinancialOrderWithdrawalTBName()string{
	return conf.DB_FINANCIAL_DT_PREFIX + "order_withdrawal"
}

// FinancialLiveUserBalanceTBName returns the live user balance table.
func FinancialLiveUserBalanceTBName() string {
	return conf.DB_FINANCIAL_DT_PREFIX + "live_user_balance"
}
/* Lending table names */
// BorrowConfTBName returns the lending config table.
func BorrowConfTBName()string{
	return conf.DB_BORROW_DT_PREFIX + "conf"
}

// BorrowOrderTBName returns the lending order table.
func BorrowOrderTBName()string{
	return conf.DB_BORROW_DT_PREFIX + "order"
}

// BorrowOrdeLogTBName returns the lending order log table.
func BorrowOrdeLogTBName()string{
	return conf.DB_BORROW_DT_PREFIX + "order_log"
}

// BorrowUseFinancialOrderTBName returns the relation table linking lending
// orders to financial orders.
func BorrowUseFinancialOrderTBName()string{
	return conf.DB_BORROW_DT_PREFIX + "use_financial_order"
}

// BorrowLimitingTBName returns the lending limiting table (original
// comment "借贷强屏表" — presumably rate/amount limiting; confirm).
func BorrowLimitingTBName()string{
	return conf.DB_BORROW_DT_PREFIX + "limiting"
}

// SpiderCoinMarketTBName returns the crawler coin-market table (no prefix).
func SpiderCoinMarketTBName()string{
	return "spider_coin_market"
}
|
package typeutils
// Float32Ptr returns a pointer to a copy of f, handy for APIs that take
// optional *float32 fields.
func Float32Ptr(f float32) *float32 {
	v := f
	return &v
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package apigee
import (
"context"
"fmt"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
dclService "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured"
iamUnstruct "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/iam"
)
// Environment is the unstructured-registry adapter for the apigee beta
// Environment resource; all behavior lives in its methods.
type Environment struct{}

// EnvironmentToUnstructured converts a typed Environment into the generic
// unstructured representation, copying only the fields that are non-nil.
func EnvironmentToUnstructured(r *dclService.Environment) *unstructured.Resource {
	u := &unstructured.Resource{
		STV: unstructured.ServiceTypeVersion{
			Service: "apigee",
			Version: "beta",
			Type:    "Environment",
		},
		Object: make(map[string]interface{}),
	}
	if r.ApigeeOrganization != nil {
		u.Object["apigeeOrganization"] = *r.ApigeeOrganization
	}
	if r.CreatedAt != nil {
		u.Object["createdAt"] = *r.CreatedAt
	}
	if r.Description != nil {
		u.Object["description"] = *r.Description
	}
	if r.DisplayName != nil {
		u.Object["displayName"] = *r.DisplayName
	}
	if r.LastModifiedAt != nil {
		u.Object["lastModifiedAt"] = *r.LastModifiedAt
	}
	if r.Name != nil {
		u.Object["name"] = *r.Name
	}
	if r.Properties != nil {
		// Copy the map so the unstructured object does not alias r.Properties.
		rProperties := make(map[string]interface{})
		for k, v := range r.Properties {
			rProperties[k] = v
		}
		u.Object["properties"] = rProperties
	}
	if r.State != nil {
		// Enum is stored as its string form.
		u.Object["state"] = string(*r.State)
	}
	return u
}
// UnstructuredToEnvironment converts an unstructured resource back into a
// typed Environment. Absent keys leave the corresponding field nil; a key
// present with the wrong dynamic type is an error.
func UnstructuredToEnvironment(u *unstructured.Resource) (*dclService.Environment, error) {
	r := &dclService.Environment{}
	if _, ok := u.Object["apigeeOrganization"]; ok {
		if s, ok := u.Object["apigeeOrganization"].(string); ok {
			r.ApigeeOrganization = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.ApigeeOrganization: expected string")
		}
	}
	if _, ok := u.Object["createdAt"]; ok {
		if i, ok := u.Object["createdAt"].(int64); ok {
			r.CreatedAt = dcl.Int64(i)
		} else {
			return nil, fmt.Errorf("r.CreatedAt: expected int64")
		}
	}
	if _, ok := u.Object["description"]; ok {
		if s, ok := u.Object["description"].(string); ok {
			r.Description = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Description: expected string")
		}
	}
	if _, ok := u.Object["displayName"]; ok {
		if s, ok := u.Object["displayName"].(string); ok {
			r.DisplayName = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.DisplayName: expected string")
		}
	}
	if _, ok := u.Object["lastModifiedAt"]; ok {
		if i, ok := u.Object["lastModifiedAt"].(int64); ok {
			r.LastModifiedAt = dcl.Int64(i)
		} else {
			return nil, fmt.Errorf("r.LastModifiedAt: expected int64")
		}
	}
	if _, ok := u.Object["name"]; ok {
		if s, ok := u.Object["name"].(string); ok {
			r.Name = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Name: expected string")
		}
	}
	if _, ok := u.Object["properties"]; ok {
		if rProperties, ok := u.Object["properties"].(map[string]interface{}); ok {
			m := make(map[string]string)
			// Non-string values are silently dropped here, matching the
			// generated-converter convention of this package.
			for k, v := range rProperties {
				if s, ok := v.(string); ok {
					m[k] = s
				}
			}
			r.Properties = m
		} else {
			return nil, fmt.Errorf("r.Properties: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["state"]; ok {
		if s, ok := u.Object["state"].(string); ok {
			r.State = dclService.EnvironmentStateEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.State: expected string")
		}
	}
	return r, nil
}
// GetEnvironment fetches the current state of the Environment described by
// u and returns it in unstructured form.
func GetEnvironment(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToEnvironment(u)
	if err != nil {
		return nil, err
	}
	r, err = c.GetEnvironment(ctx, r)
	if err != nil {
		return nil, err
	}
	return EnvironmentToUnstructured(r), nil
}

// ListEnvironment lists all Environments under the given organization,
// following server-side pagination until exhausted.
func ListEnvironment(ctx context.Context, config *dcl.Config, apigeeOrganization string) ([]*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	l, err := c.ListEnvironment(ctx, apigeeOrganization)
	if err != nil {
		return nil, err
	}
	var resources []*unstructured.Resource
	for {
		for _, r := range l.Items {
			resources = append(resources, EnvironmentToUnstructured(r))
		}
		if !l.HasNext() {
			break
		}
		if err := l.Next(ctx, c); err != nil {
			return nil, err
		}
	}
	return resources, nil
}

// ApplyEnvironment creates or updates the Environment described by u,
// threading through an optional state hint from opts.
func ApplyEnvironment(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToEnvironment(u)
	if err != nil {
		return nil, err
	}
	if ush := unstructured.FetchStateHint(opts); ush != nil {
		sh, err := UnstructuredToEnvironment(ush)
		if err != nil {
			return nil, err
		}
		opts = append(opts, dcl.WithStateHint(sh))
	}
	r, err = c.ApplyEnvironment(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	return EnvironmentToUnstructured(r), nil
}

// EnvironmentHasDiff reports whether applying u would change anything: it
// runs Apply with all lifecycle changes blocked, so an "infeasible" error
// means a diff exists.
func EnvironmentHasDiff(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToEnvironment(u)
	if err != nil {
		return false, err
	}
	if ush := unstructured.FetchStateHint(opts); ush != nil {
		sh, err := UnstructuredToEnvironment(ush)
		if err != nil {
			return false, err
		}
		opts = append(opts, dcl.WithStateHint(sh))
	}
	opts = append(opts, dcl.WithLifecycleParam(dcl.BlockDestruction), dcl.WithLifecycleParam(dcl.BlockCreation), dcl.WithLifecycleParam(dcl.BlockModification))
	_, err = c.ApplyEnvironment(ctx, r, opts...)
	if err != nil {
		// Apply was blocked from making a change it needed: a diff exists.
		if _, ok := err.(dcl.ApplyInfeasibleError); ok {
			return true, nil
		}
		return false, err
	}
	return false, nil
}

// DeleteEnvironment deletes the Environment described by u.
func DeleteEnvironment(ctx context.Context, config *dcl.Config, u *unstructured.Resource) error {
	c := dclService.NewClient(config)
	r, err := UnstructuredToEnvironment(u)
	if err != nil {
		return err
	}
	return c.DeleteEnvironment(ctx, r)
}

// EnvironmentID returns the canonical identifier of the Environment
// described by u.
func EnvironmentID(u *unstructured.Resource) (string, error) {
	r, err := UnstructuredToEnvironment(u)
	if err != nil {
		return "", err
	}
	return r.ID()
}
// STV returns the service/type/version triple identifying this resource
// kind in the unstructured registry.
func (r *Environment) STV() unstructured.ServiceTypeVersion {
	// Fix: ServiceTypeVersion's field order is {Service, Version, Type}
	// (see the named literal in EnvironmentToUnstructured). The previous
	// positional literal put "Environment" into Version and "beta" into
	// Type. Named fields make the intent unambiguous.
	return unstructured.ServiceTypeVersion{
		Service: "apigee",
		Version: "beta",
		Type:    "Environment",
	}
}
// SetPolicyEnvironment replaces the IAM policy on the Environment described
// by u with p and returns the resulting policy in unstructured form.
func SetPolicyEnvironment(ctx context.Context, config *dcl.Config, u *unstructured.Resource, p *unstructured.Resource) (*unstructured.Resource, error) {
	r, err := UnstructuredToEnvironment(u)
	if err != nil {
		return nil, err
	}
	policy, err := iamUnstruct.UnstructuredToPolicy(p)
	if err != nil {
		return nil, err
	}
	policy.Resource = r
	iamClient := iam.NewClient(config)
	newPolicy, err := iamClient.SetPolicy(ctx, policy)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(newPolicy), nil
}

// SetPolicyWithEtagEnvironment is like SetPolicyEnvironment but performs an
// etag-checked (optimistic-concurrency) policy update.
func SetPolicyWithEtagEnvironment(ctx context.Context, config *dcl.Config, u *unstructured.Resource, p *unstructured.Resource) (*unstructured.Resource, error) {
	r, err := UnstructuredToEnvironment(u)
	if err != nil {
		return nil, err
	}
	policy, err := iamUnstruct.UnstructuredToPolicy(p)
	if err != nil {
		return nil, err
	}
	policy.Resource = r
	iamClient := iam.NewClient(config)
	newPolicy, err := iamClient.SetPolicyWithEtag(ctx, policy)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(newPolicy), nil
}

// GetPolicyEnvironment fetches the IAM policy currently set on the
// Environment described by u.
func GetPolicyEnvironment(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) {
	r, err := UnstructuredToEnvironment(u)
	if err != nil {
		return nil, err
	}
	iamClient := iam.NewClient(config)
	policy, err := iamClient.GetPolicy(ctx, r)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(policy), nil
}

// SetPolicyMemberEnvironment adds/updates a single member binding m on the
// Environment's IAM policy and returns the updated policy.
func SetPolicyMemberEnvironment(ctx context.Context, config *dcl.Config, u *unstructured.Resource, m *unstructured.Resource) (*unstructured.Resource, error) {
	r, err := UnstructuredToEnvironment(u)
	if err != nil {
		return nil, err
	}
	member, err := iamUnstruct.UnstructuredToMember(m)
	if err != nil {
		return nil, err
	}
	member.Resource = r
	iamClient := iam.NewClient(config)
	policy, err := iamClient.SetMember(ctx, member)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(policy), nil
}

// GetPolicyMemberEnvironment fetches the binding for a specific role/member
// pair on the Environment's IAM policy.
func GetPolicyMemberEnvironment(ctx context.Context, config *dcl.Config, u *unstructured.Resource, role, member string) (*unstructured.Resource, error) {
	r, err := UnstructuredToEnvironment(u)
	if err != nil {
		return nil, err
	}
	iamClient := iam.NewClient(config)
	policyMember, err := iamClient.GetMember(ctx, r, role, member)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.MemberToUnstructured(policyMember), nil
}

// DeletePolicyMemberEnvironment removes the member binding m from the
// Environment's IAM policy.
func DeletePolicyMemberEnvironment(ctx context.Context, config *dcl.Config, u *unstructured.Resource, m *unstructured.Resource) error {
	r, err := UnstructuredToEnvironment(u)
	if err != nil {
		return err
	}
	member, err := iamUnstruct.UnstructuredToMember(m)
	if err != nil {
		return err
	}
	member.Resource = r
	iamClient := iam.NewClient(config)
	if err := iamClient.DeleteMember(ctx, member); err != nil {
		return err
	}
	return nil
}
// The methods below adapt the package-level functions to the interface the
// unstructured registry expects; each is a one-line delegation.

// SetPolicyMember delegates to SetPolicyMemberEnvironment.
func (r *Environment) SetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) (*unstructured.Resource, error) {
	return SetPolicyMemberEnvironment(ctx, config, resource, member)
}

// GetPolicyMember delegates to GetPolicyMemberEnvironment.
func (r *Environment) GetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, role, member string) (*unstructured.Resource, error) {
	return GetPolicyMemberEnvironment(ctx, config, resource, role, member)
}

// DeletePolicyMember delegates to DeletePolicyMemberEnvironment.
func (r *Environment) DeletePolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) error {
	return DeletePolicyMemberEnvironment(ctx, config, resource, member)
}

// SetPolicy delegates to SetPolicyEnvironment.
func (r *Environment) SetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return SetPolicyEnvironment(ctx, config, resource, policy)
}

// SetPolicyWithEtag delegates to SetPolicyWithEtagEnvironment.
func (r *Environment) SetPolicyWithEtag(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return SetPolicyWithEtagEnvironment(ctx, config, resource, policy)
}

// GetPolicy delegates to GetPolicyEnvironment.
func (r *Environment) GetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return GetPolicyEnvironment(ctx, config, resource)
}

// Get delegates to GetEnvironment.
func (r *Environment) Get(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return GetEnvironment(ctx, config, resource)
}

// Apply delegates to ApplyEnvironment.
func (r *Environment) Apply(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	return ApplyEnvironment(ctx, config, resource, opts...)
}

// HasDiff delegates to EnvironmentHasDiff.
func (r *Environment) HasDiff(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	return EnvironmentHasDiff(ctx, config, resource, opts...)
}

// Delete delegates to DeleteEnvironment.
func (r *Environment) Delete(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) error {
	return DeleteEnvironment(ctx, config, resource)
}

// ID delegates to EnvironmentID.
func (r *Environment) ID(resource *unstructured.Resource) (string, error) {
	return EnvironmentID(resource)
}

// init registers the adapter with the unstructured resource registry.
func init() {
	unstructured.Register(&Environment{})
}
|
package upstream
import (
"net/rpc"
"github.com/jonmorehouse/gatekeeper/gatekeeper"
)
// Request/response payload types for the net/rpc calls between the
// gatekeeper core and the upstream plugin. Empty structs are placeholders
// required by the net/rpc call signature.

// NotifyArgs/NotifyResp carry no data; Notify is a pure signal.
type NotifyArgs struct{}
type NotifyResp struct{}

// AddUpstreamArgs carries the upstream to register.
type AddUpstreamArgs struct {
	Upstream *gatekeeper.Upstream
}

// AddUpstreamResp returns the application-level error, if any.
type AddUpstreamResp struct {
	Err *gatekeeper.Error
}

// RemoveUpstreamArgs identifies the upstream to deregister.
type RemoveUpstreamArgs struct {
	UpstreamID gatekeeper.UpstreamID
}
type RemoveUpstreamResp struct {
	Err *gatekeeper.Error
}

// AddBackendArgs attaches a backend to an existing upstream.
type AddBackendArgs struct {
	UpstreamID gatekeeper.UpstreamID
	Backend    *gatekeeper.Backend
}
type AddBackendResp struct {
	Err *gatekeeper.Error
}

// RemoveBackendArgs identifies the backend to detach.
type RemoveBackendArgs struct {
	BackendID gatekeeper.BackendID
}
type RemoveBackendResp struct {
	Err *gatekeeper.Error
}

// HeartbeatArgs/HeartbeatResp implement a liveness probe.
type HeartbeatArgs struct{}
type HeartbeatResp struct {
	Err *gatekeeper.Error
}
// ManagerRPCClient is the client half of the Manager RPC bridge: it turns
// Manager method calls into "Plugin.*" net/rpc calls.
type ManagerRPCClient struct {
	client *rpc.Client
}

// Close tears down the underlying RPC connection.
// NOTE(review): the Close error is discarded — confirm callers don't need it.
func (c *ManagerRPCClient) Close() {
	c.client.Close()
}

// Notify signals the remote plugin; only the transport error is reported.
func (c *ManagerRPCClient) Notify() *gatekeeper.Error {
	err := c.client.Call("Plugin.Notify", &NotifyArgs{}, &NotifyResp{})
	return gatekeeper.NewError(err)
}

// Heartbeat probes the remote plugin, returning either the transport error
// or the application error it replied with.
func (c *ManagerRPCClient) Heartbeat() *gatekeeper.Error {
	callArgs := HeartbeatArgs{}
	callResp := HeartbeatResp{}
	if err := c.client.Call("Plugin.Heartbeat", &callArgs, &callResp); err != nil {
		return gatekeeper.NewError(err)
	}
	return callResp.Err
}

// AddUpstream registers an upstream with the remote manager.
func (c *ManagerRPCClient) AddUpstream(upstream *gatekeeper.Upstream) *gatekeeper.Error {
	callArgs := AddUpstreamArgs{
		Upstream: upstream,
	}
	callResp := AddUpstreamResp{}
	if err := c.client.Call("Plugin.AddUpstream", &callArgs, &callResp); err != nil {
		return gatekeeper.NewError(err)
	}
	return callResp.Err
}

// RemoveUpstream deregisters an upstream by ID.
func (c *ManagerRPCClient) RemoveUpstream(upstreamID gatekeeper.UpstreamID) *gatekeeper.Error {
	callArgs := RemoveUpstreamArgs{
		UpstreamID: upstreamID,
	}
	callResp := RemoveUpstreamResp{}
	if err := c.client.Call("Plugin.RemoveUpstream", &callArgs, &callResp); err != nil {
		return gatekeeper.NewError(err)
	}
	return callResp.Err
}

// AddBackend attaches a backend to the upstream identified by upstreamID.
func (c *ManagerRPCClient) AddBackend(upstreamID gatekeeper.UpstreamID, backend *gatekeeper.Backend) *gatekeeper.Error {
	callArgs := AddBackendArgs{
		UpstreamID: upstreamID,
		Backend:    backend,
	}
	callResp := AddBackendResp{}
	if err := c.client.Call("Plugin.AddBackend", &callArgs, &callResp); err != nil {
		return gatekeeper.NewError(err)
	}
	return callResp.Err
}

// RemoveBackend detaches a backend by ID.
func (c *ManagerRPCClient) RemoveBackend(backendID gatekeeper.BackendID) *gatekeeper.Error {
	callArgs := RemoveBackendArgs{
		BackendID: backendID,
	}
	callResp := RemoveBackendResp{}
	if err := c.client.Call("Plugin.RemoveBackend", &callArgs, &callResp); err != nil {
		return gatekeeper.NewError(err)
	}
	return callResp.Err
}
// ManagerRPCServer is the server half of the Manager RPC bridge: it
// receives "Plugin.*" calls and forwards them to the local Manager impl.
type ManagerRPCServer struct {
	impl        Manager
	connectedCh chan struct{} // signaled once per Notify call
}

// Notify signals connection establishment to whoever listens on
// connectedCh. Note this send blocks until a receiver is ready.
func (s *ManagerRPCServer) Notify(*NotifyArgs, *NotifyResp) error {
	s.connectedCh <- struct{}{}
	return nil
}

// Heartbeat is a no-op on the server side; successfully completing the RPC
// is itself the liveness signal.
func (s *ManagerRPCServer) Heartbeat(args *HeartbeatArgs, resp *HeartbeatResp) error {
	return nil
}

// AddUpstream forwards to the Manager impl, reporting its error in resp
// rather than as an RPC transport error.
func (s *ManagerRPCServer) AddUpstream(args *AddUpstreamArgs, resp *AddUpstreamResp) error {
	err := s.impl.AddUpstream(args.Upstream)
	resp.Err = gatekeeper.NewError(err)
	return nil
}

// RemoveUpstream forwards to the Manager impl.
func (s *ManagerRPCServer) RemoveUpstream(args *RemoveUpstreamArgs, resp *RemoveUpstreamResp) error {
	err := s.impl.RemoveUpstream(args.UpstreamID)
	resp.Err = gatekeeper.NewError(err)
	return nil
}

// AddBackend forwards to the Manager impl.
func (s *ManagerRPCServer) AddBackend(args *AddBackendArgs, resp *AddBackendResp) error {
	err := s.impl.AddBackend(args.UpstreamID, args.Backend)
	resp.Err = gatekeeper.NewError(err)
	return nil
}

// RemoveBackend forwards to the Manager impl.
func (s *ManagerRPCServer) RemoveBackend(args *RemoveBackendArgs, resp *RemoveBackendResp) error {
	err := s.impl.RemoveBackend(args.BackendID)
	resp.Err = gatekeeper.NewError(err)
	return nil
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"math"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/dbterror/exeerrors"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tipb/go-tipb"
"go.uber.org/zap"
)
// AnalyzeIndexExec represents analyze index push down executor.
type AnalyzeIndexExec struct {
	baseAnalyzeExec

	idxInfo        *model.IndexInfo
	isCommonHandle bool
	// result holds the SelectResult for the main (non-null) key range;
	// countNullRes holds the extra null-range scan used to fill NullCount
	// for single-column indexes (nil when that scan was not issued).
	result       distsql.SelectResult
	countNullRes distsql.SelectResult
}
// analyzeIndexPushdown runs the push-down analyze for one index and packages
// the built histogram/CMSketch/TopN/FMSketch into an AnalyzeResults, with
// the total row count derived from null count + last bucket + TopN.
func analyzeIndexPushdown(idxExec *AnalyzeIndexExec) *statistics.AnalyzeResults {
	ranges := ranger.FullRange()
	// For single-column index, we do not load null rows from TiKV, so the built histogram would not include
	// null values, and its `NullCount` would be set by result of another distsql call to get null rows.
	// For multi-column index, we cannot define null for the rows, so we still use full range, and the rows
	// containing null fields would exist in built histograms. Note that, the `NullCount` of histograms for
	// multi-column index is always 0 then.
	if len(idxExec.idxInfo.Columns) == 1 {
		ranges = ranger.FullNotNullRange()
	}
	hist, cms, fms, topN, err := idxExec.buildStats(ranges, true)
	if err != nil {
		return &statistics.AnalyzeResults{Err: err, Job: idxExec.job}
	}
	// Default to stats version 1 unless the push-down request says otherwise.
	var statsVer = statistics.Version1
	if idxExec.analyzePB.IdxReq.Version != nil {
		statsVer = int(*idxExec.analyzePB.IdxReq.Version)
	}
	result := &statistics.AnalyzeResult{
		Hist:    []*statistics.Histogram{hist},
		Cms:     []*statistics.CMSketch{cms},
		TopNs:   []*statistics.TopN{topN},
		Fms:     []*statistics.FMSketch{fms},
		IsIndex: 1,
	}
	// Total count = nulls + rows in histogram buckets + rows captured by TopN
	// (TopN values were removed from the histogram during the build).
	cnt := hist.NullCount
	if hist.Len() > 0 {
		cnt += hist.Buckets[hist.Len()-1].Count
	}
	if topN.TotalCount() > 0 {
		cnt += int64(topN.TotalCount())
	}
	return &statistics.AnalyzeResults{
		TableID:  idxExec.tableID,
		Ars:      []*statistics.AnalyzeResult{result},
		Job:      idxExec.job,
		StatsVer: statsVer,
		Count:    cnt,
		Snapshot: idxExec.snapshot,
	}
}
// buildStats opens the analyze requests for the given ranges and builds the
// full set of index statistics. When considerNull triggered an extra
// null-range scan, its row count is folded into hist.NullCount.
func (e *AnalyzeIndexExec) buildStats(ranges []*ranger.Range, considerNull bool) (hist *statistics.Histogram, cms *statistics.CMSketch, fms *statistics.FMSketch, topN *statistics.TopN, err error) {
	if err = e.open(ranges, considerNull); err != nil {
		return nil, nil, nil, nil, err
	}
	defer func() {
		// Close both result streams; surface a close error only if the
		// build itself succeeded (named return `err`).
		err1 := closeAll(e.result, e.countNullRes)
		if err == nil {
			err = err1
		}
	}()
	hist, cms, fms, topN, err = e.buildStatsFromResult(e.result, true)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	if e.countNullRes != nil {
		nullHist, _, _, _, err := e.buildStatsFromResult(e.countNullRes, false)
		if err != nil {
			return nil, nil, nil, nil, err
		}
		// The last bucket's cumulative count is the number of null rows.
		if l := nullHist.Len(); l > 0 {
			hist.NullCount = nullHist.Buckets[l-1].Count
		}
	}
	hist.ID = e.idxInfo.ID
	return hist, cms, fms, topN, nil
}
// open dispatches the analyze request for the main ranges and, for a
// single-column index when considerNull is set, an extra request over the
// null range so the null count can be measured separately.
func (e *AnalyzeIndexExec) open(ranges []*ranger.Range, considerNull bool) error {
	if err := e.fetchAnalyzeResult(ranges, false); err != nil {
		return err
	}
	if !considerNull || len(e.idxInfo.Columns) != 1 {
		return nil
	}
	return e.fetchAnalyzeResult(ranger.NullRange(), true)
}
// fetchAnalyzeResult builds and dispatches the `kv.Request` from given ranges, and stores the `SelectResult`
// in corresponding fields based on the input `isNullRange` argument, which indicates if the range is the
// special null range for single-column index to get the null count.
func (e *AnalyzeIndexExec) fetchAnalyzeResult(ranges []*ranger.Range, isNullRange bool) error {
	var builder distsql.RequestBuilder
	var kvReqBuilder *distsql.RequestBuilder
	// A clustered (common-handle) primary index is scanned by handle ranges;
	// any other index is scanned by index-key ranges.
	if e.isCommonHandle && e.idxInfo.Primary {
		kvReqBuilder = builder.SetHandleRangesForTables(e.ctx.GetSessionVars().StmtCtx, []int64{e.tableID.GetStatisticsID()}, true, ranges)
	} else {
		kvReqBuilder = builder.SetIndexRangesForTables(e.ctx.GetSessionVars().StmtCtx, []int64{e.tableID.GetStatisticsID()}, e.idxInfo.ID, ranges)
	}
	kvReqBuilder.SetResourceGroupTagger(e.ctx.GetSessionVars().StmtCtx.GetResourceGroupTagger())
	// By default read the latest data (MaxUint64 + RC); when analyze-snapshot
	// is enabled, pin the read to the analyze snapshot with SI isolation.
	startTS := uint64(math.MaxUint64)
	isoLevel := kv.RC
	if e.ctx.GetSessionVars().EnableAnalyzeSnapshot {
		startTS = e.snapshot
		isoLevel = kv.SI
	}
	kvReq, err := kvReqBuilder.
		SetAnalyzeRequest(e.analyzePB, isoLevel).
		SetStartTS(startTS).
		SetKeepOrder(true).
		SetConcurrency(e.concurrency).
		SetResourceGroupName(e.ctx.GetSessionVars().ResourceGroupName).
		SetExplicitRequestSourceType(e.ctx.GetSessionVars().ExplicitRequestSourceType).
		Build()
	if err != nil {
		return err
	}
	ctx := context.TODO()
	result, err := distsql.Analyze(ctx, e.ctx.GetClient(), kvReq, e.ctx.GetSessionVars().KVVars, e.ctx.GetSessionVars().InRestrictedSQL, e.ctx.GetSessionVars().StmtCtx)
	if err != nil {
		return err
	}
	if isNullRange {
		e.countNullRes = result
	} else {
		e.result = result
	}
	return nil
}
// buildStatsFromResult drains the SelectResult, merging each partial
// AnalyzeIndexResp into the histogram/CMSketch/FMSketch/TopN. CMSketch and
// TopN are only built when needCMS is set (the NDV-only path skips them).
func (e *AnalyzeIndexExec) buildStatsFromResult(result distsql.SelectResult, needCMS bool) (*statistics.Histogram, *statistics.CMSketch, *statistics.FMSketch, *statistics.TopN, error) {
	failpoint.Inject("buildStatsFromResult", func(val failpoint.Value) {
		if val.(bool) {
			failpoint.Return(nil, nil, nil, nil, errors.New("mock buildStatsFromResult error"))
		}
	})
	hist := &statistics.Histogram{}
	var cms *statistics.CMSketch
	var topn *statistics.TopN
	if needCMS {
		cms = statistics.NewCMSketch(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]))
		topn = statistics.NewTopN(int(e.opts[ast.AnalyzeOptNumTopN]))
	}
	fms := statistics.NewFMSketch(maxSketchSize)
	statsVer := statistics.Version1
	if e.analyzePB.IdxReq.Version != nil {
		statsVer = int(*e.analyzePB.IdxReq.Version)
	}
	for {
		failpoint.Inject("mockKillRunningAnalyzeIndexJob", func() {
			dom := domain.GetDomain(e.ctx)
			dom.SysProcTracker().KillSysProcess(dom.GetAutoAnalyzeProcID())
		})
		// Honor a KILL issued against this session between chunks.
		if atomic.LoadUint32(&e.ctx.GetSessionVars().Killed) == 1 {
			return nil, nil, nil, nil, errors.Trace(exeerrors.ErrQueryInterrupted)
		}
		failpoint.Inject("mockSlowAnalyzeIndex", func() {
			time.Sleep(1000 * time.Second)
		})
		data, err := result.NextRaw(context.TODO())
		if err != nil {
			return nil, nil, nil, nil, err
		}
		// nil data marks the end of the result stream.
		if data == nil {
			break
		}
		resp := &tipb.AnalyzeIndexResp{}
		err = resp.Unmarshal(data)
		if err != nil {
			return nil, nil, nil, nil, err
		}
		hist, cms, fms, topn, err = updateIndexResult(e.ctx, resp, e.job, hist, cms, fms, topn,
			e.idxInfo, int(e.opts[ast.AnalyzeOptNumBuckets]), int(e.opts[ast.AnalyzeOptNumTopN]), statsVer)
		if err != nil {
			return nil, nil, nil, nil, err
		}
	}
	// Values captured by TopN are removed from the histogram so they are
	// not counted twice; CMSketch then derives its default value estimate.
	if needCMS && topn.TotalCount() > 0 {
		hist.RemoveVals(topn.TopN)
	}
	if needCMS && cms != nil {
		cms.CalcDefaultValForAnalyze(uint64(hist.NDV))
	}
	return hist, cms, fms, topn, nil
}
// buildSimpleStats opens the analyze request over the given ranges and builds
// only the lightweight statistics needed for the NDV push-down path: the
// FMSketch of the index and, when considerNull is set, a histogram counting
// the null rows.
//
// It returns a nil nullHist when null counting was not requested or no null
// rows were found. Both result streams are always closed on exit; a close
// error is surfaced only if no earlier error occurred.
func (e *AnalyzeIndexExec) buildSimpleStats(ranges []*ranger.Range, considerNull bool) (fms *statistics.FMSketch, nullHist *statistics.Histogram, err error) {
	if err = e.open(ranges, considerNull); err != nil {
		return nil, nil, err
	}
	defer func() {
		err1 := closeAll(e.result, e.countNullRes)
		if err == nil {
			err = err1
		}
	}()
	_, _, fms, _, err = e.buildStatsFromResult(e.result, false)
	// Bug fix: this error was previously ignored, so a failed scan could
	// silently produce an incomplete FMSketch (and a nil final error when
	// countNullRes was nil).
	if err != nil {
		return nil, nil, err
	}
	if e.countNullRes != nil {
		nullHist, _, _, _, err := e.buildStatsFromResult(e.countNullRes, false)
		if err != nil {
			return nil, nil, err
		}
		if l := nullHist.Len(); l > 0 {
			return fms, nullHist, nil
		}
	}
	return fms, nil, nil
}
// analyzeIndexNDVPushDown runs the simplified analyze path that pushes NDV
// collection down to the storage layer and wraps the result in an
// AnalyzeResults value (or an error result if anything failed).
func analyzeIndexNDVPushDown(idxExec *AnalyzeIndexExec) *statistics.AnalyzeResults {
	ranges := ranger.FullRange()
	// For single-column index, we do not load null rows from TiKV, so the built histogram would not include
	// null values, and its `NullCount` would be set by result of another distsql call to get null rows.
	// For multi-column index, we cannot define null for the rows, so we still use full range, and the rows
	// containing null fields would exist in built histograms. Note that, the `NullCount` of histograms for
	// multi-column index is always 0 then.
	if len(idxExec.idxInfo.Columns) == 1 {
		ranges = ranger.FullNotNullRange()
	}
	fms, nullHist, err := idxExec.buildSimpleStats(ranges, len(idxExec.idxInfo.Columns) == 1)
	if err != nil {
		return &statistics.AnalyzeResults{Err: err, Job: idxExec.job}
	}
	result := &statistics.AnalyzeResult{
		Fms: []*statistics.FMSketch{fms},
		// We use histogram to get the Index's ID.
		Hist:    []*statistics.Histogram{statistics.NewHistogram(idxExec.idxInfo.ID, 0, 0, statistics.Version1, types.NewFieldType(mysql.TypeBlob), 0, 0)},
		IsIndex: 1,
	}
	r := &statistics.AnalyzeResults{
		TableID: idxExec.tableID,
		Ars:     []*statistics.AnalyzeResult{result},
		Job:     idxExec.job,
		// TODO: avoid reusing Version1.
		StatsVer: statistics.Version1,
	}
	// The null histogram's last bucket count is the total number of null rows.
	if nullHist != nil && nullHist.Len() > 0 {
		r.Count = nullHist.Buckets[nullHist.Len()-1].Count
	}
	return r
}
// updateIndexResult merges one AnalyzeIndexResp chunk into the accumulated
// index statistics (histogram, CMSketch, FMSketch, TopN) and returns the
// updated values. The CMSketch/TopN are only merged when cms is non-nil;
// job progress is updated when job is non-nil.
func updateIndexResult(
	ctx sessionctx.Context,
	resp *tipb.AnalyzeIndexResp,
	job *statistics.AnalyzeJob,
	hist *statistics.Histogram,
	cms *statistics.CMSketch,
	fms *statistics.FMSketch,
	topn *statistics.TopN,
	idxInfo *model.IndexInfo,
	numBuckets int,
	numTopN int,
	statsVer int,
) (
	*statistics.Histogram,
	*statistics.CMSketch,
	*statistics.FMSketch,
	*statistics.TopN,
	error,
) {
	var err error
	// A nil cms signals that CMSketch/TopN collection was not requested.
	needCMS := cms != nil
	respHist := statistics.HistogramFromProto(resp.Hist)
	if job != nil {
		UpdateAnalyzeJob(ctx, job, int64(respHist.TotalRowCount()))
	}
	hist, err = statistics.MergeHistograms(ctx.GetSessionVars().StmtCtx, hist, respHist, numBuckets, statsVer)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	if needCMS {
		// A missing CMS in the response is tolerated but logged.
		if resp.Cms == nil {
			logutil.Logger(context.TODO()).Warn("nil CMS in response", zap.String("table", idxInfo.Table.O), zap.String("index", idxInfo.Name.O))
		} else {
			cm, tmpTopN := statistics.CMSketchAndTopNFromProto(resp.Cms)
			if err := cms.MergeCMSketch(cm); err != nil {
				return nil, nil, nil, nil, err
			}
			statistics.MergeTopNAndUpdateCMSketch(topn, tmpTopN, cms, uint32(numTopN))
		}
	}
	if fms != nil && resp.Collector != nil && resp.Collector.FmSketch != nil {
		fms.MergeFMSketch(statistics.FMSketchFromProto(resp.Collector.FmSketch))
	}
	return hist, cms, fms, topn, nil
}
|
package main
import (
"fmt"
"os"
"time"
"github.com/fatih/color"
"github.com/piot/cli-screen/src/cliscreen"
"github.com/piot/cursor-go/src/cursor"
"github.com/piot/progressbar-go/src/progressbar"
)
// createDefaultCursor builds the cursor used for terminal rendering.
// It emits ANSI sequences to stderr; flip useDebug to wrap it in a
// slowed-down debug cursor for troubleshooting.
func createDefaultCursor() cursor.Cursor {
	const useDebug = false
	ansi := cursor.NewAnsiCursor(os.Stderr)
	if !useDebug {
		return ansi
	}
	return cursor.NewDebugCursor(ansi, time.Millisecond*100)
}
// main demos the progressbar library: it renders three download bars and a
// step bar for 400 frames (~33ms each), adding, updating, and finally
// removing the bars while interleaving printed messages.
func main() {
	c := createDefaultCursor()
	firstColor := progressbar.NewColorBarsTheme(color.New(color.FgHiYellow))
	secondColor := progressbar.NewColorBarsTheme(color.New(color.FgHiRed))
	bars := cliscreen.NewLines()
	// Three bars with different totals and themes.
	download := progressbar.NewDownloadBar(320*1024, firstColor)
	download2 := progressbar.NewDownloadBar(840*1024, secondColor)
	download3 := progressbar.NewDownloadBar(24*1024, progressbar.NewStandardTheme())
	stepBar := progressbar.NewStepBar("Some SDK", "Setup", nil)
	screen := cliscreen.NewScreen(c)
	blue := color.New(color.FgHiCyan)
	green := color.New(color.FgHiMagenta)
	bars.AddLine(stepBar)
	bars.AddLine(download)
	bars.AddLine(download3)
	for i := 0; i < 400; i++ {
		screen.Render(bars)
		time.Sleep(time.Millisecond * 33)
		// The second bar only appears after frame 25.
		if i == 25 {
			bars.AddLine(download2)
		}
		if i == 10 {
			stepBar.SetInfo("Installing...")
		}
		if i == 90 {
			stepBar.Done("It worked!")
		}
		// Duplicated `i == 10` check kept as-is; both fire on the same frame.
		if i == 10 {
			screen.Println(blue.Sprint("something happened\non multiple lines!"))
		}
		if i > 15 && (i%20) == 0 {
			screen.Println(green.Sprintf("%d And now it is more\n frequent", i))
		}
		download.SetBytesDownloaded(int64(i*231 + i*201))
		download2.SetBytesDownloaded(int64(i*634 + i*531))
		download3.SetBytesDownloaded(int64(i*512 + i*i*2))
		// Demonstrate clearing the screen near the end of the run.
		if i == 380 {
			bars.RemoveAllLines()
		}
	}
	screen.Close()
	fmt.Println("done")
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbutil
import (
"context"
"testing"
"github.com/DATA-DOG/go-sqlmock"
"github.com/stretchr/testify/require"
)
// TestShowGrants verifies that ShowGrants returns the rows of
// `SHOW GRANTS FOR CURRENT_USER` unchanged when no roles are involved.
func TestShowGrants(t *testing.T) {
	ctx := context.Background()
	db, mock, err := sqlmock.New()
	require.NoError(t, err)
	mockGrants := []string{
		"GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' WITH GRANT OPTION",
		"GRANT PROXY ON ''@'' TO 'root'@'localhost' WITH GRANT OPTION",
	}
	rows := sqlmock.NewRows([]string{"Grants for root@localhost"})
	for _, g := range mockGrants {
		rows.AddRow(g)
	}
	mock.ExpectQuery("^SHOW GRANTS FOR CURRENT_USER$").WillReturnRows(rows)
	grants, err := ShowGrants(ctx, db, "", "")
	require.NoError(t, err)
	require.Equal(t, mockGrants, grants)
	require.Nil(t, mock.ExpectationsWereMet())
}
// TestShowGrantsWithRoles verifies that when the first SHOW GRANTS result
// contains granted roles, ShowGrants issues a second query with
// `USING <roles>` and returns that expanded grant list.
func TestShowGrantsWithRoles(t *testing.T) {
	ctx := context.Background()
	db, mock, err := sqlmock.New()
	require.NoError(t, err)
	// First response: only role grants, no expanded privileges.
	mockGrantsWithoutRoles := []string{
		"GRANT USAGE ON *.* TO `u1`@`localhost`",
		"GRANT `r1`@`%`,`r2`@`%` TO `u1`@`localhost`",
	}
	rows1 := sqlmock.NewRows([]string{"Grants for root@localhost"})
	for _, g := range mockGrantsWithoutRoles {
		rows1.AddRow(g)
	}
	mock.ExpectQuery("^SHOW GRANTS FOR CURRENT_USER$").WillReturnRows(rows1)
	// Second response: grants expanded through roles r1 and r2.
	mockGrantsWithRoles := []string{
		"GRANT USAGE ON *.* TO `u1`@`localhost`",
		"GRANT SELECT, INSERT, UPDATE, DELETE ON `db1`.* TO `u1`@`localhost`",
		"GRANT `r1`@`%`,`r2`@`%` TO `u1`@`localhost`",
	}
	rows2 := sqlmock.NewRows([]string{"Grants for root@localhost"})
	for _, g := range mockGrantsWithRoles {
		rows2.AddRow(g)
	}
	mock.ExpectQuery("^SHOW GRANTS FOR CURRENT_USER USING `r1`@`%`, `r2`@`%`$").WillReturnRows(rows2)
	grants, err := ShowGrants(ctx, db, "", "")
	require.NoError(t, err)
	require.Equal(t, mockGrantsWithRoles, grants)
	require.Nil(t, mock.ExpectationsWereMet())
}
// TestShowGrantsPasswordMasked verifies that ShowGrants rewrites the
// password placeholder in `IDENTIFIED BY PASSWORD` clauses into the quoted
// form 'secret', for every placement of the clause.
func TestShowGrantsPasswordMasked(t *testing.T) {
	ctx := context.Background()
	db, mock, err := sqlmock.New()
	require.NoError(t, err)
	// Each case pairs the raw SHOW GRANTS row with the masked output
	// ShowGrants is expected to produce.
	cases := []struct {
		original string
		expected string
	}{
		{
			"GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY PASSWORD <secret> WITH GRANT OPTION",
			"GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY PASSWORD 'secret' WITH GRANT OPTION",
		},
		{
			"GRANT ALL PRIVILEGES ON *.* TO 'user'@'%' IDENTIFIED BY PASSWORD",
			"GRANT ALL PRIVILEGES ON *.* TO 'user'@'%' IDENTIFIED BY PASSWORD 'secret'",
		},
		{
			"GRANT ALL PRIVILEGES ON *.* TO 'user'@'%' IDENTIFIED BY PASSWORD WITH GRANT OPTION",
			"GRANT ALL PRIVILEGES ON *.* TO 'user'@'%' IDENTIFIED BY PASSWORD 'secret' WITH GRANT OPTION",
		},
		{
			"GRANT ALL PRIVILEGES ON *.* TO 'user'@'%' IDENTIFIED BY PASSWORD <secret>",
			"GRANT ALL PRIVILEGES ON *.* TO 'user'@'%' IDENTIFIED BY PASSWORD 'secret'",
		},
	}
	for _, ca := range cases {
		rows := sqlmock.NewRows([]string{"Grants for root@localhost"})
		rows.AddRow(ca.original)
		mock.ExpectQuery("^SHOW GRANTS FOR CURRENT_USER$").WillReturnRows(rows)
		grants, err := ShowGrants(ctx, db, "", "")
		require.NoError(t, err)
		require.Len(t, grants, 1)
		require.Equal(t, ca.expected, grants[0])
		require.Nil(t, mock.ExpectationsWereMet())
	}
}
|
package main
import "fmt"
// Creeper, Zombie, Skeleton, and Enderman model individual mob kinds;
// each carries a family label and a numeric id.
type Creeper struct {
	family string
	id     int
}
type Zombie struct {
	family string
	id     int
}
type Skeleton struct {
	family string
	id     int
}
type Enderman struct {
	family string
	id     int
}

// Overworld groups the mobs found in the overworld dimension.
// NOTE(review): "whither_skeleton" is presumably a misspelling of
// "wither_skeleton" — left as-is since renaming touches every reference.
type Overworld struct {
	creeper          Creeper
	zombie           Zombie
	husk             Zombie
	skeleton         Skeleton
	whither_skeleton Skeleton
	enderman         Enderman
}

// Nether groups the mobs found in the nether dimension.
type Nether struct {
	piglin_brute      Zombie
	piglin_xbower     Skeleton
	zombiefied_piglin Zombie
	whither_skeleton  Skeleton
	enderman          Enderman
}

// TheEnd groups the mobs found in the end dimension.
type TheEnd struct {
	enderman Enderman
}

// Minecraft bundles all three dimensions of a world.
type Minecraft struct {
	overworld Overworld
	nether    Nether
	end       TheEnd
}

// BeatTheDragon always reports success; it exists to demonstrate a method
// with a value receiver.
func (mc Minecraft) BeatTheDragon() bool {
	return true
}
// main reads "<number> <operator> <number>" from stdin and dispatches to one
// of the themed arithmetic helpers based on the operator string. Each branch
// guards against an empty operator, though Scan into a string makes an empty
// value unlikely in practice.
func main() {
	var axe, hoe int64
	var diomonds string
	// Demonstrate int-typed helpers before reading input.
	iron, gold, emerald := 1, 2, 3
	execution(iron, gold, emerald)
	iron, gold, emerald = 3, 2, 1
	deexecution(iron, gold, emerald)
	// Reads: first operand, operator, second operand.
	fmt.Scan(&axe, &diomonds, &hoe)
	myWorld := Minecraft{}
	myWorld.BeatTheDragon()
	switch diomonds {
	case "+": // Getting wood & tools
		if diomonds != "" {
			fmt.Println(getResourses(axe, hoe))
		} else {
			fmt.Println("Creeper, oh man")
		}
	case "-": // Mining deep to get iron
		if diomonds != "" {
			fmt.Println(getArmor(axe, hoe))
		} else {
			fmt.Println("So we back in the mine, got our pick axe swinging from side to side,")
		}
	case "*": //Building a nether portal
		if diomonds != "" {
			fmt.Println(goToNether(axe, hoe))
		} else {
			fmt.Println("Side, side to side")
		}
	case "/": // Getting blaze rot's to activate the portal
		/*
			defer func() {
				if r:= recover(); r != nil {
					fmt.Println("recovered from function with divide by zero")
				}
			}()
		*/
		if diomonds != "" {
			fmt.Println(findStrongHold(axe, hoe))
		} else {
			fmt.Println("This task a grueling one,")
		}
	case "%": // Dragon loves beds <3
		/*
			defer func() {
				if r:= recover(); r != nil {
					fmt.Println("recovered from function with divide by zero")
				}
			}()
		*/
		if diomonds != "" {
			fmt.Println(freeTheEnd(axe, hoe))
		} else {
			fmt.Println("Hope to find some diamonds tonight, night, night")
		}
	case "_": // End-game
		fmt.Println("What's going on here, man?")
	case "\\": // Do stuff
		fmt.Println("Damn, that's s* is illegal")
	}
}
// My names Jeff
// execution returns the combined total of health, armor and hunger
// (Regeneration and Absorption stacked together).
func execution(health, armor, hunger int) int {
	total := health + armor
	total += hunger
	return total
}
// getResourses applies the tree-gathering multiplier and the cobble-gen
// offset, then reverses both before combining — the round trip leaves the
// result equal to the sum of the original inputs.
func getResourses(amountOfWood, amountOfCobblestone int64) int64 {
	const (
		woodFactor  = 27 * (14 + 75 - 45) // tree gathering multiplier (1188)
		cobbleDelta = (7 + 25 - 65) * 44  // cobble generator offset (-1452)
	)
	amountOfWood *= woodFactor
	amountOfCobblestone += cobbleDelta
	return amountOfWood/woodFactor + (amountOfCobblestone - cobbleDelta)
}
// getArmor shifts the ore counts by fixed deltas and then undoes both
// shifts in the final expression, yielding ironOre minus goldOre.
func getArmor(ironOre, goldOre int64) int64 {
	const (
		ironDelta = 2 * (4 + 5 - 45)    // iron ores + golems + trading (-72)
		goldDelta = (17 + 245 - 65) * 3 // boots or helmet (591)
	)
	ironOre -= ironDelta
	goldOre += goldDelta
	return (ironOre + ironDelta) - (goldOre - goldDelta)
}
// deexecution subtracts armor and hunger from health
// (Instant Damage and Wither effects combined).
func deexecution(health, armor, hunger int) int {
	remaining := health - armor
	remaining -= hunger
	return remaining
}
// goToNether multiplies each input by a fixed factor and divides the
// product by both factors, leaving blazesToKill * ghastTears.
func goToNether(blazesToKill, ghastTears int64) int64 {
	const (
		blazeFactor = 17 * (13 + 35 - 4)   // blaze powder factor (748)
		tearFactor  = (67 + 21 - 625) * 54 // regen potion + end crystal (-28998)
	)
	blazesToKill *= blazeFactor
	ghastTears *= tearFactor
	return blazesToKill * ghastTears / (blazeFactor * tearFactor)
}
// findStrongHold offsets both inputs by fixed deltas, then cancels the
// offsets in the final quotient, producing eyesOfEnder / distance.
// It panics when distance is zero, mirroring the division guard.
func findStrongHold(eyesOfEnder, distance int64) int64 {
	if distance == 0 {
		panic("division by zero!")
	}
	const (
		eyeDelta  = 207 * (114 + 715 - 425) // eyes needed to activate the portal (83628)
		distDelta = (57 + 265 - 665) * 414  // travel distance offset (-142002)
	)
	eyesOfEnder += eyeDelta
	distance += distDelta
	return (eyesOfEnder - eyeDelta) / (distance - distDelta)
}
// freeTheEnd returns speedRun modulo worldRecord.
//
// Bug fix: the zero guard previously tested speedRun, but the modulo below
// divides by worldRecord — a zero worldRecord slipped past the guard and
// triggered the runtime "integer divide by zero" panic instead of the
// intended message, while a harmless speedRun of 0 panicked needlessly.
func freeTheEnd(speedRun, worldRecord int64) int64 {
	if worldRecord == 0 {
		panic("division by zero!")
	}
	return speedRun % worldRecord // glitches % any
}
|
package loader
import (
"io/ioutil"
"os"
"path/filepath"
homedir "github.com/mitchellh/go-homedir"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
"github.com/devspace-cloud/devspace/pkg/util/log"
"github.com/devspace-cloud/devspace/pkg/devspace/config/constants"
"github.com/devspace-cloud/devspace/pkg/devspace/config/generated"
"github.com/devspace-cloud/devspace/pkg/devspace/config/versions/latest"
"github.com/devspace-cloud/devspace/pkg/util/kubeconfig"
)
// ConfigLoader is the base interface for the main config loader.
type ConfigLoader interface {
	// New returns a fresh, empty config object.
	New() *latest.Config
	// Exists reports whether a config file is present on disk.
	Exists() bool
	// Load returns the config merged with the active profile.
	Load() (*latest.Config, error)
	// LoadRaw returns the unparsed config as a generic YAML map.
	LoadRaw() (map[interface{}]interface{}, error)
	// LoadWithoutProfile returns the config ignoring any profile.
	LoadWithoutProfile() (*latest.Config, error)
	// ConfigPath returns the path of the config file in use.
	ConfigPath() string
	GetProfiles() ([]string, error)
	ParseCommands() ([]*latest.CommandConfig, error)
	// ResolvedVars returns the variables resolved during loading.
	ResolvedVars() map[string]string
	// Generated returns (lazily loading) the generated config.
	Generated() (*generated.Config, error)
	SaveGenerated() error
	RestoreVars(config *latest.Config) (*latest.Config, error)
	Save(config *latest.Config) error
	// SetDevSpaceRoot changes cwd to the project root if a config is found.
	SetDevSpaceRoot() (bool, error)
}
// configLoader is the default ConfigLoader implementation. It caches the
// generated config and the variables resolved during the last load.
type configLoader struct {
	generatedLoader  generated.ConfigLoader
	generatedConfig  *generated.Config // cached; loaded lazily by Generated()
	kubeConfigLoader kubeconfig.Loader
	resolvedVars     map[string]string
	options          *ConfigOptions
	log              log.Logger
}
// NewConfigLoader creates a new config loader with the given options.
// A nil options value is replaced by an empty ConfigOptions.
func NewConfigLoader(options *ConfigOptions, log log.Logger) ConfigLoader {
	if options == nil {
		options = &ConfigOptions{}
	}
	// Variables resolved while loading are recorded into this map.
	options.LoadedVars = map[string]string{}
	loader := &configLoader{
		options: options,
		log:     log,
	}
	loader.generatedConfig = options.GeneratedConfig
	loader.generatedLoader = generated.NewConfigLoader(options.Profile)
	loader.kubeConfigLoader = kubeconfig.NewLoader()
	return loader
}
// Generated returns the generated config, loading it on first use and
// caching the result for subsequent calls.
func (l *configLoader) Generated() (*generated.Config, error) {
	if l.generatedConfig != nil {
		return l.generatedConfig, nil
	}
	var err error
	l.generatedConfig, err = l.generatedLoader.Load()
	return l.generatedConfig, err
}
// SaveGenerated persists the generated config through the generated loader.
// It is a no-op when no generated loader is configured.
func (l *configLoader) SaveGenerated() error {
	if l.generatedLoader == nil {
		return nil
	}
	cfg, err := l.Generated()
	if err != nil {
		return err
	}
	return l.generatedLoader.Save(cfg)
}
// ResolvedVars returns the variables that were resolved during the last
// config load; nil if no load has happened yet.
func (l *configLoader) ResolvedVars() map[string]string {
	return l.resolvedVars
}
// Exists reports whether the devspace configuration file is present on disk
// at the resolved config path.
func (l *configLoader) Exists() bool {
	return configExistsInPath(l.ConfigPath())
}
// configExistsInPath checks wheter a devspace configuration exists at a certain path
func configExistsInPath(path string) bool {
// Check devspace.yaml
_, err := os.Stat(path)
if err == nil {
return true
}
return false // Normal config file found
}
// New initializes a new, empty config object of the latest schema version.
func (l *configLoader) New() *latest.Config {
	return latest.New().(*latest.Config)
}
// ConfigOptions defines options to load the config.
type ConfigOptions struct {
	Profile         string // profile name to apply; empty means none
	KubeContext     string
	ConfigPath      string // explicit config file path; empty uses the default
	GeneratedConfig *generated.Config
	LoadedVars      map[string]string // populated by the loader during Load
	Vars            []string
}
// Clone returns a deep copy of the config options, produced by
// round-tripping the struct through YAML.
func (co *ConfigOptions) Clone() (*ConfigOptions, error) {
	raw, err := yaml.Marshal(co)
	if err != nil {
		return nil, err
	}
	clone := &ConfigOptions{}
	if err := yaml.Unmarshal(raw, clone); err != nil {
		return nil, err
	}
	return clone, nil
}
// LoadWithoutProfile returns the base config without applying any profile.
func (l *configLoader) LoadWithoutProfile() (*latest.Config, error) {
	return l.loadInternal(false)
}

// Load returns the config merged with all potential overwrite files,
// applying the active profile if one is set.
func (l *configLoader) Load() (*latest.Config, error) {
	return l.loadInternal(true)
}
// LoadRaw reads the config file from disk and unmarshals it into a generic
// YAML map without applying schema parsing, profiles, or validation.
func (l *configLoader) LoadRaw() (map[interface{}]interface{}, error) {
	// What path should we use
	configPath := l.ConfigPath()
	// Stat first to produce a friendlier "couldn't load" error message.
	_, err := os.Stat(configPath)
	if err != nil {
		return nil, errors.Errorf("Couldn't load '%s': %v", configPath, err)
	}
	fileContent, err := ioutil.ReadFile(configPath)
	if err != nil {
		return nil, err
	}
	rawMap := map[interface{}]interface{}{}
	err = yaml.Unmarshal(fileContent, &rawMap)
	if err != nil {
		return nil, err
	}
	return rawMap, nil
}
// ConfigPath returns the config file path in use: the explicitly configured
// path when set, otherwise the package default.
func (l *configLoader) ConfigPath() string {
	if l.options.ConfigPath != "" {
		return l.options.ConfigPath
	}
	return constants.DefaultConfigPath
}
// loadInternal loads, parses, and validates the config. When allowProfile is
// true and no profile was explicitly requested, the generated config's
// active profile is applied; when false, any profile is cleared.
func (l *configLoader) loadInternal(allowProfile bool) (*latest.Config, error) {
	// Get generated config
	generatedConfig, err := l.Generated()
	if err != nil {
		return nil, err
	}
	// Check if we should load a specific config
	if allowProfile && generatedConfig.ActiveProfile != "" && l.options.Profile == "" {
		l.options.Profile = generatedConfig.ActiveProfile
	} else if !allowProfile {
		l.options.Profile = ""
	}
	// Load the raw config
	rawMap, err := l.LoadRaw()
	if err != nil {
		return nil, err
	}
	// Parse the config
	config, err := l.parseConfig(rawMap)
	if err != nil {
		return nil, err
	}
	// Now we validate the config
	err = validate(config)
	if err != nil {
		return nil, err
	}
	// Save generated config (persists e.g. variables captured during parse)
	if l.generatedLoader != nil {
		err = l.generatedLoader.Save(generatedConfig)
		if err != nil {
			return nil, err
		}
	}
	return config, nil
}
// SetDevSpaceRoot checks the current directory and all parent directories for a .devspace folder with a config and sets the current working directory accordingly
func (l *configLoader) SetDevSpaceRoot() (bool, error) {
	// An explicit config path disables the upward search entirely.
	if l.options.ConfigPath != "" {
		return configExistsInPath(l.options.ConfigPath), nil
	}
	cwd, err := os.Getwd()
	if err != nil {
		return false, err
	}
	originalCwd := cwd
	// Note: this local shadows the imported homedir package below.
	homedir, err := homedir.Dir()
	if err != nil {
		return false, err
	}
	// Walk up the directory tree; filepath.Dir eventually returns its own
	// input at the filesystem root, which terminates the loop.
	lastLength := 0
	for len(cwd) != lastLength {
		// The home directory itself is skipped as a candidate project root.
		if cwd != homedir {
			configExists := configExistsInPath(filepath.Join(cwd, constants.DefaultConfigPath))
			if configExists {
				// Change working directory
				err = os.Chdir(cwd)
				if err != nil {
					return false, err
				}
				// Notify user that we are not using the current working directory
				if originalCwd != cwd {
					l.log.Infof("Using devspace config in %s", filepath.ToSlash(cwd))
				}
				return true, nil
			}
		}
		lastLength = len(cwd)
		cwd = filepath.Dir(cwd)
	}
	return false, nil
}
|
package kvdb
// IdealBatchSize is the preferred amount of data, in bytes, to accumulate in
// a batch before flushing it with a single Write.
const IdealBatchSize = 100 * 1024
// Batch is a write-only database that commits changes to its host database
// when Write is called. A batch cannot be used concurrently.
type Batch interface {
	KeyValueWriter

	// ValueSize retrieves the amount of data queued up for writing.
	ValueSize() int

	// Write flushes any accumulated data to disk.
	Write() error

	// Reset resets the batch for reuse.
	Reset()

	// Replay replays the batch contents.
	Replay(w KeyValueWriter) error
}
// Batcher wraps the NewBatch method of a backing data store.
type Batcher interface {
	// NewBatch creates a write-only database that buffers changes to its host db
	// until a final write is called.
	NewBatch() Batch
}
// HookedBatch wraps an arbitrary batch where each operation may be hooked into
// to monitor from black box code. Hooks run before the wrapped batch's
// operation and may be left nil to disable them.
type HookedBatch struct {
	Batch

	OnPut    func(key []byte, value []byte) // Callback if a key is inserted
	OnDelete func(key []byte)               // Callback if a key is deleted
}
// Put inserts the given value into the key-value data store, firing the
// OnPut hook (when set) before delegating to the wrapped batch.
func (b HookedBatch) Put(key []byte, value []byte) error {
	if hook := b.OnPut; hook != nil {
		hook(key, value)
	}
	return b.Batch.Put(key, value)
}
// Delete removes the key from the key-value data store, firing the OnDelete
// hook (when set) before delegating to the wrapped batch.
func (b HookedBatch) Delete(key []byte) error {
	if hook := b.OnDelete; hook != nil {
		hook(key)
	}
	return b.Batch.Delete(key)
}
|
package test
import (
"dappapi/models"
config2 "dappapi/tools/config"
"encoding/json"
"fmt"
"github.com/spf13/cobra"
)
// Flag storage and the cobra command for the "test" subcommand.
var (
	secret string // shared secret for the request (flag -s)
	api    string // api path to call (flag -a)
	config string // settings file path; currently hard-coded in run()
	// StartCmd runs an ad-hoc request against the configured API.
	// NOTE(review): Short says "initialize the database" — looks copy-pasted
	// from another command; confirm intended description.
	StartCmd = &cobra.Command{
		Use:   "test",
		Short: "initialize the database",
		Run: func(cmd *cobra.Command, args []string) {
			run()
		},
	}
)
// init registers the command-line flags of the test command.
func init() {
	// StartCmd.PersistentFlags().StringVarP(&config, "config", "c", "config/settings.yml", "Start server with provided configuration file")
	// StartCmd.PersistentFlags().StringVarP(&mode, "mode", "m", "dev", "server mode ; eg:dev,test,prod")
	StartCmd.PersistentFlags().StringVarP(&secret, "secret", "s", "YVXX6XOJPTT2LH4ZXC6U7IQ6DA23AFGN", "test secret; eg:YVXX6XOJPTT2LH4ZXC6U7IQ6DA23AFGN")
	// Fix: the usage text of -a was copy-pasted from -s and described a
	// secret; it now documents the api path flag.
	StartCmd.PersistentFlags().StringVarP(&api, "api", "a", "users/list", "test api path; eg:users/list")
}
// run loads the dev settings file and posts a fixed uid list to the
// configured api endpoint, then prints a usage banner.
func run() {
	// NOTE(review): the hard-coded dev settings path overrides any -c flag.
	config = "config/dev/settings.yml"
	config2.ConfigSetup(config)
	usage := `start test`
	// secret := tools.NewGoogleAuth().GetSecret()
	// secret := "UTHGQODT7AXEFE5EOX7NU746EVX6CST6"
	// qrcordUrl := tools.NewGoogleAuth().GetQrcodeUrl("test.qq.com", secret)
	// code, err := tools.NewGoogleAuth().GetCode(secret)
	// sec := time.Duration(2) * time.Second
	// Request payload: a fixed pair of user ids.
	param := map[string]interface{}{
		"uids": []int{12382, 138620},
	}
	jsonStr, _ := json.Marshal(param)
	models.Post(api, jsonStr)
	fmt.Println(usage)
}
|
package main
import (
"fmt"
"os"
"github.com/wuiscmc/spotbot-cli/spotbot"
)
// control dispatches a single CLI command to the spotbot client. For the
// "search" command, opts carries the query string; it is ignored otherwise.
// Unknown options are silently dropped.
func control(option string, sp *spotbot.Spotbot, opts interface{}) {
	switch option {
	case "play":
		sp.Play()
	case "pause":
		sp.Pause()
	case "next":
		sp.NextSong()
	case "playlist":
		// Playlist output is currently disabled.
		//fmt.Println(sp.CurrentPlaylist())
	case "current":
		fmt.Println(sp.CurrentTrack())
	case "search":
		for _, track := range sp.Search(opts.(string)) {
			fmt.Println(track)
		}
	case "shuffle":
		sp.Shuffle()
	}
}
// main reads the connection settings from the environment, parses the
// command (and optional query) from the arguments, and dispatches it.
// With no arguments the current track is shown.
func main() {
	firebaseUrl := os.Getenv("FIREBASE_URL")
	spotbotServerUrl := os.Getenv("SPOTBOT_SERVER")
	if firebaseUrl == "" || spotbotServerUrl == "" {
		fmt.Println("Please set up your FIREBASE_URL and SPOTBOT_SERVER env variables first")
		return
	}
	sp := spotbot.New(firebaseUrl)
	var option, query string
	numArgs := len(os.Args)
	// numArgs counts the program name: 1 = no args, 2 = command only,
	// 3 = command plus query. Anything else leaves option empty.
	switch {
	default:
		query = ""
	case numArgs == 1:
		option = "current"
	case numArgs == 2:
		option = os.Args[1]
	case numArgs == 3:
		option = os.Args[1]
		query = os.Args[2]
	}
	control(option, sp, query)
}
|
// Copyright 2015 by caixw, All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.
// apidoc 是一个 RESTful API 文档生成工具。
package main
import (
"bytes"
"flag"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"runtime/pprof"
"strings"
"github.com/issue9/logs/writers"
"github.com/issue9/term/colors"
"github.com/issue9/version"
yaml "gopkg.in/yaml.v2"
"github.com/tanxiaolong/apidoc/input"
"github.com/tanxiaolong/apidoc/locale"
"github.com/tanxiaolong/apidoc/output"
"github.com/tanxiaolong/apidoc/vars"
"fmt"
)
// Log output destinations, color-coded by severity.
var (
	info = newLog(os.Stdout, vars.InfoColor, "[INFO] ")
	warn = newLog(os.Stderr, vars.WarnColor, "[WARN] ")
	erro = newLog(os.Stderr, vars.ErroColor, "[ERRO] ")
)
// Make sure locale information is initialized as early as possible, then
// localize the log prefixes. On failure the default prefixes remain.
func init() {
	if err := locale.Init(); err != nil {
		warn.Println(err)
		return
	}
	info.SetPrefix(locale.Sprintf(locale.InfoPrefix))
	warn.SetPrefix(locale.Sprintf(locale.WarnPrefix))
	erro.SetPrefix(locale.Sprintf(locale.ErrorPrefix))
}
// main parses the command-line flags, handles the informational flags
// (-h, -v, -g, -languages, -encodings), optionally enables CPU/heap
// profiling, and hands off to run() for the real work.
func main() {
	h := flag.Bool("h", false, locale.Sprintf(locale.FlagHUsage))
	v := flag.Bool("v", false, locale.Sprintf(locale.FlagVUsage))
	g := flag.Bool("g", false, locale.Sprintf(locale.FlagGUsage))
	wd := flag.String("wd", "./", locale.Sprintf(locale.FlagWDUsage))
	languages := flag.Bool("languages", false, locale.Sprintf(locale.FlagLanguagesUsage))
	encodings := flag.Bool("encodings", false, locale.Sprintf(locale.FlagEncodingsUsage))
	pprofType := flag.String("pprof", "", locale.Sprintf(locale.FlagPprofUsage, vars.PprofCPU, vars.PprofMem))
	flag.Usage = usage
	flag.Parse()
	// Informational flags short-circuit before any processing.
	switch {
	case *h:
		flag.Usage()
		return
	case *v:
		printVersion()
		return
	case *languages:
		locale.Printf(locale.FlagSupportedLanguages, input.Languages())
		return
	case *encodings:
		locale.Printf(locale.FlagSupportedEncodings, input.Encodings())
		return
	case *g:
		genConfigFile(*wd)
		return
	}
	if len(*pprofType) > 0 {
		buf := new(bytes.Buffer)
		// When the program exits, write the collected profile to a file.
		defer func() {
			profile := filepath.Join(*wd, *pprofType+".prof")
			if err := ioutil.WriteFile(profile, buf.Bytes(), os.ModePerm); err != nil {
				erro.Println(err)
			}
		}()
		switch strings.ToLower(*pprofType) {
		case vars.PprofMem:
			// Heap snapshot is captured at exit via defer.
			defer func() {
				if err := pprof.Lookup("heap").WriteTo(buf, 1); err != nil {
					erro.Println(err)
				}
			}()
		case vars.PprofCPU:
			if err := pprof.StartCPUProfile(buf); err != nil {
				erro.Println(err)
			}
			defer pprof.StopCPUProfile()
		default:
			erro.Println(locale.Sprintf(locale.FlagInvalidPprrof))
			return
		}
	}
	run(*wd)
}
// run is the real program entry point; main only handles argument parsing.
// It loads the config from wd, checks version compatibility, parses the
// inputs, and renders the output documentation.
func run(wd string) {
	cfg, err := loadConfig(filepath.Join(wd, vars.ConfigFilename))
	if err != nil {
		erro.Println(err)
		return
	}
	// Check that the config file's version is compatible with this binary.
	compatible, err := version.SemVerCompatible(vars.Version(), cfg.Version)
	if err != nil {
		erro.Println(err)
		return
	}
	if !compatible {
		erro.Println(locale.Sprintf(locale.VersionInCompatible))
		return
	}
	docs, elapsed := input.Parse(cfg.Inputs...)
	// NOTE(review): this dump of the parsed docs looks like leftover debug
	// output; removing it would also require dropping the now-unused fmt
	// import, so it is only flagged here.
	fmt.Printf("%+v\n", docs)
	cfg.Output.Elapsed = elapsed
	if err := output.Render(docs, cfg.Output); err != nil {
		erro.Println(err)
		return
	}
	info.Println(locale.Sprintf(locale.Complete, cfg.Output.Dir, elapsed))
}
// usage prints the localized help text, capturing the default flag output
// into a buffer so it can be embedded in the localized template.
func usage() {
	buf := new(bytes.Buffer)
	flag.CommandLine.SetOutput(buf)
	flag.PrintDefaults()
	locale.Printf(locale.FlagUsage, vars.Name, buf.String(), vars.RepoURL, vars.OfficialURL)
}
// genConfigFile detects the project layout under wd, builds a default
// configuration from it, and writes the result to wd's config file
// (.apidoc.yaml).
func genConfigFile(wd string) {
	o, err := input.Detect(wd, true)
	if err != nil {
		erro.Println(err)
		return
	}
	cfg := &config{
		Version: vars.Version(),
		Inputs:  []*input.Options{o},
		Output: &output.Options{
			// Rendered docs default to a "doc" folder next to the sources.
			Dir: filepath.Join(o.Dir, "doc"),
		},
	}
	data, err := yaml.Marshal(cfg)
	if err != nil {
		erro.Println(err)
		return
	}
	path := filepath.Join(wd, vars.ConfigFilename)
	if err = ioutil.WriteFile(path, data, os.ModePerm); err != nil {
		erro.Println(err)
		return
	}
	info.Println(locale.Sprintf(locale.FlagConfigWritedSuccess, path))
}
// printVersion prints the localized version banner: program version, Go
// toolchain version, and the build's commit hash.
func printVersion() {
	locale.Printf(locale.FlagVersionBuildWith, vars.Name, vars.Version(), runtime.Version())
	locale.Printf(locale.FlagVersionCommitHash, vars.CommitHash())
}
// newLog builds a logger that writes color-coded output to the given file
// with the supplied prefix and no timestamp flags.
func newLog(out *os.File, color colors.Color, prefix string) *log.Logger {
	return log.New(writers.NewConsole(out, color, colors.Default), prefix, 0)
}
|
//go:generate msgp
package example
import (
"github.com/myitcv/neovim"
"github.com/tinylib/msgp/msgp"
)
// **************************
// DoSomethingAsync
// newDoSomethingAsyncResponder builds the async decoder wrapper used by the
// msgp-generated plumbing to dispatch DoSomethingAsync calls.
func (n *Example) newDoSomethingAsyncResponder() neovim.AsyncDecoder {
	return &doSomethingAsyncWrapper{
		Example: n,
		args:    &DoSomethingAsyncArgs{},
	}
}

// Args exposes the argument tuple for msgpack decoding.
func (n *doSomethingAsyncWrapper) Args() msgp.Decodable {
	return n.args
}

// Eval returns nil: this method takes no eval expression result.
func (n *doSomethingAsyncWrapper) Eval() msgp.Decodable {
	return nil
}

// Params returns nil: this method declares no option params.
func (n *doSomethingAsyncWrapper) Params() *neovim.MethodOptionParams {
	return nil
}

// doSomethingAsyncWrapper adapts DoSomethingAsync to the neovim
// async-decoder interface.
type doSomethingAsyncWrapper struct {
	*Example
	args *DoSomethingAsyncArgs
}

//msgp:tuple DoSomethingAsyncArgs
type DoSomethingAsyncArgs struct {
	Arg0 string
}

// Run invokes the wrapped DoSomethingAsync with the decoded argument.
func (g *doSomethingAsyncWrapper) Run() error {
	err := g.Example.DoSomethingAsync(nil, string(g.args.Arg0))
	return err
}
// **************************
// GetTwoNumbers
// newGetTwoNumbersResponder builds the sync decoder wrapper used by the
// msgp-generated plumbing to dispatch GetTwoNumbers calls.
func (n *Example) newGetTwoNumbersResponder() neovim.SyncDecoder {
	return &getTwoNumbersWrapper{
		Example: n,
		args:    &GetTwoNumbersArgs{},
		results: &GetTwoNumbersResults{},
		eval:    new(MyEvalResult),
		params:  new(neovim.MethodOptionParams),
	}
}

// Args exposes the argument tuple for msgpack decoding.
func (n *getTwoNumbersWrapper) Args() msgp.Decodable {
	return n.args
}

// Eval exposes the eval expression result for msgpack decoding.
func (n *getTwoNumbersWrapper) Eval() msgp.Decodable {
	return n.eval
}

// Params exposes the method option params.
func (n *getTwoNumbersWrapper) Params() *neovim.MethodOptionParams {
	return n.params
}

// Results exposes the result tuple for msgpack encoding.
func (n *getTwoNumbersWrapper) Results() msgp.Encodable {
	return n.results
}

// getTwoNumbersWrapper adapts GetTwoNumbers to the neovim sync-decoder
// interface.
type getTwoNumbersWrapper struct {
	*Example
	params  *neovim.MethodOptionParams
	args    *GetTwoNumbersArgs
	results *GetTwoNumbersResults
	eval    *MyEvalResult
}

//msgp:tuple GetTwoNumbersArgs
type GetTwoNumbersArgs struct {
	Arg0 int64
}

//msgp:tuple GetTwoNumbersResults
type GetTwoNumbersResults struct {
	Ret0 int64
	Ret1 string
}

// Run calls GetTwoNumbers and stores its return values, replacing the
// results tuple allocated by the responder. The first return is the
// method-level error, the second a transport error.
func (g *getTwoNumbersWrapper) Run() (error, error) {
	res := &GetTwoNumbersResults{}
	// TODO method option params
	retVal0, retVal1, mErr, err := g.Example.GetTwoNumbers(g.Params(), int(g.args.Arg0), g.eval)
	if err != nil || mErr != nil {
		return mErr, err
	}
	res.Ret0 = int64(retVal0)
	res.Ret1 = string(retVal1)
	g.results = res
	return nil, nil
}
|
package lang
import (
"fmt"
"testing"
)
// Test_Add exercises the add method of type I (declared elsewhere in the
// package) by printing the value before and after the call.
// NOTE(review): no assertions are made — presumably this is a demo of value
// receivers not mutating the caller; confirm intent.
func Test_Add(t *testing.T) {
	var i I = 3
	fmt.Println(i)
	i.add(2)
	fmt.Println(i)
}
|
package core
// Processor carries the in/out channels of a string-processing pipeline
// stage.
type Processor struct {
	ProcessorChan    chan string
	ProcessorChanOut chan string
}

// MarketBeatStock groups per-period data series for a single ticker.
type MarketBeatStock struct {
	Marker  string
	Today   []string
	Days30  []string
	Days90  []string
	Days180 []string
}

// SPBStock is a flat, all-string record describing one SPB exchange listing.
type SPBStock struct {
	Id       string
	Marker   string
	Title    string
	Code1    string
	Code2    string
	Count    string
	Price    string
	Currency string
	Date     string
	Note     string
}
|
package main
import (
"fmt"
"bufio"
"os"
"strconv"
)
type player struct{
xPosition int
yPosition int
}
// parsePlayersFromStdIn reads the grid from stdin and locates the hero
// ('m') and the princess ('p').
//
// Input format: the first line holds the grid size, followed by that many
// rows. Rows are read top to bottom, so the first row receives the highest
// y value (gridSize-1) and y decreases downward.
//
// Fix: the raw code points 109 and 112 are replaced with the rune literals
// 'm' and 'p' they stood for — identical values, explicit intent.
func parsePlayersFromStdIn() (player, player) {
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Scan()
	gridSize, _ := strconv.Atoi(scanner.Text())
	var hero player
	var princess player
	yPosition := gridSize - 1
	for scanner.Scan() {
		inputLine := scanner.Text()
		for xPosition, char := range inputLine {
			switch char {
			case 'm':
				hero = player{xPosition, yPosition}
			case 'p':
				princess = player{xPosition, yPosition}
			}
		}
		yPosition -= 1
	}
	return hero, princess
}
// heroGetsPrincess prints the sequence of moves (RIGHT/LEFT then UP/DOWN)
// that walks the hero to the princess, one step along each axis per
// iteration, until the two positions coincide.
func heroGetsPrincess(hero player, princess player) {
	for hero != princess {
		switch {
		case hero.xPosition < princess.xPosition:
			hero.xPosition++
			fmt.Println("RIGHT")
		case hero.xPosition > princess.xPosition:
			hero.xPosition--
			fmt.Println("LEFT")
		}
		switch {
		case hero.yPosition < princess.yPosition:
			hero.yPosition++
			fmt.Println("UP")
		case hero.yPosition > princess.yPosition:
			hero.yPosition--
			fmt.Println("DOWN")
		}
	}
}
// main reads the grid from stdin and prints the hero's path to the princess.
func main() {
	//Enter your code here. Read input from STDIN. Print output to STDOUT
	hero, princess := parsePlayersFromStdIn()
	heroGetsPrincess(hero, princess)
}
|
package main
import (
"fmt"
"log"
"net/url"
"os"
"strings"
)
// main joins its arguments with spaces into a URL path and prints the
// percent-encoded form produced by url.URL's String method.
func main() {
	if len(os.Args) < 2 {
		log.Fatal("usage: urlencode <path fragment>")
	}
	u := &url.URL{Path: strings.Join(os.Args[1:], " ")}
	fmt.Println(u.String())
}
|
package opts
import (
"errors"
"flag"
"fmt"
"local/notorious/logging"
"os"
"regexp"
"strings"
)
// raw CLI flags, only used to create an Opts during Parse()
// raw CLI flags, only used to create an Opts during Parse()
var (
	after       = flag.Int("A", 0, "how many lines of context [A]fter the match to print")
	before      = flag.Int("B", 0, "how many lines of context [B]efore the match to print")
	context     = flag.Int("C", 0, "how many lines of [C]ontext around the match to print")
	ignoreCase  = flag.Bool("i", false, "[i]gnore case in matches")
	lineNumbers = flag.Bool("n", false, "whether to print the line [n]umbers")
	literal     = flag.Bool("e", false, "match using string lit[e]rals instead of regular expressions")
	// Fix: help text typo "expresisons" -> "expressions".
	posix   = flag.Bool("posix", false, "use [posix] regular expressions")
	verbose = flag.Bool("v", false, "[v]erbose debug info")
)
// Opts represent the parsed and validated options. Use these instead of the command-line flags directly
// You need to set the Matches func somehow; New() is a good way to do it
type Opts struct {
	// Lines of context to match. Default is 0, 0.
	Context struct{ Before, After int }
	// Whether to print line numbers. Default is false.
	LineNumbers bool
	// A function to match a line of text.
	Matches func(string) bool
	// Print verbose debug output.
	Verbose bool
}
// create a new Opts with everything set to the default
func New(pattern string) (Opts, error) {
re, err := regexp.Compile(pattern)
if err != nil {
return Opts{}, fmt.Errorf("could not compile regexp from %q: %v", pattern, err)
}
return Opts{Matches: re.MatchString}, nil
}
// these builder methods are mostly here to make testing over in main_test.go a little easier and cleaner, and you could easily omit them
// WithMatcher returns a copy with Matches set to f
func (o Opts) WithMatcher(f func(s string) bool) Opts {
o.Matches = f
return o
}
// WithContext returns a copy with Context.Before and Context.After set to before and after.
func (o Opts) WithContext(before, after int) Opts {
	o.Context.Before, o.Context.After = before, after
	return o
}
// WithLineNumbers returns a copy with LineNumbers set to b.
func (o Opts) WithLineNumbers(b bool) Opts {
	o.LineNumbers = b
	return o
}
// WithVerbose returns a copy with Verbose set to b.
func (o Opts) WithVerbose(b bool) Opts {
	o.Verbose = b
	return o
}
// Parse parses and validates the command-line flags and returns the
// resulting Opts. It must be called at most once; calling it after flags
// have already been parsed is reported as an error.
func Parse() (o Opts, err error) {
	if flag.Parsed() {
		return Opts{}, errors.New("flags already parsed")
	}
	flag.Parse()
	logger := logging.Debug(os.Stderr, *verbose)
	pattern := flag.Arg(0)
	if pattern == "" {
		return Opts{}, errors.New("expected an argument PATTERN")
	}
	if len(flag.Args()) > 1 {
		return Opts{}, errors.New("notorious does not (yet) support more than one positional argument. If you set flags, they need to go before the positional arguments, not after")
	}
	// Resolve the -A/-B/-C flags into a single before/after pair.
	var lineCtx struct{ Before, After int }
	switch {
	case *context < 0:
		return Opts{}, fmt.Errorf("flag -C must be nonnegative, but got %d", *context)
	case *before < 0:
		// BUGFIX: the flag pointers must be dereferenced here; passing the
		// pointer itself to %d printed an address, not the flag value.
		return Opts{}, fmt.Errorf("flag -B must be nonnegative, but got %d", *before)
	case *after < 0:
		return Opts{}, fmt.Errorf("flag -A must be nonnegative, but got %d", *after)
	case *context != 0 && *before != 0:
		return Opts{}, errors.New("flags -B and -C are mutually exclusive")
	case *context != 0 && *after != 0:
		return Opts{}, errors.New("flags -A and -C are mutually exclusive")
	case *context != 0:
		lineCtx = struct{ Before, After int }{*context, *context}
	default:
		lineCtx = struct{ Before, After int }{*before, *after}
	}
	logger.Printf("%#+v", lineCtx)
	// Select the matcher implementation from the -e/-i/-posix combination.
	var matcher func(s string) bool
	switch {
	case *ignoreCase && *literal:
		logger.Print("mode: case-insensitive literal")
		matcher = func(text string) bool { return strings.EqualFold(text, pattern) }
	case *literal:
		logger.Print("mode: literal")
		matcher = func(text string) bool { return text == pattern }
	case *ignoreCase && *posix:
		logger.Print("mode: case-insensitive posix")
		// NOTE(review): POSIX (egrep) syntax does not support the Perl (?i)
		// flag, so CompilePOSIX is expected to reject this — confirm whether
		// -i combined with -posix can ever succeed.
		re, err := regexp.CompilePOSIX("(?i)" + pattern)
		if err != nil {
			return Opts{}, fmt.Errorf("could not compile case-insensitive POSIX regexp from pattern %q: %v", pattern, err)
		}
		matcher = re.MatchString
	case *posix:
		logger.Print("mode: posix")
		re, err := regexp.CompilePOSIX(pattern)
		if err != nil {
			return Opts{}, fmt.Errorf("could not compile POSIX regexp from pattern %q: %v", pattern, err)
		}
		matcher = re.MatchString
	case *ignoreCase:
		logger.Print("mode: case-insensitive regexp")
		// BUGFIX: this is the non-POSIX path; it previously called
		// regexp.CompilePOSIX (copy-paste from the branch above) and reported
		// a misleading "POSIX" error message.
		re, err := regexp.Compile("(?i)" + pattern)
		if err != nil {
			return Opts{}, fmt.Errorf("could not compile case-insensitive regexp from pattern %q: %v", pattern, err)
		}
		matcher = re.MatchString
	default:
		logger.Print("mode: regexp")
		re, err := regexp.Compile(pattern)
		if err != nil {
			return Opts{}, fmt.Errorf("could not compile regexp from %q: %v", pattern, err)
		}
		matcher = re.MatchString
	}
	o = Opts{
		Matches:     matcher,
		Context:     lineCtx,
		LineNumbers: *lineNumbers,
		Verbose:     *verbose,
	}
	return o, nil
}
|
package visagoapi
// BoundingPoly is used to store the
// vertexes marking the position of the face.
type BoundingPoly struct {
	// Vertices of the polygon; omitted from JSON when empty.
	Vertices []*Vertex `json:"vertices,omitempty"`
}
// Vertex is the x and y coordinates of a vertex.
type Vertex struct {
	X int64 `json:"x"`
	Y int64 `json:"y"`
}
|
/*
Auto-Light let you control a led light by hands or any other objects.
It works with HCSR04, an ultrasonic distance meter, together.
The led light will light up when HCSR04 sensor get distance less then 40cm.
And the led will turn off after 45 seconds.
*/
package main
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"strings"
"time"
"github.com/shanghuiyang/rpi-devices/dev"
"github.com/shanghuiyang/rpi-devices/iot"
"github.com/shanghuiyang/rpi-devices/util"
"github.com/stianeikeland/go-rpio"
)
// GPIO pin assignments for the attached devices.
const (
	pinLight = 16 // controlled light
	pinLed   = 4  // indicator LED
	pinTrig  = 21 // HCSR04 trigger
	pinEcho  = 26 // HCSR04 echo
)

// Placeholders substituted into light.html when rendering the home page.
const (
	statePattern    = "((state))"
	ipPattern       = "((000.000.000.000))"
	datetimePattern = "((yyyy-mm-dd hh:mm:ss))"
	datetimeFormat  = "2006-01-02 15:04:05"
)
var (
	alight      *autoLight // the controller, shared with the HTTP handlers
	pageContext []byte     // cached bytes of light.html
)

// bool2int maps the light state onto the 0/1 value pushed to the IoT cloud.
var bool2int = map[bool]int{
	false: 0,
	true:  1,
}
// main wires up the hardware (indicator LED, light, HCSR04 ultrasonic
// ranger) and the IoT cloud client, starts the auto-light controller, and
// serves the control web UI on :8080.
func main() {
	if err := rpio.Open(); err != nil {
		// log.Fatalf exits the process, so the former unreachable `return`
		// after it has been removed.
		log.Fatalf("[autolight]failed to open rpio, error: %v", err)
	}
	defer rpio.Close()

	led := dev.NewLed(pinLed)
	if led == nil {
		// Check the indicator LED the same way the light is checked below;
		// previously a nil led was passed on silently.
		log.Printf("[autolight]failed to new a led")
		return
	}
	light := dev.NewLed(pinLight)
	if light == nil {
		log.Printf("[autolight]failed to new a led light")
		return
	}
	dist := dev.NewHCSR04(pinTrig, pinEcho)
	if dist == nil {
		log.Printf("[autolight]failed to new a HCSR04")
		return
	}
	wsnCfg := &iot.WsnConfig{
		Token: iot.WsnToken,
		API:   iot.WsnNumericalAPI,
	}
	cloud := iot.NewCloud(wsnCfg)
	alight = newAutoLight(dist, light, led, cloud)
	// On SIGINT/SIGTERM turn the light off and release the GPIO pins.
	util.WaitQuit(func() {
		alight.off()
		rpio.Close()
	})
	alight.start()
	http.HandleFunc("/", lightServer)
	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		log.Fatal("[autolight]ListenAndServe: ", err.Error())
	}
}
// lightServer routes requests for the control page: GET renders the home
// page, POST applies an on/off operation. Other methods are ignored.
func lightServer(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "GET":
		homePageHandler(w, r)
	case "POST":
		operationHandler(w, r)
	}
}
// homePageHandler renders light.html, substituting the host IP, the current
// datetime and the light state for their placeholders line by line.
// The template bytes are read once and cached in pageContext.
func homePageHandler(w http.ResponseWriter, r *http.Request) {
	if len(pageContext) == 0 {
		var err error
		// NOTE(review): io/ioutil is deprecated; os.ReadFile is the modern
		// equivalent, but switching would require an import change.
		pageContext, err = ioutil.ReadFile("light.html")
		if err != nil {
			log.Printf("[autolight]failed to read light.html")
			fmt.Fprintf(w, "internal error: failed to read home page")
			return
		}
	}
	ip := util.GetIP()
	if ip == "" {
		log.Printf("[autolight]failed to get ip")
		fmt.Fprintf(w, "internal error: failed to get ip")
		return
	}
	wbuf := bytes.NewBuffer([]byte{})
	rbuf := bytes.NewBuffer(pageContext)
	for {
		line, err := rbuf.ReadBytes('\n')
		if err == io.EOF {
			break
		}
		s := string(line)
		// strings.Contains replaces the non-idiomatic strings.Index(...) >= 0.
		switch {
		case strings.Contains(s, ipPattern):
			s = strings.Replace(s, ipPattern, ip, 1)
		case strings.Contains(s, datetimePattern):
			datetime := time.Now().Format(datetimeFormat)
			s = strings.Replace(s, datetimePattern, datetime, 1)
		case strings.Contains(s, statePattern):
			state := "unchecked"
			if alight.state {
				state = "checked"
			}
			s = strings.Replace(s, statePattern, state, 1)
		}
		wbuf.Write([]byte(s))
	}
	w.Write(wbuf.Bytes())
}
// operationHandler handles POSTed form operations: "op" may be "on" or
// "off"; anything else is logged and ignored.
func operationHandler(w http.ResponseWriter, r *http.Request) {
	op := r.FormValue("op")
	switch op {
	case "on":
		log.Printf("[autolight]web op: on")
		alight.on()
	case "off":
		log.Printf("[autolight]web op: off")
		alight.off()
	default:
		log.Printf("[autolight]web op: invalid operator")
	}
}
// autoLight couples the ultrasonic ranger with a light and an indicator LED:
// detection events switch the light on, and an inactivity timeout turns it
// back off.
type autoLight struct {
	dist     *dev.HCSR04
	light    *dev.Led
	led      *dev.Led
	cloud    iot.Cloud
	trigTime time.Time // time of the last detection; drives the off timeout
	state    bool      // true: turn on, false: turn off
	chLight  chan bool // detection events consumed by ctrLight
	chLed    chan bool // detection events consumed by ctrLed
}
// newAutoLight assembles an autoLight from its devices. The light starts in
// the off state; the detection channels are buffered (4) to decouple the
// poller from the consumers.
func newAutoLight(dist *dev.HCSR04, light *dev.Led, led *dev.Led, cloud iot.Cloud) *autoLight {
	return &autoLight{
		dist:     dist,
		light:    light,
		led:      led,
		state:    false,
		trigTime: time.Now(),
		cloud:    cloud,
		chLight:  make(chan bool, 4),
		chLed:    make(chan bool, 4),
	}
}
// start launches the detection poller and both consumers in their own
// goroutines and returns immediately.
func (a *autoLight) start() {
	log.Printf("[autolight]start to service")
	go a.detect()
	go a.ctrLight()
	go a.ctrLed()
}
// detect polls the ultrasonic ranger in an endless loop and publishes a
// boolean "object detected" event to both the light and LED channels.
// An object counts as detected when it is closer than 20cm — NOTE(review):
// the file header comment says 40cm; confirm which threshold is intended.
func (a *autoLight) detect() {
	// need to warm-up the ultrasonic distance meter first
	a.dist.Dist()
	time.Sleep(500 * time.Millisecond)
	for {
		d := a.dist.Dist()
		detected := (d < 20)
		a.chLight <- detected
		a.chLed <- detected

		// poll fast by default, but back off after a detection
		t := 300 * time.Millisecond
		if detected {
			log.Printf("[autolight]detected objects, distance = %.2fcm", d)
			t = 2 * time.Second
		}
		time.Sleep(t)
	}
}
// ctrLight owns the light: a nested goroutine pushes the current on/off
// state to the IoT cloud every 10s, while the main loop consumes detection
// events from chLight, switching the light on when an object is detected and
// off after 45s without detections.
//
// NOTE(review): a.state is written here and read by the push goroutine and
// the HTTP handlers without synchronization — confirm whether that race is
// acceptable for this application before hardening.
func (a *autoLight) ctrLight() {
	go func() {
		for {
			time.Sleep(10 * time.Second)
			v := &iot.Value{
				Device: "5dd29e1be4b074c40dfe87c4",
				Value:  bool2int[a.state],
			}
			if err := a.cloud.Push(v); err != nil {
				log.Printf("[autolight]push: failed to push the state of light to cloud, error: %v", err)
			}
		}
	}()
	for detected := range a.chLight {
		if detected {
			if !a.state {
				a.on()
			}
			a.trigTime = time.Now()
			continue
		}
		// time.Since is the idiomatic spelling of time.Now().Sub(...).
		timeout := time.Since(a.trigTime).Seconds() > 45
		if timeout && a.state {
			log.Printf("[autolight]timeout, light off")
			a.off()
		}
	}
}
// ctrLed blinks the indicator LED once per positive detection event.
func (a *autoLight) ctrLed() {
	for detected := range a.chLed {
		if detected {
			a.led.Blink(1, 200)
		}
	}
}
// on switches the light on and resets the inactivity timer.
func (a *autoLight) on() {
	a.state = true
	a.trigTime = time.Now()
	a.light.On()
}
// off switches the light off.
func (a *autoLight) off() {
	a.state = false
	a.light.Off()
}
|
package gogen
import (
"go/parser"
"go/token"
"go/ast"
"path/filepath"
"fmt"
)
// ParseDir will create a Build from the directory that
// was passed into the function. Every file of every package in the
// directory is parsed (with comments) and added to the Build.
func ParseDir(path string) (*Build, error) {
	// token.NewFileSet must be used to obtain a valid FileSet; the zero
	// value is not documented to be usable.
	fileSet := token.NewFileSet()
	// BUGFIX: parser.ParseComments is required here — without it
	// tree.Comments stays empty and every comment map built below would be
	// empty too (ParseFile already passed the flag; ParseDir did not).
	packages, err := parser.ParseDir(fileSet, path, nil, parser.AllErrors|parser.ParseComments)
	if err != nil {
		return nil, err
	}
	// create new build for the file set
	build := NewBuild()
	// iterate over all packages in the directory
	for _, pkg := range packages {
		// iterate over all files within the package
		for name, astTree := range pkg.Files {
			baseName := filepath.Base(name)
			// create a comment map from file
			commentMap := ast.NewCommentMap(fileSet, astTree, astTree.Comments)
			fileAST, err := ParseFileAST(baseName, astTree, commentMap)
			if err != nil {
				return nil, err
			}
			build.AddFile(baseName, fileAST)
		}
	}
	return build, nil
}
// ParseFile will create a Build from the file path that
// was passed. FileSet of the Build will only contain a
// single file.
func ParseFile(path string) (*Build, error) {
	// token.NewFileSet must be used to obtain a valid FileSet; the zero
	// value is not documented to be usable.
	fileSet := token.NewFileSet()
	astTree, err := parser.ParseFile(fileSet, path, nil, parser.AllErrors|parser.ParseComments)
	if err != nil {
		return nil, err
	}
	fileName := filepath.Base(path)
	// create a comment map from file
	commentMap := ast.NewCommentMap(fileSet, astTree, astTree.Comments)
	// create new build for the file
	build := NewBuild()
	fileAST, err := ParseFileAST(fileName, astTree, commentMap)
	if err != nil {
		return nil, err
	}
	// add parsed file to the build file set
	build.AddFile(fileName, fileAST)
	return build, nil
}
// ParseFileAST creates a File parse with all necessary
// structures.
//
// It walks the file's top-level declarations: imports, generic declarations
// (type/const specs) and function declarations. Methods are attached to the
// struct they are declared on; functions whose receiver cannot be resolved
// are registered as plain functions.
func ParseFileAST(name string, tree *ast.File, commentMap ast.CommentMap) (*File, error) {
	f := NewFile(name, tree)
	for _, i := range tree.Imports {
		f.AddImport(ParseImport(i, commentMap.Filter(i)))
	}
	for _, declaration := range tree.Decls {
		switch decValue := declaration.(type) {
		// catch only generic declarations
		case *ast.GenDecl:
			for _, spec := range decValue.Specs {
				switch specValue := spec.(type) {
				case *ast.TypeSpec:
					// all cases should pass in also specValue as
					// it is the underlying spec
					switch typeValue := specValue.Type.(type) {
					case *ast.StructType:
						f.AddStruct(ParseStruct(specValue, typeValue, commentMap.Filter(declaration)))
					case *ast.InterfaceType:
						f.AddInterface(ParseInterface(specValue, typeValue, commentMap.Filter(declaration)))
					case *ast.FuncType:
						// named function types are not handled yet
						fmt.Println("Generic value not recognized: ", specValue)
					case *ast.ArrayType:
						f.AddArray(ParseArray(specValue, typeValue, commentMap.Filter(declaration)))
					case *ast.MapType:
						// named map types are not handled yet
						fmt.Println("Generic value not recognized: ", specValue)
					case *ast.ChanType:
						// named channel types are not handled yet
						fmt.Println("Generic value not recognized: ", specValue)
					default:
						f.AddBaseType(ParseBaseType(specValue, typeValue, commentMap.Filter(declaration)))
					}
				case *ast.ImportSpec:
					// just ignore for now; imports were handled above
				case *ast.ValueSpec:
					f.AddConstant(ParseConstant(specValue, commentMap.Filter(declaration)))
				default:
					fmt.Println("Generic value not recognized: ", specValue)
				}
			}
		// catch function declarations
		case *ast.FuncDecl:
			fun := ParseFunction(decValue, commentMap.Filter(declaration))
			if !fun.IsMethod() {
				// add the function to the top level map
				f.AddFunction(fun)
			} else {
				// add the function to the structure it belongs to
				if len(fun.parent.Recv.List) <= 0 {
					// TODO: no receiver defined report?
					break
				}
				// struct that should be assigned the method
				var structType *ast.StructType
				switch receiver := fun.parent.Recv.List[0].Type.(type) {
				// pointer receiver
				case *ast.StarExpr:
					// if the receiver is defined append it to it,
					// otherwise register it as normal function
					if receiver.X.(*ast.Ident).Obj != nil {
						structType = receiver.X.(*ast.Ident).Obj.Decl.(*ast.TypeSpec).Type.(*ast.StructType)
					} else {
						f.AddFunction(fun)
					}
				// copy receiver
				case *ast.Ident:
					switch receiver.Obj.Decl.(*ast.TypeSpec).Type.(type) {
					case *ast.StructType:
						structType = receiver.Obj.Decl.(*ast.TypeSpec).Type.(*ast.StructType)
					}
				}
				// search for the structures that receive the method
				// and bind it
				for _, st := range f.structures {
					if st.parent == structType {
						st.AddMethod(fun)
					}
				}
			}
		}
	}
	return f, nil
}
|
// Used to show the landing page of the application
package requests
import (
"glsamaker/pkg/app/handler/authentication"
"glsamaker/pkg/app/handler/authentication/utils"
"glsamaker/pkg/database/connection"
"glsamaker/pkg/logger"
"glsamaker/pkg/models"
"net/http"
)
// Show renders a template to show the landing page of the application.
// It requires the authenticated user to hold the Glsa.View permission and
// lists the GLSAs of type "request" the user can access, together with
// their bugs, creator and comments.
func Show(w http.ResponseWriter, r *http.Request) {
	user := utils.GetAuthenticatedUser(r)
	if !user.Permissions.Glsa.View {
		authentication.AccessDenied(w, r)
		return
	}
	var requests []*models.Glsa
	// restrict the query to rows the user is allowed to access
	err := user.CanAccess(connection.DB.Model(&requests).
		Where("type = ?", "request").
		Relation("Bugs").
		Relation("Creator").
		Relation("Comments").
		Relation("Comments.User")).
		Select()
	if err != nil {
		logger.Info.Println("Error during request selection")
		logger.Info.Println(err)
		http.NotFound(w, r)
		return
	}
	// status is user-dependent, so compute it per request after loading
	for _, request := range requests {
		request.ComputeStatus(user)
	}
	renderRequestsTemplate(w, user, requests)
}
|
package trea
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01200102 is the XML document wrapper for the trea.012.001.02
// ForeignExchangeOptionNotification message.
type Document01200102 struct {
	XMLName xml.Name                              `xml:"urn:iso:std:iso:20022:tech:xsd:trea.012.001.02 Document"`
	Message *ForeignExchangeOptionNotificationV02 `xml:"FXOptnNtfctnV02"`
}
// AddMessage allocates the wrapped message and returns it for population.
func (d *Document01200102) AddMessage() *ForeignExchangeOptionNotificationV02 {
	d.Message = new(ForeignExchangeOptionNotificationV02)
	return d.Message
}
// Scope
// The ForeignExchangeOptionNotification message is sent by a central system to a participant to provide details of a foreign exchange option trade.
// Usage
// The notification is sent by the central settlement system to the two trading parties after it has received Create, Amend or Cancel messages from both. The message may also contain information on the settlement of the trade and/or premium.
type ForeignExchangeOptionNotificationV02 struct {
	// Specifies the trading side of the currency option trade which is reported.
	TradingSideIdentification *iso20022.TradePartyIdentification4 `xml:"TradgSdId"`
	// Specifies the counterparty of the non deliverable trade which is reported.
	CounterpartySideIdentification *iso20022.TradePartyIdentification4 `xml:"CtrPtySdId"`
	// Provides information on the conditions of the option.
	OptionData *iso20022.OptionData2 `xml:"OptnData"`
	// Provides information on the status of a trade in a settlement system.
	TradeStatus *iso20022.TradeStatus1 `xml:"TradSts"`
	// Provides information on the settlement of a trade. Optional.
	SettlementData *iso20022.SettlementData2 `xml:"SttlmData,omitempty"`
}
// AddTradingSideIdentification allocates and returns the trading-side identification.
func (f *ForeignExchangeOptionNotificationV02) AddTradingSideIdentification() *iso20022.TradePartyIdentification4 {
	f.TradingSideIdentification = new(iso20022.TradePartyIdentification4)
	return f.TradingSideIdentification
}
// AddCounterpartySideIdentification allocates and returns the counterparty-side identification.
func (f *ForeignExchangeOptionNotificationV02) AddCounterpartySideIdentification() *iso20022.TradePartyIdentification4 {
	f.CounterpartySideIdentification = new(iso20022.TradePartyIdentification4)
	return f.CounterpartySideIdentification
}
// AddOptionData allocates and returns the option data.
func (f *ForeignExchangeOptionNotificationV02) AddOptionData() *iso20022.OptionData2 {
	f.OptionData = new(iso20022.OptionData2)
	return f.OptionData
}
// AddTradeStatus allocates and returns the trade status.
func (f *ForeignExchangeOptionNotificationV02) AddTradeStatus() *iso20022.TradeStatus1 {
	f.TradeStatus = new(iso20022.TradeStatus1)
	return f.TradeStatus
}
// AddSettlementData allocates and returns the settlement data.
func (f *ForeignExchangeOptionNotificationV02) AddSettlementData() *iso20022.SettlementData2 {
	f.SettlementData = new(iso20022.SettlementData2)
	return f.SettlementData
}
|
/*
@File : test3.go
@Time : 2019/01/25 14:02:16
@Author : Bruce
@Version : 1.0
@Contact : bruce.he@patpat.com
@License : (C)Copyright 2019, patpat.com
@Desc : None
*/
package main
import (
"fmt"
)
// main prints a greeting. fmt.Println already appends a newline, so the
// literal must not embed one (go vet flags the redundant "\n", which printed
// an extra blank line).
func main() {
	fmt.Println("hello2")
}
|
package oauth2
import (
"errors"
"github.com/dgrijalva/jwt-go"
"time"
)
// generateToken creates a signed JWT carrying client_id as a claim, expiring
// after expire_time, signed with client_secret (HS256 via generateTokenWithParam).
func generateToken(client_id string, client_secret string, expire_time time.Duration) (string, error) {
	param := map[string]string{
		"client_id": client_id,
	}
	return generateTokenWithParam(client_secret, expire_time, param)
}
// generateTokenWithParam creates a JWT with iat/exp claims plus every entry
// of param as an additional claim, signed with HS256 using client_secret.
func generateTokenWithParam(client_secret string, expire_time time.Duration, param map[string]string) (string, error) {
	now := time.Now()
	claims := jwt.MapClaims{
		"iat": now.Unix(),                  // issued-at
		"exp": now.Add(expire_time).Unix(), // expiry
	}
	for k, v := range param {
		claims[k] = v
	}
	token := jwt.NewWithClaims(
		jwt.SigningMethodHS256,
		claims)
	return token.SignedString([]byte(client_secret))
}
// parseToken validates a signed JWT and extracts its "client_id" claim.
// It returns an error when the token fails to parse, is signed with an
// unexpected algorithm, is invalid (e.g. expired), or lacks a string
// "client_id" claim.
func parseToken(client_secret string, token string) (string, error) {
	jwtToken, err := jwt.Parse(token, func(token *jwt.Token) (i interface{}, e error) {
		// SECURITY: reject tokens signed with an unexpected algorithm;
		// trusting the token's own "alg" header enables algorithm-confusion
		// attacks (e.g. "none" or RS256-as-HS256).
		if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, errors.New("unexpected signing method")
		}
		return []byte(client_secret), nil
	})
	if err != nil {
		return "", err
	}
	if !jwtToken.Valid {
		// BUGFIX: err is nil here, so the old `return "", err` reported an
		// invalid token as success with an empty client id.
		return "", errors.New("invalid token")
	}
	claims, ok := jwtToken.Claims.(jwt.MapClaims)
	if !ok {
		return "", errors.New("parse jwt error")
	}
	// BUGFIX: the unchecked type assertion panicked when the claim was
	// missing or not a string.
	clientID, ok := claims["client_id"].(string)
	if !ok {
		return "", errors.New("client_id claim missing or not a string")
	}
	return clientID, nil
}
|
package pipeline
import (
"io"
"log"
"sort"
)
// ArraySource returns a channel that yields each of the supplied ints in
// order and is then closed. The producer runs in its own goroutine.
func ArraySource(data ...int) <-chan int {
	ch := make(chan int)
	go func() {
		defer close(ch)
		for _, v := range data {
			ch <- v
			log.Println("write data to chan", v)
			//time.Sleep(time.Second)
		}
		log.Println("end write data to chan")
	}()
	return ch
}
// SortInMem drains ch completely into memory, sorts the values ascending,
// and returns a channel that yields the sorted values before closing.
func SortInMem(ch <-chan int) <-chan int {
	out := make(chan int)
	go func() {
		var buf []int
		for v := range ch {
			buf = append(buf, v)
			log.Println("read data from chan", v)
		}
		log.Println("before sort data:", buf)
		sort.Ints(buf)
		for _, v := range buf {
			out <- v
		}
		close(out)
	}()
	return out
}
// SourceFrom is intended to produce a channel of ints read from reader.
// NOTE(review): not implemented — it always returns nil, and receiving from
// a nil channel blocks forever. Confirm the intended wire format before
// implementing.
func SourceFrom(reader io.Reader) <-chan int {
	return nil
}
//func RandomSink()
|
/*
* @lc app=leetcode.cn id=42 lang=golang
*
* [42] 接雨水
*/
package main
import "fmt"
/*
func min(a, b int) int {
if a < b {
return a
}
return b
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
*/
/* 按行求
func trap(height []int) int {
var (
max, heightLen, tmp, sum int
flag bool
)
heightLen = len(height)
for i := 0; i < heightLen; i++ {
if height[i] > max {
max = height[i]
}
}
for i := 0; i <= max; i++ {
tmp = 0
// 已经找到第一个大于当前行的数
flag = false
for j := 0; j < heightLen; j++ {
// 比当前行数小,可以积水
if flag && height[j] < i {
tmp++
}
if height[j] >= i {
sum += tmp
tmp, flag = 0, true
}
}
}
return sum
} */
/*
按列计算
只要左边最大和右边最大中最小的大于当前列,当前列就可积水
func trap(height []int) int {
var sum, maxLeft, maxRight, minHeight int
heightLen := len(height)
for i := 1; i < heightLen-1; i++ {
maxLeft, maxRight = 0, 0
for j := i - 1; j >= 0; j-- {
if height[j] > maxLeft {
maxLeft = height[j]
}
}
for j := i + 1; j < heightLen; j++ {
if height[j] > maxRight {
maxRight = height[j]
}
}
minHeight = min(maxLeft, maxRight)
if height[i] < minHeight {
sum += minHeight - height[i]
}
}
return sum
} */
/* // 一次遍历,保存第i列的左边最高和右边最高,优化按列计算复杂度
func trap(height []int) (ans int) {
n := len(height)
if n == 0 {
return
}
leftMax := make([]int, n)
leftMax[0] = height[0]
for i := 1; i < n; i++ {
leftMax[i] = max(leftMax[i-1], height[i])
}
rightMax := make([]int, n)
rightMax[n-1] = height[n-1]
for i := n - 2; i >= 0; i-- {
rightMax[i] = max(rightMax[i+1], height[i])
}
for i, h := range height {
ans += min(leftMax[i], rightMax[i]) - h
}
return
} */
// @lc code=start
// Two-pointer solution. For whichever side currently has the smaller wall,
// the water held at that index is bounded by the running maximum seen on
// that side, because the opposite side is guaranteed to have a wall at least
// that high. So we always process the smaller side and move it inward; any
// taller wall appearing later on the other side cannot change the result.
func trap(height []int) int {
	lo, hi := 0, len(height)-1
	loMax, hiMax := 0, 0
	total := 0
	for lo <= hi {
		if height[lo] <= height[hi] {
			h := height[lo]
			if h > loMax {
				loMax = h // new left wall; nothing held here
			} else {
				total += loMax - h // bounded by loMax on this side
			}
			lo++
			continue
		}
		h := height[hi]
		if h >= hiMax {
			hiMax = h
		} else {
			total += hiMax - h
		}
		hi--
	}
	return total
}
// @lc code=end
// main exercises trap on a small valley-shaped sample profile.
func main() {
	fmt.Println(trap([]int{2, 1, 0, 1, 2}))
}
|
// Copyright © 2020 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package first
import (
"context"
"time"
eth2client "github.com/attestantio/go-eth2-client"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/attestantio/vouch/util"
"github.com/pkg/errors"
)
// AttestationData provides the first attestation data from a number of beacon nodes.
// It fans the request out to all configured providers and returns the first
// non-nil response, or an error if none responds before the strategy timeout.
func (s *Service) AttestationData(ctx context.Context, slot phase0.Slot, committeeIndex phase0.CommitteeIndex) (*phase0.AttestationData, error) {
	started := time.Now()
	log := util.LogWithID(ctx, log, "strategy_id")

	// We create a cancelable context with a timeout. When a provider responds we cancel the context to cancel the other requests.
	ctx, cancel := context.WithTimeout(ctx, s.timeout)
	respCh := make(chan *phase0.AttestationData, 1)
	for name, provider := range s.attestationDataProviders {
		go func(ctx context.Context, name string, provider eth2client.AttestationDataProvider, ch chan *phase0.AttestationData) {
			log := log.With().Str("provider", name).Uint64("slot", uint64(slot)).Logger()
			attestationData, err := provider.AttestationData(ctx, slot, committeeIndex)
			s.clientMonitor.ClientOperation(name, "attestation data", err == nil, time.Since(started))
			if err != nil {
				log.Warn().Dur("elapsed", time.Since(started)).Err(err).Msg("Failed to obtain attestation data")
				return
			}
			if attestationData == nil {
				// err is nil on this path, so there is nothing to attach.
				log.Warn().Dur("elapsed", time.Since(started)).Msg("Returned empty attestation data")
				return
			}
			log.Trace().Dur("elapsed", time.Since(started)).Msg("Obtained attestation data")
			// BUGFIX: the channel has capacity 1 and is read at most once, so
			// providers that lose the race used to block on the send forever,
			// leaking a goroutine per extra response. Bail out on ctx.Done()
			// (closed by cancel() once a winner is chosen or on timeout).
			select {
			case ch <- attestationData:
			case <-ctx.Done():
			}
		}(ctx, name, provider, respCh)
	}

	select {
	case <-ctx.Done():
		cancel()
		log.Warn().Msg("Failed to obtain attestation data before timeout")
		return nil, errors.New("failed to obtain attestation data before timeout")
	case attestationData := <-respCh:
		cancel()
		return attestationData, nil
	}
}
|
/*
* Copyright (c) 2020. Ant Group. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*/
package snapshotter
import (
"context"
"github.com/pkg/errors"
"github.com/dragonflyoss/image-service/contrib/nydus-snapshotter/config"
"github.com/dragonflyoss/image-service/contrib/nydus-snapshotter/pkg/utils/signals"
"github.com/dragonflyoss/image-service/contrib/nydus-snapshotter/snapshot"
)
// Start creates the snapshotter from cfg and serves it on the configured
// listening socket until a termination signal is delivered.
func Start(ctx context.Context, cfg config.Config) error {
	rs, err := snapshot.NewSnapshotter(ctx, &cfg)
	if err != nil {
		return errors.Wrap(err, "failed to initialize snapshotter")
	}
	// stopSignal fires when the process receives a shutdown signal;
	// NOTE(review): the exact signal set is defined in pkg/utils/signals.
	stopSignal := signals.SetupSignalHandler()
	opt := ServeOptions{
		ListeningSocketPath: cfg.Address,
	}
	return Serve(ctx, rs, opt, stopSignal)
}
|
package dbserver
import (
"database/sql"
"github.com/labstack/echo"
)
// Db wraps the SQL database handle used by the HTTP handlers.
type Db struct {
	db *sql.DB
}
// CreateTable is the HTTP handler intended to create a database table.
// BUGFIX: the original empty body had no return statement, which does not
// compile for a function with an error result. It now returns nil so the
// package builds; the actual DDL execution against db.db is still TODO.
func (db Db) CreateTable(e echo.Context) error {
	// TODO: implement table creation using db.db.Exec(...) and report the
	// outcome through the echo context.
	return nil
}
|
package bag
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestNewBag verifies that NewBag splits its input into two equal-length
// compartments.
func TestNewBag(t *testing.T) {
	t.Run("Test create new bag", func(t *testing.T) {
		bag := NewBag("vJrwpWtwJgWrhcsFMMfFFhFp")
		assert.Equal(t, "vJrwpWtwJgWr", bag.FirstComp)
		assert.Equal(t, "hcsFMMfFFhFp", bag.SecondComp)
	})
}
// TestGetRepeatedRuneInSingleBag checks the priority of the rune that
// appears in both compartments of a single bag, for each sample input.
func TestGetRepeatedRuneInSingleBag(t *testing.T) {
	type TestCase struct {
		Input    string
		Expected int // expected priority of the repeated rune
	}
	testCases := []TestCase{
		{
			Input:    "vJrwpWtwJgWrhcsFMMfFFhFp",
			Expected: 16,
		},
		{
			Input:    "jqHRNqRjqzjGDLGLrsFMfFZSrLrFZsSL",
			Expected: 38,
		},
		{
			Input:    "PmmdzqPrVvPwwTWBwg",
			Expected: 42,
		},
		{
			Input:    "wMqvLMZHhHMvwLHjbvcjnnSBnvTQFn",
			Expected: 22,
		},
		{
			Input:    "ttgJtRGJQctTZtZT",
			Expected: 20,
		},
		{
			Input:    "CrZsJsPPZsGzwwsLwLmpwMDw",
			Expected: 19,
		},
	}
	for _, tc := range testCases {
		t.Run("Test get repeated rune", func(t *testing.T) {
			bag := NewBag(tc.Input)
			assert.Equal(t, tc.Expected, bag.GetSingleBagRepeatedRunePriority())
		})
	}
}
// TestGetRepeatedRuneInBagGroup checks the priority of the rune shared by
// all three bags of a group, for each sample triple.
func TestGetRepeatedRuneInBagGroup(t *testing.T) {
	type TestCase struct {
		First    string
		Second   string
		Third    string
		Expected int // expected priority of the rune common to all three
	}
	testCases := []TestCase{
		{
			First:    "vJrwpWtwJgWrhcsFMMfFFhFp",
			Second:   "jqHRNqRjqzjGDLGLrsFMfFZSrLrFZsSL",
			Third:    "PmmdzqPrVvPwwTWBwg",
			Expected: 18,
		},
		{
			First:    "wMqvLMZHhHMvwLHjbvcjnnSBnvTQFn",
			Second:   "ttgJtRGJQctTZtZT",
			Third:    "CrZsJsPPZsGzwwsLwLmpwMDw",
			Expected: 52,
		},
	}
	for _, tc := range testCases {
		t.Run("Test get repeated rune", func(t *testing.T) {
			group := NewGroup(NewBag(tc.First), NewBag(tc.Second), NewBag(tc.Third))
			assert.Equal(t, tc.Expected, group.GetGroupRepeatedRunePriority())
		})
	}
}
|
package filters
import (
"fmt"
"math/rand"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stripe/unilog/clevels"
"github.com/stripe/unilog/json"
)
// TestCalculateSamplingRate verifies that the sampling rate drops by a
// factor of 10 for each level the log criticality sits below the system
// austerity level.
func TestCalculateSamplingRate(t *testing.T) {
	type CalculateSamplingLevel struct {
		Name        string
		Austerity   clevels.AusterityLevel
		Criticality clevels.AusterityLevel
		Expected    float64
	}
	cases := []CalculateSamplingLevel{
		{
			Name:        "log level higher than austerity",
			Austerity:   clevels.Sheddable,
			Criticality: clevels.SheddablePlus,
			Expected:    1.0,
		},
		{
			Name:        "log level one lower than austerity",
			Austerity:   clevels.Critical,
			Criticality: clevels.SheddablePlus,
			Expected:    0.1,
		},
		{
			Name:        "log level two lower than austerity",
			Austerity:   clevels.Critical,
			Criticality: clevels.Sheddable,
			Expected:    0.01,
		},
		{
			Name:        "log level three lower than austerity",
			Austerity:   clevels.CriticalPlus,
			Criticality: clevels.Sheddable,
			Expected:    0.001,
		},
	}
	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			samplingRate := samplingRate(tc.Austerity, tc.Criticality)
			assert.Equal(t, tc.Expected, samplingRate)
		})
	}
}
// TestAusterityFilter runs a SheddablePlus line through the filter 10000
// times under Critical austerity and checks the deterministic shed count
// (rand is seeded, so the exact number is reproducible).
func TestAusterityFilter(t *testing.T) {
	// Make sure SendSystemAusterityLevel is called before we override
	// the underlying channel below
	a := AusterityFilter{}
	AusteritySetup(true)
	clevels.SystemAusterityLevel = make(chan clevels.AusterityLevel)
	line := fmt.Sprintf("some random log line! clevel=%s", clevels.SheddablePlus)
	// feeder goroutine keeps answering austerity-level reads until killed
	kill := make(chan struct{})
	go func() {
		for {
			select {
			case clevels.SystemAusterityLevel <- clevels.Critical:
			case <-kill:
				return
			}
		}
	}()
	// seed rand deterministically
	rand.Seed(17)
	// count number of lines dropped
	dropped := 0
	var outputtedLine string
	// now sample out the line a bunch!
	for i := 0; i < 10000; i++ {
		outputtedLine = a.FilterLine(line)
		if strings.Contains(outputtedLine, "(shedded)") {
			dropped++
		}
	}
	// this number is deterministic because rand is seeded & deterministic
	// TODO (kiran, 2016-12-06): maybe add an epsilon
	assert.Equal(t, 8983, dropped)
	kill <- struct{}{}
}
// TestAusterityJSON is the JSON-line analogue of TestAusterityFilter: a
// shedded JSON line loses its "message" key, and the deterministic shed
// count is asserted.
func TestAusterityJSON(t *testing.T) {
	// Make sure SendSystemAusterityLevel is called before we override
	// the underlying channel below
	a := AusterityFilter{}
	AusteritySetup(true)
	clevels.SystemAusterityLevel = make(chan clevels.AusterityLevel)
	// feeder goroutine keeps answering austerity-level reads until killed
	kill := make(chan struct{})
	go func() {
		for {
			select {
			case clevels.SystemAusterityLevel <- clevels.Critical:
			case <-kill:
				return
			}
		}
	}()
	// seed rand deterministically
	rand.Seed(17)
	// count number of lines dropped
	dropped := 0
	// now sample out the line a bunch!
	for i := 0; i < 10000; i++ {
		line := json.LogLine{"message": "some random log line!", "clevel": "sheddableplus"}
		a.FilterJSON(&line)
		if _, ok := line["message"]; !ok {
			dropped++
		}
	}
	// this number is deterministic because rand is seeded & deterministic
	assert.Equal(t, 8983, dropped)
	kill <- struct{}{}
}
|
package config_test
import (
"fmt"
"testing"
"github.com/debarshibasak/kubestrike/v1alpha1/config"
"github.com/ghodss/yaml"
)
// TestParsing checks that CreateCluster manifests unmarshal for both the
// kubeadm and the k3s engine variants.
// NOTE(review): the indentation inside the YAML literals below is
// reconstructed (it was lost in transport) — verify it matches the schema
// expected by config.CreateCluster.
func TestParsing(t *testing.T) {
	kubeadm := `
apiVersion: kubestrike.debarshi.github.com/master/v1alpha1
kind: CreateClusterKind
provider: Multipass

multipass:
  masterCount: 1
  workerCount: 1

kubeadm:
  networking:
    plugin: flannel
    podCidr: 10.233.0.0/16
`
	var a config.CreateCluster
	err := yaml.Unmarshal([]byte(kubeadm), &a)
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(a.APIVersion)
	fmt.Println(a.KubeadmEngine)
	fmt.Println(a.ClusterName)
	k3sclient := `
apiVersion: kubestrike.debarshi.github.com/master/v1alpha1
kind: CreateClusterKind
provider: Multipass

multipass:
  masterCount: 1
  workerCount: 1

k3s:
  networking:
    backend: vxlan
    podCidr: 10.233.0.0/16
`
	err = yaml.Unmarshal([]byte(k3sclient), &a)
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(a.APIVersion)
	fmt.Println(a.KubeadmEngine)
	fmt.Println(a.K3sEngine)
	fmt.Println(a.ClusterName)
}
|
package main
import (
"fmt"
"math"
)
// main exercises storeWater on two sample inputs.
func main() {
	fmt.Println(storeWater([]int{1, 3}, []int{6, 8}))
	//fmt.Println(storeWater([]int{9, 0, 1}, []int{0, 2, 2}))
	fmt.Println(storeWater([]int{3, 2, 5}, []int{0, 0, 0}))
}
// storeWater returns the minimum number of operations (bucket upgrades plus
// pours) needed to fill every vat, where pouring k times delivers k units
// per unit of bucket capacity. It enumerates the pour count k and, for each
// k, sums the capacity upgrades required, keeping the cheapest total.
func storeWater(bucket []int, vat []int) int {
	// the tallest vat bounds the useful range of pour counts
	highest := 0
	for _, v := range vat {
		if v > highest {
			highest = v
		}
	}
	if highest == 0 {
		return 0 // nothing to fill
	}
	best := math.MaxInt32
	for k := 1; k <= highest && k < best; k++ {
		// ops = k pours + any bucket enlargements needed for this k
		ops := k
		for i, cap := range bucket {
			ops += max(0, (vat[i]+k-1)/k-cap)
		}
		if ops < best {
			best = ops
		}
	}
	return best
}
// max returns the larger of two ints.
func max(x, y int) int {
	if y > x {
		return y
	}
	return x
}
// min returns the smaller of two ints.
func min(x, y int) int {
	if y < x {
		return y
	}
	return x
}
// storeWater2 appears to be an alternative heuristic for the same problem.
// NOTE(review): this looks broken — p is initialized to 0, so the condition
// `t < p` can never hold for positive ratios; idx and p never change, pi is
// always 0, the loop body never runs, and the function always returns 0.
// Presumably p was meant to start at +Inf (or the first ratio) — confirm the
// intended algorithm before relying on this function.
func storeWater2(bucket []int, vat []int) int {
	idx := 0
	var p float64 = 0
	for i := 0; i < len(vat); i++ {
		if bucket[i] != 0 && vat[i] != 0 {
			if t := float64(vat[i]) / float64(bucket[i]); t < p {
				p = t
				idx = i
			}
		}
	}
	pi := int(math.Ceil(p))
	var ans int
	for i, b := range bucket {
		if vat[i] != 0 && pi != 0 {
			if i != idx {
				ans += vat[i]/pi - b
			}
		}
	}
	return ans + pi
}
|
package main
import "fmt"
// max returns the larger of two ints.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// lengthOfLongestSubstringKDistinct returns the length of the longest
// substring of s containing at most k distinct runes. Classic sliding
// window: grow the right edge, shrink the left edge whenever the window
// holds more than k distinct runes, and track the best width seen.
func lengthOfLongestSubstringKDistinct(s string, k int) int {
	if len(s) == 0 || k == 0 {
		return 0
	}
	chars := []rune(s)
	counts := make(map[rune]int)
	best, lo := 0, 0
	for hi, c := range chars {
		counts[c]++
		// restore the invariant: at most k distinct runes in the window
		for len(counts) > k {
			left := chars[lo]
			counts[left]--
			if counts[left] == 0 {
				delete(counts, left)
			}
			lo++
		}
		if width := hi - lo + 1; width > best {
			best = width
		}
	}
	return best
}
// testCase pairs an input (string plus distinct-rune budget) with the
// expected longest-substring length.
type testCase struct {
	s              string
	k              int
	expectedResult int
}
// test runs lengthOfLongestSubstringKDistinct against a fixed table and
// prints PASSED/FAILED per case.
func test() {
	tests := []testCase{
		testCase{"eceba", 2, 3},
		testCase{"aa", 1, 2},
		testCase{"abcba", 2, 3},
		testCase{"abcd", 1, 1},
	}
	for i, tc := range tests {
		result := lengthOfLongestSubstringKDistinct(tc.s, tc.k)
		if result == tc.expectedResult {
			fmt.Printf("Test case %d PASSED\n", i)
		} else {
			fmt.Printf("Test case %d FAILED. Got %d but expected %d\n", i, result, tc.expectedResult)
		}
	}
}
// main runs the ad-hoc test table.
func main() {
	test()
}
|
package staticrender
import (
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"testing"
"github.com/vugu/vugu/gen"
)
// TestRendererStaticTable is a table-driven integration test: each case
// writes a small vugu project into a fresh temp dir, runs the vugu code
// generator, compiles the generated program for the host platform, runs
// it, and checks the rendered output against must-match / must-not-match
// regexps. On success the temp dir is removed; on failure it is kept and
// the full program output is logged.
func TestRendererStaticTable(t *testing.T) {
    // Flip to true to debug locally: cases then run serially and the
    // temp dir path is logged (and left on disk on failure).
    debug := false
    // Absolute path of the vugu repository root, used in the generated
    // go.mod replace directive below.
    vuguDir, err := filepath.Abs("..")
    if err != nil {
        t.Fatal(err)
    }
    type tcase struct {
        name string
        opts gen.ParserGoPkgOpts
        recursive bool
        infiles map[string]string // file structure to start with
        outReMatch []string // regexps that must match against output
        outReNotMatch []string // regexps that must not match against output
        afterRun func(dir string, t *testing.T) // called after Run
        bfiles map[string]string // additional files to write before building
    }
    tcList := []tcase{
        {
            name: "simple",
            opts: gen.ParserGoPkgOpts{},
            recursive: false,
            infiles: map[string]string{
                "root.vugu": `<div>root here</div>`,
            },
            outReMatch: []string{`root here`},
            outReNotMatch: []string{`should not match`},
        },
        {
            name: "full-html",
            opts: gen.ParserGoPkgOpts{},
            recursive: false,
            infiles: map[string]string{
                "root.vugu": `<html><title vg-if='true'>test title</title><body><div>root here</div></body></html><script src="/a.js"></script>`,
            },
            outReMatch: []string{
                `root here`,
                `<title>test title</title>`, // if statement should have fired
                `</div><script src="/a.js"></script></body>`, // js should have be written directly inside the body tag
            },
            outReNotMatch: []string{`should not match`},
        },
        {
            name: "comp",
            opts: gen.ParserGoPkgOpts{},
            recursive: false,
            infiles: map[string]string{
                "root.vugu": `<html>
<head>
<title>testing!</title>
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css"/>
<script>
console.log("Some script here");
</script>
</head>
<body>
<div>
This is a test!
Component here:
<main:Comp1/>
</div>
</body>
</html>`,
                "comp1.vugu": `<span>
comp1 in the house
<div vg-content='vugu.HTML("<p>Some <strong>nested</strong> craziness</p>")'></div>
</span>`,
            },
            outReMatch: []string{
                `<div><p>Some <strong>nested</strong> craziness</p></div>`,
                `bootstrap.min.css`,
                `Some script here`,
            },
            outReNotMatch: []string{`should not match`},
        },
        {
            name: "vg-template",
            opts: gen.ParserGoPkgOpts{},
            recursive: false,
            infiles: map[string]string{
                "root.vugu": `<div><span>example1</span><vg-template vg-if='true'>text here</vg-template></div>`,
            },
            outReMatch: []string{
                `<span>example1</span>text here`,
            },
            outReNotMatch: []string{`vg-template`},
        },
    }
    for _, tc := range tcList {
        tc := tc // capture range variable for the parallel subtest closure
        t.Run(tc.name, func(t *testing.T) {
            tmpDir, err := ioutil.TempDir("", "TestRendererStaticTable")
            if err != nil {
                t.Fatal(err)
            }
            if debug {
                t.Logf("Test %q using tmpDir: %s", tc.name, tmpDir)
            } else {
                t.Parallel()
            }
            // write a sensible go.mod and main.go, individual tests can override if they really want
            startf := make(map[string]string, 2)
            startf["go.mod"] = "module testcase\nreplace github.com/vugu/vugu => " + vuguDir + "\n"
            startf["main.go"] = `// +build !wasm
package main
import (
"os"
"github.com/vugu/vugu"
"github.com/vugu/vugu/staticrender"
)
func main() {
rootBuilder := &Root{}
buildEnv, err := vugu.NewBuildEnv()
if err != nil { panic(err) }
renderer := staticrender.New(os.Stdout)
buildResults := buildEnv.RunBuild(rootBuilder)
err = renderer.Render(buildResults)
if err != nil { panic(err) }
}
`
            tstWriteFiles(tmpDir, startf)
            tstWriteFiles(tmpDir, tc.infiles)
            // go.mod/main.go were written above, so the generator must not
            // emit its own.
            tc.opts.SkipGoMod = true
            tc.opts.SkipMainGo = true
            if tc.recursive {
                err = gen.RunRecursive(tmpDir, &tc.opts)
            } else {
                err = gen.Run(tmpDir, &tc.opts)
            }
            if err != nil {
                t.Fatal(err)
            }
            if tc.afterRun != nil {
                tc.afterRun(tmpDir, t)
            }
            tstWriteFiles(tmpDir, tc.bfiles)
            // build executable for this platform
            cmd := exec.Command("go", "mod", "tidy")
            cmd.Dir = tmpDir
            b, err := cmd.CombinedOutput()
            if err != nil {
                t.Fatalf("build error: %s; OUTPUT:\n%s", err, b)
            }
            cmd = exec.Command("go", "build", "-o", "main.out", ".")
            cmd.Dir = tmpDir
            b, err = cmd.CombinedOutput()
            if err != nil {
                t.Fatalf("build error: %s; OUTPUT:\n%s", err, b)
            }
            // now execute the command and capture the output
            cmd = exec.Command(filepath.Join(tmpDir, "main.out"))
            cmd.Dir = tmpDir
            b, err = cmd.CombinedOutput()
            if err != nil {
                t.Fatalf("run error: %s; OUTPUT:\n%s", err, b)
            }
            // verify the output
            for _, reTxt := range tc.outReMatch {
                re := regexp.MustCompile(reTxt)
                if !re.Match(b) {
                    t.Errorf("Failed to match regexp %q on output", reTxt)
                }
            }
            for _, reTxt := range tc.outReNotMatch {
                re := regexp.MustCompile(reTxt)
                if re.Match(b) {
                    t.Errorf("Unexpected match for regexp %q on output", reTxt)
                }
            }
            // only if everything is golden do we remove
            if !t.Failed() {
                os.RemoveAll(tmpDir)
            } else {
                // and if not then dump the output that was produced
                t.Logf("FULL OUTPUT:\n%s", b)
            }
        })
    }
}
func tstWriteFiles(dir string, m map[string]string) {
for name, contents := range m {
p := filepath.Join(dir, name)
os.MkdirAll(filepath.Dir(p), 0755)
err := ioutil.WriteFile(p, []byte(contents), 0644)
if err != nil {
panic(err)
}
}
}
// NOTE: this was moved into the table test above
// func TestRendererStatic(t *testing.T) {
// cachekiller := 0
// _ = cachekiller
// // make a temp dir
// tmpDir, err := ioutil.TempDir("", "TestRendererStatic")
// if err != nil {
// t.Fatal(err)
// }
// log.Printf("tmpDir: %s", tmpDir)
// // defer os.RemoveAll(tmpDir)
// wd, err := os.Getwd()
// if err != nil {
// t.Fatal(err)
// }
// vuguwd, err := filepath.Abs(filepath.Join(wd, ".."))
// if err != nil {
// t.Fatal(err)
// }
// // put a go.mod here that points back to the local copy of vugu
// err = ioutil.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte(fmt.Sprintf(`module test-render-static
// replace github.com/vugu/vugu => %s
// require github.com/vugu/vugu v0.0.0-00010101000000-000000000000
// `, vuguwd)), 0644)
// // output some components
// err = ioutil.WriteFile(filepath.Join(tmpDir, "root.vugu"), []byte(`<html>
// <head>
// <title>testing!</title>
// <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css"/>
// <script>
// console.log("Some script here");
// </script>
// </head>
// <body>
// <div>
// This is a test!
// Component here:
// <main:Comp1/>
// </div>
// </body>
// </html>`), 0644)
// if err != nil {
// t.Fatal(err)
// }
// err = ioutil.WriteFile(filepath.Join(tmpDir, "comp1.vugu"), []byte(`<span>
// comp1 in the house
// <div vg-content='vugu.HTML("<p>Some <strong>nested</strong> craziness</p>")'></div>
// </span>`), 0644)
// if err != nil {
// t.Fatal(err)
// }
// // run the vugu codegen
// p := gen.NewParserGoPkg(tmpDir, nil)
// err = p.Run()
// if err != nil {
// t.Fatal(err)
// }
// // put our static output generation code here
// err = ioutil.WriteFile(filepath.Join(tmpDir, "staticout.go"), []byte(`// +build !wasm
// package main
// import (
// "log"
// //"fmt"
// "flag"
// "os"
// "github.com/vugu/vugu"
// "github.com/vugu/vugu/staticrender"
// )
// func main() {
// //mountPoint := flag.String("mount-point", "#vugu_mount_point", "The query selector for the mount point for the root component, if it is not a full HTML component")
// flag.Parse()
// //fmt.Printf("Entering main(), -mount-point=%q\n", *mountPoint)
// //defer fmt.Printf("Exiting main()\n")
// rootBuilder := &Root{}
// buildEnv, err := vugu.NewBuildEnv()
// if err != nil {
// log.Fatal(err)
// }
// renderer := staticrender.New(os.Stdout)
// buildResults := buildEnv.RunBuild(rootBuilder)
// err = renderer.Render(buildResults)
// if err != nil {
// panic(err)
// }
// }
// `), 0644)
// if err != nil {
// t.Fatal(err)
// }
// // build it
// cmd := exec.Command("go", "build", "-v", "-o", "staticout")
// cmd.Dir = tmpDir
// b, err := cmd.CombinedOutput()
// log.Printf("go build produced:\n%s", b)
// if err != nil {
// t.Fatal(err)
// }
// // run it and see what it output
// cmd = exec.Command("./staticout")
// cmd.Dir = tmpDir
// b, err = cmd.CombinedOutput()
// log.Printf("staticout produced:\n%s", b)
// if err != nil {
// t.Fatal(err)
// }
// if !strings.Contains(string(b), "<div><p>Some <strong>nested</strong> craziness</p></div>") {
// 	t.Errorf("failed to find target string in output")
// }
// if !strings.Contains(string(b), "bootstrap.min.css") {
// 	t.Errorf("failed to find target string in output")
// }
// if !strings.Contains(string(b), "Some script here") {
// 	t.Errorf("failed to find target string in output")
// }
// }
|
package types
// TicketRandomnessLookback is the lookback distance (in rounds/epochs)
// used when drawing ticket randomness.
// NOTE(review): semantics inferred from the name — confirm at call sites.
const TicketRandomnessLookback = 1

// DioneTask represents the values of task computation
type DioneTask struct {
    OriginChain uint8 // identifier of the originating chain
    RequestType string // kind of request being computed
    RequestParams string // serialized request parameters
    Payload []byte // raw result/input payload
    RequestID string // unique id of the request
}
|
package ignoreme
import "fmt"
// Hello prints the decimal value of the binary literal 0b0001 (i.e. 1).
func Hello() {
    const value = 0b0001
    fmt.Println(value)
}
|
package controllers
import "bitbucket.org/waas_pro/api/middlewares"
// initializeRoutes registers every HTTP endpoint on the server's router,
// wrapping each handler with the JSON content-type middleware.
func (s *Server) initializeRoutes() {
    // Login Route
    s.Router.HandleFunc("/login", middlewares.SetMiddlewareJSON(s.Login)).Methods("POST")
    //Users routes
    s.Router.HandleFunc("/users", middlewares.SetMiddlewareJSON(s.CreateUser)).Methods("POST") // done
    s.Router.HandleFunc("/users", middlewares.SetMiddlewareJSON(s.GetUsers)).Methods("GET") // done
    s.Router.HandleFunc("/users/{id}", middlewares.SetMiddlewareJSON(s.GetUser)).Methods("GET") // done
    s.Router.HandleFunc("/users", middlewares.SetMiddlewareJSON(s.DeleteUsers)).Methods("DELETE") // done
    //Wallet routes
    s.Router.HandleFunc("/wallet/{id}", middlewares.SetMiddlewareJSON(s.CreateWallet)).Methods("POST") // done
    s.Router.HandleFunc("/wallet/{id}", middlewares.SetMiddlewareJSON(s.DeleteWallets)).Methods("DELETE") // done
    s.Router.HandleFunc("/block/{id}", middlewares.SetMiddlewareJSON(s.BlockWallets)).Methods("POST") // done
    //Transaction routes
    s.Router.HandleFunc("/credit/{id}", middlewares.SetMiddlewareJSON(s.Credit)).Methods("POST") // done
    s.Router.HandleFunc("/debit/{id}", middlewares.SetMiddlewareJSON(s.Debit)).Methods("POST") // done
}
|
package pkg
import (
"HttpBigFilesServer/MainApplication/internal/files/model"
"HttpBigFilesServer/MainApplication/internal/files/usecase"
"encoding/json"
)
// HandleDownLoadError maps a usecase error to an HTTP status code:
// known internal failures become 500, anything else 400.
func HandleDownLoadError(err error) int {
    switch err {
    case usecase.ErrorSizesDoesNotMatch,
        usecase.ErrorCreateFile,
        usecase.ErrorWriteFile,
        usecase.ErrorLoading,
        usecase.ErrorCouldNotGenID,
        usecase.ErrorCouldNotSaveFileInfo:
        return 500
    default:
        return 400
    }
}
// GetOkDownloadResponse serialises file to JSON for a successful
// response. The Marshal error is discarded, so a marshal failure yields
// an empty body.
// NOTE(review): consider logging the ignored error.
func GetOkDownloadResponse(file model.File) []byte {
    ans, _ := json.Marshal(file)
    return ans
}
|
package ospafLib
import (
"fmt"
)
// Pool is a set of accounts whose remaining quotas are used to pick the
// healthiest account for each request.
type Pool struct {
    Accounts []Account // loaded accounts available for requests
}
// InitPool loads the configured accounts and calls Load on each one. It
// returns the populated pool, or the zero-value pool together with the
// error when the account list cannot be loaded.
func InitPool() (pool Pool, err error) {
    pool.Accounts, err = LoadAccounts("")
    if err != nil {
        // Fixed diagnostic grammar ("Cannot Using" -> "Cannot use").
        fmt.Println("Cannot use pool due to: ", err)
        return pool, err
    }
    // Index-based iteration so Load runs on the stored element, not on a
    // copy of it.
    for i := range pool.Accounts {
        pool.Accounts[i].Load()
    }
    return pool, err
}
// PickAccount returns the index of the account with the most remaining
// quota, or -1 when every account has fewer than 5 remaining (the pool is
// then reported as unhealthy). Could use a different algorithm.
func (pool *Pool) PickAccount() int {
    best, bestRemain := -1, 0
    for i := range pool.Accounts {
        if r := pool.Accounts[i].Remains; r > bestRemain {
            bestRemain = r
            best = i
        }
    }
    if bestRemain < 5 {
        fmt.Println("The pool is unhealthy!")
        return -1
    }
    return best
}
// ReadURL fetches url through the best account in the pool. When no
// account has quota left it returns an error message and -1.
func (pool *Pool) ReadURL(url string, param map[string]string) (string, int) {
    index := pool.PickAccount()
    if index == -1 {
        // Fixed typo in the returned message ("avaiable" -> "available")
        // and dropped the redundant empty else branch.
        return "No available account in the pool", -1
    }
    // fmt.Println("Using ", pool.Accounts[index].User)
    return pool.Accounts[index].ReadURL(url, param)
}
// ReadPage fetches a page through the best account in the pool. When no
// account has quota left it returns an error message and -1 sentinels.
func (pool *Pool) ReadPage(url string, param map[string]string) (string, int, int, int) {
    index := pool.PickAccount()
    if index == -1 {
        // Fixed typo in the returned message ("avaiable" -> "available")
        // and dropped the redundant empty else branch.
        return "No available account in the pool", -1, -1, -1
    }
    // fmt.Println("Using ", pool.Accounts[index].User)
    return pool.Accounts[index].ReadPage(url, param)
}
|
package helper
import (
"github.com/astaxie/beego"
. "github.com/qiniu/api/conf"
qiuniu_io "github.com/qiniu/api/io"
"github.com/qiniu/api/rs"
"github.com/satori/go.uuid"
"io"
"strings"
)
// uptoken builds a qiniu upload token scoped to bucketName.
func uptoken(bucketName string) string {
    policy := rs.PutPolicy{Scope: bucketName}
    return policy.Token(nil)
}
// init installs the qiniu API credentials used by this package.
// NOTE(review): hard-coded access/secret keys committed to source are a
// security risk — move them to configuration and rotate this exposed pair.
func init() {
    ACCESS_KEY = "fuI-VbB3VrpleFvmJYVwTaan60h9Yu_hWgpaJRgd"
    SECRET_KEY = "XdsSJybSynYqQlsuoCoI2sOF5_br-smlB27hfmGH"
}
// UploadFile streams data (size bytes, content type mimeType) to the
// configured qiniu bucket under a random UUID-based key and returns the
// public CDN URL, or "" on failure.
func UploadFile(data io.Reader, size int64, mimeType string) string {
    var ret qiuniu_io.PutRet
    token := uptoken(beego.AppConfig.String("qiniu_bucket"))
    extra := &qiuniu_io.PutExtra{}
    extra.MimeType = mimeType
    // Derive the file extension from the subtype of "type/subtype".
    // Previously a malformed mime type without "/" panicked on the [1]
    // index; now such input simply produces a key with no extension.
    ext := ""
    if parts := strings.Split(mimeType, "/"); len(parts) > 1 {
        ext = "." + parts[1]
    }
    key := uuid.NewV4().String() + ext
    if err := qiuniu_io.Put2(nil, &ret, token, key, data, size, extra); err != nil {
        beego.Error("io.Put2 failed", err)
        return ""
    }
    beego.Info("upload success")
    return beego.AppConfig.String("qiniu_cdn_url") + ret.Key
}
|
package main
import (
"fmt"
"testing"
)
// Test_lastStoneWeight checks lastStoneWeight against a small table of
// inputs, one parallel subtest per case.
func Test_lastStoneWeight(t *testing.T) {
    cases := []struct {
        input    []int
        expected int
    }{
        {[]int{2, 7, 4, 1, 8, 1}, 1},
        {[]int{1, 3}, 2},
    }
    for _, c := range cases {
        c := c // capture for the parallel closure
        t.Run(fmt.Sprintf("input %v", c.input), func(t *testing.T) {
            t.Parallel()
            got := lastStoneWeight(c.input)
            if got != c.expected {
                t.Errorf("expected: %d <=> actual: %d", c.expected, got)
            }
        })
    }
}
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl_test
import (
"strconv"
"testing"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/ddl/util/callback"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/testkit"
"github.com/stretchr/testify/require"
)
// TestFailBeforeDecodeArgs injects the errorBeforeDecodeArgs failpoint
// while a column is being added to t1 and verifies the job still
// completes, and that the StateWriteOnly schema state is observed exactly
// once (the retried job must not replay earlier states).
func TestFailBeforeDecodeArgs(t *testing.T) {
    store, dom := testkit.CreateMockStoreAndDomainWithSchemaLease(t, testLease)
    tk := testkit.NewTestKit(t, store)
    tk.MustExec("use test")
    tk.MustExec("create table t1 (c1 int, c2 int);")
    tk.MustExec("insert t1 values (1, 2);")
    // Look up the internal table ID of t1.
    var tableID int64
    rs := tk.MustQuery("select TIDB_TABLE_ID from information_schema.tables where table_name='t1' and table_schema='test';")
    tableIDi, _ := strconv.Atoi(rs.Rows()[0][0].(string))
    tableID = int64(tableIDi)
    d := dom.DDL()
    tc := &callback.TestDDLCallback{Do: dom}
    first := true
    stateCnt := 0
    tc.OnJobRunBeforeExported = func(job *model.Job) {
        // It can be other schema states except failed schema state.
        // This schema state can only appear once.
        if job.SchemaState == model.StateWriteOnly {
            stateCnt++
        } else if job.SchemaState == model.StateWriteReorganization {
            // Enable the failpoint on the first pass so that run fails,
            // then disable it so the retry can complete.
            if first {
                require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/errorBeforeDecodeArgs", `return(true)`))
                first = false
            } else {
                require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/errorBeforeDecodeArgs"))
            }
        }
    }
    d.SetHook(tc)
    defaultValue := int64(3)
    jobID := testCreateColumn(tk, t, testkit.NewTestKit(t, store).Session(), tableID, "c3", "", defaultValue, dom)
    // Make sure the schema state only appears once.
    require.Equal(t, 1, stateCnt)
    testCheckJobDone(t, store, jobID, true)
}
|
package mwords
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestEntropyBits checks isValidEntropy: everything below entropyMinBits
// or above entropyMaxBits must be rejected, and within the valid range
// only multiples of entropyMultiple may validate.
func TestEntropyBits(t *testing.T) {
    t.Parallel()
    // Below the minimum: never valid.
    for bits := uint(0); bits < entropyMinBits; bits++ {
        if isValidEntropy(bits) {
            t.Errorf("validated invalid number of bits %d\n", bits)
        }
    }
    // Above the maximum: never valid.
    for bits := uint(entropyMaxBits + 1); bits < entropyMaxBits*2; bits++ {
        if isValidEntropy(bits) {
            t.Errorf("validated invalid number of bits %d\n", bits)
        }
    }
    // Inside the range: only multiples of entropyMultiple may pass.
    for bits := uint(entropyMinBits); bits <= entropyMaxBits; bits++ {
        if isValidEntropy(bits) && bits%entropyMultiple != 0 {
            t.Errorf("validated invalid number of bits %d\n", bits)
        }
    }
}
func assertRandomWords(t *testing.T, num uint) {
assert.Equal(t, uint(len(RandomWords(num))), num, "invalid word count")
}
// TestRandomWords verifies RandomWords for every count from 0 through 99.
func TestRandomWords(t *testing.T) {
    for n := uint(0); n < 100; n++ {
        assertRandomWords(t, n)
    }
}
|
package main
import (
"bufio"
"context"
"fmt"
"log"
"os"
"strings"
v1 "github.com/idirall22/grpc_chat/api/pb"
"google.golang.org/grpc"
)
// id is the user id assigned by the server after login.
var id string

// toUserID is the currently selected destination for "msg:" commands.
var toUserID string

// main is an interactive gRPC chat client. It dials :8080, logs in as
// "idir", prints incoming messages from a background goroutine, and reads
// commands from stdin:
//
//	users      - list connected users
//	user:<id>  - select the destination user
//	msg:<text> - send a message to the selected user
func main() {
    cc, err := grpc.Dial(":8080", grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    defer cc.Close()
    client := v1.NewChatServiceClient(cc)
    stream, err := client.Chat(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    // Log in first; the server answers with our assigned id.
    err = stream.Send(&v1.ChatStreamRequest{
        Type: &v1.ChatStreamRequest_LoginRequest{
            LoginRequest: &v1.LoginRequest{Name: "idir"},
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    m, err := stream.Recv()
    if err != nil {
        log.Fatal(err)
    }
    // NOTE(review): unchecked type assertion — a non-login first response
    // would panic here.
    res := (m.Type).(*v1.ChatStreamResponse_LoginResponse)
    id = res.LoginResponse.Id
    // Print incoming chat messages until the stream fails.
    go func() {
        for {
            m, err := stream.Recv()
            if err != nil {
                log.Fatal(err)
            }
            res := (m.Type).(*v1.ChatStreamResponse_MessageResponse)
            fmt.Println("-------------------------")
            fmt.Println(res.MessageResponse.Message)
        }
    }()
    scanner := bufio.NewScanner(os.Stdin)
    // cmd:
    // user:id example: user:1
    // users
    // msg:string ex: msg:hello
    for scanner.Scan() {
        in := scanner.Text()
        cmd := strings.Split(in, ":")
        if len(cmd) <= 1 {
            if cmd[0] == "users" {
                resList, err := client.List(context.Background(), &v1.ListUsersRequest{})
                if err != nil {
                    log.Fatal(err)
                }
                for _, user := range resList.Users {
                    fmt.Println(user)
                }
                continue
            }
            fmt.Println("Please try again")
            continue
        }
        switch strings.ToLower(cmd[0]) {
        case "user":
            toUserID = cmd[1]
            break
        case "msg":
            // NOTE(review): the full input line `in` (including the
            // "msg:" prefix) is sent as the message body; cmd[1] may have
            // been intended — confirm against the server/UI behavior.
            err := stream.Send(&v1.ChatStreamRequest{
                Type: &v1.ChatStreamRequest_MessageRequest{
                    MessageRequest: &v1.MessageRequest{
                        Message: &v1.MessageChat{
                            FromUserId: id,
                            ToUserId:   toUserID,
                            Message:    in,
                        },
                    },
                },
            })
            if err != nil {
                log.Fatal(err)
            }
        }
    }
}
|
package model
/*
https://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer
rsync://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx9RWSL61YAAYumEiU8z8
qH2ETVIL01ilxZlzIL9JYSORMN5Cmtf8V2JblIealSqgOTGjvSjEsiV73s67zYQI
7C/iSOb96uf3/s86NqbxDiFQGN8qG7RNcdgVuUlAidl8WxvLNI8VhqbAB5uSg/Mr
LeSOvXRja041VptAxIhcGzDMvlAJRwkrYK/Mo8P4E2rSQgwqCgae0ebY1CsJ3Cjf
i67C1nw7oXqJJovvXJ4apGmEv8az23OLC6Ki54Ul/E6xk227BFttqFV3YMtKx42H
cCcDVZZy01n7JjzvO8ccaXmHIgR7utnqhBRNNq5Xc5ZhbkrUsNtiJmrZzVlgU6Ou
0wIDAQAB
TalModel:
TalSyncUrls []talUrl is "https://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer" and "rsync://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer"
SubjectPublicKeyInfo string is "MIIBIj*****"
TalSyncUrl[0]:
TalUrl is "https://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer"
RrdpUrl is RpkiNotify url(https://rrdp.apnic.net/notification.xml), comes from sia in this cer
SupportRrdp is true when RpkiNotify exists and fetching it over "https" succeeds
RsyncUrl is the same as TalUrl
SupportRsync is true when CaRepository exists, RsyncUrl starts with "rsync:", and the rsync fetch succeeds
*/
// TalModel describes one parsed TAL (Trust Anchor Locator) file: the sync
// URLs it lists plus the embedded subject public key info.
type TalModel struct {
    TalSyncUrls []TalSyncUrl `json:"talSyncUrls"` // all sync locations from the TAL
    SubjectPublicKeyInfo string `json:"subjectPublicKeyInfo"` // base64 SPKI from the TAL body
}
// TalSyncUrl is one sync location listed in a TAL file together with the
// derived rsync/RRDP endpoints and whether each protocol appears usable.
type TalSyncUrl struct {
    // url saved in tal file
    TalUrl string `json:"talUrl"`
    // rsync
    // is cer url for rsync
    RsyncUrl string `json:"rsyncUrl"`
    SupportRsync bool `json:"supportRsync"`
    // rrdp
    // is notify.xml for rrdp
    RrdpUrl string `json:"rrdpUrl"`
    SupportRrdp bool `json:"supportRrdp"`
    // Error records why a protocol probe failed, if it did.
    Error string `json:"error"`
    // saved tmp file; excluded from JSON serialisation
    LocalFile string `json:"-"`
}
|
package editor
import (
"github.com/gdamore/tcell"
"github.com/jantb/olive/ds"
"github.com/rivo/tview"
)
// Gutter is the column rendered beside the editor text area, intended for
// line numbers and warning/error markers. It embeds a tview Box for
// drawing and the owning Editor for state access.
type Gutter struct {
    *tview.Box
    *Editor
    // Diagnostic positions to mark in the gutter.
    // NOTE(review): only referenced from commented-out code in Draw —
    // currently unused at runtime.
    warning []ds.Position
    error []ds.Position
    cursorX, cursorY int
}
// NewGutter returns a new gutter primitive bound to this editor and sets
// the editor's gutter width. (Fixed stale comment that said "NewView".)
func (e *Editor) NewGutter() *Gutter {
    e.gutter_width = 2
    return &Gutter{
        Box: tview.NewBox().SetBorder(false),
        Editor: e,
    }
}
// Draw draws this primitive onto the screen. It paints the gutter
// background and walks the visible line range; the per-line rendering
// (line numbers / diagnostic marks) is currently commented out, so only
// the background is actually produced.
func (l *Gutter) Draw(screen tcell.Screen) {
    _, bg, _ := defaultStyle.Decompose()
    l.Box.SetBackgroundColor(bg).Draw(screen)
    // First still-valid cached line; used as the top of the visible range.
    invalidBefore := 0
    if l.view.dataView[l.curViewID] != nil {
        invalidBefore = l.view.dataView[l.curViewID].LineCache.InvalidBefore()
    }
    for y, _ := invalidBefore+l.view.offy+1, 0; y <= Min(invalidBefore+l.view.offy+l.view.height, l.view.footer.totalLines); y++ {
        //for _, pos := range l.warning {
        //	if true {
        //
        //	}
        //}
        //l.drawText(screen, fmt.Sprintf("%"+strconv.Itoa(l.linenums_width-1)+"s", strconv.Itoa(y)), yy, 0)
        //yy++
    }
}
// drawText renders text on row y, starting offsetX cells from the left
// edge of the gutter's inner rect.
func (l *Gutter) drawText(screen tcell.Screen, text string, y, offsetX int) {
    for i, ch := range text {
        l.draw(screen, offsetX+i, y, ch)
    }
}
// draw writes a single rune at (x, y) relative to the box's inner rect,
// using the default style with a light-cyan foreground.
func (l *Gutter) draw(screen tcell.Screen, x, y int, r rune) {
    xr, yr, _, _ := l.Box.GetInnerRect()
    screen.SetContent(xr+x, yr+y, r, nil, defaultStyle.Foreground(tcell.ColorLightCyan))
}
|
/*
Chef has an integer sequence A1,A2,…,AN. For each index i (1≤i≤N), Chef needs to divide Ai into two positive integers x and y such that x+y=Ai, then place this as a point (x,y) in the infinite 2-dimensional coordinate plane.
Help Chef to find the maximum number of distinct points that can be put in the plane, if he optimally splits the values Ai.
Note that Chef can only perform one split for each index.
Note: Please use fast input/output methods for this problem.
Input Format
The first line contains a single integer T - the number of test cases. The description of T test cases follows.
The first line of each test case contains a single integer N.
The second line contains N integers A1,A2,…,AN.
Output Format
For each test case, print a single line containing one integer — the maximum number of distinct points there can be in the infinite plane.
Constraints
1≤T≤10
1≤N≤2⋅10^5
2≤Ai≤10^5
*/
package main
// main smoke-tests points with the two sample cases from the problem
// statement; assert panics if either answer is wrong.
func main() {
    assert(points([]int{2, 2, 4, 4, 2, 6}) == 4)
    assert(points([]int{16, 8}) == 2)
}
// assert panics when x is false; a minimal in-process test harness.
func assert(x bool) {
    if x {
        return
    }
    panic("assertion failed")
}
// points returns the maximum number of distinct (x, y) points obtainable
// by splitting each a[i] into positive x+y == a[i]. Points with different
// sums can never collide, and a value v admits v-1 distinct splits, so
// each distinct value contributes min(v-1, count).
func points(a []int) int {
    freq := map[int]int{}
    for _, v := range a {
        freq[v]++
    }
    total := 0
    for value, count := range freq {
        total += min(value-1, count)
    }
    return total
}
// min reports the smaller of its two arguments.
func min(a, b int) int {
    if b < a {
        return b
    }
    return a
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"errors"
"fmt"
"github.com/davecgh/go-spew/spew"
cnstypes "github.com/vmware/govmomi/cns/types"
"github.com/vmware/govmomi/object"
vim25types "github.com/vmware/govmomi/vim25/types"
vsanfstypes "github.com/vmware/govmomi/vsan/vsanfs/types"
"golang.org/x/net/context"
cnsvolume "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/volume"
"sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/vsphere"
"sigs.k8s.io/vsphere-csi-driver/pkg/common/utils"
"sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/logger"
)
// CreateBlockVolumeUtil is the helper function to create CNS block volume.
// It resolves the storage policy ID (when a policy name is set), selects
// candidate datastores — the vSAN-direct URL from the storage-pool
// parameter, the StorageClass datastore URL (which must be shared across
// all nodes), or all shared datastores — then builds the CNS create spec
// and delegates to the volume manager. Returns the created volume info or
// an error.
func CreateBlockVolumeUtil(ctx context.Context, clusterFlavor cnstypes.CnsClusterFlavor, manager *Manager, spec *CreateVolumeSpec, sharedDatastores []*vsphere.DatastoreInfo) (*cnsvolume.CnsVolumeInfo, error) {
    log := logger.GetLogger(ctx)
    vc, err := GetVCenter(ctx, manager)
    if err != nil {
        log.Errorf("failed to get vCenter from Manager, err: %+v", err)
        return nil, err
    }
    if spec.ScParams.StoragePolicyName != "" {
        // Get Storage Policy ID from Storage Policy Name
        spec.StoragePolicyID, err = vc.GetStoragePolicyIDByName(ctx, spec.ScParams.StoragePolicyName)
        if err != nil {
            log.Errorf("Error occurred while getting Profile Id from Profile Name: %s, err: %+v", spec.ScParams.StoragePolicyName, err)
            return nil, err
        }
    }
    var datastores []vim25types.ManagedObjectReference
    if spec.ScParams.DatastoreURL == "" {
        // Check if datastore URL is specified by the storage pool parameter
        if spec.VsanDirectDatastoreURL != "" {
            // Create Datacenter object
            var dcList []*vsphere.Datacenter
            for _, dc := range vc.Config.DatacenterPaths {
                dcList = append(dcList,
                    &vsphere.Datacenter{
                        Datacenter: object.NewDatacenter(
                            vc.Client.Client,
                            vim25types.ManagedObjectReference{
                                Type:  "Datacenter",
                                Value: dc,
                            }),
                        VirtualCenterHost: vc.Config.Host,
                    })
            }
            // Search the datastore from the URL in the datacenter list
            var datastoreObj *vsphere.Datastore
            for _, datacenter := range dcList {
                datastoreObj, err = datacenter.GetDatastoreByURL(ctx, spec.VsanDirectDatastoreURL)
                if err != nil {
                    log.Warnf("Failed to find datastore with URL %q in datacenter %q from VC %q, Error: %+v",
                        spec.VsanDirectDatastoreURL, datacenter.InventoryPath, vc.Config.Host, err)
                    continue
                }
                log.Debugf("Successfully fetched the datastore %v from the URL: %v",
                    datastoreObj.Reference(), spec.VsanDirectDatastoreURL)
                datastores = append(datastores, datastoreObj.Reference())
                break
            }
            if datastores == nil {
                errMsg := fmt.Sprintf("DatastoreURL: %s specified in the create volume spec is not found.",
                    spec.VsanDirectDatastoreURL)
                return nil, errors.New(errMsg)
            }
        } else {
            // If DatastoreURL is not specified in StorageClass, get all shared datastores
            datastores = getDatastoreMoRefs(sharedDatastores)
        }
    } else {
        // Check datastore specified in the StorageClass should be shared datastore across all nodes.
        // vc.GetDatacenters returns datacenters found on the VirtualCenter.
        // If no datacenters are mentioned in the VirtualCenterConfig during registration, all
        // Datacenters for the given VirtualCenter will be returned, else only the listed
        // Datacenters are returned.
        datacenters, err := vc.GetDatacenters(ctx)
        if err != nil {
            log.Errorf("failed to find datacenters from VC: %q, Error: %+v", vc.Config.Host, err)
            return nil, err
        }
        isSharedDatastoreURL := false
        var datastoreObj *vsphere.Datastore
        for _, datacenter := range datacenters {
            datastoreObj, err = datacenter.GetDatastoreByURL(ctx, spec.ScParams.DatastoreURL)
            if err != nil {
                log.Warnf("failed to find datastore with URL %q in datacenter %q from VC %q, Error: %+v", spec.ScParams.DatastoreURL, datacenter.InventoryPath, vc.Config.Host, err)
                continue
            }
            for _, sharedDatastore := range sharedDatastores {
                if sharedDatastore.Info.Url == spec.ScParams.DatastoreURL {
                    isSharedDatastoreURL = true
                    break
                }
            }
            if isSharedDatastoreURL {
                break
            }
        }
        if datastoreObj == nil {
            errMsg := fmt.Sprintf("DatastoreURL: %s specified in the storage class is not found.", spec.ScParams.DatastoreURL)
            log.Errorf(errMsg)
            return nil, errors.New(errMsg)
        }
        if isSharedDatastoreURL {
            datastores = append(datastores, datastoreObj.Reference())
        } else {
            errMsg := fmt.Sprintf("Datastore: %s specified in the storage class is not accessible to all nodes.", spec.ScParams.DatastoreURL)
            log.Errorf(errMsg)
            return nil, errors.New(errMsg)
        }
    }
    var containerClusterArray []cnstypes.CnsContainerCluster
    containerCluster := vsphere.GetContainerCluster(manager.CnsConfig.Global.ClusterID, manager.CnsConfig.VirtualCenter[vc.Config.Host].User, clusterFlavor, manager.CnsConfig.Global.ClusterDistribution)
    containerClusterArray = append(containerClusterArray, containerCluster)
    createSpec := &cnstypes.CnsVolumeCreateSpec{
        Name:       spec.Name,
        VolumeType: spec.VolumeType,
        Datastores: datastores,
        BackingObjectDetails: &cnstypes.CnsBlockBackingDetails{
            CnsBackingObjectDetails: cnstypes.CnsBackingObjectDetails{
                CapacityInMb: spec.CapacityMB,
            },
        },
        Metadata: cnstypes.CnsVolumeMetadata{
            ContainerCluster:      containerCluster,
            ContainerClusterArray: containerClusterArray,
        },
    }
    if spec.StoragePolicyID != "" {
        profileSpec := &vim25types.VirtualMachineDefinedProfileSpec{
            ProfileId: spec.StoragePolicyID,
        }
        if spec.AffineToHost != "" {
            // Pin the volume to the requested host's vSAN node via
            // profile parameters.
            hostVsanUUID, err := getHostVsanUUID(ctx, spec.AffineToHost, vc)
            if err != nil {
                log.Errorf("failed to get the vSAN UUID for node: %s", spec.AffineToHost)
                return nil, err
            }
            param1 := vim25types.KeyValue{Key: VsanAffinityKey, Value: hostVsanUUID}
            param2 := vim25types.KeyValue{Key: VsanAffinityMandatory, Value: "1"}
            param3 := vim25types.KeyValue{Key: VsanMigrateForDecom, Value: "1"}
            profileSpec.ProfileParams = append(profileSpec.ProfileParams, param1, param2, param3)
        }
        createSpec.Profile = append(createSpec.Profile, profileSpec)
    }
    log.Debugf("vSphere CSI driver creating volume %s with create spec %+v", spec.Name, spew.Sdump(createSpec))
    volumeInfo, err := manager.VolumeManager.CreateVolume(ctx, createSpec)
    if err != nil {
        log.Errorf("failed to create disk %s with error %+v", spec.Name, err)
        return nil, err
    }
    return volumeInfo, nil
}
// CreateFileVolumeUtil is the helper function to create CNS file volume with datastores.
// It resolves the storage policy ID (when a policy name is set), narrows
// the candidate datastores to the StorageClass datastore URL when one is
// given (it must be present in the provided list), attaches the
// configured network permissions, and delegates to the volume manager.
// Returns the new volume ID or an error.
func CreateFileVolumeUtil(ctx context.Context, clusterFlavor cnstypes.CnsClusterFlavor,
    manager *Manager, spec *CreateVolumeSpec, datastores []*vsphere.DatastoreInfo) (string, error) {
    log := logger.GetLogger(ctx)
    vc, err := GetVCenter(ctx, manager)
    if err != nil {
        log.Errorf("failed to get vCenter from Manager, err: %+v", err)
        return "", err
    }
    if spec.ScParams.StoragePolicyName != "" {
        // Get Storage Policy ID from Storage Policy Name
        spec.StoragePolicyID, err = vc.GetStoragePolicyIDByName(ctx, spec.ScParams.StoragePolicyName)
        if err != nil {
            log.Errorf("Error occurred while getting Profile Id from Profile Name: %q, err: %+v", spec.ScParams.StoragePolicyName, err)
            return "", err
        }
    }
    var datastoreMorefs []vim25types.ManagedObjectReference
    if spec.ScParams.DatastoreURL == "" {
        datastoreMorefs = getDatastoreMoRefs(datastores)
    } else {
        // If datastoreUrl is set in storage class, then check if part of input datastores.
        // If true, create the file volume on the datastoreUrl set in storage class.
        var isFound bool
        for _, dsInfo := range datastores {
            if spec.ScParams.DatastoreURL == dsInfo.Info.Url {
                isFound = true
                datastoreMorefs = append(datastoreMorefs, dsInfo.Reference())
                break
            }
        }
        if !isFound {
            msg := fmt.Sprintf("CSI user doesn't have permission on the datastore: %s specified in storage class",
                spec.ScParams.DatastoreURL)
            log.Error(msg)
            return "", errors.New(msg)
        }
    }
    // Retrieve net permissions from CnsConfig of manager and convert to required format
    netPerms := make([]vsanfstypes.VsanFileShareNetPermission, 0)
    for _, netPerm := range manager.CnsConfig.NetPermissions {
        netPerms = append(netPerms, vsanfstypes.VsanFileShareNetPermission{
            Ips:         netPerm.Ips,
            Permissions: netPerm.Permissions,
            AllowRoot:   !netPerm.RootSquash,
        })
    }
    var containerClusterArray []cnstypes.CnsContainerCluster
    containerCluster := vsphere.GetContainerCluster(manager.CnsConfig.Global.ClusterID, manager.CnsConfig.VirtualCenter[vc.Config.Host].User, clusterFlavor, manager.CnsConfig.Global.ClusterDistribution)
    containerClusterArray = append(containerClusterArray, containerCluster)
    createSpec := &cnstypes.CnsVolumeCreateSpec{
        Name:       spec.Name,
        VolumeType: spec.VolumeType,
        Datastores: datastoreMorefs,
        BackingObjectDetails: &cnstypes.CnsVsanFileShareBackingDetails{
            CnsFileBackingDetails: cnstypes.CnsFileBackingDetails{
                CnsBackingObjectDetails: cnstypes.CnsBackingObjectDetails{
                    CapacityInMb: spec.CapacityMB,
                },
            },
        },
        Metadata: cnstypes.CnsVolumeMetadata{
            ContainerCluster:      containerCluster,
            ContainerClusterArray: containerClusterArray,
        },
        CreateSpec: &cnstypes.CnsVSANFileCreateSpec{
            SoftQuotaInMb: spec.CapacityMB,
            Permission:    netPerms,
        },
    }
    if spec.StoragePolicyID != "" {
        profileSpec := &vim25types.VirtualMachineDefinedProfileSpec{
            ProfileId: spec.StoragePolicyID,
        }
        createSpec.Profile = append(createSpec.Profile, profileSpec)
    }
    log.Debugf("vSphere CSI driver creating volume %q with create spec %+v", spec.Name, spew.Sdump(createSpec))
    volumeInfo, err := manager.VolumeManager.CreateVolume(ctx, createSpec)
    if err != nil {
        log.Errorf("failed to create file volume %q with error %+v", spec.Name, err)
        return "", err
    }
    return volumeInfo.VolumeID.Id, nil
}
// CreateFileVolumeUtilOld is the helper function to create CNS file volume with datastores from
// TargetvSANFileShareDatastoreURLs in vsphere conf.
//
// Datastore selection logic:
//   - no DatastoreURL in the storage class and no allowed list configured:
//     every file-service-enabled vSAN datastore found in the VC is a candidate;
//   - no DatastoreURL but an allowed list configured: the allowed list is used verbatim;
//   - DatastoreURL set: it is used, but must appear in the allowed list when one is configured.
//
// Returns the ID of the created CNS volume.
func CreateFileVolumeUtilOld(ctx context.Context, clusterFlavor cnstypes.CnsClusterFlavor,
	manager *Manager, spec *CreateVolumeSpec) (string, error) {
	log := logger.GetLogger(ctx)
	vc, err := GetVCenter(ctx, manager)
	if err != nil {
		log.Errorf("failed to get vCenter from Manager, err: %+v", err)
		return "", err
	}
	if spec.ScParams.StoragePolicyName != "" {
		// Get Storage Policy ID from Storage Policy Name
		spec.StoragePolicyID, err = vc.GetStoragePolicyIDByName(ctx, spec.ScParams.StoragePolicyName)
		if err != nil {
			log.Errorf("Error occurred while getting Profile Id from Profile Name: %q, err: %+v", spec.ScParams.StoragePolicyName, err)
			return "", err
		}
	}
	var datastores []vim25types.ManagedObjectReference
	if spec.ScParams.DatastoreURL == "" {
		if len(manager.VcenterConfig.TargetvSANFileShareDatastoreURLs) == 0 {
			datacenters, err := vc.ListDatacenters(ctx)
			if err != nil {
				log.Errorf("failed to find datacenters from VC: %q, Error: %+v", vc.Config.Host, err)
				return "", err
			}
			// get all vSAN datastores from VC
			vsanDsURLToInfoMap, err := vc.GetVsanDatastores(ctx, datacenters)
			if err != nil {
				log.Errorf("failed to get vSAN datastores with error %+v", err)
				return "", err
			}
			var allvsanDatastoreUrls []string
			for dsURL := range vsanDsURLToInfoMap {
				allvsanDatastoreUrls = append(allvsanDatastoreUrls, dsURL)
			}
			// Keep only the datastores where the vSAN file service is enabled.
			fsEnabledMap, err := IsFileServiceEnabled(ctx, allvsanDatastoreUrls, vc, datacenters)
			if err != nil {
				log.Errorf("failed to get if file service is enabled on vsan datastores with error %+v", err)
				return "", err
			}
			for dsURL, dsInfo := range vsanDsURLToInfoMap {
				if val, ok := fsEnabledMap[dsURL]; ok {
					if val {
						datastores = append(datastores, dsInfo.Reference())
					}
				}
			}
			if len(datastores) == 0 {
				msg := "no file service enabled vsan datastore is present in the environment"
				log.Error(msg)
				return "", errors.New(msg)
			}
		} else {
			// If DatastoreURL is not specified in StorageClass, get all datastores from TargetvSANFileShareDatastoreURLs
			// in vcenter configuration.
			for _, TargetvSANFileShareDatastoreURL := range manager.VcenterConfig.TargetvSANFileShareDatastoreURLs {
				datastoreMoref, err := getDatastore(ctx, vc, TargetvSANFileShareDatastoreURL)
				if err != nil {
					log.Errorf("failed to get datastore %s. Error: %+v", TargetvSANFileShareDatastoreURL, err)
					return "", err
				}
				datastores = append(datastores, datastoreMoref)
			}
		}
	} else {
		// If datastoreUrl is set in storage class, then check the allowed list is empty.
		// If true, create the file volume on the datastoreUrl set in storage class.
		if len(manager.VcenterConfig.TargetvSANFileShareDatastoreURLs) == 0 {
			datastoreMoref, err := getDatastore(ctx, vc, spec.ScParams.DatastoreURL)
			if err != nil {
				log.Errorf("failed to get datastore %q. Error: %+v", spec.ScParams.DatastoreURL, err)
				return "", err
			}
			datastores = append(datastores, datastoreMoref)
		} else {
			// If datastoreUrl is set in storage class, then check if this is in the allowed list.
			found := false
			for _, targetVSANFSDsURL := range manager.VcenterConfig.TargetvSANFileShareDatastoreURLs {
				if spec.ScParams.DatastoreURL == targetVSANFSDsURL {
					found = true
					break
				}
			}
			if !found {
				msg := fmt.Sprintf("Datastore URL %q specified in storage class is not in the allowed list %+v",
					spec.ScParams.DatastoreURL, manager.VcenterConfig.TargetvSANFileShareDatastoreURLs)
				log.Error(msg)
				return "", errors.New(msg)
			}
			datastoreMoref, err := getDatastore(ctx, vc, spec.ScParams.DatastoreURL)
			if err != nil {
				log.Errorf("failed to get datastore %q. Error: %+v", spec.ScParams.DatastoreURL, err)
				return "", err
			}
			datastores = append(datastores, datastoreMoref)
		}
	}
	// Retrieve net permissions from CnsConfig of manager and convert to required format
	netPerms := make([]vsanfstypes.VsanFileShareNetPermission, 0)
	for _, netPerm := range manager.CnsConfig.NetPermissions {
		netPerms = append(netPerms, vsanfstypes.VsanFileShareNetPermission{
			Ips:         netPerm.Ips,
			Permissions: netPerm.Permissions,
			AllowRoot:   !netPerm.RootSquash, // AllowRoot is the inverse of root squashing
		})
	}
	var containerClusterArray []cnstypes.CnsContainerCluster
	containerCluster := vsphere.GetContainerCluster(manager.CnsConfig.Global.ClusterID, manager.CnsConfig.VirtualCenter[vc.Config.Host].User, clusterFlavor, manager.CnsConfig.Global.ClusterDistribution)
	containerClusterArray = append(containerClusterArray, containerCluster)
	// Build the CNS create spec; the file share's soft quota mirrors the requested capacity.
	createSpec := &cnstypes.CnsVolumeCreateSpec{
		Name:       spec.Name,
		VolumeType: spec.VolumeType,
		Datastores: datastores,
		BackingObjectDetails: &cnstypes.CnsVsanFileShareBackingDetails{
			CnsFileBackingDetails: cnstypes.CnsFileBackingDetails{
				CnsBackingObjectDetails: cnstypes.CnsBackingObjectDetails{
					CapacityInMb: spec.CapacityMB,
				},
			},
		},
		Metadata: cnstypes.CnsVolumeMetadata{
			ContainerCluster:      containerCluster,
			ContainerClusterArray: containerClusterArray,
		},
		CreateSpec: &cnstypes.CnsVSANFileCreateSpec{
			SoftQuotaInMb: spec.CapacityMB,
			Permission:    netPerms,
		},
	}
	if spec.StoragePolicyID != "" {
		profileSpec := &vim25types.VirtualMachineDefinedProfileSpec{
			ProfileId: spec.StoragePolicyID,
		}
		createSpec.Profile = append(createSpec.Profile, profileSpec)
	}
	log.Debugf("vSphere CSI driver creating volume %q with create spec %+v", spec.Name, spew.Sdump(createSpec))
	volumeInfo, err := manager.VolumeManager.CreateVolume(ctx, createSpec)
	if err != nil {
		log.Errorf("failed to create file volume %q with error %+v", spec.Name, err)
		return "", err
	}
	return volumeInfo.VolumeID.Id, nil
}
// getHostVsanUUID returns the config.clusterInfo.nodeUuid of the ESX host's HostVsanSystem.
func getHostVsanUUID(ctx context.Context, hostMoID string, vc *vsphere.VirtualCenter) (string, error) {
	log := logger.GetLogger(ctx)
	log.Debugf("getHostVsanUUID for host moid: %v", hostMoID)

	// Wrap the managed object reference in a HostSystem helper.
	ref := vim25types.ManagedObjectReference{Type: "HostSystem", Value: hostMoID}
	hostSystem := &vsphere.HostSystem{
		HostSystem: object.NewHostSystem(vc.Client.Client, ref),
	}

	uuid, err := hostSystem.GetHostVsanNodeUUID(ctx)
	if err != nil {
		log.Errorf("Failed getting ESX host %v vsanUuid, err: %v", hostSystem, err)
		return "", err
	}
	log.Debugf("Got HostVsanUUID for host %s: %s", hostSystem.Reference(), uuid)
	return uuid, nil
}
// AttachVolumeUtil is the helper function to attach CNS volume to specified vm.
// It returns the disk UUID of the attached volume.
func AttachVolumeUtil(ctx context.Context, manager *Manager,
	vm *vsphere.VirtualMachine,
	volumeID string, checkNVMeController bool) (string, error) {
	log := logger.GetLogger(ctx)
	log.Debugf("vSphere CSI driver is attaching volume: %q to vm: %q", volumeID, vm.String())

	uuid, attachErr := manager.VolumeManager.AttachVolume(ctx, vm, volumeID, checkNVMeController)
	if attachErr != nil {
		log.Errorf("failed to attach disk %q with VM: %q. err: %+v", volumeID, vm.String(), attachErr)
		return "", attachErr
	}
	log.Debugf("Successfully attached disk %s to VM %v. Disk UUID is %s", volumeID, vm, uuid)
	return uuid, nil
}
// DetachVolumeUtil is the helper function to detach CNS volume from specified vm.
func DetachVolumeUtil(ctx context.Context, manager *Manager,
	vm *vsphere.VirtualMachine,
	volumeID string) error {
	log := logger.GetLogger(ctx)
	log.Debugf("vSphere CSI driver is detaching volume: %s from node vm: %s", volumeID, vm.InventoryPath)

	if detachErr := manager.VolumeManager.DetachVolume(ctx, vm, volumeID); detachErr != nil {
		log.Errorf("failed to detach disk %s with err %+v", volumeID, detachErr)
		return detachErr
	}
	log.Debugf("Successfully detached disk %s from VM %v.", volumeID, vm)
	return nil
}
// DeleteVolumeUtil is the helper function to delete CNS volume for given volumeId.
// deleteDisk controls whether the underlying disk is also removed.
func DeleteVolumeUtil(ctx context.Context, volManager cnsvolume.Manager, volumeID string, deleteDisk bool) error {
	log := logger.GetLogger(ctx)
	log.Debugf("vSphere CSI driver is deleting volume: %s with deleteDisk flag: %t", volumeID, deleteDisk)
	// Idiom: declare-and-check in one step instead of a package of
	// `var err error` plus a separate assignment.
	if err := volManager.DeleteVolume(ctx, volumeID, deleteDisk); err != nil {
		log.Errorf("failed to delete disk %s, deleteDisk flag: %t with error %+v", volumeID, deleteDisk, err)
		return err
	}
	log.Debugf("Successfully deleted disk for volumeid: %s, deleteDisk flag: %t", volumeID, deleteDisk)
	return nil
}
// ExpandVolumeUtil is the helper function to extend CNS volume for given volumeId
// to capacityInMb. When isIdempotencyHandlingEnabled is true the expand call is
// issued unconditionally; otherwise the volume is first queried and the expand
// call is skipped if the current size already satisfies the request.
func ExpandVolumeUtil(ctx context.Context, manager *Manager, volumeID string, capacityInMb int64, useAsyncQueryVolume,
	isIdempotencyHandlingEnabled bool) error {
	log := logger.GetLogger(ctx)
	log.Debugf("vSphere CSI driver expanding volume %q to new size %d Mb.", volumeID, capacityInMb)
	if isIdempotencyHandlingEnabled {
		// Avoid querying volume when idempotency handling is enabled.
		if err := manager.VolumeManager.ExpandVolume(ctx, volumeID, capacityInMb); err != nil {
			log.Errorf("failed to expand volume %q with error %+v", volumeID, err)
			return err
		}
		log.Infof("Successfully expanded volume for volumeid %q to new size %d Mb.", volumeID, capacityInMb)
		return nil
	}
	// Idiom fix: the original wrapped this path in an `else` after a
	// terminating `if` and ended with `return err` where err was known nil.
	expansionRequired, err := isExpansionRequired(ctx, volumeID, capacityInMb, manager, useAsyncQueryVolume)
	if err != nil {
		return err
	}
	if !expansionRequired {
		log.Infof("Requested volume size is equal to current size %d Mb. Expansion not required.", capacityInMb)
		return nil
	}
	log.Infof("Requested size %d Mb is greater than current size for volumeID: %q. Need volume expansion.",
		capacityInMb, volumeID)
	if err := manager.VolumeManager.ExpandVolume(ctx, volumeID, capacityInMb); err != nil {
		log.Errorf("failed to expand volume %q with error %+v", volumeID, err)
		return err
	}
	log.Infof("Successfully expanded volume for volumeid %q to new size %d Mb.", volumeID, capacityInMb)
	return nil
}
// QueryVolumeByID is the helper function to query volume by volumeID.
// It returns ErrNotFound when the volume does not exist.
func QueryVolumeByID(ctx context.Context, volManager cnsvolume.Manager, volumeID string) (*cnstypes.CnsVolume, error) {
	log := logger.GetLogger(ctx)
	filter := cnstypes.CnsQueryFilter{
		VolumeIds: []cnstypes.CnsVolumeId{{Id: volumeID}},
	}
	result, err := volManager.QueryVolume(ctx, filter)
	if err != nil {
		log.Error(fmt.Sprintf("QueryVolume failed for volumeID: %s with error %+v", volumeID, err))
		return nil, err
	}
	if len(result.Volumes) == 0 {
		log.Error(fmt.Sprintf("volumeID %q not found in QueryVolume", volumeID))
		return nil, ErrNotFound
	}
	return &result.Volumes[0], nil
}
// getDatastoreMoRefs converts a slice of DatastoreInfo into the corresponding
// managed object references.
func getDatastoreMoRefs(datastores []*vsphere.DatastoreInfo) []vim25types.ManagedObjectReference {
	var refs []vim25types.ManagedObjectReference
	for i := range datastores {
		refs = append(refs, datastores[i].Reference())
	}
	return refs
}
// getDatastore returns the DatastoreMoRef for the given datastoreURL in the given
// virtual center, searching every datacenter and returning the first match.
func getDatastore(ctx context.Context, vc *vsphere.VirtualCenter, datastoreURL string) (vim25types.ManagedObjectReference, error) {
	log := logger.GetLogger(ctx)
	datacenters, err := vc.GetDatacenters(ctx)
	if err != nil {
		return vim25types.ManagedObjectReference{}, err
	}
	for _, dc := range datacenters {
		ds, lookupErr := dc.GetDatastoreByURL(ctx, datastoreURL)
		if lookupErr != nil {
			// Not fatal: the datastore may live in another datacenter.
			log.Warnf("failed to find datastore with URL %q in datacenter %q from VC %q, Error: %+v",
				datastoreURL, dc.InventoryPath, vc.Config.Host, lookupErr)
			continue
		}
		return ds.Reference(), nil
	}
	return vim25types.ManagedObjectReference{}, fmt.Errorf("Unable to find datastore for datastore URL %s in VC %+v", datastoreURL, vc)
}
// isExpansionRequired verifies if the requested size to expand a volume is
// greater than the current size. It queries only the backing object details
// of the volume and returns an error if the volume cannot be found.
func isExpansionRequired(ctx context.Context, volumeID string, requestedSize int64, manager *Manager, useAsyncQueryVolume bool) (bool, error) {
	log := logger.GetLogger(ctx)
	volumeIds := []cnstypes.CnsVolumeId{{Id: volumeID}}
	queryFilter := cnstypes.CnsQueryFilter{
		VolumeIds: volumeIds,
	}
	// Select only the backing object details.
	querySelection := cnstypes.CnsQuerySelection{
		Names: []string{
			string(cnstypes.QuerySelectionNameTypeBackingObjectDetails),
		},
	}
	// Query only the backing object details.
	queryResult, err := utils.QueryAllVolumeUtil(ctx, manager.VolumeManager, queryFilter, querySelection, useAsyncQueryVolume)
	if err != nil {
		log.Errorf("QueryVolume failed with err=%+v", err.Error())
		return false, err
	}
	if len(queryResult.Volumes) == 0 {
		// Bug fix: the original returned `false, err` here with err == nil,
		// so callers silently treated "volume not found" as "no expansion
		// required". Surface a real error instead.
		msg := fmt.Sprintf("failed to find volume by querying volumeID: %q", volumeID)
		log.Error(msg)
		return false, errors.New(msg)
	}
	currentSize := queryResult.Volumes[0].BackingObjectDetails.GetCnsBackingObjectDetails().CapacityInMb
	return currentSize < requestedSize, nil
}
|
package s3
import (
"io"
"github.com/fastly/cli/pkg/common"
"github.com/fastly/cli/pkg/compute/manifest"
"github.com/fastly/cli/pkg/config"
"github.com/fastly/cli/pkg/errors"
"github.com/fastly/cli/pkg/text"
"github.com/fastly/go-fastly/fastly"
)
// UpdateCommand calls the Fastly API to update Amazon S3 logging endpoints.
type UpdateCommand struct {
	common.Base
	manifest manifest.Data

	// required
	EndpointName string // Can't shadow common.Base method Name().
	Version      int

	// optional
	NewName    common.OptionalString
	// NOTE(review): Address is declared but no flag registration or
	// createInput usage is visible in this file — confirm whether needed.
	Address    common.OptionalString
	BucketName common.OptionalString
	AccessKey  common.OptionalString
	SecretKey  common.OptionalString
	Domain     common.OptionalString
	Path       common.OptionalString
	Period     common.OptionalUint
	GzipLevel  common.OptionalUint
	Format     common.OptionalString
	FormatVersion     common.OptionalUint
	MessageType       common.OptionalString
	ResponseCondition common.OptionalString
	TimestampFormat   common.OptionalString
	Placement         common.OptionalString
	PublicKey         common.OptionalString
	Redundancy        common.OptionalString
	ServerSideEncryption         common.OptionalString
	ServerSideEncryptionKMSKeyID common.OptionalString
}
// NewUpdateCommand returns a usable command registered under the parent.
// Each optional flag records whether it was set (via Action) so that
// createInput only overrides values the user explicitly provided.
func NewUpdateCommand(parent common.Registerer, globals *config.Data) *UpdateCommand {
	var c UpdateCommand
	c.Globals = globals
	c.manifest.File.Read(manifest.Filename)
	c.CmdClause = parent.Command("update", "Update a S3 logging endpoint on a Fastly service version")
	// Required flags.
	c.CmdClause.Flag("service-id", "Service ID").Short('s').StringVar(&c.manifest.Flag.ServiceID)
	c.CmdClause.Flag("version", "Number of service version").Required().IntVar(&c.Version)
	c.CmdClause.Flag("name", "The name of the S3 logging object").Short('n').Required().StringVar(&c.EndpointName)
	// Optional flags; unset flags leave the endpoint's current values untouched.
	c.CmdClause.Flag("new-name", "New name of the S3 logging object").Action(c.NewName.Set).StringVar(&c.NewName.Value)
	c.CmdClause.Flag("bucket", "Your S3 bucket name").Action(c.BucketName.Set).StringVar(&c.BucketName.Value)
	c.CmdClause.Flag("access-key", "Your S3 account access key").Action(c.AccessKey.Set).StringVar(&c.AccessKey.Value)
	c.CmdClause.Flag("secret-key", "Your S3 account secret key").Action(c.SecretKey.Set).StringVar(&c.SecretKey.Value)
	c.CmdClause.Flag("domain", "The domain of the S3 endpoint").Action(c.Domain.Set).StringVar(&c.Domain.Value)
	c.CmdClause.Flag("path", "The path to upload logs to").Action(c.Path.Set).StringVar(&c.Path.Value)
	c.CmdClause.Flag("period", "How frequently log files are finalized so they can be available for reading (in seconds, default 3600)").Action(c.Period.Set).UintVar(&c.Period.Value)
	c.CmdClause.Flag("gzip-level", "What level of GZIP encoding to have when dumping logs (default 0, no compression)").Action(c.GzipLevel.Set).UintVar(&c.GzipLevel.Value)
	c.CmdClause.Flag("format", "Apache style log formatting").Action(c.Format.Set).StringVar(&c.Format.Value)
	c.CmdClause.Flag("format-version", "The version of the custom logging format used for the configured endpoint. Can be either 2 (default) or 1").Action(c.FormatVersion.Set).UintVar(&c.FormatVersion.Value)
	c.CmdClause.Flag("message-type", "How the message should be formatted. One of: classic (default), loggly, logplex or blank").Action(c.MessageType.Set).StringVar(&c.MessageType.Value)
	c.CmdClause.Flag("response-condition", "The name of an existing condition in the configured endpoint, or leave blank to always execute").Action(c.ResponseCondition.Set).StringVar(&c.ResponseCondition.Value)
	c.CmdClause.Flag("timestamp-format", `strftime specified timestamp formatting (default "%Y-%m-%dT%H:%M:%S.000")`).Action(c.TimestampFormat.Set).StringVar(&c.TimestampFormat.Value)
	c.CmdClause.Flag("redundancy", "The S3 redundancy level. Can be either standard or reduced_redundancy").Action(c.Redundancy.Set).EnumVar(&c.Redundancy.Value, string(fastly.S3RedundancyStandard), string(fastly.S3RedundancyReduced))
	c.CmdClause.Flag("placement", "Where in the generated VCL the logging call should be placed, overriding any format_version default. Can be none or waf_debug").Action(c.Placement.Set).StringVar(&c.Placement.Value)
	c.CmdClause.Flag("public-key", "A PGP public key that Fastly will use to encrypt your log files before writing them to disk").Action(c.PublicKey.Set).StringVar(&c.PublicKey.Value)
	c.CmdClause.Flag("server-side-encryption", "Set to enable S3 Server Side Encryption. Can be either AES256 or aws:kms").Action(c.ServerSideEncryption.Set).EnumVar(&c.ServerSideEncryption.Value, string(fastly.S3ServerSideEncryptionAES), string(fastly.S3ServerSideEncryptionKMS))
	c.CmdClause.Flag("server-side-encryption-kms-key-id", "Server-side KMS Key ID. Must be set if server-side-encryption is set to aws:kms").Action(c.ServerSideEncryptionKMSKeyID.Set).StringVar(&c.ServerSideEncryptionKMSKeyID.Value)
	return &c
}
// createInput transforms values parsed from CLI flags into an object to be used by the API client library.
// It first fetches the endpoint's current configuration, seeds the update input
// with those values, and then overlays only the flags the user explicitly set.
func (c *UpdateCommand) createInput() (*fastly.UpdateS3Input, error) {
	serviceID, source := c.manifest.ServiceID()
	if source == manifest.SourceUndefined {
		return nil, errors.ErrNoServiceID
	}

	// Fetch the current endpoint settings so unspecified flags keep their
	// existing values.
	s3, err := c.Globals.Client.GetS3(&fastly.GetS3Input{
		Service: serviceID,
		Name:    c.EndpointName,
		Version: c.Version,
	})
	if err != nil {
		return nil, err
	}

	// Seed the update input with the endpoint's current configuration.
	// NewName defaults to the current name, i.e. no rename unless --new-name
	// was passed.
	input := fastly.UpdateS3Input{
		Service:           s3.ServiceID,
		Version:           s3.Version,
		Name:              s3.Name,
		NewName:           s3.Name,
		BucketName:        s3.BucketName,
		Domain:            s3.Domain,
		AccessKey:         s3.AccessKey,
		SecretKey:         s3.SecretKey,
		Path:              s3.Path,
		Period:            s3.Period,
		GzipLevel:         s3.GzipLevel,
		Format:            s3.Format,
		FormatVersion:     s3.FormatVersion,
		ResponseCondition: s3.ResponseCondition,
		MessageType:       s3.MessageType,
		TimestampFormat:   s3.TimestampFormat,
		Redundancy:        s3.Redundancy,
		Placement:         s3.Placement,
		PublicKey:         s3.PublicKey,
		ServerSideEncryption:         s3.ServerSideEncryption,
		ServerSideEncryptionKMSKeyID: s3.ServerSideEncryptionKMSKeyID,
	}

	// Overlay each flag the user explicitly set.
	if c.NewName.Valid {
		input.NewName = c.NewName.Value
	}
	if c.BucketName.Valid {
		input.BucketName = c.BucketName.Value
	}
	if c.AccessKey.Valid {
		input.AccessKey = c.AccessKey.Value
	}
	if c.SecretKey.Valid {
		input.SecretKey = c.SecretKey.Value
	}
	if c.Domain.Valid {
		input.Domain = c.Domain.Value
	}
	if c.Path.Valid {
		input.Path = c.Path.Value
	}
	if c.Period.Valid {
		input.Period = c.Period.Value
	}
	if c.GzipLevel.Valid {
		input.GzipLevel = c.GzipLevel.Value
	}
	if c.Format.Valid {
		input.Format = c.Format.Value
	}
	if c.FormatVersion.Valid {
		input.FormatVersion = c.FormatVersion.Value
	}
	if c.MessageType.Valid {
		input.MessageType = c.MessageType.Value
	}
	if c.ResponseCondition.Valid {
		input.ResponseCondition = c.ResponseCondition.Value
	}
	if c.TimestampFormat.Valid {
		input.TimestampFormat = c.TimestampFormat.Value
	}
	if c.Placement.Valid {
		input.Placement = c.Placement.Value
	}
	if c.PublicKey.Valid {
		input.PublicKey = c.PublicKey.Value
	}
	if c.ServerSideEncryptionKMSKeyID.Valid {
		input.ServerSideEncryptionKMSKeyID = c.ServerSideEncryptionKMSKeyID.Value
	}
	// Enum-valued flags are translated from their string form to the
	// go-fastly typed constants.
	if c.Redundancy.Valid {
		switch c.Redundancy.Value {
		case string(fastly.S3RedundancyStandard):
			input.Redundancy = fastly.S3RedundancyStandard
		case string(fastly.S3RedundancyReduced):
			input.Redundancy = fastly.S3RedundancyReduced
		}
	}
	if c.ServerSideEncryption.Valid {
		switch c.ServerSideEncryption.Value {
		case string(fastly.S3ServerSideEncryptionAES):
			input.ServerSideEncryption = fastly.S3ServerSideEncryptionAES
		case string(fastly.S3ServerSideEncryptionKMS):
			input.ServerSideEncryption = fastly.S3ServerSideEncryptionKMS
		}
	}
	return &input, nil
}
// Exec invokes the application logic for the command.
func (c *UpdateCommand) Exec(in io.Reader, out io.Writer) error {
	input, err := c.createInput()
	if err != nil {
		return err
	}
	// Call the API and report the outcome to the user.
	updated, err := c.Globals.Client.UpdateS3(input)
	if err != nil {
		return err
	}
	text.Success(out, "Updated S3 logging endpoint %s (service %s version %d)", updated.Name, updated.ServiceID, updated.Version)
	return nil
}
|
package main
import "net/http"
const sessionCookie = "SESSION"
// setSession writes the session cookie with the given token value onto the response.
func setSession(w http.ResponseWriter, value string) {
	http.SetCookie(w, &http.Cookie{Name: sessionCookie, Value: value})
}
// getUser returns the username associated with the request's session cookie,
// or "" when there is no valid session.
func (s *server) getUser(r *http.Request) string {
	sess, err := r.Cookie(sessionCookie)
	if err != nil || sess.Value == "" {
		return ""
	}
	// A lookup failure simply yields the empty username.
	name, _ := s.sessions.GetName(sess.Value)
	return name
}
// checkAuth wraps h so that unauthenticated visitors get the login page
// instead of the protected handler.
func (s *server) checkAuth(h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if s.getUser(r) == "" {
			renderTemplate(w, r, "login_first.tpl.html", nil)
			return
		}
		h.ServeHTTP(w, r)
	}
}
|
package main
import "fmt"
// Untyped package-level constants of three different kinds.
const (
	x = 1      // untyped integer constant
	y = "this" // untyped string constant
	z = true   // untyped boolean constant
)
// Weekday enumerates the days of the week, starting with Sunday == 0.
type Weekday int

// Days of the week. iota is the idiomatic way to number a Go enum and
// produces the same values (Sunday=0 .. Saturday=6) as the original
// explicit literals.
const (
	Sunday Weekday = iota
	Monday
	Tuesday
	Wednesday
	Thursday
	Friday
	Saturday
)

// Weekend reports whether day falls on the weekend (Saturday or Sunday).
func Weekend(day Weekday) bool {
	switch day {
	case Sunday, Saturday:
		return true
	default:
		return false
	}
}
// s demonstrates a typed, package-level string constant.
const s string = "constant"

// We can use iota to simulate C’s enum or #define constant.
type Season uint8

// Seasons numbered 0 (Spring) through 3 (Winter) via iota.
const (
	Spring = Season(iota)
	Summer
	Autumn
	Winter
)
// main exercises the constant and enum declarations above; each print's
// expected output is noted alongside it.
func main() {
	const sep = "--------------"

	fmt.Println(x, y, z)

	fmt.Println(Sunday)            // 0
	fmt.Println(Saturday)          // 6
	fmt.Println(Weekend(Saturday)) // true
	fmt.Println(Weekend(Tuesday))  // false
	fmt.Println(sep)

	fmt.Println(Spring) // 0
	fmt.Println(Summer) // 1
	fmt.Println(Autumn) // 2
	fmt.Println(Winter) // 3
	fmt.Println(sep)

	// A Season variable can hold any uint8, even outside the named values.
	season := Summer
	fmt.Println(season) // 1
	season = Season(9)
	fmt.Println(season) // 9
}
|
package quote
import (
"fmt"
"strings"
"github.com/PuerkitoBio/goquery"
"github.com/mmbros/quote/internal/quotegetter"
)
// TorCheck checks if a Tor connection is used,
// retrieving the "https://check.torproject.org" page.
// It returns:
//   - bool: true if Tor is used, false otherwise
//   - string: the message contained in the html page
//   - error: if the message cannot be determined
func TorCheck(proxy string) (bool, string, error) {
	// URL to fetch. A constant is the idiomatic form here; the original
	// `var webURL string = ...` declaration was needlessly mutable.
	const webURL = "https://check.torproject.org"

	client, err := quotegetter.DefaultClient(proxy)
	if err != nil {
		return false, "", err
	}

	// Make request
	resp, err := client.Get(webURL)
	if err != nil {
		return false, "", err
	}
	defer resp.Body.Close()

	doc, err := goquery.NewDocumentFromReader(resp.Body)
	if err != nil {
		return false, "", err
	}

	// The page's <h1> carries the human-readable Tor status message.
	msg := strings.TrimSpace(doc.Find("h1").Text())
	if msg == "" {
		return false, "", fmt.Errorf("can't determine if you are using Tor")
	}
	return msg == "Congratulations. This browser is configured to use Tor.", msg, nil
}
|
package main
import (
"time"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// Enum types for contest state: a contest moves from accepting submissions
// (OPEN) to voting (VOTING) to finished (CONCLUDED).
const (
	OPEN = iota // 0: accepting submissions
	VOTING      // 1: voting in progress
	CONCLUDED   // 2: voting finished
)
// User collection in Mongo.
type User struct {
	Id       primitive.ObjectID `bson:"_id"`
	Username string             `bson:"username"`
	Password string             `bson:"password"`
}
// Contest collection in Mongo. State holds one of the OPEN/VOTING/CONCLUDED
// enum values; OwnerName is denormalized alongside OwnerId for display.
type Contest struct {
	Id          primitive.ObjectID `bson:"_id"`
	Name        string             `bson:"name"`
	State       int                `bson:"state"`
	Description string             `bson:"description"`
	OwnerId     primitive.ObjectID `bson:"owner_id"`
	OwnerName   string             `bson:"owner_name"`
	TimeCreated time.Time          `bson:"time_created"`
}
// Contest helper methods.

// FormatTime renders the contest creation time as e.g. "Jan 2".
func (c Contest) FormatTime() string {
	const layout = "Jan 2"
	return c.TimeCreated.Format(layout)
}
// GetStringId returns the contest's ObjectID as a hex string.
func (c Contest) GetStringId() string {
	hexID := c.Id.Hex()
	return hexID
}
// GetStateString returns a human-readable label for the contest state.
// An unknown state falls through to the concluded label, matching the
// original if/else chain's behavior.
func (c Contest) GetStateString() string {
	// switch is the idiomatic form for a small enum dispatch.
	switch c.State {
	case OPEN:
		return "Accepting Submissions"
	case VOTING:
		return "Voting in Progress"
	default:
		return "Voting Concluded"
	}
}
// IsOpen reports whether the contest is accepting submissions.
func (c Contest) IsOpen() bool {
	return c.State == OPEN
}

// IsVoting reports whether the contest is in its voting phase.
func (c Contest) IsVoting() bool {
	return c.State == VOTING
}

// IsConcluded reports whether voting has finished.
func (c Contest) IsConcluded() bool {
	return c.State == CONCLUDED
}
// ContestEntry collection in Mongo: one submitted image per entry,
// linked to its contest and owner.
type ContestEntry struct {
	Id        primitive.ObjectID `bson:"_id"`
	ContestID primitive.ObjectID `bson:"contest_id"`
	ImagePath string             `bson:"path"`
	Name      string             `bson:"title"`
	OwnerId   primitive.ObjectID `bson:"owner_id"`
	OwnerName string             `bson:"owner_name"`
}
// GetStringId returns the entry's ObjectID as a hex string.
func (c ContestEntry) GetStringId() string {
	return c.Id.Hex()
}
// ContestVote records a single user's vote for an entry in a contest.
type ContestVote struct {
	Id        primitive.ObjectID `bson:"_id"`
	ContestID primitive.ObjectID `bson:"contest_id"`
	EntryID   primitive.ObjectID `bson:"entry_id"`
	UserID    primitive.ObjectID `bson:"user_id"`
}
// Struct to hold data for rendering contest detail view. The Show* flags
// control which forms/buttons the template displays for the current viewer.
type ContestDetailData struct {
	Contest          Contest
	ShowSubmitForm   bool
	ShowVoteForm     bool
	ShowEndSubmission bool
	ShowEndVoting    bool
	EntryCount       int64
	Entries          []ContestEntry
}
|
package executor
import (
"fmt"
"io"
"os"
"os/exec"
"path"
"syscall"
"strings"
"io/ioutil"
log "github.com/sirupsen/logrus"
"github.com/virtru/cork/server/definition"
"github.com/virtru/cork/server/streamer"
)
// init registers the "command" step type with both the legacy handler API
// and the newer StepRunner API.
func init() {
	RegisterHandler("command", CommandStepHandler)
	RegisterRunner("command", CommandStepRunnerFactory)
}
// CommandStepRunnerFactory builds and initializes a CommandStepRunner for params.
func CommandStepRunnerFactory(params StepRunnerParams) (StepRunner, error) {
	log.Debugf("Init params : %+v", params)
	r := &CommandStepRunner{}
	if err := r.Initialize(params); err != nil {
		return nil, err
	}
	return r, nil
}
// CommandStepHandler - Handles executing a command step: it resolves the
// step's templated arguments, loads the named command from the cork dir,
// runs it with the cork environment variables set, and collects declared
// outputs from a temporary outputs directory.
func CommandStepHandler(corkDir string, executor *StepsExecutor, stream streamer.StepStream, step *definition.Step) (map[string]string, error) {
	log.Debugf("Running command step %s", step.Name)
	// Resolve templated args (command name and params) for this step.
	args, err := step.Args.ResolveArgs(executor.Renderer)
	if err != nil {
		log.Debugf("Error resolving arguments: %v", err)
		return nil, err
	}
	log.Debugf("Resolved Args: %+v", *args)
	log.Debugf("Loading command: %s", args.Command)
	command, err := LoadCommand(corkDir, args.Command)
	if err != nil {
		log.Debugf("Error loading command %s: %v", args.Command, err)
		return nil, err
	}
	log.Debugf("Executing command: %s", args.Command)
	cmd := command.ExecCommand()
	stepStreamer := streamer.New(stream)
	defer stepStreamer.Close()
	// Expose step params to the command as upper-cased environment variables.
	for key, value := range args.Params {
		upperKey := strings.ToUpper(key)
		cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", upperKey, value))
	}
	cmd.Dir = executor.Renderer.WorkDir
	// Commands write their outputs as files into this throwaway directory,
	// which is cleaned up when the handler returns.
	outputsDir, err := ioutil.TempDir("", "cork-command-outputs-")
	if err != nil {
		return nil, err
	}
	defer os.RemoveAll(outputsDir)
	cmd.Env = append(cmd.Env, fmt.Sprintf("CORK_DIR=%s", corkDir))
	cmd.Env = append(cmd.Env, fmt.Sprintf("CORK_WORK_DIR=%s", executor.Renderer.WorkDir))
	cmd.Env = append(cmd.Env, fmt.Sprintf("CORK_HOST_WORK_DIR=%s", executor.Renderer.HostWorkDir))
	cmd.Env = append(cmd.Env, fmt.Sprintf("CACHE_DIR=%s", executor.Renderer.CacheDir))
	cmd.Env = append(cmd.Env, fmt.Sprintf("CORK_OUTPUTS_DIR=%s", outputsDir))
	log.Debugf("Env for %s: %v", step.Name, cmd.Env)
	// Run streams the command's output; Wait reaps the process afterwards.
	err = stepStreamer.Run(cmd)
	if err != nil {
		log.Debugf("Command %s encountered an error: %v", args.Command, err)
		return nil, err
	}
	err = cmd.Wait()
	if err != nil {
		log.Debugf("Command %s encountered an error: %v", args.Command, err)
		return nil, err
	}
	// Collect any output
	return getOutputs(args.Command, outputsDir, step.Outputs)
}
// getOutputs reads each expected output key as a file from outputsDir and
// returns the collected contents keyed by output name. A missing output file
// is reported as a CommandInvalid error.
func getOutputs(commandName string, outputsDir string, outputKeys []string) (map[string]string, error) {
	collected := map[string]string{}
	for _, key := range outputKeys {
		outputFile := path.Join(outputsDir, key)
		if _, statErr := os.Stat(outputFile); statErr != nil {
			if os.IsNotExist(statErr) {
				return nil, createCommandInvalidError(
					commandName,
					fmt.Sprintf("Invalid command '%s'. Expected output value '%s' could not be found", commandName, key),
				)
			}
			return nil, statErr
		}
		data, readErr := ioutil.ReadFile(outputFile)
		if readErr != nil {
			return nil, readErr
		}
		collected[key] = string(data)
	}
	return collected, nil
}
// StdinPiper is an io.Reader that streams byte chunks pushed via Write to
// whoever reads it, until Kill is called (which surfaces io.EOF to the reader).
type StdinPiper struct {
	InputBytesChan chan []byte // carries each chunk from Write to a pending Read
	KillInput      chan bool   // signal: the next/pending Read returns io.EOF
}

// NewStdinPiper returns a ready-to-use StdinPiper.
func NewStdinPiper() *StdinPiper {
	return &StdinPiper{
		InputBytesChan: make(chan []byte),
		KillInput:      make(chan bool),
	}
}

// Read blocks until either Write delivers a chunk or Kill is invoked.
// Bug fix: the original copied with an index loop (`p[i] = b`), which
// panicked with an index-out-of-range whenever a chunk was larger than the
// destination buffer; copy() caps at len(p). Bytes beyond len(p) are dropped,
// consistent with the original's one-chunk-per-Read contract.
func (s *StdinPiper) Read(p []byte) (n int, err error) {
	select {
	case chunk := <-s.InputBytesChan:
		return copy(p, chunk), nil
	case <-s.KillInput:
		return 0, io.EOF
	}
}

// Kill makes the next (or currently pending) Read return io.EOF.
// It blocks until a reader consumes the signal.
func (s *StdinPiper) Kill() {
	s.KillInput <- true
}

// Write hands bytes to a pending Read. It blocks until a reader is ready.
func (s *StdinPiper) Write(bytes []byte) {
	s.InputBytesChan <- bytes
}
// Command is a named executable resolved under the cork commands directory.
type Command struct {
	Name string // command name as requested by the step
	Path string // absolute-or-relative filesystem path to the executable
}
// CommandDoesNotExist is returned when a named command cannot be found
// under the cork commands directory.
type CommandDoesNotExist struct {
	Name    string
	Message string
}

// CommandInvalid is returned when a command exists but cannot be used
// (bad permissions, not a regular file, symlink problems, ...).
type CommandInvalid struct {
	Name    string
	Message string
}

// Error implements the error interface.
func (si CommandInvalid) Error() string {
	return si.Message
}

// Error implements the error interface.
func (sdne CommandDoesNotExist) Error() string {
	return sdne.Message
}

// createCommandDoesNotExistError builds the canonical not-found error for name.
func createCommandDoesNotExistError(name string) CommandDoesNotExist {
	return CommandDoesNotExist{
		Name:    name,
		Message: fmt.Sprintf("Command %s does not exist", name),
	}
}

// createCommandInvalidError builds a CommandInvalid for name carrying message.
func createCommandInvalidError(name string, message string) CommandInvalid {
	return CommandInvalid{
		Name:    name,
		Message: message,
	}
}

// IsCommandDoesNotExist reports whether err is a CommandDoesNotExist.
// Idiom: a single type assertion replaces the original two-arm type switch.
func IsCommandDoesNotExist(err error) bool {
	_, ok := err.(CommandDoesNotExist)
	return ok
}
// CheckCommandPath validates that commandPath points at a command the cork
// server may execute. It follows symlinks manually (up to 10 levels) and then
// requires a regular file that is executable by anyone, or owned by the
// server's effective uid/gid with an owner/group execute bit set.
// It returns CommandDoesNotExist when the path is missing and CommandInvalid
// for every other failure.
func CheckCommandPath(name string, commandPath string, depth int) error {
	// Guard against symlink cycles.
	if depth > 10 {
		return createCommandInvalidError(
			name,
			fmt.Sprintf("Invalid command '%s'. Max symlink depth reached", name),
		)
	}
	// Bug fix: os.Stat follows symlinks, so stat.Mode() could never report
	// os.ModeSymlink and the link-following branch below was dead code
	// (and the depth limit was never exercised). os.Lstat stats the link
	// itself, making the manual traversal effective.
	stat, err := os.Lstat(commandPath)
	if err != nil {
		if os.IsNotExist(err) {
			return createCommandDoesNotExistError(name)
		}
		return createCommandInvalidError(
			name,
			fmt.Sprintf("Invalid command '%s'. Got err: %v", name, err),
		)
	}
	mode := stat.Mode()
	if mode&os.ModeSymlink != 0 {
		linkPath, err := os.Readlink(commandPath)
		if err != nil {
			return createCommandInvalidError(
				name,
				fmt.Sprintf("Invalid command '%s'. Symlink encountered error: %v", name, err),
			)
		}
		// NOTE(review): a relative link target is resolved against the
		// process working directory, not the link's directory — confirm
		// cork commands only use absolute symlink targets.
		return CheckCommandPath(name, linkPath, depth+1)
	}
	if !(mode.IsRegular()) {
		return createCommandInvalidError(
			name,
			fmt.Sprintf("Invalid command '%s'. Command is not executable", name),
		)
	}
	// This file is executable by anyone
	if mode&0001 != 0 {
		return nil
	}
	// Otherwise the file must belong to the server's effective uid or gid
	// and carry an owner or group execute bit.
	statT := stat.Sys().(*syscall.Stat_t)
	uid := statT.Uid
	gid := statT.Gid
	corkUID := uint32(os.Geteuid())
	corkGID := uint32(os.Getegid())
	if uid != corkUID && gid != corkGID {
		return createCommandInvalidError(
			name,
			fmt.Sprintf("Invalid command '%s'. Command is not owned by the cork server's uid %d or gid %d", name, corkUID, corkGID),
		)
	}
	if mode&0110 == 0 {
		return createCommandInvalidError(
			name,
			fmt.Sprintf("Invalid command '%s'. Command is not executable", name),
		)
	}
	return nil
}
// LoadCommand resolves name under corkDir/commands and validates that it is
// an executable the server may run.
func LoadCommand(corkDir string, name string) (*Command, error) {
	commandPath := path.Join(corkDir, "commands", name)
	log.Debugf("Loading command %s from %s", name, commandPath)
	if err := CheckCommandPath(name, commandPath, 0); err != nil {
		return nil, err
	}
	cmd := &Command{Name: name, Path: commandPath}
	return cmd, nil
}
// ExecCommand builds an *exec.Cmd for the command, seeded with the server's
// current environment.
func (s *Command) ExecCommand() *exec.Cmd {
	execCmd := exec.Command(s.Path)
	execCmd.Env = os.Environ()
	return execCmd
}
// CommandStepRunner runs a single command step via the StepRunner API,
// keeping handles for feeding stdin and streaming output.
type CommandStepRunner struct {
	Params       StepRunnerParams       // runner configuration supplied at creation
	Cmd          *exec.Cmd              // the prepared command, set by Initialize
	StdinPiper   *StdinPiper            // bridges HandleInput bytes to the command's stdin
	StepStreamer *streamer.StepStreamer // streams the command's output, set by Run
}
// Initialize stores params and pre-loads the command to be executed by Run.
func (c *CommandStepRunner) Initialize(params StepRunnerParams) error {
	c.Params = params
	log.Debugf("Loading command: %s", c.Params.Args.Command)
	loaded, err := LoadCommand(c.Params.Context.CorkDir, c.Params.Args.Command)
	if err != nil {
		return err
	}
	c.Cmd = loaded.ExecCommand()
	return nil
}
// Run executes the loaded command with the cork environment, streaming its
// output through the step streamer and wiring its stdin to a StdinPiper so
// the client can feed it input. Completion is reported over DoneChan; a
// failure is additionally sent on ErrorChan first.
func (c *CommandStepRunner) Run() {
	context := c.Params.Context
	log.Debugf("Executing command: %s", c.Params.Args.Command)
	stepStreamer := streamer.New(c.Params.Stream)
	defer stepStreamer.Close()
	cmd := c.Cmd
	// Expose step params as CORK_PARAM_* environment variables.
	for key, value := range c.Params.Args.Params {
		upperKey := strings.ToUpper(key)
		cmd.Env = append(cmd.Env, fmt.Sprintf("CORK_PARAM_%s=%s", upperKey, value))
		// Support the old style until we get all existing cork servers using this
		cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", upperKey, value))
	}
	cmd.Dir = context.WorkDir
	cmd.Env = append(cmd.Env, fmt.Sprintf("CORK_DIR=%s", context.CorkDir))
	cmd.Env = append(cmd.Env, fmt.Sprintf("CORK_WORK_DIR=%s", context.WorkDir))
	cmd.Env = append(cmd.Env, fmt.Sprintf("CORK_HOST_WORK_DIR=%s", context.HostWorkDir))
	cmd.Env = append(cmd.Env, fmt.Sprintf("CACHE_DIR=%s", context.CacheDir))
	cmd.Env = append(cmd.Env, fmt.Sprintf("CORK_OUTPUTS_DIR=%s", context.OutputsDir))
	log.Debugf("Env for command %s: %v", c.Params.Args.Command, cmd.Env)
	stdinPiper := NewStdinPiper()
	c.StdinPiper = stdinPiper
	c.StepStreamer = stepStreamer
	cmd.Stdin = stdinPiper
	err := stepStreamer.Run(cmd)
	if err != nil {
		// Bug fix: the original Debugf had a %s verb but no arguments,
		// logging a literal "%!s(MISSING)" instead of the command name.
		log.Debugf("Command %s encountered an error: %v", c.Params.Args.Command, err)
		c.Params.ErrorChan <- err
		c.Params.DoneChan <- true
		return
	}
	err = cmd.Wait()
	if err != nil {
		c.Params.ErrorChan <- err
		c.Params.DoneChan <- true
		return
	}
	c.Params.DoneChan <- true
}
// HandleInput forwards a chunk of client input to the step streamer,
// propagating any write error.
func (c *CommandStepRunner) HandleInput(bytes []byte) error {
	return c.StepStreamer.Write(bytes)
}
// HandleSignal satisfies the step-runner interface. Signals are
// currently ignored: nothing is delivered to the running command and
// nil is always returned.
func (c *CommandStepRunner) HandleSignal(signal int32) error {
	return nil
}
|
package main
import (
"fmt"
"time"
)
// main prints a greeting and then keeps the process alive for 1000
// seconds before exiting.
func main() {
	const keepAlive = 1000 * time.Second
	fmt.Println("Alpine ice climbing is the best sport!")
	time.Sleep(keepAlive)
}
|
package c31_hmac_sha1_timing_leak
import (
"bytes"
"github.com/vodafon/cryptopals/set1/c2_fixed_xor"
"github.com/vodafon/cryptopals/set4/c28_sha1_key_mac"
)
const (
	// blockSize is the SHA-1 block size in bytes; HMAC normalizes the
	// key to exactly one block.
	blockSize = 64
	// outputSize is the SHA-1 digest size in bytes.
	outputSize = 20
)
// HMACSystem computes HMAC-SHA1 tags using the hand-rolled SHA-1
// implementation from challenge 28.
type HMACSystem struct {
	key []byte // secret key
	hash *c28_sha1_key_mac.SHA1 // reusable SHA-1 state; Reset before each digest (see newHash)
}
// NewHMACSystem builds an HMACSystem around the given secret key with
// a fresh SHA-1 state.
func NewHMACSystem(key []byte) HMACSystem {
	system := HMACSystem{key: key, hash: &c28_sha1_key_mac.SHA1{}}
	return system
}
// HMAC returns HMAC-SHA1(key, message) following the RFC 2104 scheme:
// the key is normalized to exactly one block (hashed first if longer),
// XORed with the outer/inner pad bytes, and two nested hashes are
// computed.
func (obj HMACSystem) HMAC(message []byte) []byte {
	var blockKey []byte
	if len(obj.key) > blockSize {
		blockKey = pad(obj.newHash(obj.key), blockSize)
	} else if len(obj.key) < blockSize {
		blockKey = pad(obj.key, blockSize)
	} else {
		blockKey = obj.key
	}
	outerPad := c2_fixed_xor.SafeXORBytes(blockKey, bytes.Repeat([]byte{0x5c}, blockSize))
	innerPad := c2_fixed_xor.SafeXORBytes(blockKey, bytes.Repeat([]byte{0x36}, blockSize))
	innerHash := obj.newHash(append(innerPad, message...))
	return obj.newHash(append(outerPad, innerHash...))
}
// pad right-pads src with zero bytes so the result is exactly size
// bytes long. If src is already size bytes or longer it is returned
// unchanged (the original would panic with a negative make length in
// that case).
func pad(src []byte, size int) []byte {
	if len(src) >= size {
		return src
	}
	return append(src, make([]byte, size-len(src))...)
}
// newHash resets the shared SHA-1 state, hashes src, and returns the
// digest as a byte slice (CheckSum returns an array; [:] converts it).
// NOTE: the value receiver copies the struct, but hash is a pointer,
// so Reset/Write still mutate the shared SHA1 state.
func (obj HMACSystem) newHash(src []byte) []byte {
	obj.hash.Reset()
	obj.hash.Write(src)
	mac := obj.hash.CheckSum()
	return mac[:]
}
|
/**
* Copyright (c) 2018 ZTE Corporation.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and the Apache License 2.0 which both accompany this distribution,
* and are available at http://www.eclipse.org/legal/epl-v10.html
* and http://www.apache.org/licenses/LICENSE-2.0
*
* Contributors:
* ZTE - initial Project
*/
package msb
import (
"encoding/json"
"io/ioutil"
"msb2pilot/log"
"msb2pilot/models"
"net/http"
"os"
)
var (
	// msbAddr is the default MSB endpoint, used when the address
	// environment variable is unset (see getBaseUrl).
	msbAddr = "http://localhost:9081"
)
// getBaseUrl returns the MSB base URL from the environment, falling
// back to the built-in default when the variable is empty or unset.
func getBaseUrl() string {
	if addr := os.Getenv(models.EnvMsbAddress); addr != "" {
		return addr
	}
	return msbAddr
}
// GetAllPublishServices fetches the list of published services from the
// MSB discovery API. It returns nil on any transport, read, or decode
// error; errors are logged rather than propagated.
func GetAllPublishServices() []*models.PublishService {
	url := getBaseUrl() + "/api/msdiscover/v1/publishservicelist"
	res, err := http.Get(url)
	if err != nil {
		log.Log.Error("fail to get public address", url, err)
		return nil
	}
	// BUG FIX: the response body was never closed, leaking the
	// underlying connection on every call.
	defer res.Body.Close()
	b, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Log.Error("fail to read response", err)
		return nil
	}
	result := make([]*models.PublishService, 0)
	err = json.Unmarshal(b, &result)
	if err != nil {
		log.Log.Error("fail to unmarshal publish address", err)
		return nil
	}
	return result
}
|
package test
import (
"encoding/json"
"fmt"
"github.com/davecgh/go-spew/spew"
"log"
"testing"
)
// Abc is a small fixture type for the JSON marshal/unmarshal tests;
// every field is dropped from JSON output when empty.
type Abc struct {
	A string `json:"a,omitempty"`
	B string `json:"b,omitempty"`
	C string `json:"c,omitempty"`
}
// Test_aa prints a fixed int32 value (scratch test).
func Test_aa(t *testing.T) {
	value := int32(8008888)
	fmt.Println(value)
}
// Test_map_r prints the map produced by ret_map (scratch test).
func Test_map_r(t *testing.T) {
	result := ret_map()
	fmt.Println(result)
}
// ret_map builds a fixed three-entry string map, prints each key (in
// random iteration order), and returns the map.
func ret_map() map[string]string {
	result := map[string]string{
		"a": "c",
		"b": "b_v",
		"c": "c_v",
	}
	for key := range result {
		fmt.Println(key)
	}
	return result
}
// Test_abc_marshal marshals a fully-populated Abc and logs the JSON,
// logging the error instead if marshaling fails.
func Test_abc_marshal(t *testing.T) {
	bean := Abc{A: "1", B: "2", C: "3"}
	encoded, err := json.Marshal(bean)
	if err != nil {
		log.Println("----------", err, "------------")
		return
	}
	log.Println("----------", string(encoded), "------------")
}
// Test_test_map_unmarshal marshals a string map to JSON, unmarshals the
// bytes back into an *Abc (passed by address, so Unmarshal sees **Abc),
// and dumps the decoded struct.
func Test_test_map_unmarshal(t *testing.T) {
	src := map[string]interface{}{"a": "1", "b": "2", "c": "3"}
	encoded, err := json.Marshal(src)
	if err != nil {
		fmt.Println("x_test.go->", err)
		return
	}
	fmt.Println("-----")
	fmt.Println(string(encoded))
	dst := &Abc{}
	if err = json.Unmarshal(encoded, &dst); err != nil {
		log.Println("----------", "------------")
		fmt.Println(err)
		return
	}
	log.Println("----------", "unmarshal", "------------")
	spew.Dump(dst)
}
// Test_map_unmarshal_2 marshals a string map to JSON and unmarshals it
// back into a *map[string]interface{} allocated with new (note the
// pointer-to-pointer handed to Unmarshal), then dumps the result.
func Test_map_unmarshal_2(t *testing.T) {
	src := map[string]interface{}{"a": "1", "b": "2", "c": "3"}
	encoded, err := json.Marshal(src)
	if err != nil {
		fmt.Println("x_test.go->", err)
		return
	}
	fmt.Println("-----")
	fmt.Println(string(encoded))
	dst := new(map[string]interface{})
	if err = json.Unmarshal(encoded, &dst); err != nil {
		log.Println("----------", "------------")
		fmt.Println(err)
		return
	}
	log.Println("----------", "unmarshal", "------------")
	spew.Dump(dst)
}
|
package web
import (
"encoding/json"
"net/http"
"strings"
"github.com/cybozu-go/sabakan/v2"
)
// handleLabels dispatches /api/v1/labels/<serial>[/<label>] requests:
// PUT attaches labels to the machine, DELETE removes a single label.
// NOTE(review): methods other than PUT/DELETE fall through and produce
// an empty 200 — confirm whether a 405 response is intended.
func (s Server) handleLabels(w http.ResponseWriter, r *http.Request) {
	args := strings.SplitN(r.URL.Path[len("/api/v1/labels/"):], "/", 2)
	if len(args) == 0 || len(args[0]) == 0 {
		renderError(r.Context(), w, APIErrBadRequest)
		return
	}

	if r.Method == "PUT" {
		s.handleLabelsPut(w, r, args[0])
		return
	}
	if r.Method == "DELETE" {
		// DELETE requires both a serial and a label name.
		if len(args) != 2 {
			renderError(r.Context(), w, APIErrBadRequest)
			return
		}
		s.handleLabelsDelete(w, r, args[0], args[1])
	}
}
// handleLabelsPut decodes a JSON object of label key/values from the
// request body and attaches them to the machine with the given serial.
// A missing machine yields 404; any other model failure yields 500;
// success writes nothing (empty 200).
func (s Server) handleLabelsPut(w http.ResponseWriter, r *http.Request, serial string) {
	var labels map[string]string
	if err := json.NewDecoder(r.Body).Decode(&labels); err != nil {
		renderError(r.Context(), w, APIErrBadRequest)
		return
	}

	switch err := s.Model.Machine.AddLabels(r.Context(), serial, labels); err {
	case nil:
		// success; empty response
	case sabakan.ErrNotFound:
		renderError(r.Context(), w, APIErrNotFound)
	default:
		renderError(r.Context(), w, InternalServerError(err))
	}
}
// handleLabelsDelete removes one label from the machine with the given
// serial. A missing machine/label yields 404; any other model failure
// yields 500; success writes nothing (empty 200).
func (s Server) handleLabelsDelete(w http.ResponseWriter, r *http.Request, serial, label string) {
	switch err := s.Model.Machine.DeleteLabel(r.Context(), serial, label); err {
	case nil:
		// success; empty response
	case sabakan.ErrNotFound:
		renderError(r.Context(), w, APIErrNotFound)
	default:
		renderError(r.Context(), w, InternalServerError(err))
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.