text stringlengths 11 4.05M |
|---|
package server
// Storage wires up the server's HTTP routes and starts the API listener.
//
// NOTE(review): no implementation is visible in this fragment; the method
// semantics are inferred from names only — confirm against implementers.
type Storage interface {
	NewRoutes()
	StartAPI()
} |
package discover
import (
"encoding/json"
"io/ioutil"
"strings"
"github.com/k8guard/k8guard-discover/messaging"
"github.com/k8guard/k8guard-discover/metrics"
"github.com/k8guard/k8guard-discover/rules"
lib "github.com/k8guard/k8guardlibs"
"github.com/k8guard/k8guardlibs/messaging/types"
"github.com/k8guard/k8guardlibs/violations"
"github.com/prometheus/client_golang/prometheus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/pkg/apis/apps/v1beta1"
)
// GetAllDeployFromApi lists every Deployment in the configured namespace via
// the Kubernetes API, optionally dumping the raw list to deployments.txt for
// debugging, and updates the ALL_DEPLOYMENT_COUNT metric.
// It panics (after logging) on any error, matching the discover package's
// fail-fast style.
func GetAllDeployFromApi() []v1beta1.Deployment {
	deploys, err := Clientset.AppsV1beta1().Deployments(lib.Cfg.Namespace).List(metav1.ListOptions{})
	if err != nil {
		lib.Log.Error("error: ", err)
		panic(err.Error())
	}
	if lib.Cfg.OutputPodsToFile {
		// The original discarded the Marshal error; handle it like the
		// surrounding error paths.
		r, err := json.Marshal(deploys.Items)
		if err != nil {
			lib.Log.Error("error:", err)
			panic(err)
		}
		if err := ioutil.WriteFile("deployments.txt", r, 0644); err != nil {
			lib.Log.Error("error:", err)
			panic(err)
		}
	}
	metrics.Update(metrics.ALL_DEPLOYMENT_COUNT, len(deploys.Items))
	return deploys.Items
}
// GetBadDeploys scans deployments for policy violations (host-path volumes,
// missing required annotations/labels, bad containers, single replicas, and
// missing required deployments) and returns the offenders. When sendToBroker
// is true, each violating deployment is also published to the message broker.
// Duration is recorded in the FNGetBadDeploys metric.
func GetBadDeploys(theDeploys []v1beta1.Deployment, sendToBroker bool) []lib.Deployment {
	timer := prometheus.NewTimer(prometheus.ObserverFunc(metrics.FNGetBadDeploys.Set))
	defer timer.ObserveDuration()
	allBadDeploys := []lib.Deployment{}
	cacheAllImages(true)
	allBadDeploys = append(allBadDeploys, verifyRequiredDeployments(theDeploys, sendToBroker)...)
	for _, kd := range theDeploys {
		if isIgnoredNamespace(kd.Namespace) || isIgnoredDeployment(kd.ObjectMeta.Name) {
			continue
		}
		// Skip scaled-to-zero deployments.
		if kd.Status.Replicas == 0 {
			continue
		}
		d := lib.Deployment{}
		d.Name = kd.Name
		d.Cluster = lib.Cfg.ClusterName
		d.Namespace = kd.Namespace
		getVolumesWithHostPathForAPod(kd.Name, kd.Spec.Template.Spec, &d.ViolatableEntity)
		verifyRequiredAnnotations(kd.ObjectMeta.Annotations, &d.ViolatableEntity, "deployment", violations.REQUIRED_DEPLOYMENT_ANNOTATIONS_TYPE)
		verifyRequiredLabels(kd.ObjectMeta.Labels, &d.ViolatableEntity, "deployment", violations.REQUIRED_DEPLOYMENT_LABELS_TYPE)
		verifyRequiredAnnotations(kd.Spec.Template.ObjectMeta.Annotations, &d.ViolatableEntity, "pod", violations.REQUIRED_POD_ANNOTATIONS_TYPE)
		verifyRequiredLabels(kd.Spec.Template.ObjectMeta.Labels, &d.ViolatableEntity, "pod", violations.REQUIRED_POD_LABELS_TYPE)
		GetBadContainers(kd.Namespace, "deployment", kd.Spec.Template.Spec, &d.ViolatableEntity)
		if !isValidReplicaSize(kd) && rules.IsNotIgnoredViolation(kd.Namespace, "deployment", kd.Name, violations.SINGLE_REPLICA_TYPE) {
			d.Violations = append(d.Violations, violations.Violation{Source: kd.Name, Type: violations.SINGLE_REPLICA_TYPE})
		}
		if len(d.ViolatableEntity.Violations) > 0 {
			allBadDeploys = append(allBadDeploys, d)
			if sendToBroker {
				messaging.SendData(types.DEPLOYMENT_MESSAGE, d.Name, d)
			}
		}
	}
	metrics.Update(metrics.BAD_DEPLOYMENT_COUNT, len(allBadDeploys))
	return allBadDeploys
}
// isValidReplicaSize reports whether the deployment requests more than one
// replica. Spec.Replicas is a pointer; the original dereferenced it blindly
// and would panic on a nil value. Kubernetes defaults a nil Replicas to 1,
// so nil is treated the same as a single replica.
func isValidReplicaSize(deployment v1beta1.Deployment) bool {
	if deployment.Spec.Replicas == nil || *deployment.Spec.Replicas == 1 {
		return false
	}
	return true
}
// isIgnoredDeployment reports whether deploymentName matches any configured
// ignored-deployment prefix.
func isIgnoredDeployment(deploymentName string) bool {
	for _, prefix := range lib.Cfg.IgnoredDeployments {
		if strings.HasPrefix(deploymentName, prefix) {
			return true
		}
	}
	return false
}
// verifyRequiredDeployments evaluates the RequiredEntities rules (formatted
// "namespace:entityType:name") and reports a violation for every deployment
// a rule requires but that is absent from theDeployments. When sendToBroker
// is true each violation is also published to the broker.
func verifyRequiredDeployments(theDeployments []v1beta1.Deployment, sendToBroker bool) []lib.Deployment {
	entityType := "deployment"
	badDeployments := []lib.Deployment{}
	for _, ns := range GetAllNamespaceFromApi() {
		if !rules.IsNotIgnoredViolation(ns.Name, entityType, "*", violations.REQUIRED_ENTITIES_TYPE) {
			continue
		}
		for _, a := range lib.Cfg.RequiredEntities {
			rule := strings.Split(a, ":")
			// Guard against malformed rules: the original indexed rule[1]
			// and rule[2] unchecked and would panic on bad config.
			if len(rule) != 3 {
				lib.Log.Error("malformed required entity rule: ", a)
				continue
			}
			// does the rule apply to this namespace and entity type?
			if !(rules.Exact(ns.Name, rule[0]) && rules.Exact(entityType, rule[1])) {
				continue
			}
			found := false
			for _, kd := range theDeployments {
				if rules.Exact(kd.ObjectMeta.Namespace, rule[0]) && rules.Exact(kd.ObjectMeta.Name, rule[2]) {
					found = true
					break
				}
			}
			if found {
				continue
			}
			d := lib.Deployment{}
			d.Name = rule[2]
			d.Cluster = lib.Cfg.ClusterName
			d.Namespace = ns.Name
			d.ViolatableEntity.Violations = append(d.ViolatableEntity.Violations, violations.Violation{Source: rule[2], Type: violations.REQUIRED_DEPLOYMENTS_TYPE})
			badDeployments = append(badDeployments, d)
			if sendToBroker {
				messaging.SendData(types.DEPLOYMENT_MESSAGE, d.Name, d)
			}
		}
	}
	return badDeployments
}
|
// Copyright (c) 2018 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wallet
import (
"os"
"testing"
"time"
"github.com/btcsuite/btcd/chaincfg"
_ "github.com/btcsuite/btcwallet/walletdb/bdb"
)
// TestCreateWatchingOnly checks that we can construct a watching-only
// wallet.
func TestCreateWatchingOnly(t *testing.T) {
	// Set up a wallet in a throwaway directory.
	dir, err := os.MkdirTemp("", "watchingonly_test")
	if err != nil {
		t.Fatalf("Failed to create db dir: %v", err)
	}
	defer os.RemoveAll(dir)
	pubPass := []byte("hello")
	// NOTE(review): 250 is presumably a recovery/lookahead window and the
	// short retry interval keeps the test fast — confirm against NewLoader.
	loader := NewLoader(
		&chaincfg.TestNet3Params, dir, true, defaultDBTimeout, 250,
		WithWalletSyncRetryInterval(10*time.Millisecond),
	)
	// Watching-only wallets take only a public passphrase.
	_, err = loader.CreateNewWatchingOnlyWallet(pubPass, time.Now())
	if err != nil {
		t.Fatalf("unable to create wallet: %v", err)
	}
}
|
package ehttp
import (
"github.com/enjoy-web/ehttp/swagger"
)
// Response describes one possible HTTP response of an API operation.
//
// Fields:
//   Description -- human-readable description of the response
//   Model       -- the response body model (nil, a struct, or []string)
//   Headers     -- response header metadata, keyed by header name
type Response struct {
	Description string
	Model       interface{}
	Headers     map[string]ValueInfo
}
// ToSwaggerResponse converts r into a *swagger.Response. It returns an error
// if the model schema or any header metadata cannot be translated.
func (r Response) ToSwaggerResponse() (*swagger.Response, error) {
	resp := &swagger.Response{Description: r.Description}
	if r.hasModel() {
		schema, err := getSwaggerSchemaFromObj(r.Model)
		if err != nil {
			return nil, err
		}
		resp.Schema = schema
	}
	if r.hasHeaders() {
		// Pre-size the map: every entry in r.Headers yields one header.
		resp.Headers = make(map[string]*swagger.Header, len(r.Headers))
		for name, valueInfo := range r.Headers {
			header, err := valueInfo.toSwaggerHeader()
			if err != nil {
				return nil, err
			}
			resp.Headers[name] = header
		}
	}
	return resp, nil
}
// hasModel reports whether a response body model was supplied.
func (r Response) hasModel() bool {
	if r.Model == nil {
		return false
	}
	return true
}

// hasHeaders reports whether header metadata was supplied (a non-nil map).
func (r Response) hasHeaders() bool {
	if r.Headers == nil {
		return false
	}
	return true
}
|
package ctree
import (
"bytes"
"fmt"
"reflect"
"testing"
)
// TestSize checks that Tree.Size counts every node, including the root.
func TestSize(t *testing.T) {
	r := &mys{"root"}
	e := &mys{"="}
	p := &mys{"+"}
	o := &mys{"1"}
	two := &mys{"2"}
	tests := []struct {
		name     string
		input    Tree
		expected int
	}{
		// Same five nodes arranged flat, deeply nested, etc.
		{"simple", BuildTree("", r).Add(e).Down().Add(p).Add(o).Add(two).Up().Build(), 5},
		{"simple", BuildTree("", r).Add(e).Down().Add(p).Down().Add(o).Down().Add(two).Up().Build(), 5},
		{"simple", BuildTree("", r).Add(e).Add(&mys{"a"}).Build(), 3},
		{"simple", BuildTree("", r).Build(), 1},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.input.Size() != tt.expected {
				t.Errorf(`
Expected %d
Received %d
`, tt.expected, tt.input.Size())
			}
		})
	}
}

// TestPathAsPos checks that PathAsPosition reports a node's location as a
// list of child indices from the root (the root itself yields an empty path).
func TestPathAsPos(t *testing.T) {
	r := &mys{"root"}
	e := &mys{"="}
	p := &mys{"+"}
	o := &mys{"1"}
	two := &mys{"2"}
	tests := []struct {
		name     string
		input    Tree
		node     INode
		expected []int
	}{
		{"0-0", BuildTree("", r).Add(e).Down().Add(p).Add(o).Add(two).Up().Build(), e, []int{0}},
		{"1000", BuildTree("", r).Add(e).Down().Add(p).Down().Add(o).Add(two).Up().Build(), two, []int{0, 0, 1}},
		{"1000", BuildTree("", r).Add(e).Down().Add(p).Add(o).Add(two).Up().Build(), two, []int{0, 2}},
		{"0#00", BuildTree("", r).Add(e).Add(&mys{"a"}).Build(), r, []int{}},
		{"0#01", BuildTree("", r).Build(), r, []int{}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if !reflect.DeepEqual(tt.input.PathAsPosition(tt.node), tt.expected) {
				t.Errorf(`
Expected %v
Received %v
`, tt.expected, tt.input.PathAsPosition(tt.node))
			}
		})
	}
}

// TestCommonAncestor checks CommonAncestor over a fixed two-branch tree.
// The /**/ comments in the builder chain mirror the tree's indentation.
func TestCommonAncestor(t *testing.T) {
	r := &mys{"root"}
	a := &mys{"a"}
	a2 := &mys{"a2"}
	b := &mys{"b"}
	c := &mys{"c"}
	d := &mys{"d"}
	e := &mys{"e"}
	f := &mys{"f"}
	tree := BuildTree("", r).
		Add(a).Down().
		/**/ Add(b).Down().
		/* */ Add(c).Add(d).
		/**/ Up().
		/**/ Add(e).Add(f).
		Up().
		Add(a2).
		Build()
	tests := []struct {
		name     string
		n1, n2   INode
		expected INode
	}{
		{"0#00", r, r, r},
		{"0#01", c, d, b},
		{"0#02", f, d, a},
		{"0#03", a2, e, r},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tree.CommonAncestor(tt.n1, tt.n2) != tt.expected {
				t.Errorf(`
Expected %v
Received %v
`, tt.expected, tree.CommonAncestor(tt.n1, tt.n2))
			}
		})
	}
}

// TestIterator walks a tree with the pre-order iterator and checks the
// rendered token stream, including Down/Up markers and spacing.
func TestIterator(t *testing.T) {
	r := &mys{"root"}
	e := &mys{"="}
	p := &mys{"+"}
	o := &mys{"1"}
	two := &mys{"2"}
	tests := []struct {
		name     string
		input    *ctree
		expected string
	}{
		{"simple", BuildTree("", r).Add(e).Down().Add(p).Add(o).Add(two).Up().Build().(*ctree), " (= (+ 1 2)) <EOF>"},
		{"simple", BuildTree("", r).Add(e).Add(&mys{"a"}).Build().(*ctree), " (= a) <EOF>"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			buf := bytes.Buffer{}
			iter := NewPreOrderTreeIterator(tt.input, e)
			// start tracks "just descended", so the first node after a Down
			// marker is rendered without a leading space.
			var start bool
			for iter.HasNext() {
				// fmt.Printf("1.%v\n", iter.stack)
				e := iter.Next()
				if _, ok := e.(*Down); ok {
					start = true
					buf.WriteString(fmt.Sprintf(" %v", e))
				} else if _, ok := e.(*Up); ok {
					buf.WriteString(fmt.Sprintf("%v", e))
				} else if start {
					buf.WriteString(fmt.Sprintf("%v", e))
					start = false
				} else {
					buf.WriteString(fmt.Sprintf(" %v", e))
				}
			}
			rec := buf.String()
			if tt.expected != rec {
				t.Errorf(`
Expected %s
Received '%s'
`, tt.expected, rec)
			}
		})
	}
}

// TestSExpr02 checks SExpr rendering both from the root and from an inner
// node (p), over a wide tree and a maximally nested one.
func TestSExpr02(t *testing.T) {
	r := &mys{"root"}
	e := &mys{"="}
	p := &mys{"+"}
	o := &mys{"1"}
	two := &mys{"2"}
	tests := []struct {
		name      string
		input     *ctree
		expected  string
		expected2 string
	}{
		{"simple",
			BuildTree("", r).Add(e).Down().Add(&mys{"a"}).
				Add(&mys{"b"}).
				Add(&mys{"c"}).
				Add(&mys{"d"}).
				Add(p).Add(o).Add(two).Up().Build().(*ctree),
			" (root (= (a b c d + 1 2)))",
			" (+ 1 2)",
		},
		{"simple",
			BuildTree("", r).Add(e).
				Down().Add(&mys{"a"}).
				Down().Add(&mys{"b"}).
				Down().Add(&mys{"c"}).
				Down().Add(&mys{"d"}).
				Down().Add(p).
				Down().Add(o).
				Down().Add(two).
				Build().(*ctree),
			" (root (= (a (b (c (d (+ (1 (2)))))))))",
			" (+ (1 (2)))",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.expected != tt.input.SExpr(r) {
				t.Errorf(`
Expected %s
Received '%s'
`, tt.expected, tt.input.SExpr(r))
			}
			if tt.expected2 != tt.input.SExpr(p) {
				t.Errorf(`
2.
Expected %s
Received '%s'
`, tt.expected2, tt.input.SExpr(p))
			}
		})
	}
}
//' = ( + 1 2 )'
|
package gorasp
import (
"fmt"
)
// printAThing prints the rank at index 2 of the given RankSelect.
func printAThing(v RankSelect) {
	val := v.RankOfIndex(2)
	fmt.Println(val)
}

// main exercises NewRankSelectSimple on a small bit vector.
//
// NOTE(review): this file declares `package gorasp`, so this main is not an
// executable entry point — confirm whether it should live in a cmd/ package.
func main() {
	fmt.Println("Hello, rasp!")
	val := NewRankSelectSimple([]int{0, 0, 1, 0, 1, 1, 0})
	printAThing(val)
	fmt.Println(val)
	fmt.Println(val.RankOfIndex(1))
}
|
package main
import (
"fmt"
"github.com/bndr/gopencils"
)
// Repo represents a repository within a project and carries the REST
// resource used to reach it. The embedded *Project presumably supplies
// request helpers such as DoGet — confirm against the Project type.
type Repo struct {
	*Project
	Name     string
	Resource *gopencils.Resource
}
// GetPullRequest returns a handle for the pull request with the given id.
func (repo *Repo) GetPullRequest(id int64) PullRequest {
	resource := repo.Resource.Res("pull-requests").Id(fmt.Sprint(id))
	return PullRequest{
		Repo:     repo,
		Id:       id,
		Resource: resource,
	}
}
// ListPullRequest fetches pull requests filtered by state.
//
// NOTE(review): only one page is returned — Size/Limit/IsLastPage are decoded
// but never used to fetch further pages; confirm that is intended.
func (repo *Repo) ListPullRequest(state string) ([]PullRequest, error) {
	reply := struct {
		Size       int
		Limit      int
		IsLastPage bool
		Values     []PullRequest
	}{}
	query := map[string]string{
		"state": state,
	}
	// Res(name, &reply) presumably registers reply as the decode target for
	// this request — confirm against the gopencils API.
	err := repo.DoGet(repo.Resource.Res("pull-requests", &reply), query)
	if err != nil {
		return nil, err
	}
	return reply.Values, nil
}
|
package main
import "sort"
// merge collapses overlapping intervals and returns the merged list.
// Touching intervals such as [1,3] and [3,5] are merged into [1,5]
// (the `>=` comparison below; `>` would keep them separate).
func merge(intervals [][]int) [][]int {
	if len(intervals) == 0 {
		return [][]int{}
	}
	// Sort by start ascending; ties broken by the smaller end first.
	sort.Slice(intervals, func(i, j int) bool {
		if intervals[i][0] == intervals[j][0] {
			return intervals[i][1] < intervals[j][1]
		}
		return intervals[i][0] < intervals[j][0]
	})
	merged := make([][]int, 0, len(intervals))
	first, last := intervals[0][0], intervals[0][1]
	// Start at 1: the first interval already seeds [first, last] (the
	// original started at 0 and re-processed it as a no-op).
	for i := 1; i < len(intervals); i++ {
		if last >= intervals[i][0] {
			// Overlapping or touching: extend the current interval.
			last = max(last, intervals[i][1])
		} else {
			// Disjoint: emit the current interval and start a new one.
			merged = append(merged, []int{first, last})
			first, last = intervals[i][0], intervals[i][1]
		}
	}
	return append(merged, []int{first, last})
}
// max returns the larger of a and b.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}
/*
题目链接:
https://leetcode-cn.com/problems/merge-intervals/submissions/ 合并区间
*/
/*
总结
1. 这题就是给一堆区间给你,让你合并为一些没有重叠部分的区间。
2. 注意: 这题目对于区间 [1, 3] [3, 5] 是需要合并的,合并为[1, 5]
*/
|
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"github.com/Zenika/marcel/version"
)
// init registers the `version` subcommand on the root Marcel command; it
// prints the build version and VCS revision.
func init() {
	Marcel.AddCommand(&cobra.Command{
		Use:   "version",
		Short: "Displays version information",
		Args:  cobra.NoArgs,
		Run: func(_ *cobra.Command, _ []string) {
			fmt.Printf("%s rev: %s\n", version.Version(), version.Revision())
		},
	})
}
|
package client
import (
"encoding/json"
"errors"
"net/http"
"strings"
log "github.com/sirupsen/logrus"
)
const checkoutsSuffix = "/v1/checkouts"
// CreateCheckout creates a new checkout on Satispay Platform.
// idempotencyKey is forwarded so retried requests are not double-processed.
// Non-200 responses are decoded as SatispayError and surfaced as an error.
func (client *Client) CreateCheckout(checkoutRequest *CheckoutRequest, idempotencyKey string) (checkout Checkout, err error) {
	log.Debugf("Request is %s", checkoutRequest.String())
	request, err := http.NewRequest("POST", client.endpoint+checkoutsSuffix, strings.NewReader(checkoutRequest.String()))
	if err != nil {
		log.Errorf("Got error creating http request %v", err)
		return checkout, err
	}
	response, err := client.do(request, idempotencyKey)
	if err != nil {
		log.Errorf("Got error performing http request %v", err)
		return checkout, err
	}
	// The body was never closed before, leaking the connection; close it so
	// the transport can reuse it.
	defer response.Body.Close()
	dec := json.NewDecoder(response.Body)
	if response.StatusCode != 200 {
		satisErr := SatispayError{}
		err = dec.Decode(&satisErr)
		if err != nil {
			log.Errorf("Error decoding satispay error %v", err)
			return checkout, err
		}
		log.Debugf("Satispay Error is %s", satisErr.String())
		return checkout, errors.New(satisErr.Message)
	}
	err = dec.Decode(&checkout)
	if err != nil {
		log.Errorf("Error decoding checkout %v", err)
	}
	return
}
|
package main
import (
"database/sql"
"fmt"
_ "github.com/go-sql-driver/mysql"
)
// Db is the shared MySQL connection pool, initialized by initDb.
var Db *sql.DB

// User maps one row of the test_data table.
type User struct {
	Id   int            `db:"id"`   // primary key
	Name sql.NullString `db:"name"` // nullable name column
}
// queryRowTest fetches the row with id=1 and prints it.
func queryRowTest() {
	// Named `query`: the original local `sql` shadowed the database/sql
	// package inside this function.
	query := "select id, name from test_data where id=?"
	row := Db.QueryRow(query, 1)
	var user User
	// The connection is only released after Scan, so Scan must be called.
	err := row.Scan(&user.Id, &user.Name)
	if err != nil {
		fmt.Println("scan row failed, err:", err)
		return
	}
	fmt.Printf("user: %v \n", user)
}

// queryAllTest fetches every row of test_data and prints each one.
func queryAllTest() {
	query := "select id, name from test_data"
	rows, err := Db.Query(query)
	if err != nil {
		fmt.Println("query all failed, err:", err)
		return
	}
	// rows is always non-nil when err is nil, so a plain defer suffices.
	defer rows.Close()
	for rows.Next() {
		var user User
		// Scan releases the row; every row must be scanned.
		if err := rows.Scan(&user.Id, &user.Name); err != nil {
			fmt.Println("scan row failed, err:", err)
			return
		}
		fmt.Printf("user: %v \n", user)
	}
	// The original never checked rows.Err: Next may stop because of an
	// iteration error, not only at end-of-set.
	if err := rows.Err(); err != nil {
		fmt.Println("iterate rows failed, err:", err)
	}
}
// insertTest inserts a fixed row (id=2, name="lisi") and reports the result.
func insertTest() {
	query := "insert test_data(id, name) values(?, ?)"
	res, err := Db.Exec(query, 2, "lisi")
	if err != nil {
		fmt.Println("insert data failed, err:", err)
		return
	}
	// The original discarded both of these errors.
	id, err := res.LastInsertId()
	if err != nil {
		fmt.Println("get last insert id failed, err:", err)
		return
	}
	count, err := res.RowsAffected()
	if err != nil {
		fmt.Println("get rows affected failed, err:", err)
		return
	}
	fmt.Printf("插入的行数: %d,插入id:%d \n", count, id)
}

// updateTest updates the name of row id=2 and reports the affected count.
// The original's "insert data failed" message and "插入的行数" label were
// copy-paste bugs from insertTest; LastInsertId is meaningless for UPDATE.
func updateTest() {
	query := "update test_data set name=? where id=?"
	res, err := Db.Exec(query, "lisi02", 2)
	if err != nil {
		fmt.Println("update data failed, err:", err)
		return
	}
	count, err := res.RowsAffected()
	if err != nil {
		fmt.Println("get rows affected failed, err:", err)
		return
	}
	fmt.Printf("更新的行数: %d \n", count)
}

// deleteTest deletes row id=2 and reports the affected count.
func deleteTest() {
	query := "delete from test_data where id=?"
	res, err := Db.Exec(query, 2)
	if err != nil {
		fmt.Println("delete data failed, err:", err)
		return
	}
	count, err := res.RowsAffected()
	if err != nil {
		fmt.Println("get rows affected failed, err:", err)
		return
	}
	fmt.Printf("删除的行数: %d \n", count)
}
// main initializes the DB pool, then runs the delete and query-all demos.
// The other demos are left commented out for manual experimentation.
func main() {
	err := initDb()
	if err != nil {
		fmt.Println("init db failed, err:", err)
		return
	}
	// queryRowTest()
	// insertTest()
	// updateTest()
	deleteTest()
	queryAllTest()
}
// initDb opens the MySQL pool, verifies connectivity, and configures pool
// limits. On success the package-level Db is ready for use.
func initDb() error {
	var err error
	dsn := "root:123@tcp(localhost:3306)/caipiao_admin"
	Db, err = sql.Open("mysql", dsn)
	if err != nil {
		fmt.Println("open mysql failed, err:", err)
		return err
	}
	// sql.Open does not establish a connection; Ping verifies the DSN and
	// server are actually reachable (the original skipped this).
	if err = Db.Ping(); err != nil {
		fmt.Println("connect mysql failed, err:", err)
		return err
	}
	Db.SetMaxOpenConns(100) // max open connections
	Db.SetMaxIdleConns(16)  // idle connections kept in the pool
	return nil
}
|
package service
import (
"tesou.io/platform/brush-parent/brush-api/common/base"
"tesou.io/platform/brush-parent/brush-api/module/match/pojo"
"tesou.io/platform/brush-parent/brush-core/common/base/service/mysql"
)
// MatchHisService provides persistence operations for match-history rows.
type MatchHisService struct {
	mysql.BaseService
}

// Exist reports whether a t_match_his row with v's Id exists.
// A query error is logged and reported as "does not exist".
func (this *MatchHisService) Exist(v *pojo.MatchHis) bool {
	has, err := mysql.GetEngine().Table("`t_match_his`").Where(" `Id` = ? ", v.Id).Exist()
	if err != nil {
		base.Log.Error("Exist", err)
	}
	return has
}
// FindAll returns every match-history row ordered by MatchDate.
// An empty (non-nil) slice is returned when there are no rows or on error.
func (this *MatchHisService) FindAll() []*pojo.MatchHis {
	dataList := make([]*pojo.MatchHis, 0)
	// The original discarded Find's error; log it like the sibling methods.
	if err := mysql.GetEngine().OrderBy("MatchDate").Find(&dataList); err != nil {
		base.Log.Error("FindAll:", err)
	}
	return dataList
}
// FindById loads the match-history row with the given id.
//
// NOTE(review): the "found" bool from Get is discarded, so when no row
// matches this returns a MatchHis populated only with Id — confirm callers
// expect that rather than nil.
func (this *MatchHisService) FindById(matchId string) *pojo.MatchHis {
	data := new(pojo.MatchHis)
	data.Id = matchId
	_, err := mysql.GetEngine().Get(data)
	if err != nil {
		base.Log.Error("FindById:", err)
	}
	return data
}
|
package apiclient
import (
"context"
"google.golang.org/grpc"
workflowtemplatepkg "github.com/argoproj/argo/pkg/apiclient/workflowtemplate"
"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
grpcutil "github.com/argoproj/argo/util/grpc"
)
// errorTranslatingWorkflowTemplateServiceClient decorates a
// WorkflowTemplateServiceClient so every returned gRPC error is passed
// through grpcutil.TranslateError before reaching the caller.
type errorTranslatingWorkflowTemplateServiceClient struct {
	delegate workflowtemplatepkg.WorkflowTemplateServiceClient
}

// Compile-time interface conformance check.
var _ workflowtemplatepkg.WorkflowTemplateServiceClient = &errorTranslatingWorkflowTemplateServiceClient{}

// CreateWorkflowTemplate delegates the call and translates any error.
func (a *errorTranslatingWorkflowTemplateServiceClient) CreateWorkflowTemplate(ctx context.Context, req *workflowtemplatepkg.WorkflowTemplateCreateRequest, _ ...grpc.CallOption) (*v1alpha1.WorkflowTemplate, error) {
	tmpl, err := a.delegate.CreateWorkflowTemplate(ctx, req)
	if err == nil {
		return tmpl, nil
	}
	return nil, grpcutil.TranslateError(err)
}

// GetWorkflowTemplate delegates the call and translates any error.
func (a *errorTranslatingWorkflowTemplateServiceClient) GetWorkflowTemplate(ctx context.Context, req *workflowtemplatepkg.WorkflowTemplateGetRequest, _ ...grpc.CallOption) (*v1alpha1.WorkflowTemplate, error) {
	tmpl, err := a.delegate.GetWorkflowTemplate(ctx, req)
	if err == nil {
		return tmpl, nil
	}
	return nil, grpcutil.TranslateError(err)
}

// ListWorkflowTemplates delegates the call and translates any error.
func (a *errorTranslatingWorkflowTemplateServiceClient) ListWorkflowTemplates(ctx context.Context, req *workflowtemplatepkg.WorkflowTemplateListRequest, _ ...grpc.CallOption) (*v1alpha1.WorkflowTemplateList, error) {
	list, err := a.delegate.ListWorkflowTemplates(ctx, req)
	if err == nil {
		return list, nil
	}
	return nil, grpcutil.TranslateError(err)
}

// UpdateWorkflowTemplate delegates the call and translates any error.
func (a *errorTranslatingWorkflowTemplateServiceClient) UpdateWorkflowTemplate(ctx context.Context, req *workflowtemplatepkg.WorkflowTemplateUpdateRequest, _ ...grpc.CallOption) (*v1alpha1.WorkflowTemplate, error) {
	tmpl, err := a.delegate.UpdateWorkflowTemplate(ctx, req)
	if err == nil {
		return tmpl, nil
	}
	return nil, grpcutil.TranslateError(err)
}

// DeleteWorkflowTemplate delegates the call and translates any error.
func (a *errorTranslatingWorkflowTemplateServiceClient) DeleteWorkflowTemplate(ctx context.Context, req *workflowtemplatepkg.WorkflowTemplateDeleteRequest, _ ...grpc.CallOption) (*workflowtemplatepkg.WorkflowTemplateDeleteResponse, error) {
	resp, err := a.delegate.DeleteWorkflowTemplate(ctx, req)
	if err == nil {
		return resp, nil
	}
	return nil, grpcutil.TranslateError(err)
}

// LintWorkflowTemplate delegates the call and translates any error.
func (a *errorTranslatingWorkflowTemplateServiceClient) LintWorkflowTemplate(ctx context.Context, req *workflowtemplatepkg.WorkflowTemplateLintRequest, _ ...grpc.CallOption) (*v1alpha1.WorkflowTemplate, error) {
	tmpl, err := a.delegate.LintWorkflowTemplate(ctx, req)
	if err == nil {
		return tmpl, nil
	}
	return nil, grpcutil.TranslateError(err)
}
|
package handlers
import (
"net/http"
"github.com/esrever001/toyserver/db"
"github.com/julienschmidt/httprouter"
)
// HttpMethod enumerates the HTTP methods a handler can register for.
type HttpMethod int

const (
	// GET starts at 1 so the zero value is not a valid method.
	GET HttpMethod = 1 + iota
	POST
)

// BaseHandler is implemented by every route handler: it exposes the path,
// the method to register under, and the httprouter-compatible handle func.
type BaseHandler interface {
	Path() string
	Method() HttpMethod
	Handle(w http.ResponseWriter, r *http.Request, _ httprouter.Params)
}
// CreateHandlers builds the full set of route handlers backed by db.
func CreateHandlers(db *db.Database) []BaseHandler {
	handlers := make([]BaseHandler, 0, 4)
	handlers = append(handlers, HealthHandler{})
	handlers = append(handlers, EventsAddHandler{Database: db})
	handlers = append(handlers, EventsGetByUserHandler{Database: db})
	handlers = append(handlers, SummaryGetByUserHandler{Database: db})
	return handlers
}
|
package kvstore
import (
"path/filepath"
"github.com/Sirupsen/logrus"
"github.com/pkg/errors"
"github.com/rancher/longhorn-manager/types"
)
// Backend abstracts the underlying key-value storage engine the KVStore
// sits on.
type Backend interface {
	Set(key string, obj interface{}) error
	Get(key string, obj interface{}) error
	Delete(key string) error
	Keys(prefix string) ([]string, error)
	IsNotFoundError(err error) bool
}

// KVStore stores longhorn-manager state under a common key prefix.
type KVStore struct {
	Prefix string
	b      Backend
}

const (
	keyHosts    = "hosts"    // sub-prefix for host records
	keySettings = "settings" // key for the global settings record
)
// NewKVStore returns a store rooted at prefix, backed by backend.
func NewKVStore(prefix string, backend Backend) (*KVStore, error) {
	if backend == nil {
		return nil, errors.Errorf("invalid empty backend")
	}
	store := &KVStore{Prefix: prefix, b: backend}
	return store, nil
}

// key joins the store prefix with key.
// It's not a file path, but filepath.Join gives clean '/' handling.
func (s *KVStore) key(key string) string {
	return filepath.Join(s.Prefix, key)
}

// hostKey returns the store key for the host with the given id.
func (s *KVStore) hostKey(id string) string {
	return filepath.Join(s.key(keyHosts), id)
}
// SetHost persists host keyed by its UUID and logs the registration.
func (s *KVStore) SetHost(host *types.HostInfo) error {
	err := s.b.Set(s.hostKey(host.UUID), host)
	if err != nil {
		return err
	}
	logrus.Infof("Add host %v name %v longhorn-manager address %v", host.UUID, host.Name, host.Address)
	return nil
}

// GetHost loads the host with the given id; returns (nil, nil) when absent.
func (s *KVStore) GetHost(id string) (*types.HostInfo, error) {
	host, err := s.getHostByKey(s.hostKey(id))
	if err == nil {
		return host, nil
	}
	return nil, errors.Wrap(err, "unable to get host")
}

// getHostByKey loads one host record; a missing key yields (nil, nil).
func (s *KVStore) getHostByKey(key string) (*types.HostInfo, error) {
	var host types.HostInfo
	err := s.b.Get(key, &host)
	if err == nil {
		return &host, nil
	}
	if s.b.IsNotFoundError(err) {
		return nil, nil
	}
	return nil, err
}
// ListHosts loads every registered host, keyed by UUID.
func (s *KVStore) ListHosts() (map[string]*types.HostInfo, error) {
	hostKeys, err := s.b.Keys(s.key(keyHosts))
	if err != nil {
		return nil, err
	}
	hosts := map[string]*types.HostInfo{}
	for _, key := range hostKeys {
		host, err := s.getHostByKey(key)
		if err != nil {
			return nil, errors.Wrapf(err, "invalid key %v", key)
		}
		// getHostByKey returns (nil, nil) for keys that vanished meanwhile.
		if host == nil {
			continue
		}
		hosts[host.UUID] = host
	}
	return hosts, nil
}

// settingsKey returns the store key for the global settings record.
func (s *KVStore) settingsKey() string {
	return s.key(keySettings)
}
// SetSettings persists the global settings record.
func (s *KVStore) SetSettings(settings *types.SettingsInfo) error {
	return s.b.Set(s.settingsKey(), settings)
}
// GetSettings loads the global settings; returns (nil, nil) when none are
// stored yet.
func (s *KVStore) GetSettings() (*types.SettingsInfo, error) {
	settings := &types.SettingsInfo{}
	// Pass settings itself: the original passed &settings, handing the
	// backend a **SettingsInfo, which only decoded by accident of the
	// backend's unmarshaller.
	if err := s.b.Get(s.settingsKey(), settings); err != nil {
		if s.b.IsNotFoundError(err) {
			return nil, nil
		}
		return nil, errors.Wrap(err, "unable to get settings")
	}
	return settings, nil
}
// kvNuclear is a test-only function which wipes all longhorn entries under
// the store prefix. The nuclearCode argument is a deliberate safety latch:
// the exact phrase must be supplied to prevent accidental calls.
func (s *KVStore) kvNuclear(nuclearCode string) error {
	if nuclearCode != "nuke key value store" {
		return errors.Errorf("invalid nuclear code!")
	}
	if err := s.b.Delete(s.key("")); err != nil {
		return err
	}
	return nil
}
|
package main
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strconv"
"strings"
)
// rootHandler dispatches collection-level requests: POST creates a todo,
// GET lists all todos, DELETE clears the collection.
func rootHandler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case http.MethodPost:
		createTodoHandler(w, r)
	case http.MethodGet:
		getAllTodosHandler(w, r)
	case http.MethodDelete:
		deleteAllTodosHandler()
	default:
		http.NotFoundHandler().ServeHTTP(w, r)
	}
}

// resourceHandler dispatches /todos/{id} requests after parsing the id.
func resourceHandler(w http.ResponseWriter, r *http.Request) {
	path := strings.Split(r.URL.Path[1:], "/")
	if len(path) != 2 {
		// Routing should never send any other shape of path here.
		panic(errors.New("reached resource handler but shouldn't have"))
	}
	rawId := path[1]
	id, err := strconv.Atoi(rawId)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		_, _ = w.Write([]byte(fmt.Sprintf("Invalid todo id given: '%v'", rawId)))
		return
	}
	switch r.Method {
	case http.MethodGet:
		getTodoHandler(w, r, id)
	case http.MethodPatch:
		updateTodoHandler(w, r, id)
	case http.MethodDelete:
		deleteTodoHandler(id)
	default:
		http.NotFoundHandler().ServeHTTP(w, r)
	}
}
// createTodoHandler decodes a Todo from the request body, stores it, fills
// in its URL, and echoes the stored todo back as JSON.
func createTodoHandler(w http.ResponseWriter, r *http.Request) {
	var todo Todo
	if err := json.NewDecoder(r.Body).Decode(&todo); err != nil {
		errorResponse(w, err)
		return
	}
	addTodo(&todo)
	todo.setUrl(r)
	if err := json.NewEncoder(w).Encode(todo); err != nil {
		errorResponse(w, err)
	}
}
// getAllTodosHandler writes the full todo list as JSON.
// (The original contained a self-assignment loop over the slice, which was
// a no-op and has been removed.)
func getAllTodosHandler(w http.ResponseWriter, r *http.Request) {
	todos := getTodos()
	if err := json.NewEncoder(w).Encode(todos); err != nil {
		errorResponse(w, err)
	}
}
// getTodoHandler writes the todo with the given id, or 404 when absent.
func getTodoHandler(w http.ResponseWriter, r *http.Request, id int) {
	todo, err := getTodo(id)
	if err != nil {
		http.NotFoundHandler().ServeHTTP(w, r)
		return
	}
	if err := json.NewEncoder(w).Encode(todo); err != nil {
		errorResponse(w, err)
	}
}

// updateTodoHandler applies a partial update to the todo with the given id
// and echoes the updated todo back as JSON.
func updateTodoHandler(w http.ResponseWriter, r *http.Request, id int) {
	todo, err := getTodo(id)
	if err != nil {
		http.NotFoundHandler().ServeHTTP(w, r)
		return
	}
	var patch Todo
	if err := json.NewDecoder(r.Body).Decode(&patch); err != nil {
		errorResponse(w, err)
		return
	}
	// Copy over only the mutable fields.
	todo.Order = patch.Order
	todo.Completed = patch.Completed
	todo.Title = patch.Title
	updateTodo(todo)
	if err := json.NewEncoder(w).Encode(todo); err != nil {
		errorResponse(w, err)
	}
}

// deleteTodoHandler removes the todo with the given id.
func deleteTodoHandler(id int) {
	deleteTodo(id)
}

// deleteAllTodosHandler clears the whole todo collection.
func deleteAllTodosHandler() {
	deleteAllTodos()
}

// errorResponse writes err as a plain-text 500 response.
func errorResponse(w http.ResponseWriter, err error) {
	w.WriteHeader(http.StatusInternalServerError)
	w.Write([]byte(err.Error()))
}
|
package internal
import "github.com/m3hm3t/customerapi3/internal/model"
// RepositoryPort defines the persistence operations the customer service
// depends on; implementations adapt it to a concrete datastore.
type RepositoryPort interface {
	RetrieveByID(uint) (*model.Customer, error)
	RetrieveByEmail(string) (*model.Customer, error)
	RetrieveByUsername(string) (*model.Customer, error)
	Create(*model.Customer) error
	Update(*model.Customer) error
	Delete(*model.Customer) error
}
|
package main
import (
"net/http"
"github.com/gin-gonic/gin"
)
// books describes one catalog entry as serialized to JSON.
type books struct {
	ID     string `json:"id"`
	ISBN   string `json:"isbn"`
	Title  string `json:"title"`
	Author string `json:"author"`
}

// Books is the in-memory book catalog served by the handlers.
var Books = []books{
	{ID: "1", ISBN: "978-0988262591", Title: "The Phoenix Project", Author: "Gene Kim, Kevin Behr, George Spafford"},
	{ID: "2", ISBN: "978-0545582933", Title: "Harry Potter and the Prisoner of Azkaban", Author: "J. K. Rowling"},
	{ID: "3", ISBN: "978-0385504201", Title: "The Da Vinci Code", Author: "Dan Brown"},
	{ID: "4", ISBN: "978-1982149482", Title: "The Great Gatsby", Author: "F. Scott Fitzgerald"},
}
// getBooks writes the full book catalog as indented JSON.
func getBooks(c *gin.Context) {
	c.Header("Content-Type", "application/json")
	c.IndentedJSON(http.StatusOK, Books)
}

// booksHandler renders the books.tmpl template with the book catalog.
func booksHandler(c *gin.Context) {
	c.Header("Content-Type", "text/html")
	data := gin.H{"books": Books}
	c.HTML(http.StatusOK, "books.tmpl", data)
}
|
package store
import (
"context"
"errors"
"time"
"github.com/google/uuid"
"github.com/odpf/optimus/models"
)
var (
	// ErrResourceNotFound is returned when a requested resource does not exist.
	ErrResourceNotFound = errors.New("resource not found")
)

// ProjectJobSpecRepository represents a storage interface for Job specifications at a project level
type ProjectJobSpecRepository interface {
	GetByName(context.Context, string) (models.JobSpec, models.NamespaceSpec, error)
	GetByNameForProject(ctx context.Context, projectName, jobName string) (models.JobSpec, models.ProjectSpec, error)
	GetAll(context.Context) ([]models.JobSpec, error)
	GetByDestination(context.Context, string) (models.JobSpec, models.ProjectSpec, error)
}

// ProjectRepository represents a storage interface for registered projects
type ProjectRepository interface {
	Save(context.Context, models.ProjectSpec) error
	GetByName(context.Context, string) (models.ProjectSpec, error)
	GetAll(context.Context) ([]models.ProjectSpec, error)
}

// ProjectSecretRepository stores secrets attached to projects
type ProjectSecretRepository interface {
	Save(ctx context.Context, item models.ProjectSecretItem) error
	GetByName(context.Context, string) (models.ProjectSecretItem, error)
	GetAll(context.Context) ([]models.ProjectSecretItem, error)
}

// NamespaceRepository represents a storage interface for registered namespaces
type NamespaceRepository interface {
	Save(context.Context, models.NamespaceSpec) error
	GetByName(context.Context, string) (models.NamespaceSpec, error)
	GetAll(context.Context) ([]models.NamespaceSpec, error)
}

// JobRunRepository represents a storage interface for Job runs generated to
// represent a job in running state.
// (The comment previously named "JobRunSpecRepository", which does not exist.)
type JobRunRepository interface {
	// Save updates the run in place if it can else insert new
	// Note: it doesn't insert the instances attached to job run in db
	Save(context.Context, models.NamespaceSpec, models.JobRun) error
	GetByScheduledAt(ctx context.Context, jobID uuid.UUID, scheduledAt time.Time) (models.JobRun, models.NamespaceSpec, error)
	GetByID(context.Context, uuid.UUID) (models.JobRun, models.NamespaceSpec, error)
	UpdateStatus(context.Context, uuid.UUID, models.JobRunState) error
	GetByStatus(ctx context.Context, state ...models.JobRunState) ([]models.JobRun, error)
	GetByTrigger(ctx context.Context, trigger models.JobRunTrigger, state ...models.JobRunState) ([]models.JobRun, error)
	Delete(context.Context, uuid.UUID) error
	AddInstance(ctx context.Context, namespace models.NamespaceSpec, run models.JobRun, spec models.InstanceSpec) error
	// Clear will not delete the record but will reset all the run details
	// for fresh start
	Clear(ctx context.Context, runID uuid.UUID) error
	ClearInstance(ctx context.Context, runID uuid.UUID, instanceType models.InstanceType, instanceName string) error
}

// InstanceRepository represents a storage interface for Job run instances
// created during execution.
// (The comment previously named "JobRunSpecRepository", which does not exist.)
type InstanceRepository interface {
	Save(ctx context.Context, run models.JobRun, spec models.InstanceSpec) error
	UpdateStatus(ctx context.Context, id uuid.UUID, status models.JobRunState) error
	GetByName(ctx context.Context, runID uuid.UUID, instanceName, instanceType string) (models.InstanceSpec, error)
	DeleteByJobRun(ctx context.Context, id uuid.UUID) error
}

// ProjectResourceSpecRepository represents a storage interface for Resource specifications at project level
type ProjectResourceSpecRepository interface {
	GetByName(context.Context, string) (models.ResourceSpec, models.NamespaceSpec, error)
	GetAll(context.Context) ([]models.ResourceSpec, error)
}

// ResourceSpecRepository represents a storage interface for Resource specifications at namespace level
type ResourceSpecRepository interface {
	Save(context.Context, models.ResourceSpec) error
	GetByName(context.Context, string) (models.ResourceSpec, error)
	GetByURN(context.Context, string) (models.ResourceSpec, error)
	GetAll(context.Context) ([]models.ResourceSpec, error)
	Delete(context.Context, string) error
}

// ReplaySpecRepository represents a storage interface for replay objects
type ReplaySpecRepository interface {
	Insert(ctx context.Context, replay *models.ReplaySpec) error
	GetByID(ctx context.Context, id uuid.UUID) (models.ReplaySpec, error)
	UpdateStatus(ctx context.Context, replayID uuid.UUID, status string, message models.ReplayMessage) error
	GetByStatus(ctx context.Context, status []string) ([]models.ReplaySpec, error)
	GetByJobIDAndStatus(ctx context.Context, jobID uuid.UUID, status []string) ([]models.ReplaySpec, error)
	GetByProjectIDAndStatus(ctx context.Context, projectID uuid.UUID, status []string) ([]models.ReplaySpec, error)
	GetByProjectID(ctx context.Context, projectID uuid.UUID) ([]models.ReplaySpec, error)
}

// BackupRepository represents a storage interface for backup objects
type BackupRepository interface {
	Save(ctx context.Context, spec models.BackupSpec) error
	GetAll(context.Context) ([]models.BackupSpec, error)
}
|
package virtual_security
import (
"sort"
"sync"
)
var (
	// marginPositionStoreSingleton is the process-wide margin position store.
	marginPositionStoreSingleton iMarginPositionStore
	// marginPositionStoreSingletonMutex guards the lazy init above.
	marginPositionStoreSingletonMutex sync.Mutex
)

// getMarginPositionStore lazily creates and returns the singleton store.
func getMarginPositionStore() iMarginPositionStore {
	marginPositionStoreSingletonMutex.Lock()
	defer marginPositionStoreSingletonMutex.Unlock()
	if marginPositionStoreSingleton == nil {
		marginPositionStoreSingleton = &marginPositionStore{
			store: map[string]*marginPosition{},
		}
	}
	return marginPositionStoreSingleton
}

// iMarginPositionStore - interface of the margin stock position store.
type iMarginPositionStore interface {
	getAll() []*marginPosition
	getByCode(code string) (*marginPosition, error)
	getBySymbolCode(symbolCode string) ([]*marginPosition, error)
	save(marginPosition *marginPosition)
	removeByCode(code string)
}

// marginPositionStore - store of margin stock positions.
type marginPositionStore struct {
	store map[string]*marginPosition // positions keyed by position code
	mtx   sync.Mutex                 // guards store
}
// getAll returns every stored position, sorted by position code.
func (s *marginPositionStore) getAll() []*marginPosition {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	positions := make([]*marginPosition, 0, len(s.store))
	for _, position := range s.store {
		positions = append(positions, position)
	}
	sort.Slice(positions, func(i, j int) bool {
		return positions[i].Code < positions[j].Code
	})
	return positions
}
// getByCode returns the position stored under code, or NoDataError
// when no such position exists.
func (s *marginPositionStore) getByCode(code string) (*marginPosition, error) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	position, ok := s.store[code]
	if !ok {
		return nil, NoDataError
	}
	return position, nil
}
// getBySymbolCode returns all positions for the given symbol, ordered
// by position code. The error result is always nil and exists only to
// satisfy the store interface.
func (s *marginPositionStore) getBySymbolCode(symbolCode string) ([]*marginPosition, error) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	matched := make([]*marginPosition, 0)
	for _, p := range s.store {
		if p.SymbolCode == symbolCode {
			matched = append(matched, p)
		}
	}
	sort.Slice(matched, func(a, b int) bool {
		return matched[a].Code < matched[b].Code
	})
	return matched, nil
}
// save upserts a position keyed by its Code; a nil position is ignored.
func (s *marginPositionStore) save(position *marginPosition) {
	if position == nil {
		return
	}
	s.mtx.Lock()
	defer s.mtx.Unlock()
	s.store[position.Code] = position
}
// removeByCode deletes the position stored under code; deleting a
// missing key is a no-op.
func (s *marginPositionStore) removeByCode(code string) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	delete(s.store, code)
}
|
package main
import (
"bytes"
"time"
"fmt"
"strings"
)
// Two ways of building a large string, with rough timing for each.
func main() {
//1 最快
var buffer bytes.Buffer
s := time.Now()
for i := 0; i < 10000; i++ {
buffer.WriteString("test is here\n")
}
buffer.String()
e := time.Now()
fmt.Println("1 time is ", e.Sub(s).Seconds())
//2
s = time.Now()
var s1 []string
for i := 0; i < 100000; i++ {
s1 = append(s1, "test is here\n")
}
strings.Join(s1, "")
e = time.Now()
fmt.Println("2 time is ", e.Sub(s).Seconds())
}
|
package bot
import (
"encoding/binary"
"io"
"log"
"os"
)
func openFile(path string) ([][]byte, error) {
buffer := make([][]byte, 0)
file, err := os.Open(path)
if err != nil {
return buffer, err
}
var opuslen int16
for {
// Read opus frame length from dca file.
err = binary.Read(file, binary.LittleEndian, &opuslen)
// If this is the end of the file, just return.
if err == io.EOF || err == io.ErrUnexpectedEOF {
err := file.Close()
if err != nil {
return buffer, err
}
return buffer, nil
}
if err != nil {
log.Println("Error reading from file :", err)
return buffer, err
}
// Read encoded pcm from dca file.
inBuf := make([]byte, opuslen)
err = binary.Read(file, binary.LittleEndian, &inBuf)
// Should not be any end of file errors
if err != nil {
log.Println("Error reading from dca file :", err)
return buffer, err
}
// Append encoded pcm data to the buffer.
buffer = append(buffer, inBuf)
}
}
|
package main
import (
"log"
"net/http"
"time"
"github.com/gin-contrib/timeout"
"github.com/gin-gonic/gin"
)
// testResponse is the timeout fallback handler: it replies with
// 408 Request Timeout and the body "timeout".
func testResponse(c *gin.Context) {
	c.String(http.StatusRequestTimeout, "timeout")
}
// timeoutMiddleware builds a gin middleware that aborts requests
// running longer than 500ms and answers them via testResponse.
func timeoutMiddleware() gin.HandlerFunc {
	return timeout.New(
		timeout.WithTimeout(500*time.Millisecond),
		// The wrapped handler simply continues down the chain.
		timeout.WithHandler(func(c *gin.Context) {
			c.Next()
		}),
		timeout.WithResponse(testResponse),
	)
}
// main wires the timeout middleware into a fresh engine and exposes a
// deliberately slow endpoint to demonstrate the timeout response.
func main() {
	engine := gin.New()
	engine.Use(timeoutMiddleware())
	engine.GET("/slow", func(c *gin.Context) {
		// Sleep past the 500ms budget so the middleware fires.
		time.Sleep(800 * time.Millisecond)
		c.Status(http.StatusOK)
	})
	if err := engine.Run(":8080"); err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"fmt"
"sync"
"time"
)
// Start produces the integers 0 through 9 on a fresh channel and
// closes it when done.
func Start() <-chan int {
	ch := make(chan int)
	go func() {
		for n := 0; n < 10; n++ {
			ch <- n
		}
		close(ch)
	}()
	return ch
}
// Worker increments every value received on in by one and forwards it,
// sleeping two seconds after each send to simulate slow work. Its
// output channel is closed when in is exhausted.
func Worker(in <-chan int) <-chan int {
	results := make(chan int)
	go func() {
		defer close(results)
		for v := range in {
			results <- v + 1
			time.Sleep(2 * time.Second)
		}
	}()
	return results
}
// createWorkers fans the nums channel out to five Workers and returns
// their output channels.
func createWorkers(nums <-chan int) []<-chan int {
	const workerCount = 5
	workers := make([]<-chan int, workerCount)
	for i := range workers {
		workers[i] = Worker(nums)
	}
	return workers
}
func closeChan(wg *sync.WaitGroup, out chan int) {
wg.Wait()
close(out)
}
// START OMIT
// FanIn merges every input channel into one output channel. The output
// is closed after all inputs have been exhausted.
func FanIn(cs ...<-chan int) <-chan int {
	merged := make(chan int)
	var wg sync.WaitGroup
	wg.Add(len(cs))
	for _, ch := range cs {
		// Pass ch explicitly so each goroutine drains its own channel.
		go func(ch <-chan int) {
			defer wg.Done()
			for v := range ch {
				merged <- v
			}
		}(ch)
	}
	// Close the merged channel once every drainer has finished.
	go func() {
		wg.Wait()
		close(merged)
	}()
	return merged
}
// main runs the full pipeline — producer, five workers, fan-in — and
// prints every merged result.
func main() {
	results := FanIn(createWorkers(Start())...)
	for v := range results {
		fmt.Println(v)
	}
}
// END OMIT
|
package main
import (
"fmt"
tensorflow "github.com/tensorflow/tensorflow/tensorflow/go"
tf "github.com/tensorflow/tensorflow/tensorflow/go"
)
// makeTensor wraps a 2-D float32 slice in a TensorFlow tensor.
func makeTensor(data [][]float32) (*tf.Tensor, error) {
	return tf.NewTensor(data)
}
// getResult collapses a 1x2 model output into a boolean class: true
// when the second score is greater than or equal to the first (ties
// yield true, matching the original comparison).
func getResult(prediction [][]float32) bool {
	return prediction[0][0] <= prediction[0][1]
}
// RL_AI holds the loaded TensorFlow SavedModel used for predictions.
type RL_AI struct {
	model *tensorflow.SavedModel
}
// load_model loads the SavedModel from the "golang_model" directory.
// On failure it logs the error and leaves rl.model nil. (The original
// assigned the model before checking the error; assigning only on
// success makes the nil-on-failure invariant explicit.)
func (rl *RL_AI) load_model() {
	model, err := tf.LoadSavedModel("golang_model", []string{"tags"}, nil)
	if err != nil {
		fmt.Printf("Error loading saved model: %s\n", err.Error())
		return
	}
	rl.model = model
}
// predict runs the model on one batch of float32 features and collapses
// the 1x2 output to a bool via getResult.
//
// NOTE(review): tensor-creation and session-run errors are only printed
// here; execution continues and will panic dereferencing result[0] if
// the run failed — confirm whether callers rely on that.
func (rl *RL_AI) predict(data [][]float32) bool {
	// defer rl.model.Session.Close()
	tensor, err := makeTensor(data)
	fmt.Println("Made Tensor")
	if err != nil {
		fmt.Println(err)
	}
	// Feed the input tensor to the graph's input op and fetch the output
	// of the "my_output/BiasAdd" op.
	result, runErr := rl.model.Session.Run(
		map[tf.Output]*tf.Tensor{
			rl.model.Graph.Operation("dense_1_input").Output(0): tensor,
		},
		[]tf.Output{
			rl.model.Graph.Operation("my_output/BiasAdd").Output(0),
		},
		nil,
	)
	if runErr != nil {
		fmt.Println("ERROR!!! ", runErr)
	}
	fmt.Println("Result: ", result[0].Value())
	temp := result[0].Value().([][]float32)
	return getResult(temp)
	// fmt.Println(temp)
	// return true
}
// main loads the model and runs two sample predictions, printing each.
func main() {
	rl_ai := RL_AI{}
	rl_ai.load_model()
	// defer rl.model.Session.Close()
	data := [][]float32{{1, 0, 0, 0, 5, 4, 3}}
	prediction := rl_ai.predict(data)
	fmt.Println("prediction ", prediction)
	data_2 := [][]float32{{4, 2, 4, 0, 0, 4, 3}}
	prediction_2 := rl_ai.predict(data_2)
	fmt.Println("prediction 2 ", prediction_2)
}
|
package calendar
import (
"encoding/json"
"github.com/pkg/errors"
"github.com/andywow/golang-lessons/lesson-calendar/pkg/eventapi"
)
// CheckEventData validates that all required event fields are set:
// a start time, a positive duration, and non-empty header, description
// and username. It returns ErrIncorrectEvent otherwise.
func CheckEventData(e *eventapi.Event) error {
	valid := e.StartTime != nil &&
		e.Duration > 0 &&
		e.Header != "" &&
		e.Description != "" &&
		e.Username != ""
	if !valid {
		return ErrIncorrectEvent
	}
	return nil
}
// ConvertToJSON serializes the event into a JSON byte slice.
func ConvertToJSON(e *eventapi.Event) ([]byte, error) {
	data, err := json.Marshal(&e)
	if err != nil {
		return nil, errors.Wrap(err, "failed convert to json")
	}
	return data, nil
}
// ConvertFromJSON deserializes a JSON byte slice into an event.
func ConvertFromJSON(blob []byte) (*eventapi.Event, error) {
	event := new(eventapi.Event)
	if err := json.Unmarshal(blob, event); err != nil {
		return nil, errors.Wrap(err, "failed convert from json")
	}
	return event, nil
}
|
package main
import (
"crypto/md5"
"fmt"
"log"
)
// S is the top-level sample payload; StringToFilter uses the generated
// ordered-map type so marshaling order is deterministic.
//easyjson:json
type S struct {
	I int
	StringToFilter OrderedMapStringToFilter
}
// Filter is a nested sample value holding another ordered map.
//easyjson:json
type Filter struct {
	Name string
	Value int
	StringToInt OrderedMapStringToInt
}
// main marshals the same nested ordered-map value repeatedly and
// tallies the md5 hash of every encoding, demonstrating whether the
// output is byte-stable across runs.
func main() {
	s := S{
		I: 1,
		StringToFilter: OrderedMapStringToFilter{
			"b": Filter{
				Name:  "filter B",
				Value: 2,
				StringToInt: OrderedMapStringToInt{
					"z": 12,
					"x": 10,
					"y": 11,
				},
			},
			"c": Filter{
				Name:  "filter B",
				Value: 2,
				StringToInt: OrderedMapStringToInt{
					"z": 12,
					"x": 10,
					"y": 11,
				},
			},
			"a": Filter{
				Name:  "filter B",
				Value: 2,
				StringToInt: OrderedMapStringToInt{
					"z": 12,
					"x": 10,
					"y": 11,
				},
			}},
	}
	results := map[string]int{}
	for i := 0; i < 15; i++ {
		// Renamed from "bytes" to avoid shadowing the bytes package.
		// The marshal error is deliberately ignored in this demo.
		data, _ := s.MarshalJSON()
		hash := fmt.Sprintf("%X", md5.Sum(data))
		log.Printf("%v %v", hash, string(data))
		// The map's zero value makes the explicit ok-check unnecessary.
		results[hash]++
	}
	log.Println("results: ")
	for hash, count := range results {
		log.Printf("%v: %v", hash, count)
	}
}
|
package routers
import (
"qixijie/controllers"
"github.com/astaxie/beego"
)
// init registers all HTTP routes for the app with beego.
func init() {
	// Production router
	// Login
	beego.Router("/seven_night/redirecturl", &controllers.MainController{}, "*:Redirecturl")
	beego.Router("/seven_night/index", &controllers.MainController{}, "*:Index")
	// Share endpoints
	beego.Router("/seven_night/upimageAndmessage", &controllers.MainController{}, "*:UpImageAndMessage")
	beego.Router("/seven_night/getpayid", &controllers.MainController{}, "*:GetWxPayId")
	beego.Router("/seven_night/gethistorymessage", &controllers.MainController{}, "post:GetHistoryMessage")
	// Notify the backend to grant one extra chance after a successful share
	beego.Router("/seven_night/sharesucess", &controllers.MainController{}, "get:Sharesuccess")
	// Update prize records
	beego.Router("/seven_night/prize", &controllers.MainController{}, "post:Prize")
	// Check payment status
	beego.Router("/seven_night/checkpay", &controllers.MainController{}, "*:CheckPay")
	beego.Router("/seven_night/share/get_ticker", &controllers.MainController{}, "get:GetTicker")
	beego.Router("/seven_night/share/get_user_token", &controllers.MainController{}, "post:GetToken")
}
|
package redcode
import (
"errors"
"regexp"
"strings"
)
//go:generate ragel -Z -G2 -o lex.go redcode.rl
//go:generate goyacc redcode.y
// Directives map Redcode directive names (e.g. "name", "author") to
// their values.
//
// e.g. ";name Imp"
type Directives map[string]string
// scanDirective matches a "name" or "author" directive in a comment
// body; group 1 is the keyword, group 2 the raw value.
// MustCompile at the declaration replaces the original init() with its
// manual Compile-and-panic — behavior is identical for this constant
// pattern.
var scanDirective = regexp.MustCompile(`^\s*(name|author)\s+(.*)`)
// ParseString parses a Redcode program from a string; it is a
// convenience wrapper around ParseBytes.
func ParseString(text, filename string) ([]Instruction, Directives, error) {
	return ParseBytes([]byte(text), filename)
}
// ParseBytes parses a Redcode program, returning its instructions
// (post-processed by checkInstruction) and any directives found in
// comments.
func ParseBytes(text []byte, filename string) ([]Instruction, Directives, error) {
	lex := newLexer(text, filename)
	status := yyParse(lex)
	switch {
	case lex.err != nil:
		return nil, nil, lex.err
	case status != 0:
		return nil, nil, errors.New("Unknown error during parsing")
	}
	for i := range lex.instructions {
		checked, err := checkInstruction(lex.instructions[i])
		if err != nil {
			return nil, nil, err
		}
		lex.instructions[i] = checked
	}
	return lex.instructions, lex.directives, nil
}
// checkInstruction normalizes a parsed instruction. A DAT whose only
// operand was parsed into the A field has its operands swapped so the
// value lives in B. The error result is currently always nil.
func checkInstruction(instruction Instruction) (Instruction, error) {
	if instruction.Opcode == OpDat && !hasB(instruction) {
		instruction.A, instruction.B = instruction.B, instruction.A
	}
	return instruction, nil
}
// hasA reports whether the instruction's A operand carries an expression.
func hasA(instruction Instruction) bool {
	return instruction.A.Expression != nil
}
// hasB reports whether the instruction's B operand carries an expression.
func hasB(instruction Instruction) bool {
	return instruction.B.Expression != nil
}
// parseDirective records a name/author directive found in a comment
// into the lexer's directives map; non-directive comments are ignored.
func parseDirective(lexer *lexer, comment string) {
	match := scanDirective.FindStringSubmatch(comment)
	if match == nil {
		return
	}
	key, value := match[1], match[2]
	lexer.directives[key] = strings.Trim(value, " \t")
}
|
/*
Given an infix expression, determine whether all constants are of the same type.
Operators will consist only of these dyadic operators: +-/*
Your program or function should take a valid expression string as input, and output a truthy value if the constants in the expression are of the same type, and a falsey value otherwise.
The expression will consist solely of constants, and may contain any of the following types:
String, of the form "String" (Always double quotes, can be empty, no escape characters, may contain any ASCII text)
Integer, of the form 14 (Always positive or zero)
Float, of the form 7.3f (Always positive or zero, always has a decimal component, eg 14.0f)
Byte, of the form 0x42 (0-255, Always 2 hexadecimal characters)
Boolean, of the form true (true or false, case insensitive)
The expression will not contain parentheses, as order of operation doesn't affect type when no type coercion is present.
A lone constant with no operators is a valid expression.
An empty expression is not a valid expression.
You may assume that the expression string contains no whitespace outside of string literals.
Note: Alternatively you may assume that there will always be spaces between constants and operators, as seen in the testcases. If you make this assumption, please specify as such in your answer
You do not have to handle invalid expressions such as 1 +.
Scoring
This is code-golf, so fewest bytes wins!
Test cases
(Whitespace added for readability)
2 + 3
True
"Hello" / "World"
True
true * false
True
"Hello" + 4
False
"Hello" + "4"
True
3 + 2.4f / 8
False
0xff * 0xff
True
0xff + 2
False
6
True
" " + ""
True
"4 + false" + "word"
True
*/
package main
import (
"bytes"
"go/ast"
"go/parser"
"go/token"
"strconv"
"strings"
"unicode"
)
// main exercises typecheck against the challenge's test cases.
func main() {
	cases := []struct {
		expr string
		want bool
	}{
		{`2 + 3`, true},
		{`"Hello" / "World"`, true},
		{`true * false`, true},
		{`"Hello" + 4`, false},
		{`"Hello" + "4"`, true},
		{`3 + 2.4f / 8`, false},
		{`0xff * 0xff`, true},
		{`0xff + 2`, false},
		{`6`, true},
		{`" " + ""`, true},
		{`"4 + false" + "word"`, true},
	}
	for _, c := range cases {
		assert(typecheck(c.expr) == c.want)
	}
}
// assert panics when its condition does not hold.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// clean strips the 'f' suffix from float literals (e.g. "2.4f" ->
// "2.4") so the input becomes a parseable Go expression: an 'f'
// immediately preceded by a digit is dropped, all other runes pass
// through unchanged.
func clean(s string) string {
	var b strings.Builder
	prev := rune(0)
	for _, r := range s {
		if r != 'f' || !unicode.IsDigit(prev) {
			b.WriteRune(r)
		}
		prev = r
	}
	return b.String()
}
// typecheck reports whether every constant in the expression shares a
// single type. Unparseable input returns false, and a panic from walk
// (unsupported expression) is recovered and also yields false via the
// zero return value.
func typecheck(s string) (ok bool) {
	defer func() { recover() }()
	expr, err := parser.ParseExpr(clean(s))
	if err != nil {
		return false
	}
	return walk(expr) > 0
}
func walk(e ast.Expr) int {
switch e := e.(type) {
case *ast.BinaryExpr:
x := walk(e.X)
y := walk(e.Y)
switch e.Op {
case token.ADD, token.SUB, token.MUL, token.QUO:
if x != y {
return 0
}
return x
}
case *ast.Ident:
switch strings.ToLower(e.Name) {
case "true", "false":
return 'b'
}
case *ast.BasicLit:
_, err := strconv.Atoi(e.Value)
if err == nil {
return 'i'
}
_, err = strconv.ParseInt(e.Value, 0, 0)
if err == nil {
return 'x'
}
_, err = strconv.ParseFloat(e.Value, 64)
if err == nil {
return 'f'
}
if strings.HasPrefix(e.Value, `"`) {
return 's'
}
}
panic("unsupported expression")
}
|
package stringconcat_test
import (
"testing"
"github.com/nandarimansyah/gobasicbenchmark/stringconcat"
)
// Benchmark fixture: the string to self-concatenate and how many times.
const (
	TEST_STRING = "test"
	TEST_SIZE = 2
)
// benchmarkConcat drives one self-concatenation implementation for the
// standard b.N iterations at the given size.
func benchmarkConcat(size int, concat func(string, int) string, b *testing.B) {
	for i := 0; i < b.N; i++ {
		concat(TEST_STRING, size)
	}
}
// BenchmarkConcatOperator measures +-operator self-concatenation.
func BenchmarkConcatOperator(b *testing.B) {
	benchmarkConcat(TEST_SIZE, stringconcat.SelfConcatOperator, b)
}
// BenchmarkConcatBuffer measures buffer-based self-concatenation.
func BenchmarkConcatBuffer(b *testing.B) { benchmarkConcat(TEST_SIZE, stringconcat.SelfConcatBuffer, b) }
|
// Copyright 2020 Thomas.Hoehenleitner [at] seerose.net
// Use of this source code is governed by a license that can be found in the LICENSE file.
package id_test
import "testing"
// TestUpdateZeroId feeds source fragments whose trice macros all carry
// the Id(0) placeholder through doUpdate and compares the patched
// sources and the generated JSON ID list against golden values.
func TestUpdateZeroId(t *testing.T) {
	// Original sources: every macro ID is the zero placeholder.
	sOri := []string{`
TRICE0 (Id(0), "---------------------------------------\n" );
TRICE0 (Id(0), "---------------------------------------\n" );
`, `
TRICE8_1( Id(0), "tst:TRICE8 %d\n", 1 );
TRICE8_2( Id(0), "tst:TRICE8 %d %d\n", 1, 2 );
TRICE8_3( Id(0), "tst:TRICE8 %d %d %d\n", 1, 2, 3 );
TRICE8_4( Id(0), "tst:TRICE8 %d %d %d %d\n", 1, 2, 3, 4 );
TRICE8_5( Id(0), "tst:TRICE8 %d %d %d %d %d\n", 1, 2, 3, 4, 5 );
TRICE8_6( Id(0), "tst:TRICE8 %d %d %d %d %d %d \n", 1, 2, 3, 4, 5, 6 );
TRICE8_7( Id(0), "tst:TRICE8 %d %d %d %d %d %d %d\n", 1, 2, 3, 4, 5, 6, 7 );
TRICE8_8( Id(0), "tst:TRICE8 %d %d %d %d %d %d %d %d\n", 1, 2, 3, 4, 5, 6, 7, 8 );
`, `
TRICE16_1( Id(0), "tst:TRICE16 %d\n", 1 );
TRICE16_2( Id(0), "tst:TRICE16 %d %d\n", 1, 2 );
TRICE16_3( Id(0), "tst:TRICE16 %d %d %d\n", 1, 2, 3 );
TRICE16_4( Id(0), "tst:TRICE16 %d %d %d %d\n", 1, 2, 3, 4 );
`, `
TRICE32_2( Id(0), "tst:TRICE32 %%09x -> %09x %09x", 1, 0x7fffffff );
TRICE32_2( Id(0), "tst: %09x %09x\n", 0x80000000, 0xffffffff );
TRICE32_2( Id(0), "tst:TRICE32 %%11d -> %11d %11d", 1, 0x7fffffff );
TRICE32_2( Id(0), "tst: %11d %11d\n", 0x80000000, 0xffffffff );
TRICE32_2( Id(0), "tst:TRICE32 %%12o -> %12o %12o", 1, 0x7fffffff );
TRICE32_2( Id(0), "tst: %12o %12o\n", 0x80000000, 0xffffffff );
TRICE_S( Id(0), "sig:generated=%s\n", x );
TRICE_S ( Id(0), "sig:generated=%s\n", x );
`}
	// Expected sources after updating: deterministic IDs patched in.
	sExp := []string{`
TRICE0 (Id(43274), "---------------------------------------\n" );
TRICE0 (Id( 9089), "---------------------------------------\n" );
`, `
TRICE8_1( Id(46083), "tst:TRICE8 %d\n", 1 );
TRICE8_2( Id(50226), "tst:TRICE8 %d %d\n", 1, 2 );
TRICE8_3( Id(18460), "tst:TRICE8 %d %d %d\n", 1, 2, 3 );
TRICE8_4( Id(55041), "tst:TRICE8 %d %d %d %d\n", 1, 2, 3, 4 );
TRICE8_5( Id(60612), "tst:TRICE8 %d %d %d %d %d\n", 1, 2, 3, 4, 5 );
TRICE8_6( Id(16507), "tst:TRICE8 %d %d %d %d %d %d \n", 1, 2, 3, 4, 5, 6 );
TRICE8_7( Id(59378), "tst:TRICE8 %d %d %d %d %d %d %d\n", 1, 2, 3, 4, 5, 6, 7 );
TRICE8_8( Id(62543), "tst:TRICE8 %d %d %d %d %d %d %d %d\n", 1, 2, 3, 4, 5, 6, 7, 8 );
`, `
TRICE16_1( Id(34028), "tst:TRICE16 %d\n", 1 );
TRICE16_2( Id( 472), "tst:TRICE16 %d %d\n", 1, 2 );
TRICE16_3( Id( 1669), "tst:TRICE16 %d %d %d\n", 1, 2, 3 );
TRICE16_4( Id(21923), "tst:TRICE16 %d %d %d %d\n", 1, 2, 3, 4 );
`, `
TRICE32_2( Id(42242), "tst:TRICE32 %%09x -> %09x %09x", 1, 0x7fffffff );
TRICE32_2( Id(60761), "tst: %09x %09x\n", 0x80000000, 0xffffffff );
TRICE32_2( Id(16891), "tst:TRICE32 %%11d -> %11d %11d", 1, 0x7fffffff );
TRICE32_2( Id(53650), "tst: %11d %11d\n", 0x80000000, 0xffffffff );
TRICE32_2( Id(15285), "tst:TRICE32 %%12o -> %12o %12o", 1, 0x7fffffff );
TRICE32_2( Id(61124), "tst: %12o %12o\n", 0x80000000, 0xffffffff );
TRICE_S( Id(63805), "sig:generated=%s\n", x );
TRICE_S ( Id(44213), "sig:generated=%s\n", x );
`}
	// Expected JSON ID list recording every assigned ID and format string.
	listExp := `[
{
"id": 43274,
"fmtType": "TRICE0",
"fmtStrg": "---------------------------------------\\n",
"created": 0,
"removed": 0
},
{
"id": 9089,
"fmtType": "TRICE0",
"fmtStrg": "---------------------------------------\\n",
"created": 0,
"removed": 0
},
{
"id": 46083,
"fmtType": "TRICE8_1",
"fmtStrg": "tst:TRICE8 %d\\n",
"created": 0,
"removed": 0
},
{
"id": 50226,
"fmtType": "TRICE8_2",
"fmtStrg": "tst:TRICE8 %d %d\\n",
"created": 0,
"removed": 0
},
{
"id": 18460,
"fmtType": "TRICE8_3",
"fmtStrg": "tst:TRICE8 %d %d %d\\n",
"created": 0,
"removed": 0
},
{
"id": 55041,
"fmtType": "TRICE8_4",
"fmtStrg": "tst:TRICE8 %d %d %d %d\\n",
"created": 0,
"removed": 0
},
{
"id": 60612,
"fmtType": "TRICE8_5",
"fmtStrg": "tst:TRICE8 %d %d %d %d %d\\n",
"created": 0,
"removed": 0
},
{
"id": 16507,
"fmtType": "TRICE8_6",
"fmtStrg": "tst:TRICE8 %d %d %d %d %d %d \\n",
"created": 0,
"removed": 0
},
{
"id": 59378,
"fmtType": "TRICE8_7",
"fmtStrg": "tst:TRICE8 %d %d %d %d %d %d %d\\n",
"created": 0,
"removed": 0
},
{
"id": 62543,
"fmtType": "TRICE8_8",
"fmtStrg": "tst:TRICE8 %d %d %d %d %d %d %d %d\\n",
"created": 0,
"removed": 0
},
{
"id": 34028,
"fmtType": "TRICE16_1",
"fmtStrg": "tst:TRICE16 %d\\n",
"created": 0,
"removed": 0
},
{
"id": 472,
"fmtType": "TRICE16_2",
"fmtStrg": "tst:TRICE16 %d %d\\n",
"created": 0,
"removed": 0
},
{
"id": 1669,
"fmtType": "TRICE16_3",
"fmtStrg": "tst:TRICE16 %d %d %d\\n",
"created": 0,
"removed": 0
},
{
"id": 21923,
"fmtType": "TRICE16_4",
"fmtStrg": "tst:TRICE16 %d %d %d %d\\n",
"created": 0,
"removed": 0
},
{
"id": 42242,
"fmtType": "TRICE32_2",
"fmtStrg": "tst:TRICE32 %%09x -\u003e %09x %09x",
"created": 0,
"removed": 0
},
{
"id": 60761,
"fmtType": "TRICE32_2",
"fmtStrg": "tst: %09x %09x\\n",
"created": 0,
"removed": 0
},
{
"id": 16891,
"fmtType": "TRICE32_2",
"fmtStrg": "tst:TRICE32 %%11d -\u003e %11d %11d",
"created": 0,
"removed": 0
},
{
"id": 53650,
"fmtType": "TRICE32_2",
"fmtStrg": "tst: %11d %11d\\n",
"created": 0,
"removed": 0
},
{
"id": 15285,
"fmtType": "TRICE32_2",
"fmtStrg": "tst:TRICE32 %%12o -\u003e %12o %12o",
"created": 0,
"removed": 0
},
{
"id": 61124,
"fmtType": "TRICE32_2",
"fmtStrg": "tst: %12o %12o\\n",
"created": 0,
"removed": 0
},
{
"id": 63805,
"fmtType": "TRICE_S",
"fmtStrg": "sig:generated=%s\\n",
"created": 0,
"removed": 0
},
{
"id": 44213,
"fmtType": "TRICE_S",
"fmtStrg": "sig:generated=%s\\n",
"created": 0,
"removed": 0
}
]`
	doUpdate(t, sOri, sExp, listExp)
}
// TestUpdateZeroIds covers the single TRICE_S macro case, including
// empty source fragments, and checks the assigned ID and ID list.
func TestUpdateZeroIds(t *testing.T) {
	sOri := []string{`
TRICE_S( Id(0), "tst:runtime string %s.\n", x)
`, `
`, `
`, `
`}
	sExp := []string{`
TRICE_S( Id(43274), "tst:runtime string %s.\n", x)
`, `
`, `
`, `
`}
	listExp := `[
{
"id": 43274,
"fmtType": "TRICE_S",
"fmtStrg": "tst:runtime string %s.\\n",
"created": 0,
"removed": 0
}
]`
	doUpdate(t, sOri, sExp, listExp)
}
|
// +build it
package main
import (
"encoding/json"
"log"
"net/http"
"os"
"testing"
"github.com/TempleEight/spec-golang/auth/comm"
"github.com/TempleEight/spec-golang/auth/dao"
"github.com/TempleEight/spec-golang/auth/util"
"github.com/dgrijalva/jwt-go"
"github.com/google/uuid"
)
// environment holds the shared handles (DAO, comm layer, JWT
// credential, hooks) used by every integration test in this file.
var environment env
// TestMain boots the integration environment before running the suite:
// it loads the service config, initializes the DAO and comm layer, and
// creates the JWT credential. Any failure aborts the whole run.
func TestMain(m *testing.M) {
	config, err := util.GetConfig("/etc/auth-service/config.json")
	if err != nil {
		log.Fatal(err)
	}
	d, err := dao.Init(config)
	if err != nil {
		log.Fatal(err)
	}
	c := comm.Init(config)
	jwtCredential, err := c.CreateJWTCredential()
	if err != nil {
		log.Fatal(err)
	}
	// Populate the package-level environment shared by all tests.
	environment = env{d, c, jwtCredential, Hook{}}
	os.Exit(m.Run())
}
// TestIntegrationAuth registers an auth, logs in with the same
// credentials, and validates the JWT returned by both endpoints.
func TestIntegrationAuth(t *testing.T) {
	// Create a single auth
	res, err := makeRequest(environment, http.MethodPost, "/auth/register", `{"email": "jay@test.com", "password": "BlackcurrantCrush123"}`)
	if err != nil {
		t.Fatalf("Could not make request: %s", err.Error())
	}
	if res.Code != http.StatusOK {
		t.Errorf("Wrong status code: %v", res.Code)
	}
	validateAuthToken(t, res.Body.String())

	// Access that same auth
	res, err = makeRequest(environment, http.MethodPost, "/auth/login", `{"email": "jay@test.com", "password":"BlackcurrantCrush123"}`)
	if err != nil {
		t.Fatalf("Could not make GET request: %s", err.Error())
	}
	if res.Code != http.StatusOK {
		t.Errorf("Wrong status code: %v", res.Code)
	}
	validateAuthToken(t, res.Body.String())
}

// validateAuthToken decodes a JSON body containing an AccessToken,
// parses it as a JWT without verifying the signature, and checks that
// the id claim is a valid UUID and the iss claim matches our
// credential key. Extracted from the two duplicated validation passes
// in TestIntegrationAuth; also fixes a nil-pointer crash where
// err.Error() was formatted on a path where err is guaranteed nil.
func validateAuthToken(t *testing.T, body string) {
	t.Helper()
	var decoded map[string]string
	if err := json.Unmarshal([]byte(body), &decoded); err != nil {
		t.Fatalf("Could not decode json: %s", err.Error())
	}
	rawToken, ok := decoded["AccessToken"]
	if !ok {
		// The original formatted err.Error() here, but err is nil on
		// this path and would itself have panicked.
		t.Fatalf("Token doesn't contain an access token")
	}
	token, _, err := new(jwt.Parser).ParseUnverified(rawToken, jwt.MapClaims{})
	if err != nil {
		t.Fatalf("Could not decode JWT: %s", err.Error())
	}
	claims, ok := token.Claims.(jwt.MapClaims)
	if !ok {
		t.Fatalf("Could not decode claims")
	}
	id, ok := claims["id"]
	if !ok {
		t.Fatalf("Claims doesn't contain an ID key")
	}
	if _, err := uuid.Parse(id.(string)); err != nil {
		t.Fatalf("ID is not a valid UUID")
	}
	iss, ok := claims["iss"]
	if !ok {
		t.Fatalf("Claims doesn't contain an iss key")
	}
	if iss.(string) != environment.jwtCredential.Key {
		t.Fatalf("iss is incorrect: found %v, wanted %s", iss, environment.jwtCredential.Key)
	}
}
|
package clouddatastore
import (
"cloud.google.com/go/datastore"
w "go.mercari.io/datastore"
)
// toOriginalKey converts a wrapper key to the SDK key type, recursing
// through the parent chain; nil in, nil out.
func toOriginalKey(key w.Key) *datastore.Key {
	if key == nil {
		return nil
	}
	return &datastore.Key{
		Kind: key.Kind(),
		ID: key.ID(),
		Name: key.Name(),
		Parent: toOriginalKey(key.ParentKey()),
		Namespace: key.Namespace(),
	}
}
// toOriginalKeys converts a slice of wrapper keys to SDK keys,
// preserving order; a nil slice converts to nil.
func toOriginalKeys(keys []w.Key) []*datastore.Key {
	if keys == nil {
		return nil
	}
	converted := make([]*datastore.Key, len(keys))
	for i := range keys {
		converted[i] = toOriginalKey(keys[i])
	}
	return converted
}
// toWrapperKey converts an SDK key into the wrapper's concrete
// *keyImpl, recursing through the parent chain. Note it returns a
// typed *keyImpl, so callers storing the result in a w.Key interface
// must nil-check the input themselves to avoid a non-nil interface
// holding a nil pointer (see toWrapperEntity).
func toWrapperKey(key *datastore.Key) *keyImpl {
	if key == nil {
		return nil
	}
	return &keyImpl{
		kind: key.Kind,
		id: key.ID,
		name: key.Name,
		parent: toWrapperKey(key.Parent),
		namespace: key.Namespace,
	}
}
// toOriginalPendingKey extracts the SDK pending key that was stashed
// in the wrapper pending key's stored context. It returns nil when the
// input is nil, when the context carries no *pendingKeyImpl, or when
// the stored pending key itself is nil.
func toOriginalPendingKey(pKey w.PendingKey) *datastore.PendingKey {
	if pKey == nil {
		return nil
	}
	pk, ok := pKey.StoredContext().Value(contextPendingKey{}).(*pendingKeyImpl)
	if !ok {
		return nil
	}
	if pk == nil || pk.pendingKey == nil {
		return nil
	}
	return pk.pendingKey
}
// toWrapperKeys converts a slice of SDK keys to wrapper keys,
// preserving order; a nil slice converts to nil.
func toWrapperKeys(keys []*datastore.Key) []w.Key {
	if keys == nil {
		return nil
	}
	converted := make([]w.Key, len(keys))
	for i := range keys {
		converted[i] = toWrapperKey(keys[i])
	}
	return converted
}
// toWrapperPendingKey wraps an SDK pending key; nil in, nil out.
func toWrapperPendingKey(pendingKey *datastore.PendingKey) *pendingKeyImpl {
	if pendingKey == nil {
		return nil
	}
	return &pendingKeyImpl{
		pendingKey: pendingKey,
	}
}
// toWrapperPendingKeys converts a slice of SDK pending keys to wrapper
// pending keys, preserving order; a nil slice converts to nil.
func toWrapperPendingKeys(keys []*datastore.PendingKey) []w.PendingKey {
	if keys == nil {
		return nil
	}
	converted := make([]w.PendingKey, len(keys))
	for i := range keys {
		converted[i] = toWrapperPendingKey(keys[i])
	}
	return converted
}
// toWrapperError maps a cloud datastore SDK error onto the wrapper
// package's equivalent: nil stays nil, the four sentinel errors map
// one-to-one, *datastore.ErrFieldMismatch is copied field-by-field,
// datastore.MultiError is converted element-wise (preserving nil
// slots), and any other error is returned unchanged.
func toWrapperError(err error) error {
	if err == nil {
		return nil
	}
	switch {
	case err == datastore.ErrNoSuchEntity:
		return w.ErrNoSuchEntity
	case err == datastore.ErrConcurrentTransaction:
		return w.ErrConcurrentTransaction
	case err == datastore.ErrInvalidEntityType:
		return w.ErrInvalidEntityType
	case err == datastore.ErrInvalidKey:
		return w.ErrInvalidKey
	default:
		switch err := err.(type) {
		case *datastore.ErrFieldMismatch:
			return &w.ErrFieldMismatch{
				StructType: err.StructType,
				FieldName: err.FieldName,
				Reason: err.Reason,
			}
		case datastore.MultiError:
			merr := err
			newErr := make(w.MultiError, 0, len(merr))
			for _, err := range merr {
				if err != nil {
					// Recurse so nested sentinel errors are mapped too.
					newErr = append(newErr, toWrapperError(err))
					continue
				}
				newErr = append(newErr, nil)
			}
			return newErr
		}
		return err
	}
}
// toOriginalEntity converts a wrapper entity (key + property list) to
// the SDK entity type; nil in, nil out.
func toOriginalEntity(entity *w.Entity) *datastore.Entity {
	if entity == nil {
		return nil
	}
	return &datastore.Entity{
		Key: toOriginalKey(entity.Key),
		Properties: toOriginalPropertyList(entity.Properties),
	}
}
// toOriginalValue recursively converts a wrapper property value to its
// SDK representation: entities, keys, geo points, and homogeneous or
// mixed slices thereof. Any other type passes through unchanged.
func toOriginalValue(v interface{}) interface{} {
	switch v := v.(type) {
	case []interface{}:
		// Mixed slice: convert each element recursively.
		vs := v
		origVs := make([]interface{}, 0, len(v))
		for _, v := range vs {
			origVs = append(origVs, toOriginalValue(v))
		}
		return origVs
	case *w.Entity:
		return toOriginalEntity(v)
	case []*w.Entity:
		vs := v
		origVs := make([]*datastore.Entity, 0, len(v))
		for _, v := range vs {
			origVs = append(origVs, toOriginalValue(v).(*datastore.Entity))
		}
		return origVs
	case w.Key:
		return toOriginalKey(v)
	case []w.Key:
		return toOriginalKeys(v)
	case w.GeoPoint:
		return datastore.GeoPoint{Lat: v.Lat, Lng: v.Lng}
	case []w.GeoPoint:
		vs := v
		origVs := make([]datastore.GeoPoint, 0, len(v))
		for _, v := range vs {
			origVs = append(origVs, toOriginalValue(v).(datastore.GeoPoint))
		}
		return origVs
	default:
		// Primitive values need no conversion.
		return v
	}
}
// toWrapperValue is the inverse of toOriginalValue: it recursively
// converts SDK property values (entities, keys, geo points, and slices
// thereof) to wrapper values; any other type passes through unchanged.
func toWrapperValue(v interface{}) interface{} {
	switch v := v.(type) {
	case []interface{}:
		vs := v
		wVs := make([]interface{}, 0, len(v))
		for _, v := range vs {
			wVs = append(wVs, toWrapperValue(v))
		}
		return wVs
	case *datastore.Entity:
		// Explicit nil check so a nil *datastore.Entity converts to a
		// true nil interface rather than a typed-nil *w.Entity.
		if v == nil {
			return nil
		}
		return toWrapperEntity(v)
	case []*datastore.Entity:
		vs := v
		wVs := make([]*w.Entity, 0, len(v))
		for _, v := range vs {
			wVs = append(wVs, toWrapperValue(v).(*w.Entity))
		}
		return wVs
	case *datastore.Key:
		return toWrapperKey(v)
	case []*datastore.Key:
		return toWrapperKeys(v)
	case datastore.GeoPoint:
		return w.GeoPoint{Lat: v.Lat, Lng: v.Lng}
	case []datastore.GeoPoint:
		vs := v
		wVs := make([]w.GeoPoint, 0, len(v))
		for _, v := range vs {
			wVs = append(wVs, toWrapperValue(v).(w.GeoPoint))
		}
		return wVs
	default:
		return v
	}
}
// toOriginalProperty converts one wrapper property to the SDK type,
// converting its value recursively.
func toOriginalProperty(p w.Property) datastore.Property {
	return datastore.Property{
		Name: p.Name,
		Value: toOriginalValue(p.Value),
		NoIndex: p.NoIndex,
	}
}
// toOriginalPropertyList converts a wrapper property list to the SDK
// type, preserving order; nil in, nil out.
func toOriginalPropertyList(ps w.PropertyList) datastore.PropertyList {
	if ps == nil {
		return nil
	}
	converted := make(datastore.PropertyList, len(ps))
	for i := range ps {
		converted[i] = toOriginalProperty(ps[i])
	}
	return converted
}
// toOriginalPropertyListList converts a slice of wrapper property
// lists to SDK property lists, preserving order; nil in, nil out.
func toOriginalPropertyListList(pss []w.PropertyList) []datastore.PropertyList {
	if pss == nil {
		return nil
	}
	converted := make([]datastore.PropertyList, len(pss))
	for i := range pss {
		converted[i] = toOriginalPropertyList(pss[i])
	}
	return converted
}
// toWrapperEntity converts an SDK entity to the wrapper type; nil in,
// nil out.
func toWrapperEntity(entity *datastore.Entity) *w.Entity {
	if entity == nil {
		return nil
	}
	wrapperEntity := &w.Entity{
		Properties: toWrapperPropertyList(entity.Properties),
	}
	// Deliberate explicit branch: toWrapperKey returns a typed *keyImpl,
	// and assigning a nil *keyImpl into the w.Key interface field would
	// make it non-nil. Only convert when the SDK key is non-nil.
	if entity.Key == nil {
		wrapperEntity.Key = nil
	} else {
		wrapperEntity.Key = toWrapperKey(entity.Key)
	}
	return wrapperEntity
}
// toWrapperProperty converts one SDK property to the wrapper type,
// converting its value recursively.
func toWrapperProperty(p datastore.Property) w.Property {
	return w.Property{
		Name: p.Name,
		Value: toWrapperValue(p.Value),
		NoIndex: p.NoIndex,
	}
}
// toWrapperPropertyList converts an SDK property list to the wrapper
// type, preserving order; nil in, nil out.
func toWrapperPropertyList(ps datastore.PropertyList) w.PropertyList {
	if ps == nil {
		return nil
	}
	converted := make(w.PropertyList, len(ps))
	for i := range ps {
		converted[i] = toWrapperProperty(ps[i])
	}
	return converted
}
// toWrapperPropertyListList converts a slice of SDK property lists to
// wrapper property lists, preserving order; nil in, nil out.
func toWrapperPropertyListList(pss []datastore.PropertyList) []w.PropertyList {
	if pss == nil {
		return nil
	}
	converted := make([]w.PropertyList, len(pss))
	for i := range pss {
		converted[i] = toWrapperPropertyList(pss[i])
	}
	return converted
}
// toOriginalTransaction unwraps the SDK transaction backing tx. It
// panics with "not in transaction" when tx is nil. The original
// performed the type assertion and getTx call before its nil check, so
// a nil tx produced a nil-pointer/assertion panic instead of the
// intended message; checking first restores the intended failure mode.
func toOriginalTransaction(tx w.Transaction) *datastore.Transaction {
	if tx == nil {
		panic("not in transaction")
	}
	return getTx(tx.(*transactionImpl).client.ctx)
}
|
package Problem0140
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
// tcs is the testcase slice: input string, dictionary, and the
// expected sentence decompositions.
var tcs = []struct {
	s string
	wordDict []string
	ans []string
}{
	{
		"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
		[]string{"a", "aa", "aaa", "aaaa", "aaaaa", "aaaaaa", "aaaaaaa", "aaaaaaaa", "aaaaaaaaa", "aaaaaaaaaa"},
		[]string{},
	},
	{
		"catsanddog",
		[]string{"cat", "cats", "and", "sand", "dog"},
		[]string{"cat sand dog", "cats and dog"},
	},
	{
		"catsanddog",
		[]string{},
		[]string{},
	},
	// more test cases may be added here
}
// Test_wordBreak checks wordBreak against every fixture in tcs.
func Test_wordBreak(t *testing.T) {
	ast := assert.New(t)
	for _, tc := range tcs {
		fmt.Printf("~~%v~~\n", tc)
		ast.Equal(tc.ans, wordBreak(tc.s, tc.wordDict), "输入:%v", tc)
	}
}
// Benchmark_wordBreak measures wordBreak over all fixtures per iteration.
func Benchmark_wordBreak(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for _, tc := range tcs {
			wordBreak(tc.s, tc.wordDict)
		}
	}
}
|
package main
import (
"fmt"
"io"
"os"
"github.com/urfave/cli"
"github.com/polydawn/refmt/cbor"
"github.com/polydawn/refmt/json"
"github.com/polydawn/refmt/pretty"
"github.com/polydawn/refmt/shared"
)
// main delegates to Main with the real process streams and exits with
// its status code.
func main() {
	os.Exit(Main(os.Args, os.Stdin, os.Stdout, os.Stderr))
}
// Main builds and runs the refmt CLI against the supplied streams and
// returns a process exit code (0 on success, 1 on error). Every
// subcommand is a TokenPump wiring a decoder for the input format to
// an encoder for the output format.
func Main(args []string, stdin io.Reader, stdout, stderr io.Writer) int {
	app := cli.NewApp()
	app.Name = "refmt"
	app.Authors = []cli.Author{
		cli.Author{Name: "Eric Myhre", Email: "hash@exultant.us"},
	}
	app.Commands = []cli.Command{
		//
		// Prettyprinters
		//
		cli.Command{
			Category: "prettyprint",
			Name: "json=pretty",
			Usage: "read json, then pretty print it",
			Action: func(c *cli.Context) error {
				return shared.TokenPump{
					json.NewDecoder(stdin),
					pretty.NewEncoder(stdout),
				}.Run()
			},
		},
		cli.Command{
			Category: "prettyprint",
			Name: "cbor=pretty",
			Usage: "read cbor, then pretty print it",
			Action: func(c *cli.Context) error {
				return shared.TokenPump{
					cbor.NewDecoder(cbor.DecodeOptions{}, stdin),
					pretty.NewEncoder(stdout),
				}.Run()
			},
		},
		cli.Command{
			Category: "prettyprint",
			Name: "cbor.hex=pretty",
			Usage: "read cbor in hex, then pretty print it",
			Action: func(c *cli.Context) error {
				return shared.TokenPump{
					cbor.NewDecoder(cbor.DecodeOptions{}, hexReader(stdin)),
					pretty.NewEncoder(stdout),
				}.Run()
			},
		},
		cli.Command{
			Category: "prettyprint",
			Name: "yaml=pretty",
			Usage: "read yaml, then pretty print it",
			Action: func(c *cli.Context) error {
				return shared.TokenPump{
					newYamlTokenSource(stdin),
					pretty.NewEncoder(stdout),
				}.Run()
			},
		},
		//
		// Converters
		//
		cli.Command{
			Category: "convert",
			Name: "json=cbor",
			Usage: "read json, emit equivalent cbor",
			Action: func(c *cli.Context) error {
				return shared.TokenPump{
					json.NewDecoder(stdin),
					cbor.NewEncoder(stdout),
				}.Run()
			},
		},
		cli.Command{
			Category: "convert",
			Name: "json=cbor.hex",
			Usage: "read json, emit equivalent cbor in hex",
			Action: func(c *cli.Context) error {
				return shared.TokenPump{
					json.NewDecoder(stdin),
					cbor.NewEncoder(hexWriter{stdout}),
				}.Run()
			},
		},
		cli.Command{
			Category: "convert",
			Name: "cbor=json",
			Usage: "read cbor, emit equivalent json",
			Action: func(c *cli.Context) error {
				return shared.TokenPump{
					cbor.NewDecoder(cbor.DecodeOptions{}, stdin),
					json.NewEncoder(stdout, json.EncodeOptions{}),
				}.Run()
			},
		},
		cli.Command{
			Category: "convert",
			Name: "cbor.hex=json",
			Usage: "read cbor in hex, emit equivalent json",
			Action: func(c *cli.Context) error {
				return shared.TokenPump{
					cbor.NewDecoder(cbor.DecodeOptions{}, hexReader(stdin)),
					json.NewEncoder(stdout, json.EncodeOptions{}),
				}.Run()
			},
		},
		cli.Command{
			Category: "convert",
			Name: "yaml=json",
			Usage: "read yaml, emit equivalent json",
			Action: func(c *cli.Context) error {
				return shared.TokenPump{
					newYamlTokenSource(stdin),
					json.NewEncoder(stdout, json.EncodeOptions{}),
				}.Run()
			},
		},
		cli.Command{
			Category: "convert",
			Name: "yaml=cbor",
			Usage: "read yaml, emit equivalent cbor",
			Action: func(c *cli.Context) error {
				return shared.TokenPump{
					newYamlTokenSource(stdin),
					cbor.NewEncoder(stdout),
				}.Run()
			},
		},
		cli.Command{
			Category: "convert",
			Name: "yaml=cbor.hex",
			Usage: "read yaml, emit equivalent cbor in hex",
			Action: func(c *cli.Context) error {
				return shared.TokenPump{
					newYamlTokenSource(stdin),
					cbor.NewEncoder(hexWriter{stdout}),
				}.Run()
			},
		},
	}
	// Route the CLI's own output to the injected streams so callers
	// (and tests) can capture it.
	app.Writer = stdout
	app.ErrWriter = stderr
	err := app.Run(args)
	if err != nil {
		fmt.Fprintf(stderr, "error: %s\n", err)
		return 1
	}
	return 0
}
|
package client
import (
"errors"
"fmt"
"strings"
"github.com/wish/ctl/pkg/client/types"
)
// Helpers for finding a specific pod

// findPod lists pods over the given contexts/namespace and returns the
// one whose name matches exactly. Returns an error when the listing
// fails or no pod matches.
func (c *Client) findPod(contexts []string, namespace, name string, options ListOptions) (*types.PodDiscovery, error) {
	list, err := c.ListPodsOverContexts(contexts, namespace, options)
	if err != nil {
		return nil, err
	}
	// Return on an explicit match. The previous version compared a
	// zero-value PodDiscovery's Name afterwards, which misfired when
	// name was the empty string (the zero value "matched").
	for i := range list {
		if list[i].Name == name {
			return &list[i], nil
		}
	}
	return nil, errors.New("pod not found") // TODO return value
}
// FindPodWithContainer returns a pod with a container. If multiple containers are available and none is specified, then the first one is returned.
func (c *Client) FindPodWithContainer(contexts []string, namespace, name, optionalContainer string, options ListOptions) (pod *types.PodDiscovery, container string, err error) {
pod, err = c.findPod(contexts, namespace, name, options)
if err != nil {
return
}
if optionalContainer == "" {
if len(pod.Spec.Containers) > 0 {
container = pod.Spec.Containers[0].Name
var s []string
for _, c := range pod.Spec.Containers {
s = append(s, c.Name)
}
fmt.Println("Available containers are:", strings.Join(s, ", "))
fmt.Println("No container specified, defaulting to the first container:", container)
} else {
err = errors.New("there are no containers on this pod")
}
} else {
container = optionalContainer
}
return
}
|
package main
import (
"fmt"
"os/exec"
"github.com/mattn/go-tty"
)
// runTinyGo runs tinygo inside a Docker container. currentDir is
// mounted into the container's GOPATH at /go/<targetPath>, which is
// also the working directory. In cmdMode the image's own entrypoint is
// used instead of the tinygo binary. args are appended to the command
// line; verbose prints the assembled command. The command's stdio is
// attached to the controlling TTY.
func runTinyGo(dockerImage, currentDir, targetPath string, args []string, verbose, cmdMode bool) error {
	// Build the docker invocation once instead of duplicating it for
	// both modes (the only difference is the trailing `tinygo`).
	dockerArgs := []string{
		`run`, `-it`, `--rm`,
		`-v`, fmt.Sprintf(`%s:/go/%s`, currentDir, targetPath),
		`-w`, fmt.Sprintf(`/go/%s`, targetPath),
		`-e`, `GOPATH=/go`,
		dockerImage,
	}
	if !cmdMode {
		dockerArgs = append(dockerArgs, `tinygo`)
	}
	dockerArgs = append(dockerArgs, args...)
	cmd := exec.Command(`docker`, dockerArgs...)
	if verbose {
		fmt.Println(cmd)
	}
	// Attach the command to the controlling TTY so interactive docker
	// (`-it`) behaves as expected.
	tty, err := tty.Open()
	if err != nil {
		return err
	}
	defer tty.Close()
	cmd.Stdin = tty.Input()
	cmd.Stdout = tty.Output()
	cmd.Stderr = tty.Output()
	return cmd.Run()
}
|
package p2pNetwork
import (
"errors"
"fmt"
"github.com/HNB-ECO/HNB-Blockchain/HNB/config"
"github.com/HNB-ECO/HNB-Blockchain/HNB/p2pNetwork/common"
"github.com/HNB-ECO/HNB-Blockchain/HNB/p2pNetwork/message/bean"
"github.com/HNB-ECO/HNB-Blockchain/HNB/p2pNetwork/message/reqMsg"
"github.com/HNB-ECO/HNB-Blockchain/HNB/p2pNetwork/peer"
"github.com/HNB-ECO/HNB-Blockchain/HNB/p2pNetwork/server"
"math/rand"
"net"
"strconv"
"strings"
"sync"
"time"
)
// NewNetServer constructs a NetSubServer with its sync and consensus
// message channels and peer-address maps allocated, runs init to load
// configuration, and returns it as a server.P2P.
func NewNetServer() server.P2P {
	srv := &NetSubServer{
		SyncChan: make(chan *bean.MsgPayload, common.CHAN_CAPABILITY),
		ConsChan: make(chan *bean.MsgPayload, common.CHAN_CAPABILITY),
	}
	srv.PeerAddrMap.PeerSyncAddress = map[string]*peer.Peer{}
	srv.PeerAddrMap.PeerConsAddress = map[string]*peer.Peer{}
	srv.init()
	return srv
}
// NetSubServer is the concrete P2P server. It owns the sync and
// consensus listeners, the inbound message channels, and all peer
// bookkeeping: the neighbor set plus address-indexed maps and
// inbound/outbound connection records.
type NetSubServer struct {
	peerConn     peer.PeerCom // this node's identity: version, services, ports, relay flag, ID
	synclistener net.Listener // listener for block-sync connections
	conslistener net.Listener // listener for consensus connections
	SyncChan     chan *bean.MsgPayload // inbound sync messages
	ConsChan     chan *bean.MsgPayload // inbound consensus messages
	ConnectingNodes           // addresses with outbound dials in progress (embedded)
	PeerAddrMap               // addr -> peer lookup maps (embedded)
	Np           *peer.NbrPeers // established neighbor set
	connectLock  sync.Mutex    // serializes Connect's record bookkeeping
	inConnRecord InConnectionRecord  // addresses of accepted inbound connections
	outConnRecord OutConnectionRecord // addresses of dialed outbound connections
	OwnAddress   string // this node's own external address (self-dial guard)
}

// InConnectionRecord tracks accepted inbound connection addresses,
// guarded by its embedded RWMutex.
type InConnectionRecord struct {
	sync.RWMutex
	InConnectingAddrs []string
}

// OutConnectionRecord tracks dialed outbound connection addresses,
// guarded by its embedded RWMutex.
type OutConnectionRecord struct {
	sync.RWMutex
	OutConnectingAddrs []string
}

// ConnectingNodes tracks addresses whose outbound dial is still in
// progress, guarded by its embedded RWMutex.
type ConnectingNodes struct {
	sync.RWMutex
	ConnectingAddrs []string
}

// PeerAddrMap indexes peers by remote address, one map per link kind,
// guarded by its embedded RWMutex.
type PeerAddrMap struct {
	sync.RWMutex
	PeerSyncAddress map[string]*peer.Peer
	PeerConsAddress map[string]*peer.Peer
}
// init loads this node's identity and transport parameters from the
// global config: protocol version, service bits (verify node when
// consensus is enabled, plain service node otherwise), sync/cons
// ports, relay flag, and a freshly drawn random peer ID. Always
// returns nil.
func (ns *NetSubServer) init() error {
	ns.peerConn.SetVersion(common.PROTOCOL_VERSION)
	if config.Config.EnableConsensus {
		ns.peerConn.SetServices(uint64(common.VERIFY_NODE))
	} else {
		ns.peerConn.SetServices(uint64(common.SERVICE_NODE))
	}
	ns.peerConn.SetSyncPort(config.Config.SyncPort)
	ns.peerConn.SetConsPort(config.Config.ConsPort)
	ns.peerConn.SetRelay(true)
	// Random peer ID. NOTE(review): no collision handling between
	// nodes is visible here — confirm that is acceptable.
	rand.Seed(time.Now().UnixNano())
	id := rand.Uint64()
	ns.peerConn.SetID(id)
	ns.Np = &peer.NbrPeers{}
	ns.Np.Init()
	logmsg := fmt.Sprintf("peerID:%v, sync port:%v, cons port:%v",
		id, config.Config.SyncPort, config.Config.ConsPort)
	P2PLog.Info(LOGTABLE_NETWORK, logmsg)
	return nil
}
// Start opens the sync and consensus listeners. A listen failure is
// logged; the server is then simply not listening.
func (ns *NetSubServer) Start() {
	if err := ns.startListening(); err != nil {
		// Previously this error was silently discarded, hiding a
		// failed startup; surface it in the log.
		P2PLog.Error(LOGTABLE_NETWORK, err.Error())
	}
}
// GetVersion returns the local protocol version (delegates to peerConn).
func (ns *NetSubServer) GetVersion() uint32 {
	return ns.peerConn.GetVersion()
}

// GetID returns this node's random peer ID.
func (ns *NetSubServer) GetID() uint64 {
	return ns.peerConn.GetID()
}

// SetHeight records this node's height (delegates to peerConn).
func (ns *NetSubServer) SetHeight(height uint64) {
	ns.peerConn.SetHeight(height)
}

// GetHeight returns the recorded node height.
func (ns *NetSubServer) GetHeight() uint64 {
	return ns.peerConn.GetHeight()
}

// GetTime returns the current wall-clock time in nanoseconds.
func (ns *NetSubServer) GetTime() int64 {
	t := time.Now()
	return t.UnixNano()
}

// GetServices returns this node's service bits (verify/service node).
func (ns *NetSubServer) GetServices() uint64 {
	return ns.peerConn.GetServices()
}

// GetSyncPort returns the local sync listen port.
func (ns *NetSubServer) GetSyncPort() uint16 {
	return ns.peerConn.GetSyncPort()
}

// GetConsPort returns the local consensus listen port.
func (ns *NetSubServer) GetConsPort() uint16 {
	return ns.peerConn.GetConsPort()
}

// GetHttpInfoPort returns the HTTP info port (delegates to peerConn).
func (ns *NetSubServer) GetHttpInfoPort() uint16 {
	return ns.peerConn.GetHttpInfoPort()
}

// GetRelay reports whether this node relays messages (set true in init).
func (ns *NetSubServer) GetRelay() bool {
	return ns.peerConn.GetRelay()
}

// GetPeer returns the neighbor with the given ID, if any.
func (ns *NetSubServer) GetPeer(id uint64) *peer.Peer {
	return ns.Np.GetPeer(id)
}

// GetNp exposes the neighbor-peer registry.
func (ns *NetSubServer) GetNp() *peer.NbrPeers {
	return ns.Np
}

// GetNeighborAddrs returns the addresses of the current neighbors.
func (ns *NetSubServer) GetNeighborAddrs() []common.PeerAddr {
	return ns.Np.GetNeighborAddrs()
}

// GetConnectionCnt returns the number of neighbor connections.
func (ns *NetSubServer) GetConnectionCnt() uint32 {
	return ns.Np.GetNbrNodeCnt()
}
// AddNbrNode registers remotePeer in the neighbor set.
func (ns *NetSubServer) AddNbrNode(remotePeer *peer.Peer) {
	ns.Np.AddNbrNode(remotePeer)
}

// DelNbrNode removes the neighbor with the given ID, returning it and
// whether it was present.
func (ns *NetSubServer) DelNbrNode(id uint64) (*peer.Peer, bool) {
	return ns.Np.DelNbrNode(id)
}

// GetNeighbors returns the current neighbor peers.
func (ns *NetSubServer) GetNeighbors() []*peer.Peer {
	return ns.Np.GetNeighbors()
}

// NodeEstablished reports whether the neighbor with id has an
// established connection (delegates to NbrPeers).
func (ns *NetSubServer) NodeEstablished(id uint64) bool {
	return ns.Np.NodeEstablished(id)
}

// Xmit broadcasts msg to all neighbors on the sync or consensus link.
func (ns *NetSubServer) Xmit(msg bean.Message, isCons bool) {
	ns.Np.Broadcast(msg, isCons)
}

// GetMsgChan returns the inbound channel for consensus or sync
// messages, respectively.
func (ns *NetSubServer) GetMsgChan(isConsensus bool) chan *bean.MsgPayload {
	if isConsensus {
		return ns.ConsChan
	} else {
		return ns.SyncChan
	}
}

// Send forwards msg to p on the chosen link; a nil peer is rejected
// with an error.
func (ns *NetSubServer) Send(p *peer.Peer, msg bean.Message, isConsensus bool) error {
	if p != nil {
		return p.Send(msg, isConsensus)
	}
	return errors.New("send to a invalid peer")
}

// IsPeerEstablished reports whether p (non-nil) is established in the
// neighbor set.
func (ns *NetSubServer) IsPeerEstablished(p *peer.Peer) bool {
	if p != nil {
		return ns.Np.NodeEstablished(p.GetID())
	}
	return false
}
// Connect dials addr on the sync (isConsensus=false) or consensus
// (isConsensus=true) link, registers the new peer, starts its receive
// goroutine, and opens the handshake by sending our version message.
// Self-dials, invalid addresses, duplicates, and over-limit outbound
// connections are skipped (mostly returning nil).
func (ns *NetSubServer) Connect(addr string, isConsensus bool) error {
	if ns.IsAddrInOutConnRecord(addr) {
		P2PLog.Warningf(LOGTABLE_NETWORK, "addr:%s is out conn", addr)
		return nil
	}
	if ns.IsOwnAddress(addr) {
		return nil
	}
	if !ns.AddrValid(addr) {
		return nil
	}
	// Enforce the outbound cap. NOTE(review): the lock is released and
	// re-acquired below, so the cap check and the connecting-list
	// update are not atomic — confirm that is acceptable.
	ns.connectLock.Lock()
	connCount := uint(ns.GetOutConnRecordLen())
	if connCount >= config.Config.MaxConnOutBound {
		P2PLog.Warningf(LOGTABLE_NETWORK, "Connect: out connections(%d) reach the max limit(%d)",
			connCount, config.Config.MaxConnOutBound)
		ns.connectLock.Unlock()
		return errors.New("connect: out connections reach the max limit")
	}
	ns.connectLock.Unlock()
	if ns.IsNbrPeerAddr(addr, isConsensus) {
		return nil
	}
	// Already in the connecting list: bail out if the existing sync
	// link is live, otherwise drop the stale entry and proceed.
	ns.connectLock.Lock()
	if added := ns.AddOutConnectingList(addr); added == false {
		p := ns.GetPeerFromAddr(addr)
		if p != nil {
			if p.SyncLink.Valid() {
				//log.Info("node exist in connecting list", addr)
				ns.connectLock.Unlock()
				return errors.New("node exist in connecting list")
			}
		}
		ns.RemoveFromConnectingList(addr)
	}
	ns.connectLock.Unlock()
	// Dial with or without TLS per node configuration.
	isTls := config.Config.IsPeersTLS
	var conn net.Conn
	var err error
	var remotePeer *peer.Peer
	if isTls {
		conn, err = TLSDial(addr)
		if err != nil {
			ns.RemoveFromConnectingList(addr)
			//log.Error("connect failed: ", err)
			return err
		}
	} else {
		conn, err = nonTLSDial(addr)
		if err != nil {
			ns.RemoveFromConnectingList(addr)
			//log.Error("connect failed: ", err)
			return err
		}
	}
	// From here on use the resolved remote address of the live socket.
	addr = conn.RemoteAddr().String()
	msg := fmt.Sprintf("isConsensus:%v peer %s connect with %s with %s",
		isConsensus, conn.LocalAddr().String(), conn.RemoteAddr().String(),
		conn.RemoteAddr().Network())
	P2PLog.Info(LOGTABLE_NETWORK, msg)
	// Wrap the connection in a peer and start its reader on the
	// appropriate link; HAND marks the handshake as started.
	if !isConsensus {
		ns.AddOutConnRecord(addr)
		remotePeer = peer.NewPeer()
		ns.AddPeerSyncAddress(addr, remotePeer)
		remotePeer.SyncLink.SetAddr(addr)
		remotePeer.SyncLink.SetConn(conn)
		remotePeer.AttachSyncChan(ns.SyncChan)
		go remotePeer.SyncLink.Rx()
		remotePeer.SetSyncState(common.HAND)
	} else {
		remotePeer = peer.NewPeer()
		ns.AddPeerConsAddress(addr, remotePeer)
		remotePeer.ConsLink.SetAddr(addr)
		remotePeer.ConsLink.SetConn(conn)
		remotePeer.AttachConsChan(ns.ConsChan)
		go remotePeer.ConsLink.Rx()
		remotePeer.SetConsState(common.HAND)
	}
	// Handshake: we send our version message first.
	version := reqMsg.NewVersion(ns, isConsensus, 0)
	err = remotePeer.Send(version, isConsensus)
	if err != nil {
		if !isConsensus {
			ns.RemoveFromOutConnRecord(addr)
		}
		P2PLog.Error(LOGTABLE_NETWORK, err.Error())
		return err
	}
	return nil
}
// Halt closes every neighbor's sync and consensus links, then shuts
// down both listeners if they were opened.
func (ns *NetSubServer) Halt() {
	for _, nbr := range ns.Np.GetNeighbors() {
		nbr.CloseSync()
		nbr.CloseCons()
	}
	for _, l := range []net.Listener{ns.synclistener, ns.conslistener} {
		if l != nil {
			l.Close()
		}
	}
}
// startListening opens the sync listener first and, only when that
// succeeds, the consensus listener.
func (ns *NetSubServer) startListening() error {
	if err := ns.startSyncListening(ns.peerConn.GetSyncPort()); err != nil {
		//log.Error("start sync listening fail")
		return err
	}
	if err := ns.startConsListening(ns.peerConn.GetConsPort()); err != nil {
		return err
	}
	return nil
}
// startSyncListening creates the sync listener on port and spawns its
// accept loop.
func (ns *NetSubServer) startSyncListening(port uint16) error {
	var err error
	ns.synclistener, err = createListener(port)
	if err != nil {
		P2PLog.Error(LOGTABLE_NETWORK, fmt.Sprintf("failed to create sync listener err: %s", err.Error()))
		return errors.New("failed to create sync listener")
	}
	go ns.startSyncAccept(ns.synclistener)
	P2PLog.Info(LOGTABLE_NETWORK, fmt.Sprintf("start listen on sync port %d", port))
	return nil
}
// startConsListening creates the consensus listener on port and spawns
// its accept loop.
func (ns *NetSubServer) startConsListening(port uint16) error {
	var err error
	ns.conslistener, err = createListener(port)
	if err != nil {
		P2PLog.Error(LOGTABLE_NETWORK, fmt.Sprintf("failed to create cons listener err: %s", err.Error()))
		return errors.New("failed to create cons listener")
	}
	go ns.startConsAccept(ns.conslistener)
	P2PLog.Info(LOGTABLE_NETWORK, fmt.Sprintf("start listen on consensus port %d", port))
	return nil
}
// startSyncAccept is the accept loop for the sync listener. Each
// inbound connection is vetted (address filter, duplicate address,
// global inbound cap, per-IP inbound cap) and, if admitted, recorded
// and wrapped in a peer whose receive goroutine is started. The loop
// exits when Accept fails (e.g. the listener is closed by Halt).
func (ns *NetSubServer) startSyncAccept(listener net.Listener) {
	for {
		conn, err := listener.Accept()
		if err != nil {
			msg := fmt.Sprintf("accept sync err:%s", err.Error())
			P2PLog.Warning(LOGTABLE_NETWORK, msg)
			return
		}
		if !ns.AddrValid(conn.RemoteAddr().String()) {
			P2PLog.Warningf(LOGTABLE_NETWORK, "remote %s not in reserved list, close it ", conn.RemoteAddr())
			conn.Close()
			continue
		}
		msg := fmt.Sprintf("remote sync node connect with remote:%s load:%s",
			conn.RemoteAddr(), conn.LocalAddr())
		P2PLog.Info(LOGTABLE_NETWORK, msg)
		// Drop a second connection from an address we already track.
		if ns.IsAddrInInConnRecord(conn.RemoteAddr().String()) {
			conn.Close()
			continue
		}
		// Global inbound cap.
		syncAddrCount := uint(ns.GetInConnRecordLen())
		if syncAddrCount >= config.Config.MaxConnInBound {
			msg := fmt.Sprintf("SyncAccept: total connections(%d) reach the max limit(%d), conn closed",
				syncAddrCount, config.Config.MaxConnInBound)
			P2PLog.Warning(LOGTABLE_NETWORK, msg)
			conn.Close()
			continue
		}
		remoteIp, err := common.ParseIPAddr(conn.RemoteAddr().String())
		if err != nil {
			msg := fmt.Sprintf("parse ip err:%s ", err.Error())
			P2PLog.Warning(LOGTABLE_NETWORK, msg)
			conn.Close()
			continue
		}
		// Per-IP inbound cap.
		connNum := ns.GetIpCountInInConnRecord(remoteIp)
		if connNum >= config.Config.MaxConnInBoundForSingleIP {
			msg := fmt.Sprintf("SyncAccept: connections(%d) with ip(%s) has reach the max limit(%d), "+
				"conn closed", connNum, remoteIp, config.Config.MaxConnInBoundForSingleIP)
			P2PLog.Warning(LOGTABLE_NETWORK, msg)
			conn.Close()
			continue
		}
		// Admit: record the address, index the peer, start reading.
		remotePeer := peer.NewPeer()
		addr := conn.RemoteAddr().String()
		ns.AddInConnRecord(addr)
		ns.AddPeerSyncAddress(addr, remotePeer)
		remotePeer.SyncLink.SetAddr(addr)
		remotePeer.SyncLink.SetConn(conn)
		remotePeer.AttachSyncChan(ns.SyncChan)
		go remotePeer.SyncLink.Rx()
	}
}
// startConsAccept is the accept loop for the consensus listener. A
// consensus connection is only admitted when the same IP already has a
// recorded inbound sync connection. The loop exits when Accept fails
// (listener closed).
func (ns *NetSubServer) startConsAccept(listener net.Listener) {
	for {
		conn, err := listener.Accept()
		if err != nil {
			msg := fmt.Sprintf("accept cons err:%s", err.Error())
			P2PLog.Warning(LOGTABLE_NETWORK, msg)
			return
		}
		if !ns.AddrValid(conn.RemoteAddr().String()) {
			// log.Warnf("remote %s not in reserved list, close it ", conn.RemoteAddr())
			conn.Close()
			continue
		}
		msg := fmt.Sprintf("remote cons node connect with remote:%s load:%s",
			conn.RemoteAddr(), conn.LocalAddr())
		P2PLog.Info(LOGTABLE_NETWORK, msg)
		remoteIp, err := common.ParseIPAddr(conn.RemoteAddr().String())
		if err != nil {
			msg := fmt.Sprintf("parse ip err:%s ", err.Error())
			P2PLog.Warning(LOGTABLE_NETWORK, msg)
			conn.Close()
			continue
		}
		// Require an existing inbound sync connection from this IP.
		if !ns.IsIPInInConnRecord(remoteIp) {
			conn.Close()
			continue
		}
		remotePeer := peer.NewPeer()
		addr := conn.RemoteAddr().String()
		ns.AddPeerConsAddress(addr, remotePeer)
		remotePeer.ConsLink.SetAddr(addr)
		remotePeer.ConsLink.SetConn(conn)
		remotePeer.AttachConsChan(ns.ConsChan)
		go remotePeer.ConsLink.Rx()
	}
}
// AddOutConnectingList records addr as an in-progress outbound dial.
// It returns false when addr was already present.
func (ns *NetSubServer) AddOutConnectingList(addr string) (added bool) {
	ns.ConnectingNodes.Lock()
	defer ns.ConnectingNodes.Unlock()
	for _, existing := range ns.ConnectingAddrs {
		if existing == addr {
			return false
		}
	}
	ns.ConnectingAddrs = append(ns.ConnectingAddrs, addr)
	return true
}
// RemoveFromConnectingList deletes addr (all occurrences) from the
// in-progress connecting list.
func (ns *NetSubServer) RemoveFromConnectingList(addr string) {
	ns.ConnectingNodes.Lock()
	defer ns.ConnectingNodes.Unlock()
	// In-place filter: addrs shares ConnectingAddrs' backing array, so
	// no new allocation is made while removing the entry.
	addrs := ns.ConnectingAddrs[:0]
	for _, a := range ns.ConnectingAddrs {
		if a != addr {
			addrs = append(addrs, a)
		}
	}
	ns.ConnectingAddrs = addrs
}
// GetOutConnectingListLen returns the number of in-progress outbound
// connection attempts.
func (ns *NetSubServer) GetOutConnectingListLen() (count uint) {
	ns.ConnectingNodes.RLock()
	defer ns.ConnectingNodes.RUnlock()
	return uint(len(ns.ConnectingAddrs))
}
// IsAddrFromConnecting reports whether addr is in the in-progress
// connecting list.
func (ns *NetSubServer) IsAddrFromConnecting(addr string) bool {
	// Read-only scan: take the read lock rather than the exclusive
	// lock the previous version used, so concurrent readers do not
	// serialize unnecessarily.
	ns.ConnectingNodes.RLock()
	defer ns.ConnectingNodes.RUnlock()
	for _, a := range ns.ConnectingAddrs {
		if a == addr {
			return true
		}
	}
	return false
}
// GetPeerFromAddr returns the peer registered under addr in the sync
// map first, then the consensus map, or nil when unknown.
func (ns *NetSubServer) GetPeerFromAddr(addr string) *peer.Peer {
	ns.PeerAddrMap.RLock()
	defer ns.PeerAddrMap.RUnlock()
	if p, ok := ns.PeerSyncAddress[addr]; ok {
		return p
	}
	if p, ok := ns.PeerConsAddress[addr]; ok {
		return p
	}
	return nil
}
// IsNbrPeerAddr reports whether addr already belongs to a neighbor
// whose connection is in progress (HAND/HAND_SHAKE) or established.
// NOTE(review): the state check always inspects the *sync* state, even
// when the consensus address is being compared — confirm intended.
func (ns *NetSubServer) IsNbrPeerAddr(addr string, isConsensus bool) bool {
	var addrNew string
	ns.Np.RLock()
	defer ns.Np.RUnlock()
	for _, p := range ns.Np.List {
		if p.GetSyncState() == common.HAND || p.GetSyncState() == common.HAND_SHAKE ||
			p.GetSyncState() == common.ESTABLISH {
			if isConsensus {
				addrNew = p.ConsLink.GetAddr()
			} else {
				addrNew = p.SyncLink.GetAddr()
			}
			if strings.Compare(addrNew, addr) == 0 {
				return true
			}
		}
	}
	return false
}
// AddPeerSyncAddress indexes p under addr in the sync address map.
func (ns *NetSubServer) AddPeerSyncAddress(addr string, p *peer.Peer) {
	ns.PeerAddrMap.Lock()
	defer ns.PeerAddrMap.Unlock()
	ns.PeerSyncAddress[addr] = p
}

// AddPeerConsAddress indexes p under addr in the consensus address map.
func (ns *NetSubServer) AddPeerConsAddress(addr string, p *peer.Peer) {
	ns.PeerAddrMap.Lock()
	defer ns.PeerAddrMap.Unlock()
	ns.PeerConsAddress[addr] = p
}
// RemovePeerSyncAddress drops addr from the sync address map.
func (ns *NetSubServer) RemovePeerSyncAddress(addr string) {
	ns.PeerAddrMap.Lock()
	defer ns.PeerAddrMap.Unlock()
	// delete is a no-op on a missing key, so the previous existence
	// check was redundant.
	delete(ns.PeerSyncAddress, addr)
}

// RemovePeerConsAddress drops addr from the consensus address map.
func (ns *NetSubServer) RemovePeerConsAddress(addr string) {
	ns.PeerAddrMap.Lock()
	defer ns.PeerAddrMap.Unlock()
	delete(ns.PeerConsAddress, addr)
}
// GetPeerSyncAddressCount returns the number of peers indexed in the
// sync address map.
func (ns *NetSubServer) GetPeerSyncAddressCount() (count uint) {
	ns.PeerAddrMap.RLock()
	defer ns.PeerAddrMap.RUnlock()
	return uint(len(ns.PeerSyncAddress))
}
// AddInConnRecord records addr as an accepted inbound connection,
// ignoring duplicates.
func (ns *NetSubServer) AddInConnRecord(addr string) {
	ns.inConnRecord.Lock()
	defer ns.inConnRecord.Unlock()
	for _, existing := range ns.inConnRecord.InConnectingAddrs {
		if existing == addr {
			return
		}
	}
	ns.inConnRecord.InConnectingAddrs = append(ns.inConnRecord.InConnectingAddrs, addr)
}

// IsAddrInInConnRecord reports whether addr is a recorded inbound
// connection.
func (ns *NetSubServer) IsAddrInInConnRecord(addr string) bool {
	ns.inConnRecord.RLock()
	defer ns.inConnRecord.RUnlock()
	for _, existing := range ns.inConnRecord.InConnectingAddrs {
		if existing == addr {
			return true
		}
	}
	return false
}
// IsIPInInConnRecord reports whether any recorded inbound address
// resolves to the given IP.
func (ns *NetSubServer) IsIPInInConnRecord(ip string) bool {
	ns.inConnRecord.RLock()
	defer ns.inConnRecord.RUnlock()
	for _, addr := range ns.inConnRecord.InConnectingAddrs {
		if recIP, _ := common.ParseIPAddr(addr); recIP == ip {
			return true
		}
	}
	return false
}
// RemoveFromInConnRecord drops addr from the inbound-connection record.
func (ns *NetSubServer) RemoveFromInConnRecord(addr string) {
	ns.inConnRecord.Lock()
	defer ns.inConnRecord.Unlock()
	kept := []string{}
	for _, existing := range ns.inConnRecord.InConnectingAddrs {
		if existing != addr {
			kept = append(kept, existing)
		}
	}
	ns.inConnRecord.InConnectingAddrs = kept
}

// GetInConnRecordLen returns the number of recorded inbound connections.
func (ns *NetSubServer) GetInConnRecordLen() int {
	ns.inConnRecord.RLock()
	defer ns.inConnRecord.RUnlock()
	return len(ns.inConnRecord.InConnectingAddrs)
}
// GetIpCountInInConnRecord counts the recorded inbound connections
// whose address resolves to ip.
func (ns *NetSubServer) GetIpCountInInConnRecord(ip string) uint {
	ns.inConnRecord.RLock()
	defer ns.inConnRecord.RUnlock()
	count := uint(0)
	for _, addr := range ns.inConnRecord.InConnectingAddrs {
		if recIP, _ := common.ParseIPAddr(addr); recIP == ip {
			count++
		}
	}
	return count
}
// AddOutConnRecord records addr as an established outbound connection,
// ignoring duplicates.
func (ns *NetSubServer) AddOutConnRecord(addr string) {
	ns.outConnRecord.Lock()
	defer ns.outConnRecord.Unlock()
	for _, existing := range ns.outConnRecord.OutConnectingAddrs {
		if existing == addr {
			return
		}
	}
	ns.outConnRecord.OutConnectingAddrs = append(ns.outConnRecord.OutConnectingAddrs, addr)
}

// IsAddrInOutConnRecord reports whether addr is a recorded outbound
// connection.
func (ns *NetSubServer) IsAddrInOutConnRecord(addr string) bool {
	ns.outConnRecord.RLock()
	defer ns.outConnRecord.RUnlock()
	for _, existing := range ns.outConnRecord.OutConnectingAddrs {
		if existing == addr {
			return true
		}
	}
	return false
}
// RemoveFromOutConnRecord drops addr from the outbound-connection
// record.
func (ns *NetSubServer) RemoveFromOutConnRecord(addr string) {
	ns.outConnRecord.Lock()
	defer ns.outConnRecord.Unlock()
	kept := []string{}
	for _, existing := range ns.outConnRecord.OutConnectingAddrs {
		if existing != addr {
			kept = append(kept, existing)
		}
	}
	ns.outConnRecord.OutConnectingAddrs = kept
}

// GetOutConnRecordLen returns the number of recorded outbound
// connections.
func (ns *NetSubServer) GetOutConnRecordLen() int {
	ns.outConnRecord.RLock()
	defer ns.outConnRecord.RUnlock()
	return len(ns.outConnRecord.OutConnectingAddrs)
}
// AddrValid reports whether addr is allowed to connect. The
// reserved-peers filtering is currently disabled (commented out), so
// every address is accepted.
func (ns *NetSubServer) AddrValid(addr string) bool {
	//if config.DefConfig.P2PNode.ReservedPeersOnly && len(config.DefConfig.P2PNode.ReservedCfg.ReservedPeers) > 0 {
	//	for _, ip := range config.DefConfig.P2PNode.ReservedCfg.ReservedPeers {
	//		if strings.HasPrefix(addr, ip) {
	//			log.Info("found reserved peer :", addr)
	//			return true
	//		}
	//	}
	//	return false
	//}
	return true
}
// IsOwnAddress reports whether addr is this node's own recorded
// address (used by Connect to avoid dialing ourselves).
func (ns *NetSubServer) IsOwnAddress(addr string) bool {
	// Direct comparison replaces the previous if-true-else-false form.
	return addr == ns.OwnAddress
}
// SetOwnAddress records this node's own external address when it
// differs from the current value.
func (ns *NetSubServer) SetOwnAddress(addr string) {
	if ns.OwnAddress == addr {
		return
	}
	//log.Infof("set own address %s", addr)
	ns.OwnAddress = addr
}
// createListener opens a TCP listener on port, using TLS when the
// node-wide config enables it.
func createListener(port uint16) (net.Listener, error) {
	if config.Config.IsPeersTLS {
		listener, err := initTlsListen(port)
		if err != nil {
			// Keep the underlying cause instead of a bare sentinel
			// string (the previous version dropped it).
			return nil, fmt.Errorf("initTlslisten failed: %v", err)
		}
		return listener, nil
	}
	listener, err := initNonTlsListen(port)
	if err != nil {
		return nil, fmt.Errorf("initNonTlsListen failed: %v", err)
	}
	return listener, nil
}
// nonTLSDial opens a plain TCP connection to addr using the package
// dial timeout.
func nonTLSDial(addr string) (net.Conn, error) {
	return net.DialTimeout("tcp", addr, time.Second*common.DIAL_TIMEOUT)
}
// TLSDial is a placeholder for dialing a peer over TLS; it is not yet
// implemented. It returns an explicit error so callers fail cleanly —
// the previous stub returned (nil, nil), which let Connect proceed
// with a nil connection.
func TLSDial(nodeAddr string) (net.Conn, error) {
	//todo
	return nil, errors.New("TLSDial not implemented")
}
// initNonTlsListen listens for plain TCP connections on the given port
// on all interfaces.
func initNonTlsListen(port uint16) (net.Listener, error) {
	addr := ":" + strconv.Itoa(int(port))
	return net.Listen("tcp", addr)
}
// initTlsListen is a placeholder for a TLS listener; it is not yet
// implemented. It returns an explicit error so createListener fails
// cleanly — the previous stub returned (nil, nil), handing callers a
// nil listener with no error.
func initTlsListen(port uint16) (net.Listener, error) {
	//todo
	return nil, errors.New("initTlsListen not implemented")
}
|
package data
// IpInfo is a JSON payload carrying an IP address string.
type IpInfo struct {
	Ip string `json:"ip"`
}

// WeatherInfo is a JSON payload of one weather sample.
// NOTE(review): units are not evident from this file (temperature
// scale, pressure unit) — confirm against the producer.
type WeatherInfo struct {
	Temp         float32 `json:"temp"`
	Pressure     float32 `json:"pressure"`
	Day          bool    `json:"day"` // daylight flag
	Humidity     float32 `json:"humid"`
	Lux          float32 `json:"lux"` // light level
	LastPressure float32 `json:"lastPressure"` // previous pressure reading
	Date         string  `json:"date"`
}
|
package main_test
import (
. "thesaurus_similarity"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Ginkgo spec for Document: successive calls to GetNextRandomKeyword
// on a three-pair document must never return a pair already seen.
var _ = Describe("Document", func() {
	It("should return random elements that do no repeat", func() {
		// Three distinct keywords with empty descriptions.
		doc := []Pair{
			Pair{
				Word:            "firstWord",
				WordDescription: WordDescription{},
			},
			Pair{
				Word:            "secondWord",
				WordDescription: WordDescription{},
			},
			Pair{
				Word:            "thirdWord",
				WordDescription: WordDescription{},
			},
		}
		document := NewDocument(doc)
		seenElements := []Pair{}
		// Draw all three keywords; each new draw must be unseen.
		seenElements = append(seenElements, document.GetNextRandomKeyword())
		nextRandomElement := document.GetNextRandomKeyword()
		Ω(sliceContainsPair(seenElements, nextRandomElement)).Should(BeFalse())
		seenElements = append(seenElements, nextRandomElement)
		nextRandomElement = document.GetNextRandomKeyword()
		Ω(sliceContainsPair(seenElements, nextRandomElement)).Should(BeFalse())
	})
})
func sliceContainsPair(slice []Pair, pair Pair) bool {
for _, elem := range slice {
if elem == pair {
return true
}
}
return false
} |
package database
// TbEdRX2 is the gorm model for one row of the tb_ed_rx2 table.
// NOTE(review): the DCC/DFC/SF column semantics are not evident from
// this file — confirm against the database schema.
type TbEdRX2 struct {
	ID   uint // primary key
	EdID uint // owning end-device ID
	DCC  int  `gorm:"column:dCC"`
	DFC  int  `gorm:"column:dFC"`
	SF   int  `gorm:"column:SF"`
}

// TableName fixes the gorm table name for TbEdRX2.
func (TbEdRX2) TableName() string {
	return "tb_ed_rx2"
}
|
package carfinder
import (
"github.com/iLLeniumStudios/FiveMCarsMerger/pkg/flags"
sliceutils "github.com/iLLeniumStudios/FiveMCarsMerger/pkg/utils/slice"
log "github.com/sirupsen/logrus"
"io/ioutil"
"os"
"regexp"
"strings"
)
// CarFinder discovers car model names in the merger's output tree and
// cross-checks the data-file and stream-file views against each other.
type CarFinder interface {
	FindValidCars(dataFileCars []string, streamFileCars []string) []string
	FindStreamFileCars() ([]string, error)
	FindDataFileCars() ([]string, error)
}

// carFinder is the flag-configured implementation of CarFinder.
type carFinder struct {
	Flags flags.Flags // provides OutputPath, the root of the scanned tree
}

// New returns a CarFinder configured with the given flags.
func New(_flags flags.Flags) CarFinder {
	return &carFinder{Flags: _flags}
}
// FindValidCars returns the cars present in BOTH lists. Cars that
// appear only on one side are not returned, only logged as warnings.
func (cf *carFinder) FindValidCars(dataFileCars []string, streamFileCars []string) []string {
	var validCars []string
	var noStreamCars, noDataCars []string
	for _, dataFileCar := range dataFileCars {
		if sliceutils.ContainsElement(streamFileCars, dataFileCar) {
			// Collected only here: the previous version also appended
			// the match again in the second loop, so every valid car
			// was returned twice.
			validCars = append(validCars, dataFileCar)
		} else {
			noStreamCars = append(noStreamCars, dataFileCar)
		}
	}
	// Second pass only gathers stream cars with no data-file entry.
	for _, streamFileCar := range streamFileCars {
		if !sliceutils.ContainsElement(dataFileCars, streamFileCar) {
			noDataCars = append(noDataCars, streamFileCar)
		}
	}
	if len(noStreamCars) > 0 {
		log.Warn("Following cars have no stream files: ", noStreamCars)
	}
	if len(noDataCars) > 0 {
		log.Warn("Following cars have no data files: ", noDataCars)
	}
	return validCars
}
// FindStreamFileCars scans <OutputPath>/stream for .yft files and
// returns their lowercased base names. Names containing an underscore
// are skipped (presumably variant assets — confirm).
func (cf *carFinder) FindStreamFileCars() ([]string, error) {
	files, err := ioutil.ReadDir(cf.Flags.OutputPath + "/stream")
	if err != nil {
		return nil, err
	}
	var cars []string
	for _, f := range files {
		name := f.Name()
		if !strings.HasSuffix(name, ".yft") || strings.Contains(name, "_") {
			continue
		}
		cars = append(cars, strings.ToLower(strings.TrimSuffix(name, ".yft")))
	}
	return cars, nil
}
// FindDataFileCars scans <OutputPath>/data/vehicles/ for vehicles_*
// files, extracts every <modelName> value, and returns the distinct
// lowercased model names.
func (cf *carFinder) FindDataFileCars() ([]string, error) {
	var dataFileCars []string
	outputDataPath := cf.Flags.OutputPath + "/data/vehicles/"
	files, err := ioutil.ReadDir(outputDataPath)
	if err != nil {
		return nil, err
	}
	// Compile once instead of once per matching file as before.
	modelNameRe := regexp.MustCompile(`<modelName.*?>(.*)</modelName>`)
	for _, file := range files {
		if !strings.HasPrefix(file.Name(), "vehicles_") {
			continue
		}
		xmlFile, err := os.Open(outputDataPath + "/" + file.Name())
		if err != nil {
			return nil, err
		}
		byteValue, err := ioutil.ReadAll(xmlFile)
		if err != nil {
			// Previously the file descriptor leaked on this path.
			xmlFile.Close()
			return nil, err
		}
		if err := xmlFile.Close(); err != nil {
			return nil, err
		}
		for _, v := range modelNameRe.FindAllStringSubmatch(string(byteValue), -1) {
			name := strings.ToLower(v[1])
			if !sliceutils.ContainsElement(dataFileCars, name) {
				dataFileCars = append(dataFileCars, name)
			}
		}
	}
	return dataFileCars, nil
}
|
package main
import (
"github.com/micro/go-micro"
"log"
"moriaty.com/cia/cia-publisher/service"
)
/**
 * @author Moriaty (Computer Science, class of 2016)
 * @version 1.0
 * @copyright Moriaty, all rights reserved © 2020
 * @date 2020/4/19 18:40
 * @Description TODO
 * CIA-Publisher
 */
/**
Workflow:
1. Pull the APKs
   1. Fetch the pull configuration
   2. Pull the APKs according to that configuration
2. Assemble the zip
   1. Combine the APKs into a zip according to the configuration
   2. Trigger task generation
*/
// main starts the go-micro service named "publisher", initializes the
// service layer with it, kicks off the APK pull-and-handle job, and
// then blocks forever.
func main() {
	server := micro.NewService(
		micro.Name("publisher"),
	)
	server.Init()
	service.Init(server)
	service.PullHandleApk()
	// Block forever; the work above keeps running in the background.
	select {}
}
// init configures the standard logger with a CIA-Publisher prefix and
// short source-file names in log output.
func init() {
	log.SetPrefix("【CIA-Publisher】")
	log.SetFlags(log.Lshortfile)
}
|
package validator
import (
"fmt"
"strconv"
"strings"
)
// ValidateCpf reports whether cpf is a valid Brazilian CPF: exactly 11
// decimal digits (no punctuation) whose two trailing check digits
// match the official weighted-sum algorithm. Sequences of 11 identical
// digits are rejected even though their checksum is formally valid.
// Adapted from https://github.com/miguelpragier/handy/blob/master/handybra.go
func ValidateCpf(cpf string) bool {
	// Se o comprimento da string estiver diferente de 11, falhar
	if len(cpf) != 11 {
		return false
	}
	// Every character must be a decimal digit. The previous version
	// ignored the strconv.Atoi error and silently treated non-digits
	// as 0, which could accept malformed input.
	for i := 0; i < len(cpf); i++ {
		if cpf[i] < '0' || cpf[i] > '9' {
			return false
		}
	}
	// Testa seqüências de 11 dígitos iguais, cujo cálculo é válido mas são inaceitáveis como documento.
	for i := 0; i <= 9; i++ {
		if cpf == strings.Repeat(fmt.Sprintf("%d", i), 11) {
			return false
		}
	}
	// Safe now that all characters are known to be digits.
	intval := func(b byte) int {
		return int(b - '0')
	}
	// First check digit: weights 10..2 over digits 0..8.
	soma1 := intval(cpf[0])*10 + intval(cpf[1])*9 + intval(cpf[2])*8 + intval(cpf[3])*7 + intval(cpf[4])*6 + intval(cpf[5])*5 + intval(cpf[6])*4 + intval(cpf[7])*3 + intval(cpf[8])*2
	resto1 := (soma1 * 10) % 11
	if resto1 == 10 {
		resto1 = 0
	}
	// Second check digit: weights 11..2 over digits 0..9.
	soma2 := intval(cpf[0])*11 + intval(cpf[1])*10 + intval(cpf[2])*9 + intval(cpf[3])*8 + intval(cpf[4])*7 + intval(cpf[5])*6 + intval(cpf[6])*5 + intval(cpf[7])*4 + intval(cpf[8])*3 + intval(cpf[9])*2
	resto2 := (soma2 * 10) % 11
	if resto2 == 10 {
		resto2 = 0
	}
	return resto1 == intval(cpf[9]) && resto2 == intval(cpf[10])
}
// ValidateCNPJ reports whether cnpj is a valid Brazilian CNPJ: exactly
// 14 decimal digits (no punctuation) whose two trailing check digits
// match the official weighted-sum algorithm.
// Adapted from https://github.com/miguelpragier/handy/blob/master/handybra.go
func ValidateCNPJ(cnpj string) bool {
	if len(cnpj) != 14 {
		return false
	}
	// Every character must be a decimal digit. The previous version
	// ignored the strconv.Atoi errors, silently treating non-digits as
	// 0 and potentially accepting malformed input.
	for i := 0; i < len(cnpj); i++ {
		if cnpj[i] < '0' || cnpj[i] > '9' {
			return false
		}
	}
	// First check digit: weights 5,4,3,2,9,8,7,6,5,4,3,2 over digits 0..11.
	algs := []int{5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2}
	algProdCpfDig1 := make([]int, 12)
	for key, val := range algs {
		intParsed, _ := strconv.Atoi(string(cnpj[key]))
		sumTmp := val * intParsed
		algProdCpfDig1[key] = sumTmp
	}
	sum := 0
	for _, val := range algProdCpfDig1 {
		sum += val
	}
	digit1 := sum % 11
	if digit1 < 2 {
		digit1 = 0
	} else {
		digit1 = 11 - digit1
	}
	char12, _ := strconv.Atoi(string(cnpj[12]))
	if char12 != digit1 {
		return false
	}
	// Second check digit: same weights prefixed by 6, over digits 0..12.
	algs = append([]int{6}, algs...)
	var algProdCpfDig2 = make([]int, 13)
	for key, val := range algs {
		intParsed, _ := strconv.Atoi(string(cnpj[key]))
		sumTmp := val * intParsed
		algProdCpfDig2[key] = sumTmp
	}
	sum = 0
	for _, val := range algProdCpfDig2 {
		sum += val
	}
	digit2 := sum % 11
	if digit2 < 2 {
		digit2 = 0
	} else {
		digit2 = 11 - digit2
	}
	char13, _ := strconv.Atoi(string(cnpj[13]))
	return char13 == digit2
}
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testutil
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// Package-level logger shared by the test utilities in this file.
var log = logrus.New()

// PathToOSFile resolves relativePath to an absolute path and opens it.
// The caller is responsible for closing the returned file.
func PathToOSFile(relativePath string) (*os.File, error) {
	path, err := filepath.Abs(relativePath)
	if err != nil {
		return nil, errors.Wrap(err, fmt.Sprintf("failed generate absolute file path of %s", relativePath))
	}
	manifest, err := os.Open(path)
	if err != nil {
		return nil, errors.Wrap(err, fmt.Sprintf("failed to open file %s", path))
	}
	return manifest, nil
}
// KubectlApply shells out to `kubectl apply -f` with the manifest at
// manifestRelativePath. It returns an error when kubectl is not on the
// PATH, the path cannot be resolved, or the command fails.
func KubectlApply(manifestRelativePath string) error {
	kubectlBinaryPath, err := exec.LookPath("kubectl")
	if err != nil {
		// Return instead of panicking: callers decide how fatal a
		// missing kubectl binary is.
		return errors.Wrap(err, "kubectl not found in PATH")
	}
	path, err := filepath.Abs(manifestRelativePath)
	if err != nil {
		return errors.Wrap(err, fmt.Sprintf("failed generate absolute file path of %s", manifestRelativePath))
	}
	applyArgs := []string{"apply", "-f", path}
	cmd := exec.Command(kubectlBinaryPath, applyArgs...)
	log.Printf("Executing: %v %v", kubectlBinaryPath, applyArgs)
	// Run = Start + Wait; the previous version wrapped each stage with
	// a no-argument Sprintf and a dangling trailing colon.
	if err := cmd.Run(); err != nil {
		return errors.Wrap(err, "kubectl apply failed")
	}
	return nil
}
// isNodeReady reports whether n carries a "Ready" condition whose
// status is "True".
func isNodeReady(n corev1.Node) bool {
	for _, cond := range n.Status.Conditions {
		if cond.Type == "Ready" && cond.Status == "True" {
			return true
		}
	}
	return false
}
// WaitForNodesDelete polls until no nodes labeled with the node-role
// for role remain, returning true on success and false on timeout.
func WaitForNodesDelete(k kubernetes.Interface, role string) bool {
	// poll every 20 seconds
	var pollingInterval = time.Second * 20
	// timeout after 24 occurrences = 480 seconds = 8 minutes
	var timeoutCounter = 24
	var pollingCounter = 0
	var labelSelector = fmt.Sprintf("node-role.kubernetes.io/%v=", role)
	for {
		log.Printf("waiting for nodes, attempt %v/%v", pollingCounter, timeoutCounter)
		// NOTE(review): the List error is ignored, so an API failure
		// reads as zero nodes and returns true early — confirm.
		nodeList, _ := k.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: labelSelector})
		nodeCount := len(nodeList.Items)
		if nodeCount == 0 {
			return true
		}
		time.Sleep(pollingInterval)
		log.Println("nodes did not terminate yet, retrying")
		pollingCounter++
		if pollingCounter == timeoutCounter {
			break
		}
	}
	return false
}
// IsStackExist reports whether a CloudFormation stack named stackName
// is visible via DescribeStacks.
// NOTE(review): the DescribeStacks error is deliberately ignored, so a
// failed lookup reads as "stack does not exist" — confirm that is the
// intended semantics for callers.
func IsStackExist(w cloudformationiface.CloudFormationAPI, stackName string) bool {
	out, _ := w.DescribeStacks(&cloudformation.DescribeStacksInput{StackName: aws.String(stackName)})
	if out == nil {
		// Guard against a nil response when the call errors; the
		// previous version would dereference it.
		return false
	}
	return len(out.Stacks) > 0
}
// GetStackState returns the status string of the CloudFormation stack
// named stackName, or "" when the lookup fails or no stack matches.
func GetStackState(w cloudformationiface.CloudFormationAPI, stackName string) string {
	out, err := w.DescribeStacks(&cloudformation.DescribeStacksInput{StackName: aws.String(stackName)})
	if err != nil {
		log.Println(err)
		return ""
	}
	for _, stack := range out.Stacks {
		scanStackName := aws.StringValue(stack.StackName)
		stackStatus := aws.StringValue(stack.StackStatus)
		log.Printf("Stack %v state is: %v", scanStackName, stackStatus)
		// DescribeStacks was already filtered by name; match explicitly anyway.
		if scanStackName == stackName {
			return stackStatus
		}
	}
	return ""
}
// WaitForNodesCreate polls until exactly expectedReadyCount nodes carrying
// the role label exist and all of them report Ready. Returns false after the
// timeout (24 attempts x 40s = 16 minutes).
func WaitForNodesCreate(k kubernetes.Interface, role string, expectedReadyCount int) bool {
	// poll every 40 seconds
	var pollingInterval = time.Second * 40
	// timeout after 24 occurrences = 960 seconds = 16 minutes
	var timeoutCounter = 24
	var pollingCounter = 0
	var labelSelector = fmt.Sprintf("node-role.kubernetes.io/%v=", role)
	for {
		log.Printf("waiting for nodes, attempt %v/%v", pollingCounter, timeoutCounter)
		nodeList, err := k.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: labelSelector})
		if err != nil {
			// Bug fix: the error was previously ignored; a failed List would
			// dereference a nil list below. Log and retry instead.
			log.Printf("error listing nodes: %v", err)
		} else if len(nodeList.Items) == expectedReadyCount {
			seenReady := 0
			for _, node := range nodeList.Items {
				log.Printf("found %v", node.ObjectMeta.Name)
				if !isNodeReady(node) {
					log.Printf("%v is not ready", node.ObjectMeta.Name)
					break
				}
				seenReady++
			}
			if seenReady == expectedReadyCount {
				return true
			}
		}
		log.Println("nodes did not join yet, retrying")
		time.Sleep(pollingInterval)
		pollingCounter++
		if pollingCounter == timeoutCounter {
			break
		}
	}
	return false
}
// WaitForNodesRotate records the names of the nodes currently carrying the
// role label, then polls until a same-sized set of entirely different node
// names has replaced them (a full rotation). Returns false after the timeout
// (48 attempts x 30s = 24 minutes) or when the initial listing fails.
func WaitForNodesRotate(k kubernetes.Interface, role string) bool {
	var initialNodeNames []string
	var pollingInterval = time.Second * 30
	var timeoutCounter = 48
	var pollingCounter = 0
	var labelSelector = fmt.Sprintf("node-role.kubernetes.io/%v=", role)
	initialNodes, err := k.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: labelSelector})
	if err != nil {
		// Bug fix: the error was previously ignored; a failed List would
		// dereference a nil list. Without a baseline we cannot detect a
		// rotation, so fail fast.
		log.Printf("error listing nodes: %v", err)
		return false
	}
	for _, node := range initialNodes.Items {
		initialNodeNames = append(initialNodeNames, node.Name)
	}
	log.Printf("Found nodes %v, waiting for rotation", initialNodeNames)
	for {
		var scannedNodeNames []string
		var nodeMatch int
		log.Printf("waiting for rotation, attempt %v/%v", pollingCounter, timeoutCounter)
		nodeList, err := k.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: labelSelector})
		if err != nil {
			log.Printf("error listing nodes: %v", err)
		} else {
			for _, node := range nodeList.Items {
				scannedNodeNames = append(scannedNodeNames, node.Name)
			}
			log.Printf("found nodes %v, comparing to %v", scannedNodeNames, initialNodeNames)
			if initialNodeNames != nil && scannedNodeNames != nil && len(initialNodeNames) == len(scannedNodeNames) {
				for _, name := range initialNodeNames {
					if ContainsString(scannedNodeNames, name) {
						nodeMatch++
					}
				}
				// Rotation is complete only when no original node survived.
				if nodeMatch == 0 {
					return true
				}
			}
		}
		time.Sleep(pollingInterval)
		log.Println("nodes did not rotate yet, retrying")
		pollingCounter++
		if pollingCounter == timeoutCounter {
			break
		}
	}
	return false
}
// ContainsString reports whether s occurs in slice.
func ContainsString(slice []string, s string) bool {
	for i := range slice {
		if slice[i] == s {
			return true
		}
	}
	return false
}
|
package server
import (
"bufio"
"os"
"server/libs/log"
"server/libs/rpc"
"text/template"
)
var (
	// remotes and handlers are the package-level registries wired into the
	// RPC server by createRpc; each maps a service name to its
	// implementation object.
	remotes = make(map[string]interface{})
	handlers = make(map[string]interface{})
)
// GetRemote returns the remote registered under name, or nil when no such
// remote exists.
func GetRemote(name string) interface{} {
	// A missing key yields the interface zero value, which is nil.
	return remotes[name]
}
// GetHandler returns the handler registered under name, or nil when no such
// handler exists.
func GetHandler(name string) interface{} {
	// A missing key yields the interface zero value, which is nil.
	return handlers[name]
}
// GetAllHandler returns the full handler registry (the live map, not a copy).
func GetAllHandler() map[string]interface{} {
	return handlers
}

// RegisterRemote registers a remote service implementation under name.
// A nil remote or a duplicate name is a fatal configuration error.
func RegisterRemote(name string, remote interface{}) {
	if remote == nil {
		log.LogFatalf("rpc: Register remote is nil")
	}
	if _, dup := remotes[name]; dup {
		log.LogFatalf("rpc: Register called twice for remote " + name)
	}
	remotes[name] = remote
}

// RegisterHandler registers a message handler under name.
// A nil handler or a duplicate name is a fatal configuration error.
func RegisterHandler(name string, handler interface{}) {
	if handler == nil {
		log.LogFatalf("rpc: Register handler is nil")
	}
	if _, dup := handlers[name]; dup {
		log.LogFatalf("rpc: Register called twice for handler " + name)
	}
	handlers[name] = handler
}
var (
	// tpl renders the exported RPC method table as XML, one <call> entry per
	// client-callable method; the rendered file is consumed by external
	// tooling (see createRpc).
	tpl = `<rpc>{{range .Infos}}
<call>
<service>{{.RpcService}}</service>
<method>{{.RpcMethod}}</method>
<id>{{.Id}}</id>
</call>{{end}}
</rpc>`
)

// RpcInfo describes one exposed RPC method and its numeric wire id.
type RpcInfo struct {
	RpcService string
	RpcMethod string
	Id int16
}

// RpcCollection aggregates RpcInfo entries for template rendering.
type RpcCollection struct {
	Infos []RpcInfo
}
// createRpc wires the registered remotes and handlers into a new RPC server
// and, when any client-callable (C2S) methods exist, writes an XML table of
// service/method/id triples to interface/<core.Name>_rpc.xml.
func createRpc(ch chan *rpc.RpcCall) *rpc.Server {
	// Renamed the local from "rpc" to "srv": the original shadowed the rpc
	// package, which is confusing and blocks further package access.
	srv, err := rpc.CreateRpcService(remotes, handlers, ch)
	if err != nil {
		log.LogFatalf(err)
	}
	id := 0
	var collection RpcCollection
	for service := range handlers {
		if service == "C2SHelper" {
			continue
		}
		// Each service gets a block of 100 ids; methods are numbered within it.
		id++
		sid := id * 100
		info := srv.GetRpcInfo("C2S" + service)
		for _, m := range info {
			sid++
			collection.Infos = append(collection.Infos, RpcInfo{service, m, int16(sid)})
		}
	}
	if len(collection.Infos) > 0 {
		t, err := template.New("maker").Parse(tpl)
		if err != nil {
			// Bug fix: the original checked this err twice (dead duplicate)
			// and then executed the nil template anyway, which would panic.
			log.LogError(err.Error())
			return srv
		}
		// save file
		file, err := os.Create("interface/" + core.Name + "_rpc.xml")
		if err != nil {
			log.LogError("writer", err)
			return srv
		}
		defer file.Close()
		writer := bufio.NewWriter(file)
		if err := t.Execute(writer, collection); err != nil {
			log.LogError("writer", err)
		}
		writer.Flush()
	}
	return srv
}
|
package hutoma
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
)
// HUTOMA_BASE_URL is the root endpoint of the Hutoma REST API.
const HUTOMA_BASE_URL = "https://api.hutoma.ai"

// HutomaClient holds the credentials and conversation state for talking to
// one Hutoma bot. ChatID is filled in after the first successful Chat call.
type HutomaClient struct {
	BotID string
	DevKey string
	ClientKey string
	ChatID string
}
// Chat sends query to the Hutoma chat API for this client's bot and returns
// the decoded response. On the first successful call the server-assigned
// chat id is stored on the client so the conversation continues across calls.
func (c *HutomaClient) Chat(query string) (hutomaChatResponse, error) {
	query = url.QueryEscape(query) // prepare query
	req, err := http.NewRequest("GET", fmt.Sprintf("%s/v1/ai/%s/chat?q=%s&chatId=%s", HUTOMA_BASE_URL, c.BotID, query, c.ChatID), nil)
	if err != nil {
		return hutomaChatResponse{}, errors.New("failed creating get chat request: " + err.Error())
	}
	// NOTE(review): no timeout is set on this client and the HTTP status
	// code is never checked before decoding — confirm both are intentional.
	client := http.Client{}
	// The developer key authenticates the request.
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.DevKey))
	res, err := client.Do(req)
	if err != nil {
		return hutomaChatResponse{}, errors.New("failed performing get chat request: " + err.Error())
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return hutomaChatResponse{}, errors.New("failed reading body from response: " + err.Error())
	}
	var chatRes hutomaChatResponse
	err = json.Unmarshal(body, &chatRes)
	if err != nil {
		// Dump the raw body to aid debugging of unexpected payloads.
		fmt.Println(string(body))
		return hutomaChatResponse{}, errors.New("failed unmarshaling json response: " + err.Error())
	}
	// Remember the server-assigned chat id for subsequent calls.
	if c.ChatID == "" {
		c.ChatID = chatRes.ChatID
	}
	return chatRes, nil
}
|
/* Use the “defer” keyword to show that a deferred func runs after the func containing it exits. */
package main
import "fmt"
// createDbConnection simulates opening a database connection.
func createDbConnection() {
	// Typo fix in the printed message: "Openning" -> "Opening".
	fmt.Println("Opening db connection ...")
}
// closeDbConnection simulates closing a database connection.
func closeDbConnection() {
	fmt.Println("DB Connection closed!!!")
}

// saveData simulates writing data to the database.
func saveData() {
	fmt.Println("Writing some data to DB ...")
}

// main demonstrates defer: closeDbConnection is scheduled first but runs
// last, after createDbConnection and saveData have completed.
func main() {
	defer closeDbConnection()
	createDbConnection()
	saveData()
}
|
package elements
import (
"fmt"
"strings"
"github.com/Nv7-Github/Nv7Haven/eod/base"
"github.com/Nv7-Github/Nv7Haven/eod/types"
"github.com/Nv7-Github/Nv7Haven/eod/util"
)
// invalidNames are substrings that may never appear in a suggested element
// name (the combiner '+', Discord mention markers, newlines, and the Turkish
// dotted capital I, which breaks case folding).
var invalidNames = []string{
	"+",
	"@everyone",
	"@here",
	"<@",
	"İ",
	"\n",
}

// charReplace maps typographic quote characters to their ASCII equivalents.
var charReplace = map[rune]rune{
	'’': '\'',
	'‘': '\'',
	'`': '\'',
	'”': '"',
	'“': '"',
}

// maxSuggestionLength caps the length of an element name (in bytes).
const maxSuggestionLength = 240

// remove lists zero-width / formatting code points stripped from suggestions.
var remove = []string{"\uFE0E", "\uFE0F", "\u200B", "\u200E", "\u200F", "\u2060", "\u2061", "\u2062", "\u2063", "\u2064", "\u2065", "\u2066", "\u2067", "\u2068", "\u2069", "\u206A", "\u206B", "\u206C", "\u206D", "\u206E", "\u206F", "\u3000", "\uFE00", "\uFE01", "\uFE02", "\uFE03", "\uFE04", "\uFE05", "\uFE06", "\uFE07", "\uFE08", "\uFE09", "\uFE0A", "\uFE0B", "\uFE0C", "\uFE0D"}
// SuggestCmd handles the "suggest" command: it validates and normalizes the
// suggested element name, creates a combination poll in the guild's voting
// channel, and confirms the suggestion in the play channel.
func (b *Elements) SuggestCmd(suggestion string, autocapitalize bool, m types.Msg, rsp types.Rsp) {
	rsp.Acknowledge()
	if base.IsFoolsMode && !base.IsFool(suggestion) {
		rsp.ErrorMessage(base.MakeFoolResp(suggestion))
		return
	}
	// Only title-case fully lowercase input so deliberate casing survives.
	if autocapitalize && strings.ToLower(suggestion) == suggestion {
		suggestion = util.ToTitle(suggestion)
	}
	if strings.HasPrefix(suggestion, "?") {
		rsp.ErrorMessage("Element names can't start with '?'!")
		return
	}
	// NOTE(review): this measures bytes, not runes, so multi-byte names are
	// limited to fewer visible characters — confirm that is acceptable.
	if len(suggestion) >= maxSuggestionLength {
		rsp.ErrorMessage(fmt.Sprintf("Element names must be under %d characters!", maxSuggestionLength))
		return
	}
	for _, name := range invalidNames {
		if strings.Contains(suggestion, name) {
			rsp.ErrorMessage(fmt.Sprintf("Can't have letters '%s' in an element name!", name))
			return
		}
	}
	// Clean up suggestions with weird quotes
	cleaned := []rune(suggestion)
	for i, char := range cleaned {
		newVal, exists := charReplace[char]
		if exists {
			cleaned[i] = newVal
		}
	}
	suggestion = string(cleaned)
	// Strip zero-width and formatting code points.
	for _, val := range remove {
		suggestion = strings.ReplaceAll(suggestion, val, "")
	}
	suggestion = strings.TrimSpace(suggestion)
	// A leading '#' is dropped (it would interfere with message formatting).
	if len(suggestion) > 1 && suggestion[0] == '#' {
		suggestion = suggestion[1:]
	}
	if len(suggestion) == 0 {
		rsp.Resp("You need to suggest something!")
		return
	}
	// Guild data is shared; guard the map read with the read lock.
	b.lock.RLock()
	dat, exists := b.dat[m.GuildID]
	b.lock.RUnlock()
	if !exists {
		rsp.ErrorMessage("Guild not set up!")
		return
	}
	_, exists = dat.PlayChannels[m.ChannelID]
	if !exists {
		rsp.ErrorMessage("You can only suggest in play channels!")
		return
	}
	// The user's currently-combined elements form the left-hand side.
	comb, res := dat.GetComb(m.Author.ID)
	if !res.Exists {
		rsp.ErrorMessage(res.Message)
		return
	}
	data := util.Elems2Txt(comb.Elems)
	_, res = dat.GetCombo(data)
	if res.Exists {
		rsp.ErrorMessage("That combo already has a result!")
		return
	}
	// Reuse the canonical casing when the element already exists.
	el, res := dat.GetElement(suggestion)
	if res.Exists {
		suggestion = el.Name
	}
	_, res = dat.GetElement(suggestion)
	err := b.polls.CreatePoll(types.Poll{
		Channel: dat.VotingChannel,
		Guild: m.GuildID,
		Kind: types.PollCombo,
		Value3: suggestion,
		Value4: m.Author.ID,
		Data: map[string]interface{}{"elems": comb.Elems, "exists": res.Exists},
		Upvotes: 0,
		Downvotes: 0,
	})
	if rsp.Error(err) {
		return
	}
	// Build the confirmation text: "A + B + ... = Result".
	txt := "Suggested **"
	for _, val := range comb.Elems {
		el, _ := dat.GetElement(val)
		txt += el.Name + " + "
	}
	// Trim the trailing " + ".
	txt = txt[:len(txt)-3]
	// A single element combined with itself is shown as "A + A".
	if len(comb.Elems) == 1 {
		el, _ := dat.GetElement(comb.Elems[0])
		txt += " + " + el.Name
	}
	txt += " = " + suggestion + "** "
	// New elements get a sparkle; existing ones a star.
	if !res.Exists {
		txt += "✨"
	} else {
		txt += "🌟"
	}
	id := rsp.Message(txt)
	dat.SetMsgElem(id, suggestion)
	b.lock.Lock()
	b.dat[m.GuildID] = dat
	b.lock.Unlock()
}
|
package main
import (
"flag"
"fmt"
"github.com/jccroft1/KeychronChecker/keychron"
"github.com/jccroft1/KeychronChecker/telegram"
)
var (
	// Command-line configuration for the Telegram notifier.
	token = flag.String("token", "", "the bot token")
	channel = flag.String("channel", "", "the target channel for the stock alert")
)

// main configures the Telegram client from flags, verifies the bot token via
// GetMe, and then starts the Keychron stock watcher with Telegram alerts.
func main() {
	flag.Parse()
	telegram.Token = *token
	telegram.Channel = *channel
	// GetMe validates the token before starting the watcher.
	err := telegram.GetMe()
	if err != nil {
		fmt.Println(err)
		return
	}
	keychron.Alert = telegram.SendMessage
	keychron.Start()
}
|
package main
import (
"fmt"
)
// plusTwo returns a function that adds two to its argument.
func plusTwo() func(int) int {
	return func(n int) int {
		return n + 2
	}
}
// plusX returns a closure that adds the captured x to its argument.
func plusX(x int) func(int) int {
	return func(n int) int {
		return n + x
	}
}
// main demonstrates closures: a fixed-increment function and one with a
// captured parameter. Prints 4 then 9.
func main() {
	p := plusTwo()
	fmt.Printf("%v\n", p(2))
	q := plusX(2)
	fmt.Printf("%v\n", q(7))
}
|
/*
Copyright 2021 CodeNotary, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"context"
"os"
"testing"
"github.com/codenotary/immudb/embedded/sql"
"github.com/codenotary/immudb/pkg/api/schema"
"github.com/codenotary/immudb/pkg/server"
"github.com/codenotary/immudb/pkg/server/servertest"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
// TestImmuClient_SQL exercises SQLExec/SQLQuery against an in-process
// bufconn immudb server and checks row verification: success on untampered
// rows, ErrColumnDoesNotExist for aliased projections, and ErrCorruptedData
// after mutating a returned value.
func TestImmuClient_SQL(t *testing.T) {
	options := server.DefaultOptions().WithAuth(true)
	bs := servertest.NewBufconnServer(options)
	defer os.RemoveAll(options.Dir)
	defer os.Remove(".state-")
	bs.Start()
	defer bs.Stop()
	client, err := NewImmuClient(DefaultOptions().WithDialOptions(&[]grpc.DialOption{grpc.WithContextDialer(bs.Dialer), grpc.WithInsecure()}))
	require.NoError(t, err)
	lr, err := client.Login(context.TODO(), []byte(`immudb`), []byte(`immudb`))
	require.NoError(t, err)
	// All subsequent calls carry the login token in the outgoing metadata.
	md := metadata.Pairs("authorization", lr.Token)
	ctx := metadata.NewOutgoingContext(context.Background(), md)
	_, err = client.SQLExec(ctx, `CREATE TABLE table1(
		id INTEGER,
		title VARCHAR,
		active BOOLEAN,
		payload BLOB,
		PRIMARY KEY id
		);`, nil)
	require.NoError(t, err)
	// Insert one parameterized row plus two literal rows (with NULLs/hex blob).
	params := make(map[string]interface{})
	params["id"] = 1
	params["title"] = "title1"
	params["active"] = true
	params["payload"] = []byte{1, 2, 3}
	_, err = client.SQLExec(ctx, "INSERT INTO table1(id, title, active, payload) VALUES (@id, @title, @active, @payload), (2, 'title2', false, NULL), (3, NULL, NULL, x'AED0393F')", params)
	require.NoError(t, err)
	res, err := client.SQLQuery(ctx, "SELECT t.id as id, title FROM (table1 as t) WHERE id <= 3 AND active = @active", params, true)
	require.NoError(t, err)
	require.NotNil(t, res)
	_, err = client.SQLQuery(ctx, "SELECT id as uuid FROM table1", nil, true)
	require.NoError(t, err)
	// Rows selected under an alias cannot be verified against the real column.
	for _, row := range res.Rows {
		err := client.VerifyRow(ctx, row, "table1", row.Values[0])
		require.Equal(t, sql.ErrColumnDoesNotExist, err)
	}
	res, err = client.SQLQuery(ctx, "SELECT id, title, active, payload FROM table1 WHERE id <= 3 AND active = @active", params, true)
	require.NoError(t, err)
	require.NotNil(t, res)
	// Untampered rows verify; mutating a value must surface ErrCorruptedData.
	for _, row := range res.Rows {
		err := client.VerifyRow(ctx, row, "table1", row.Values[0])
		require.NoError(t, err)
		row.Values[1].Value = &schema.SQLValue_S{S: "tampered title"}
		err = client.VerifyRow(ctx, row, "table1", row.Values[0])
		require.Equal(t, sql.ErrCorruptedData, err)
	}
	res, err = client.SQLQuery(ctx, "SELECT id, active FROM table1", nil, true)
	require.NoError(t, err)
	require.NotNil(t, res)
	for _, row := range res.Rows {
		err := client.VerifyRow(ctx, row, "table1", row.Values[0])
		require.NoError(t, err)
	}
	res, err = client.SQLQuery(ctx, "SELECT active FROM table1 WHERE id = 1", nil, true)
	require.NoError(t, err)
	require.NotNil(t, res)
	// Verification also works when keyed by an explicit primary-key value.
	for _, row := range res.Rows {
		err := client.VerifyRow(ctx, row, "table1", &schema.SQLValue{Value: &schema.SQLValue_N{N: 1}})
		require.NoError(t, err)
	}
}
|
package binance_websocket
import (
. "exchange_websocket/common"
"strings"
)
// Binance symbols
type BinanceSymbol struct {
BinanceUsdtSymbol []string
BinanceBtcSymbol []string
BinanceEthSymbol []string
BinanceSymbols []string
}
// NewBinanceSymbol allocates a BinanceSymbol and populates its symbol lists.
func NewBinanceSymbol() *BinanceSymbol {
	return new(BinanceSymbol).binanceSymbolInit()
}
// binanceSymbolInit fills the per-quote symbol lists (lowercased, with
// underscores removed) and the combined BinanceSymbols list, then returns o.
func (o *BinanceSymbol) binanceSymbolInit() *BinanceSymbol {
	// [symbol.replace('_', '').lower() for symbol in self.USDT]
	normalize := func(symbol string) string {
		return strings.ToLower(strings.Replace(symbol, "_", "", -1))
	}
	for _, symbol := range append(CommonUsdt, BinanceUsdt...) {
		o.BinanceUsdtSymbol = append(o.BinanceUsdtSymbol, normalize(symbol))
	}
	for _, symbol := range append(CommonBtc, BinanceBtc...) {
		o.BinanceBtcSymbol = append(o.BinanceBtcSymbol, normalize(symbol))
	}
	for _, symbol := range append(CommonEth, BinanceEth...) {
		o.BinanceEthSymbol = append(o.BinanceEthSymbol, normalize(symbol))
	}
	// Bug fix: the original appended directly onto o.BinanceUsdtSymbol (and
	// nested an append onto o.BinanceBtcSymbol), so the combined list could
	// share — and a spare-capacity append could clobber — the per-quote
	// lists' backing arrays. Build the combined list in a fresh slice.
	total := len(o.BinanceUsdtSymbol) + len(o.BinanceBtcSymbol) + len(o.BinanceEthSymbol)
	o.BinanceSymbols = make([]string, 0, total)
	o.BinanceSymbols = append(o.BinanceSymbols, o.BinanceUsdtSymbol...)
	o.BinanceSymbols = append(o.BinanceSymbols, o.BinanceBtcSymbol...)
	o.BinanceSymbols = append(o.BinanceSymbols, o.BinanceEthSymbol...)
	return o
}
// BinanceSymbolTransfer converts a Binance-style lowercase symbol (e.g.
// "btcusdt") back to the uppercase underscore form (e.g. "BTC_USDT").
// Returns "" when the symbol is not found in any of the known lists.
func (o *BinanceSymbol) BinanceSymbolTransfer(symbol string) string {
	isExist1, _ := Contain(symbol, o.BinanceUsdtSymbol)
	if isExist1 {
		// NOTE(review): Replace rewrites every occurrence of the quote
		// substring; verify no base asset name contains "usdt"/"btc"/"eth".
		return strings.ToUpper(strings.Replace(symbol, "usdt", "_usdt", -1))
	}
	isExist2, _ := Contain(symbol, o.BinanceBtcSymbol)
	if isExist2 {
		return strings.ToUpper(strings.Replace(symbol, "btc", "_btc", -1))
	}
	isExist3, _ := Contain(symbol, o.BinanceEthSymbol)
	if isExist3 {
		return strings.ToUpper(strings.Replace(symbol, "eth", "_eth", -1))
	}
	return ""
}
|
package collectors
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
// MEMSTATFILE is the node-wide meminfo source.
const MEMSTATFILE = "/proc/meminfo"

// NUMA_MEMSTAT_BASE is the sysfs root for per-NUMA-node meminfo files.
const NUMA_MEMSTAT_BASE = "/sys/devices/system/node"

// MemstatCollectorConfig controls which metrics are collected and whether
// node-wide and/or per-NUMA-domain statistics are read.
type MemstatCollectorConfig struct {
	ExcludeMetrics []string `json:"exclude_metrics"`
	NodeStats bool `json:"node_stats,omitempty"`
	NumaStats bool `json:"numa_stats,omitempty"`
}

// MemstatCollectorNode pairs a per-NUMA-node meminfo file with the metric
// tags ("memoryDomain" + id) attached to values read from it.
type MemstatCollectorNode struct {
	file string
	tags map[string]string
}

// MemstatCollector reads memory statistics from /proc/meminfo and optionally
// per-NUMA-node meminfo files, emitting the metrics selected in its config.
type MemstatCollector struct {
	metricCollector
	stats map[string]int64
	tags map[string]string
	matches map[string]string
	config MemstatCollectorConfig
	nodefiles map[int]MemstatCollectorNode
	sendMemUsed bool
}

// MemstatStats is one parsed meminfo value together with its unit (e.g. "kB").
type MemstatStats struct {
	value float64
	unit string
}
// getStats parses a meminfo-style file into a map of metric name (colon
// trimmed) to value+unit. Lines with 3 fields are node-wide entries
// ("MemTotal: 123 kB"); lines with 5 fields are the per-NUMA variant
// ("Node 0 MemTotal: 123 kB"). Returns an empty map on error.
func getStats(filename string) map[string]MemstatStats {
	stats := make(map[string]MemstatStats)
	file, err := os.Open(filename)
	if err != nil {
		cclog.Error(err.Error())
		// Bug fix: the original fell through and scanned the nil file handle;
		// return the empty map immediately instead.
		return stats
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := scanner.Text()
		linefields := strings.Fields(line)
		if len(linefields) == 3 {
			v, err := strconv.ParseFloat(linefields[1], 64)
			if err == nil {
				stats[strings.Trim(linefields[0], ":")] = MemstatStats{
					value: v,
					unit: linefields[2],
				}
			}
		} else if len(linefields) == 5 {
			v, err := strconv.ParseFloat(linefields[3], 64)
			if err == nil {
				cclog.ComponentDebug("getStats", strings.Trim(linefields[2], ":"), v, linefields[4])
				stats[strings.Trim(linefields[2], ":")] = MemstatStats{
					value: v,
					unit: linefields[4],
				}
			}
		}
	}
	// Surface read errors that terminated the scan early.
	if err := scanner.Err(); err != nil {
		cclog.Error(err.Error())
	}
	return stats
}
// Init configures the collector from the optional JSON config, builds the
// meminfo-name -> metric-name match table (minus excluded metrics), verifies
// that /proc/meminfo is readable, and discovers per-NUMA meminfo files when
// numa_stats is enabled.
func (m *MemstatCollector) Init(config json.RawMessage) error {
	var err error
	m.name = "MemstatCollector"
	m.parallel = true
	// Defaults: node-wide stats on, NUMA stats off.
	m.config.NodeStats = true
	m.config.NumaStats = false
	if len(config) > 0 {
		err = json.Unmarshal(config, &m.config)
		if err != nil {
			return err
		}
	}
	m.meta = map[string]string{"source": m.name, "group": "Memory"}
	m.stats = make(map[string]int64)
	m.matches = make(map[string]string)
	m.tags = map[string]string{"type": "node"}
	// Maps /proc/meminfo field names to the emitted metric names.
	matches := map[string]string{
		"MemTotal": "mem_total",
		"SwapTotal": "swap_total",
		"SReclaimable": "mem_sreclaimable",
		"Slab": "mem_slab",
		"MemFree": "mem_free",
		"Buffers": "mem_buffers",
		"Cached": "mem_cached",
		"MemAvailable": "mem_available",
		"SwapFree": "swap_free",
		"MemShared": "mem_shared",
	}
	// Honor the exclude list from the configuration.
	for k, v := range matches {
		_, skip := stringArrayContains(m.config.ExcludeMetrics, k)
		if !skip {
			m.matches[k] = v
		}
	}
	// mem_used is derived (total - free - buffers - cached), so it is
	// toggled separately from the direct matches.
	m.sendMemUsed = false
	if _, skip := stringArrayContains(m.config.ExcludeMetrics, "mem_used"); !skip {
		m.sendMemUsed = true
	}
	if len(m.matches) == 0 {
		return errors.New("no metrics to collect")
	}
	m.setup()
	// Fail early if the node-wide source is unreadable.
	if m.config.NodeStats {
		if stats := getStats(MEMSTATFILE); len(stats) == 0 {
			return fmt.Errorf("cannot read data from file %s", MEMSTATFILE)
		}
	}
	// Discover /sys/devices/system/node/node<N>/meminfo files and record the
	// per-domain tags derived from each node id.
	if m.config.NumaStats {
		globPattern := filepath.Join(NUMA_MEMSTAT_BASE, "node[0-9]*", "meminfo")
		regex := regexp.MustCompile(filepath.Join(NUMA_MEMSTAT_BASE, "node(\\d+)", "meminfo"))
		files, err := filepath.Glob(globPattern)
		if err == nil {
			m.nodefiles = make(map[int]MemstatCollectorNode)
			for _, f := range files {
				if stats := getStats(f); len(stats) == 0 {
					return fmt.Errorf("cannot read data from file %s", f)
				}
				rematch := regex.FindStringSubmatch(f)
				if len(rematch) == 2 {
					id, err := strconv.Atoi(rematch[1])
					if err == nil {
						f := MemstatCollectorNode{
							file: f,
							tags: map[string]string{
								"type": "memoryDomain",
								"type-id": fmt.Sprintf("%d", id),
							},
						}
						m.nodefiles[id] = f
					}
				}
			}
		}
	}
	m.init = true
	return err
}
// Read collects the configured memory metrics and emits them on output.
// Node-wide values come from /proc/meminfo; per-NUMA values from the files
// discovered in Init. mem_used is derived as total - free - buffers - cached.
func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
	if !m.init {
		return
	}
	// sendStats emits every matched metric from one parsed stats map, using
	// the supplied tag set (node-wide or per-memoryDomain).
	sendStats := func(stats map[string]MemstatStats, tags map[string]string) {
		for match, name := range m.matches {
			var value float64 = 0
			var unit string = ""
			// Missing entries are emitted as 0 with no unit.
			if v, ok := stats[match]; ok {
				value = v.value
				if len(v.unit) > 0 {
					unit = v.unit
				}
			}
			y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": value}, time.Now())
			if err == nil {
				if len(unit) > 0 {
					y.AddMeta("unit", unit)
				}
				output <- y
			}
		}
		if m.sendMemUsed {
			memUsed := 0.0
			unit := ""
			// mem_used = MemTotal - MemFree - Buffers - Cached; the unit is
			// taken from the first contributing entry that has one.
			if totalVal, total := stats["MemTotal"]; total {
				if freeVal, free := stats["MemFree"]; free {
					memUsed = totalVal.value - freeVal.value
					if len(totalVal.unit) > 0 {
						unit = totalVal.unit
					} else if len(freeVal.unit) > 0 {
						unit = freeVal.unit
					}
					if bufVal, buffers := stats["Buffers"]; buffers {
						memUsed -= bufVal.value
						if len(bufVal.unit) > 0 && len(unit) == 0 {
							unit = bufVal.unit
						}
						// NOTE(review): Cached is only subtracted when
						// Buffers is present — confirm this nesting is
						// intentional.
						if cacheVal, cached := stats["Cached"]; cached {
							memUsed -= cacheVal.value
							if len(cacheVal.unit) > 0 && len(unit) == 0 {
								unit = cacheVal.unit
							}
						}
					}
				}
			}
			y, err := lp.New("mem_used", tags, m.meta, map[string]interface{}{"value": memUsed}, time.Now())
			if err == nil {
				if len(unit) > 0 {
					y.AddMeta("unit", unit)
				}
				output <- y
			}
		}
	}
	if m.config.NodeStats {
		nodestats := getStats(MEMSTATFILE)
		sendStats(nodestats, m.tags)
	}
	if m.config.NumaStats {
		for _, nodeConf := range m.nodefiles {
			stats := getStats(nodeConf.file)
			sendStats(stats, nodeConf.tags)
		}
	}
}

// Close marks the collector as uninitialized so further Reads are no-ops.
func (m *MemstatCollector) Close() {
	m.init = false
}
|
package rtrserver
import (
"bytes"
"encoding/binary"
"errors"
"github.com/cpusoft/goutil/belogs"
"github.com/cpusoft/goutil/jsonutil"
)
// ParseToCacheResponse decodes the remainder of an RTR Cache Response PDU
// (session id and length, both big-endian) from buf and returns the model.
// Any decoding failure, or a length other than the fixed 8 bytes, yields an
// RtrError carrying the CORRUPT_DATA error code.
func ParseToCacheResponse(buf *bytes.Reader, protocolVersion uint8) (rtrPduModel RtrPduModel, err error) {
	var sessionId uint16
	var length uint32
	// get sessionId
	err = binary.Read(buf, binary.BigEndian, &sessionId)
	if err != nil {
		belogs.Error("ParseToCacheResponse(): PDU_TYPE_CACHE_RESPONSE get sessionId fail, buf:", buf, err)
		rtrError := NewRtrError(
			err,
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get sessionId")
		return rtrPduModel, rtrError
	}
	// get length
	err = binary.Read(buf, binary.BigEndian, &length)
	if err != nil {
		belogs.Error("ParseToCacheResponse(): PDU_TYPE_CACHE_RESPONSE get length fail, buf:", buf, err)
		rtrError := NewRtrError(
			err,
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get length")
		return rtrPduModel, rtrError
	}
	// A Cache Response PDU has a fixed total length of 8 octets.
	if length != 8 {
		belogs.Error("ParseToCacheResponse():PDU_TYPE_CACHE_RESPONSE, length must be 8, buf:", buf, " length:", length)
		rtrError := NewRtrError(
			errors.New("pduType is CACHE RESPONSE, length must be 8"),
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get length")
		return rtrPduModel, rtrError
	}
	sq := NewRtrCacheResponseModel(protocolVersion, sessionId)
	belogs.Debug("ParseToCacheResponse():get PDU_TYPE_CACHE_RESPONSE, buf:", buf, " sq:", jsonutil.MarshalJson(sq))
	return sq, nil
}
|
package glog
import (
"log"
"github.com/dalixu/glogger"
)
// GLoggerFactory implements glogger.Factory by delegating logger lookup to a
// Manager built from a configuration file.
type GLoggerFactory struct {
	manager Manager
}

// GetLogger implements glogger.Factory, returning the named logger from the
// underlying manager.
func (gf *GLoggerFactory) GetLogger(name string) glogger.GLogger {
	return gf.manager.GetLogger(name)
}

// NewGLoggerFactory returns a glogger.Factory configured from the file at path.
func NewGLoggerFactory(path string) glogger.Factory {
	manager := New(path)
	return &GLoggerFactory{
		manager: manager,
	}
}

// init seeds the global registries with the built-in serializers
// ("plain", "json") and targets ("file", "console").
func init() {
	globalSerializer = make(map[string]Serializer)
	globalSerializer["plain"] = &DefaultSerializer{}
	globalSerializer["json"] = &JSONSerializer{}
	globalTarget = make(map[string]TargetCtor)
	globalTarget["file"] = createFileTarget
	globalTarget["console"] = createConsoleTarget
}

// globalSerializer maps serializer names to implementations.
var globalSerializer map[string]Serializer

// TargetCtor constructs a custom Target from its configuration map.
type TargetCtor func(config map[string]interface{}) Target

// globalTarget maps target names to their constructors.
var globalTarget map[string]TargetCtor

// RegisterSerializer registers a serializer under name so configuration
// files can refer to it by that name.
func RegisterSerializer(name string, serial Serializer) {
	globalSerializer[name] = serial
}

// RegisterTarget registers a Target constructor under name.
func RegisterTarget(name string, ctor TargetCtor) {
	globalTarget[name] = ctor
}
// findSerializer returns the serializer registered under name, falling back
// to the "plain" serializer when the name is unknown.
func findSerializer(name string) Serializer {
	if s, ok := globalSerializer[name]; ok && s != nil {
		return s
	}
	return globalSerializer["plain"]
}
// findTarget builds the target registered under name from config, falling
// back to the "file" target when the name is unknown.
func findTarget(name string, config map[string]interface{}) Target {
	ctor := globalTarget[name]
	if ctor == nil {
		ctor = globalTarget["file"]
	}
	return ctor(config)
}
// New loads the configuration at path and returns a Manager built from it;
// one Manager per program is normally enough. Returns nil when the
// configuration cannot be loaded (the error is only logged).
func New(path string) Manager {
	file := newConfigFile()
	config, err := file.Load(path)
	if err != nil {
		log.Println(err)
		return nil
	}
	return newManager(config, file)
}
|
package main
import "fmt"
// search finds target in a sorted slice that may have been rotated at an
// unknown pivot, returning its index or -1. It runs in O(log n): one binary
// search locates the rotation point, a second searches the half that can
// contain target.
func search(nums []int, target int) int {
	if len(nums) == 0 {
		return -1
	}
	rotateIndex := findRotateIndex(nums, 0, len(nums)-1)
	fmt.Println(rotateIndex)
	if rotateIndex == 0 {
		// Not rotated: plain binary search over the whole slice.
		return binarySearch(nums, 0, len(nums)-1, target)
	}
	minVal := nums[rotateIndex]
	switch {
	case minVal == target:
		return rotateIndex
	case minVal > target:
		// target is smaller than the slice minimum, so it cannot be present.
		return -1
	case target >= nums[0]:
		// target lies in the left (larger-values) segment.
		return binarySearch(nums, 0, rotateIndex-1, target)
	default:
		return binarySearch(nums, rotateIndex+1, len(nums)-1, target)
	}
}

// binarySearch is a classic iterative binary search over nums[left..right].
func binarySearch(nums []int, left, right, target int) int {
	lo, hi := left, right
	for lo <= hi {
		mid := lo + (hi-lo)/2
		switch {
		case nums[mid] == target:
			return mid
		case nums[mid] < target:
			lo = mid + 1
		default:
			hi = mid - 1
		}
	}
	return -1
}

// findRotateIndex returns the index of the smallest element (the rotation
// point), or 0 when the slice is not rotated.
func findRotateIndex(nums []int, left, right int) int {
	if nums[left] < nums[right] {
		return 0
	}
	lo, hi := left, right
	for lo <= hi {
		mid := lo + (hi-lo)/2
		// The rotation point is where the order breaks downward.
		if mid+1 < len(nums) && nums[mid] > nums[mid+1] {
			return mid + 1
		}
		if nums[mid] >= nums[lo] {
			lo = mid + 1
		} else {
			hi = mid - 1
		}
	}
	return 0
}
|
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
package backup
import (
"github.com/prometheus/client_golang/prometheus"
)
var (
	// backupRegionCounters counts backed-up regions, labeled by result type.
	backupRegionCounters = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "br",
			Subsystem: "raw",
			Name: "backup_region",
			Help: "Backup region statistic.",
		}, []string{"type"})
	// backupRegionHistogram records per-region backup latency, with
	// exponential buckets from 50ms up.
	backupRegionHistogram = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "br",
			Subsystem: "raw",
			Name: "backup_region_seconds",
			Help: "Backup region latency distributions.",
			Buckets: prometheus.ExponentialBuckets(0.05, 2, 16),
		})
)

// init registers the package metrics with the default Prometheus registry.
func init() { // nolint:gochecknoinits
	prometheus.MustRegister(backupRegionCounters)
	prometheus.MustRegister(backupRegionHistogram)
}
|
package utils
import (
"encoding/json"
"strconv"
"time"
)
// StringToInt converts a decimal string to an int (thin strconv.Atoi wrapper).
func StringToInt(e string) (int, error) {
	return strconv.Atoi(e)
}

// GetCurrentTimeStr returns the current local time as "YYYY-MM-DD hh:mm:ss".
func GetCurrentTimeStr() string {
	return time.Now().Format("2006-01-02 15:04:05")
}
// 时间戳转时间
func UnixToTime(e string) (datatime time.Time, err error) {
data, err := strconv.ParseInt(e, 10, 64)
datatime = time.Unix(data/1000, 0)
return
}
// 时间转时间戳
func TimeToUnix(e time.Time) int64 {
timeUnix, _ := time.Parse("2006-01-02 15:04:05", e.Format("2006-01-02 15:04:05"))
return timeUnix.UnixNano() / 1e6
}
// GetCurrentTimeUnix returns the current time as a millisecond Unix timestamp.
func GetCurrentTimeUnix() int64 {
	return time.Now().UnixNano() / 1e6
}

// GetCurrentTime returns the current time.
func GetCurrentTime() time.Time {
	return time.Now()
}
// StructToJsonStr marshals e to a JSON string, returning "" and the error
// when marshaling fails.
func StructToJsonStr(e interface{}) (string, error) {
	b, err := json.Marshal(e)
	if err != nil {
		return "", err
	}
	return string(b), nil
}
|
package straw
import (
"fmt"
"os"
"sort"
)
// Compile-time check that OsStreamStore satisfies StreamStore.
var _ StreamStore = &OsStreamStore{}

// OsStreamStore implements StreamStore directly on the local filesystem.
type OsStreamStore struct {
}

// Lstat wraps os.Lstat (does not follow symlinks).
func (_ *OsStreamStore) Lstat(filename string) (os.FileInfo, error) {
	return os.Lstat(filename)
}

// Stat wraps os.Stat.
func (_ *OsStreamStore) Stat(filename string) (os.FileInfo, error) {
	return os.Stat(filename)
}

// Mkdir wraps os.Mkdir.
func (_ *OsStreamStore) Mkdir(path string, mode os.FileMode) error {
	return os.Mkdir(path, mode)
}
// OpenReadCloser opens name for reading, closing the handle and returning an
// error when name refers to a directory.
func (_ *OsStreamStore) OpenReadCloser(name string) (StrawReader, error) {
	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	fi, statErr := f.Stat()
	switch {
	case statErr != nil:
		f.Close()
		return nil, statErr
	case fi.IsDir():
		f.Close()
		return nil, fmt.Errorf("%s is a directory", name)
	}
	return f, nil
}
// Remove wraps os.Remove.
func (_ *OsStreamStore) Remove(name string) error {
	return os.Remove(name)
}

// CreateWriteCloser opens name for writing, creating or truncating it (0666
// before umask).
func (_ *OsStreamStore) CreateWriteCloser(name string) (StrawWriter, error) {
	return os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
}
// Readdir returns the entries of directory name sorted by file name.
func (_ *OsStreamStore) Readdir(name string) ([]os.FileInfo, error) {
	dir, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	infos, readErr := dir.Readdir(-1)
	if readErr != nil {
		dir.Close()
		return nil, readErr
	}
	if closeErr := dir.Close(); closeErr != nil {
		return nil, closeErr
	}
	sort.Slice(infos, func(a, b int) bool { return infos[a].Name() < infos[b].Name() })
	return infos, nil
}
|
package loghelper
import (
"os"
log "github.com/sirupsen/logrus"
)
// logFields is attached to every log entry emitted by this package.
var logFields = log.Fields{"Client": "Publisher"}

// Init - to make one time initial setup for logrus
func Init() {
	log.SetFormatter(&log.JSONFormatter{})
	log.SetOutput(os.Stdout)
	log.SetLevel(log.InfoLevel)
}

// LogInfo logs a message at level Info.
func LogInfo(args ...interface{}) {
	log.WithFields(logFields).Info(args...)
}

// LogWarn logs a message at level Warn.
func LogWarn(args ...interface{}) {
	log.WithFields(logFields).Warn(args...)
}

// LogError logs a message at level Error.
func LogError(args ...interface{}) {
	log.WithFields(logFields).Error(args...)
}
// LogFatal logs a message at level Fatal.
func LogFatal(args ...interface{}) {
log.WithFields(logFields).Info(args...)
}
|
package noise
import (
"crypto/rand"
"testing"
"github.com/katzenpost/noise"
"github.com/stretchr/testify/require"
)
// TestNoiseXX runs a full Noise XX handshake between an initiator and a
// responder, then verifies that both resulting cipher-state pairs encrypt
// and decrypt application messages in each direction.
func TestNoiseXX(t *testing.T) {
	clientStaticKeypair, err := noise.DH25519.GenerateKeypair(rand.Reader)
	require.NoError(t, err)
	serverStaticKeypair, err := noise.DH25519.GenerateKeypair(rand.Reader)
	require.NoError(t, err)
	cs := noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashBLAKE2b)
	client, err := noise.NewHandshakeState(noise.Config{
		CipherSuite: cs,
		Random: rand.Reader,
		Pattern: noise.HandshakeXX,
		Initiator: true,
		StaticKeypair: clientStaticKeypair,
	})
	// Bug fix: this error (and the server's below) was silently dropped,
	// letting the test continue with a nil handshake state.
	require.NoError(t, err)
	server, err := noise.NewHandshakeState(noise.Config{
		CipherSuite: cs,
		Random: rand.Reader,
		Pattern: noise.HandshakeXX,
		Initiator: false,
		StaticKeypair: serverStaticKeypair,
	})
	require.NoError(t, err)
	// -> e
	msg, _, _, err := client.WriteMessage(nil, nil)
	require.NoError(t, err)
	t.Logf("msg 1 len is %d", len(msg))
	res, _, _, err := server.ReadMessage(nil, msg)
	require.NoError(t, err)
	require.Equal(t, string(res), "")
	// <- e, ee, s, es
	msg, _, _, err = server.WriteMessage(nil, nil)
	require.NoError(t, err)
	t.Logf("msg 2 len is %d", len(msg))
	res, _, _, err = client.ReadMessage(nil, msg)
	require.NoError(t, err)
	require.Equal(t, string(res), "")
	// -> s, se (final message; both sides obtain their cipher states)
	msg, clientTx, clientRx, err := client.WriteMessage(nil, nil)
	require.NoError(t, err)
	t.Logf("msg 3 len is %d", len(msg))
	res, serverRx, serverTx, err := server.ReadMessage(nil, msg)
	require.NoError(t, err)
	require.Equal(t, string(res), "")
	// Transport phase: client -> server, then server -> client.
	msg = clientTx.Encrypt(nil, nil, []byte("aleph"))
	res, err = serverRx.Decrypt(nil, nil, msg)
	require.NoError(t, err)
	require.Equal(t, string(res), "aleph")
	msg = serverTx.Encrypt(nil, nil, []byte("wubba"))
	res, err = clientRx.Decrypt(nil, nil, msg)
	require.NoError(t, err)
	require.Equal(t, string(res), "wubba")
}
|
package gbinterface
// IRequest abstracts one inbound message together with the connection it
// arrived on, exposing the payload and its message id/length.
type IRequest interface {
	GetConnection() IConnection
	GetData() []byte
	GetMessageID() uint32
	GetMessageLen() uint32
}
|
package config
import (
"log"
"github.com/jinzhu/gorm"
"github.com/spf13/viper"
)
// Config holds the application settings read from the per-environment JSON
// configuration file. DBConnection is currently unused (see the commented
// database wiring in Load).
type Config struct {
	Version string
	Port int
	DebugMode bool
	LogFilePath string
	DBConnection *gorm.DB
}

// Load reads config/env/<environment>.json via viper and returns the
// populated Config. A missing or unreadable file is fatal.
func Load(environment string) *Config {
	cfg := new(Config)
	var configFile *viper.Viper = viper.New()
	configFile.SetConfigType("json")
	configFile.SetConfigName(environment)
	configFile.AddConfigPath("config/env")
	var err error = configFile.ReadInConfig()
	if err != nil {
		log.Print("Error while loading configuration file!")
		log.Fatal(err)
	}
	// var dbInstance Database = Database{
	// Driver: configFile.GetString("database.driver"),
	// Host: configFile.GetString("database.host"),
	// Port: configFile.GetString("database.port"),
	// Database: configFile.GetString("database.name"),
	// Username: configFile.GetString("database.username"),
	// Password: configFile.GetString("database.password"),
	// SslMode: configFile.GetString("database.sslmode"),
	// }
	cfg.Version = configFile.GetString("application.version")
	cfg.Port = configFile.GetInt("server.port")
	cfg.DebugMode = configFile.GetBool("application.debug_mode")
	cfg.LogFilePath = configFile.GetString("application.log_file_path")
	// cfg.DBConnection = dbInstance.Connect()
	return cfg
}
|
package main
//The main package and the fmt (format) import are required to print output.
//Go is not class-based object-oriented; behavior is organized into functions.
import "fmt"
// main prints a greeting to standard output.
func main() {
	fmt.Println("Hello, go")
}
|
package common
import (
	"io/ioutil"
	"log"
	"os"

	"gopkg.in/yaml.v2"
)
// Config holds all service settings loaded from $CFG_DIR/<program>.yaml.
type Config struct {
	// Log config
	Logdir string
	Loglevel string
	Logname string
	// RabbitMQ config
	Rabbithost string
	Rabbitport int
	Rabbituser string
	Rabbitpw string
	// queue information
	Udrqueue string
	Reqreciever string
	// redis config
	Redisclusters []string
	// web template
	Templatedir string
}
var conf Config
// ReadConfigFile loads <CFG_DIR>/<pname>.yaml into the package-level
// configuration. It panics (via log.Panic) when the file cannot be read
// or parsed, so call it once during startup.
func ReadConfigFile(pname string) {
	cfgpath := os.Getenv("CFG_DIR")
	// filepath.Join builds an OS-correct path, replacing the previous
	// manual "/" concatenation (which also made the Windows test-code
	// comment unnecessary).
	data, err := ioutil.ReadFile(filepath.Join(cfgpath, pname+".yaml"))
	if err != nil {
		log.Panic(err)
	}
	if err = yaml.Unmarshal(data, &conf); err != nil {
		log.Panic(err)
	}
}
// GetConfig returns a pointer to the process-wide configuration that
// ReadConfigFile populated.
func GetConfig() *Config {
	c := &conf
	return c
}
|
package core
import (
"net/http"
"github.com/gin-gonic/gin"
peer "github.com/libp2p/go-libp2p-core/peer"
)
// ping godoc
// @Summary Ping a network peer
// @Description Pings another peer on the network, returning online|offline.
// @Tags utils
// @Produce text/plain
// @Param X-Textile-Args header string true "peerid"
// @Success 200 {string} string "One of online|offline"
// @Failure 400 {string} string "Bad Request"
// @Failure 500 {string} string "Internal Server Error"
// @Router /ping [get]
func (a *api) ping(g *gin.Context) {
	args, err := a.readArgs(g)
	if err != nil {
		a.abort500(g, err)
		return
	}
	// The peer id arrives via the X-Textile-Args header.
	if len(args) < 1 {
		g.String(http.StatusBadRequest, "missing peer id")
		return
	}
	decoded, decodeErr := peer.IDB58Decode(args[0])
	if decodeErr != nil {
		g.String(http.StatusBadRequest, decodeErr.Error())
		return
	}
	result, pingErr := a.node.Ping(decoded)
	if pingErr != nil {
		a.abort500(g, pingErr)
		return
	}
	g.String(http.StatusOK, string(result))
}
|
// tgbot-go -
// https://github.com/modern-dev/tgbot-go
// Copyright (c) 2020 Bohdan Shtepan
// Licensed under the MIT license.
package tgbot
// InputFile identifies a file to send: by Telegram file id, by remote
// URL, or by a path on the local disk.
type InputFile struct {
	FileId string
	FileURL string
	FilePath string
}

// InputFileFromURL builds an InputFile that refers to a remote URL.
func InputFileFromURL(url string) InputFile {
	return InputFile{FileURL: url}
}

// InputFileFromDisk builds an InputFile that refers to a local path.
func InputFileFromDisk(path string) InputFile {
	return InputFile{FilePath: path}
}

// IsOnDisk reports whether the file is referenced by a local path.
func (f *InputFile) IsOnDisk() bool {
	return len(f.FilePath) > 0
}
|
package osbuild2
// MkfsFATStageOptions are the options of the org.osbuild.mkfs.fat
// stage: a required volume id, an optional label, and an optional FAT
// size (values per the osbuild stage schema — TODO confirm 12/16/32).
type MkfsFATStageOptions struct {
	VolID string `json:"volid"`
	Label string `json:"label,omitempty"`
	FATSize *int `json:"fat-size,omitempty"`
}

// isStageOptions marks MkfsFATStageOptions as usable as Stage.Options.
func (MkfsFATStageOptions) isStageOptions() {}

// MkfsFATStageDevices names the device the filesystem is created on.
type MkfsFATStageDevices struct {
	Device Device `json:"device"`
}

// isStageDevices marks MkfsFATStageDevices as usable as Stage.Devices.
func (MkfsFATStageDevices) isStageDevices() {}
// NewMkfsFATStage assembles an org.osbuild.mkfs.fat stage from the
// given options and target devices.
func NewMkfsFATStage(options *MkfsFATStageOptions, devices *MkfsFATStageDevices) *Stage {
	stage := Stage{
		Type: "org.osbuild.mkfs.fat",
		Options: options,
		Devices: devices,
	}
	return &stage
}
|
package shamir_test
import (
"fmt"
"reflect"
"github.com/renproject/surge/surgeutil"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/renproject/shamir"
)
// Property-style tests for surge marshalling: every shamir wire type
// must round-trip, survive fuzzing, and error cleanly on undersized
// buffers and memory quotas.
var _ = Describe("Surge marshalling", func() {
	trials := 100
	// All surge-marshallable types exported by the shamir package.
	types := []reflect.Type{
		reflect.TypeOf(Share{}),
		reflect.TypeOf(Shares{}),
		reflect.TypeOf(Commitment{}),
		reflect.TypeOf(VerifiableShare{}),
		reflect.TypeOf(VerifiableShares{}),
	}
	for _, t := range types {
		// Capture the loop variable for the closures registered below.
		t := t
		Context(fmt.Sprintf("surge marshalling and unmarshalling for %v", t), func() {
			It("should be the same after marshalling and unmarshalling", func() {
				for i := 0; i < trials; i++ {
					Expect(surgeutil.MarshalUnmarshalCheck(t)).To(Succeed())
				}
			})
			It("should not panic when fuzzing", func() {
				for i := 0; i < trials; i++ {
					Expect(func() { surgeutil.Fuzz(t) }).ToNot(Panic())
				}
			})
			Context("marshalling", func() {
				It("should return an error when the buffer is too small", func() {
					for i := 0; i < trials; i++ {
						Expect(surgeutil.MarshalBufTooSmall(t)).To(Succeed())
					}
				})
				It("should return an error when the memory quota is too small", func() {
					for i := 0; i < trials; i++ {
						Expect(surgeutil.MarshalRemTooSmall(t)).To(Succeed())
					}
				})
			})
			Context("unmarshalling", func() {
				It("should return an error when the buffer is too small", func() {
					for i := 0; i < trials; i++ {
						Expect(surgeutil.UnmarshalBufTooSmall(t)).To(Succeed())
					}
				})
				It("should return an error when the memory quota is too small", func() {
					for i := 0; i < trials; i++ {
						Expect(surgeutil.UnmarshalRemTooSmall(t)).To(Succeed())
					}
				})
			})
		})
	}
})
|
package server
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"path/filepath"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/filemonitor"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/profile"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
)
// Option applies a configuration option to the given config.
type Option func(s *serverConfig)

// GetListenAndServeFunc builds the metrics/health HTTP(S) server from
// the supplied options and returns a function that starts serving.
// It returns an error when the TLS flag combination is inconsistent.
func GetListenAndServeFunc(options ...Option) (func() error, error) {
	sc := defaultServerConfig()
	sc.apply(options)
	return sc.getListenAndServeFunc()
}
// WithTLS supplies the certificate, key, and client-CA file paths used
// to serve HTTPS.
func WithTLS(tlsCertPath, tlsKeyPath, clientCAPath *string) Option {
	return func(cfg *serverConfig) {
		cfg.tlsCertPath = tlsCertPath
		cfg.tlsKeyPath = tlsKeyPath
		cfg.clientCAPath = clientCAPath
	}
}

// WithLogger supplies the logger used by the server.
func WithLogger(logger *logrus.Logger) Option {
	return func(cfg *serverConfig) { cfg.logger = logger }
}

// WithDebug toggles debug behavior.
func WithDebug(debug bool) Option {
	return func(cfg *serverConfig) { cfg.debug = debug }
}
// serverConfig collects the optional settings applied by Option values.
// The TLS paths are pointers so "flag not provided" (nil) is
// distinguishable from "provided but empty".
type serverConfig struct {
	logger *logrus.Logger
	tlsCertPath *string
	tlsKeyPath *string
	clientCAPath *string
	debug bool
}
// apply runs each Option against the config in order.
func (sc *serverConfig) apply(options []Option) {
	for _, opt := range options {
		opt(sc)
	}
}

// defaultServerConfig returns the zero configuration: no TLS paths, no
// logger, debug off.
func defaultServerConfig() serverConfig {
	var cfg serverConfig
	return cfg
}
// tlsEnabled reports whether both the TLS certificate and key paths
// were provided. It returns an error when only one of the two is set.
func (sc *serverConfig) tlsEnabled() (bool, error) {
	// The path pointers are nil when WithTLS was never applied (see
	// defaultServerConfig); the previous unconditional dereference
	// panicked in that case.
	certSet := sc.tlsCertPath != nil && *sc.tlsCertPath != ""
	keySet := sc.tlsKeyPath != nil && *sc.tlsKeyPath != ""
	if certSet && keySet {
		return true, nil
	}
	if certSet || keySet {
		return false, fmt.Errorf("both --tls-key and --tls-crt must be provided for TLS to be enabled")
	}
	return false, nil
}
// getAddress picks the listen address: :8443 for HTTPS, :8080 for HTTP.
func (sc *serverConfig) getAddress(tlsEnabled bool) string {
	addr := ":8080"
	if tlsEnabled {
		addr = ":8443"
	}
	return addr
}
// getListenAndServeFunc wires up the metrics mux (/metrics, /healthz,
// profiling) and returns a closure that serves it over plain HTTP or,
// when both TLS paths are set, over HTTPS with live-reloaded
// certificates and an optional client CA for mutual TLS.
func (sc serverConfig) getListenAndServeFunc() (func() error, error) {
	tlsEnabled, err := sc.tlsEnabled()
	if err != nil {
		return nil, fmt.Errorf("both --tls-key and --tls-crt must be provided for TLS to be enabled")
	}
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	// NOTE(review): profiling is TLS-protected when TLS is on OR debug
	// is off — confirm `tlsEnabled || !sc.debug` is the intended
	// condition rather than just `tlsEnabled`.
	profile.RegisterHandlers(mux, profile.WithTLS(tlsEnabled || !sc.debug))
	s := http.Server{
		Handler: mux,
		Addr: sc.getAddress(tlsEnabled),
	}
	if !tlsEnabled {
		return s.ListenAndServe, nil
	}
	// NOTE(review): sc.logger may be nil unless WithLogger was applied —
	// confirm callers always provide a logger before reaching here.
	sc.logger.Info("TLS keys set, using https for metrics")
	// Watch the cert/key files so rotated certificates are picked up
	// without a restart.
	certStore, err := filemonitor.NewCertStore(*sc.tlsCertPath, *sc.tlsKeyPath)
	if err != nil {
		return nil, fmt.Errorf("certificate monitoring for metrics (https) failed: %v", err)
	}
	csw, err := filemonitor.NewWatch(sc.logger, []string{filepath.Dir(*sc.tlsCertPath), filepath.Dir(*sc.tlsKeyPath)}, certStore.HandleFilesystemUpdate)
	if err != nil {
		return nil, fmt.Errorf("error creating cert file watcher: %v", err)
	}
	csw.Run(context.Background())
	// Same live-reload treatment for the client CA bundle.
	certPoolStore, err := filemonitor.NewCertPoolStore(*sc.clientCAPath)
	if err != nil {
		return nil, fmt.Errorf("certificate monitoring for client-ca failed: %v", err)
	}
	cpsw, err := filemonitor.NewWatch(sc.logger, []string{filepath.Dir(*sc.clientCAPath)}, certPoolStore.HandleCABundleUpdate)
	if err != nil {
		return nil, fmt.Errorf("error creating cert file watcher: %v", err)
	}
	cpsw.Run(context.Background())
	s.TLSConfig = &tls.Config{
		// Serve the most recently loaded certificate.
		GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) {
			return certStore.GetCertificate(), nil
		},
		// Per-connection config: verify a client cert when one is given.
		GetConfigForClient: func(_ *tls.ClientHelloInfo) (*tls.Config, error) {
			var certs []tls.Certificate
			if cert := certStore.GetCertificate(); cert != nil {
				certs = append(certs, *cert)
			}
			return &tls.Config{
				Certificates: certs,
				ClientCAs: certPoolStore.GetCertPool(),
				ClientAuth: tls.VerifyClientCertIfGiven,
			}, nil
		},
	}
	return func() error {
		return s.ListenAndServeTLS("", "")
	}, nil
}
|
package http
const (
	// Info — informational logger messages.
	Info = iota
	// Error — error messages.
	Error
	// Warn — warning messages.
	Warn
)
// SendLog writes a printf-style message to the server's logger with a
// level prefix ([I]/[E]/[W]). It is a no-op when logging is disabled
// or status is not one of Info, Error, Warn.
func (server *Server) SendLog(status int, format string, a ...interface{}) {
	if !server.Logger {
		return
	}
	// Go switch cases do not fall through, so the previous break
	// statements (and the empty default) were redundant.
	switch status {
	case Info:
		server.logger.Printf("[I] "+format, a...)
	case Error:
		server.logger.Printf("[E] "+format, a...)
	case Warn:
		server.logger.Printf("[W] "+format, a...)
	}
}
|
package day04
import (
"fmt"
"reflect"
)
// CallMethod demonstrates method reflection: it prints every method in
// man's method set, then invokes methods 0 and 1 by index with sample
// arguments.
func CallMethod() {
	man := Man{"male", Human{"杨一帆", 22}}
	rValue := reflect.ValueOf(man)
	rType := reflect.TypeOf(man)
	for i := 0; i < rType.NumMethod(); i++ {
		m := rType.Method(i)
		fmt.Printf("%s\t %v\n", m.Name, m.Type)
	}
	// NOTE(review): reflect orders methods alphabetically by name, so
	// indexes 0 and 1 depend on Man's method names — confirm they match
	// the intended methods (one taking a string, one taking an int).
	rValue.Method(0).Call([]reflect.Value{reflect.ValueOf("苹果")})
	rValue.Method(1).Call([]reflect.Value{reflect.ValueOf(80)})
}
|
package db
import (
"context"
"fmt"
"github.com/yandex-cloud/examples/serverless/alice-shareable-todolist/app/errors"
"github.com/yandex-cloud/examples/serverless/alice-shareable-todolist/app/log"
"github.com/yandex-cloud/ydb-go-sdk/table"
"go.uber.org/zap"
)
// TxManager starts YDB transactions; see TxRunner.Do for execution.
type TxManager interface {
	InTx(ctx context.Context, opts ...TxOpt) *TxRunner
}

// NewTxManager builds a TxManager backed by a YDB session pool created
// from the dependency container's context and config.
func NewTxManager(deps Deps) (TxManager, error) {
	sp, err := initSessionPool(deps.GetContext(), deps.GetConfig())
	if err != nil {
		return nil, err
	}
	return &txManagerImpl{sp: sp}, nil
}

// Compile-time check that txManagerImpl satisfies TxManager.
var _ TxManager = &txManagerImpl{}

// txManagerImpl is the session-pool-backed TxManager implementation.
type txManagerImpl struct {
	sp *table.SessionPool
}
// InTx prepares a TxRunner bound to ctx with the given options applied.
func (t *txManagerImpl) InTx(ctx context.Context, opts ...TxOpt) *TxRunner {
	options := txOpts{}
	for _, apply := range opts {
		apply(&options)
	}
	runner := &TxRunner{
		ctx: ctx,
		opts: &options,
		sp: t.sp,
	}
	return runner
}
// txOpts holds transaction settings accumulated from TxOpt values.
type txOpts struct {
	ro bool // run the transaction read-only
}

// TxOpt mutates transaction options.
type TxOpt func(o *txOpts)

// TxRO marks the transaction read-only; read-only runs skip commit and
// use online read-only transaction control (see repository.execute).
func TxRO() TxOpt {
	return func(o *txOpts) {
		o.ro = true
	}
}

// TxRunner executes a function inside a retried YDB transaction;
// create one with TxManager.InTx.
type TxRunner struct {
	ctx context.Context
	opts *txOpts
	sp *table.SessionPool
}
// Do executes action inside a retried YDB operation. A txCtx is placed
// on the context for repository queries; on success the transaction is
// committed, otherwise the deferred close rolls it back. Failures are
// wrapped as internal errors unless action returned an errors.Err.
func (r *TxRunner) Do(action func(ctx context.Context) error) errors.Err {
	err := table.Retry(r.ctx, r.sp, table.OperationFunc(
		func(ctx context.Context, session *table.Session) error {
			// Fresh per-attempt state: Retry may invoke this function
			// several times.
			tx := &txCtx{
				readonly: r.opts.ro,
				session: session,
			}
			ctx = ctxWithTx(ctx, tx)
			// close rolls back whatever commit did not finalize.
			defer tx.close(ctx)
			err := action(ctx)
			if err == nil {
				err = tx.commit(ctx)
				if err != nil {
					return errors.NewInternal(err)
				}
				return nil
			}
			// Preserve typed application errors as-is.
			if appErr, ok := err.(errors.Err); ok {
				return appErr
			}
			return err
		},
	))
	if err != nil {
		return errors.NewInternal(err)
	}
	return nil
}
// txCtxKey is the private context key type for the active transaction.
type txCtxKey struct{}

// ctxWithTx returns a child context carrying tx.
func ctxWithTx(ctx context.Context, tx *txCtx) context.Context {
	return context.WithValue(ctx, txCtxKey{}, tx)
}

// txFromCtx extracts the active transaction from ctx, or nil when the
// context carries none.
func txFromCtx(ctx context.Context) *txCtx {
	res := ctx.Value(txCtxKey{})
	if res == nil {
		return nil
	}
	return res.(*txCtx)
}

// txCtx tracks the state of one transaction attempt: the session it
// runs on and the server-side transaction handle once one is begun.
type txCtx struct {
	readonly bool
	session *table.Session
	tx *table.Transaction
}
// commit finalizes the transaction. Read-only runs and runs that never
// began a transaction have nothing to commit.
func (c *txCtx) commit(ctx context.Context) error {
	if c.readonly || c.tx == nil {
		return nil
	}
	_, err := c.tx.CommitTx(ctx)
	c.tx = nil
	return err
}

// close rolls back any transaction still open (i.e. commit was not
// reached), logging rollback failures rather than propagating them.
func (c *txCtx) close(ctx context.Context) {
	if c.readonly || c.tx == nil {
		return
	}
	if err := c.tx.Rollback(ctx); err != nil {
		log.Warn(ctx, "rollback failed", zap.Error(err))
	}
	c.tx = nil
}
// queryFunc runs one query on a session under the given transaction
// control, returning the (possibly newly begun) transaction.
type queryFunc func(ctx context.Context, s *table.Session, txc *table.TransactionControl) (*table.Transaction, error)

// execute runs action inside the transaction carried by ctx (placed
// there by TxRunner.Do). The first read-write query begins a
// serializable transaction that later queries join; read-only runs use
// auto-committed online read-only control.
func (r *repository) execute(ctx context.Context, action queryFunc) error {
	txCtx := txFromCtx(ctx)
	if txCtx == nil {
		return fmt.Errorf("transaction required")
	}
	var err error
	var txc *table.TransactionControl
	if txCtx.tx != nil {
		// Continue the already-started transaction.
		txc = table.TxControl(table.WithTx(txCtx.tx))
	} else if txCtx.readonly {
		txc = table.TxControl(table.BeginTx(table.WithOnlineReadOnly()), table.CommitTx())
	} else {
		txc = table.TxControl(table.BeginTx(table.WithSerializableReadWrite()))
	}
	tx, err := action(ctx, txCtx.session, txc)
	if err != nil {
		return err
	}
	// Remember the begun transaction so later queries join it and
	// commit/close can finalize it.
	if txCtx.tx == nil && !txCtx.readonly {
		txCtx.tx = tx
	}
	return nil
}
|
package gocacher
import (
"bytes"
"crypto/md5"
"encoding/gob"
"encoding/hex"
"errors"
"time"
)
// cacher is the contract every cache backend implements: lifecycle
// (Init/Clone), writes with and without expiry, reads (Get, and Pull
// which appears paired with Remove — confirm it deletes after reading),
// and bookkeeping (Keys/Len/Clear).
type cacher interface {
	Init(config map[string]interface{}) cacher
	Clone(config map[string]interface{}) cacher
	Set(key string, value interface{}) error
	SetExpire(key string, value interface{}, exp time.Duration) error
	Has(key string) bool
	Get(key string) (interface{}, error)
	Pull(key string) (interface{}, error)
	Remove(key string) bool
	Clear() error
	Keys() []string
	Len() int
}
type cacherItem struct {
Key string
Val interface{}
Exp time.Time
}
// 该数据是否过期
func (ci *cacherItem) expired() bool {
return ci.Exp.Before(time.Now())
}
// gobEncode serializes a cache entry to a gob-encoded string, first
// registering the concrete type of its value with the gob package.
func gobEncode(ci *cacherItem) (string, error) {
	gob.Register(ci.Val)
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(ci); err != nil {
		return "", err
	}
	return buf.String(), nil
}
// gobDecode reverses gobEncode, reconstructing a cache entry from raw
// gob bytes.
func gobDecode(data []byte) (*cacherItem, error) {
	var item *cacherItem
	err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(&item)
	return item, err
}
// md5Encode returns the lowercase hex MD5 digest of ori.
func md5Encode(ori string) string {
	sum := md5.Sum([]byte(ori))
	return hex.EncodeToString(sum[:])
}
var (
	// gcTime is presumably the interval between expiry sweeps — confirm
	// against the backend implementations.
	gcTime = time.Hour
	// farTime is a "never expires" sentinel timestamp in the far future.
	farTime = time.Date(3018, 11, 23, 22, 44, 0, 0, time.Local)
)

var (
	// KeyNotExistError is returned when a key is absent from the cache.
	KeyNotExistError = errors.New("key not exist")
	// KeyExpireError is returned when a key exists but has expired.
	KeyExpireError = errors.New("key expired")
)
|
package dao
import (
log "github.com/sirupsen/logrus"
"zhiyuan/scaffold/internal/model"
"zhiyuan/zyutil_v1.5"
)
// AddCamera inserts data into the camera table and returns the last
// row of the table.
// NOTE(review): fetching the inserted row via a separate Last query can
// race with concurrent inserts — confirm this is acceptable.
func (d *Dao) AddCamera(data model.Camera)(Camera_obj model.Camera,err error){
	// gorm chains return a *DB whose Error field carries any failure;
	// the local err shadows the named return and is read via err.Error.
	if err := d.crmdb.Create(&data);err.Error!=nil{
		log.WithFields(log.Fields{
			"Camera": "insert",
		}).Error("camera insert db err")
		return model.Camera{}, err.Error
	}
	if err := d.crmdb.Last(&Camera_obj);err.Error!=nil{
		log.WithFields(log.Fields{
			"Camera": "select",
		}).Error("select camera in last time err")
		return model.Camera{}, err.Error
	}
	return Camera_obj,nil
}
// UpdateCamera applies the non-zero fields of data to the camera row
// with the given id and returns the row re-read from the database.
func (d *Dao) UpdateCamera(data model.Camera,id int)(Camera_obj model.Camera,err error){
	// As in AddCamera, the local err is gorm's *DB handle and its Error
	// field carries the actual failure.
	if err := d.crmdb.Model(&model.Camera{}).Where("id = ?",id).Updates(data);err.Error!=nil{
		log.WithFields(log.Fields{
			"Camera": "update",
		}).Error("camera update db err")
		return model.Camera{}, err.Error
	}
	if err := d.crmdb.Model(&model.Camera{}).Where("id = ?",id).Last(&Camera_obj);err.Error!=nil{
		log.WithFields(log.Fields{
			"Camera": "select",
		}).Error("select updated camera in last time err")
		return model.Camera{}, err.Error
	}
	return Camera_obj,nil
}
// DeleteCamera removes the camera row with the given id, logging and
// returning the database error on failure.
func (d *Dao) DeleteCamera(id int)( err error){
	if err := d.crmdb.Where("id = ?",id).Delete(&model.Camera{});err.Error!=nil{
		log.WithFields(log.Fields{
			"Camera": "delete",
		}).Error("camera delete db err")
		return err.Error
	}
	return nil
}
// GetCameras returns a page of cameras filtered by position and status,
// newest first. A cameraStatus of 0 or an empty cameraPosition disables
// that filter; page <= 0 disables paging. It returns the rows, any
// database error, the total matching row count, and the page total
// derived from size.
func (d *Dao) GetCameras(cameraPosition string, cameraStatus, page, size int) (result []model.Camera, err error, count int, total int) {
	query := d.crmdb
	if cameraStatus != 0 {
		query = query.Where("camera_status = ?", cameraStatus)
	}
	if cameraPosition != "" {
		query = query.Where("camera_position = ?", cameraPosition)
	}
	query = query.Order("id desc")
	if page > 0 {
		query = query.Limit(size).Offset((page - 1) * size)
	}
	if res := query.Model(model.Camera{}).Find(&result); res.Error != nil {
		return result, res.Error, 0, 0
	}
	// Count with the filters only (no order/limit) so the total covers
	// every matching row, not just the current page.
	counter := d.crmdb
	if cameraStatus != 0 {
		counter = counter.Where("camera_status = ?", cameraStatus)
	}
	if cameraPosition != "" {
		counter = counter.Where("camera_position = ?", cameraPosition)
	}
	if res := counter.Model(model.Camera{}).Count(&count); res.Error != nil {
		return result, res.Error, 0, 0
	}
	total = zyutil.GetTotal(count, size)
	return result, nil, count, total
}
// CheckCameras reports whether camera_address is free to use: true when
// no camera row carries this address, false when one already exists or
// the count query fails (presumably an availability check — confirm
// with callers).
func(d *Dao) CheckCameras(camera_address string)(bool,error){
	var count int
	DBcount := d.crmdb
	DBcount = DBcount.Where("camera_address = ?", camera_address)
	if err := DBcount.Model(model.Camera{}).Count(&count); err.Error != nil {
		return false,err.Error
	}
	if count != 0{
		return false,nil
	}
	return true, nil
}
// GetAllCameras returns every camera row ordered by id descending
// (newest first).
func(d *Dao) GetAllCameras()(result []model.Camera,err error){
	DBdate := d.crmdb
	DBdate = DBdate.Order("id desc")
	if err := DBdate.Model(model.Camera{}).Find(&result);err.Error!= nil {
		return result, err.Error
	}
	return result, nil
}
package main
import (
"fmt"
"github.com/astaxie/beego/toolbox"
_ "tokensky_bg_admin/manage_tick/sysinit"
"tokensky_bg_admin/manage_tick/tick"
)
//定时任务
/*
符号 含义 示例
* 表示任何时刻
, 表示分割 如第三段里:2,4,表示 2 点和 4 点执行
- 表示一个段 如第三端里: 1-5,就表示 1 到 5 点
/n 表示每个n的单位执行一次 如第三段里,1, 就表示每隔 1 个小时执行一次命令。也可以写成1-23/1
示例 详细含义
0/30 * * * * * 每 30 秒 执行
0 43 21 * * * 21:43 执行
0 0 17 * * 1 每周一的 17:00 执行
0 0,10 17 * * 0,2,3 每周日,周二,周三的 17:00和 17:10 执行
0 0 21 * * 1-6 周一到周六 21:00 执行
0 0/10 * * * 每隔 10 分 执行
*/
const (
	// TBI_SERVER_ADDRESS_TICK is the interval, in seconds, between user
	// address-book maintenance runs (spliced into the cron spec in main).
	TBI_SERVER_ADDRESS_TICK string = "300"
)
// main registers and starts the periodic background tasks, then blocks
// forever (the deferred empty select never returns).
func main() {
	// Keep the process alive after StartTask spawns the schedulers.
	defer func() { select {} }()
	// Hashrate-order balance payout, daily at 12:30:00.
	sendBalance := toolbox.NewTask("sendBalance", "0 30 12 * * *", tick.TickHashrateOrderSendBalance)
	toolbox.AddTask("sendBalance", sendBalance)
	// User address-book maintenance, every TBI_SERVER_ADDRESS_TICK seconds.
	tickTokenskyUserAddressUp := toolbox.NewTask("tickTokenskyUserAddressUp", "0/"+TBI_SERVER_ADDRESS_TICK+" * * * * *", tick.TickTokenskyUserAddressUp)
	toolbox.AddTask("tickTokenskyUserAddressUp", tickTokenskyUserAddressUp)
	// Wealth-management processing, daily at midnight.
	financialTick := toolbox.NewTask("financialTick", "0 0 0 * * *", tick.FinancialTick)
	toolbox.AddTask("financialTick", financialTick)
	// Market-quote spider, hourly.
	coinGlobalSoider := toolbox.NewTask("coinGlobalSoider", "0 0 0/1 * * *", tick.CoinGlobalSoider)
	toolbox.AddTask("coinGlobalSoider", coinGlobalSoider)
	// Loan risk-control system, intended every 10 minutes.
	// NOTE(review): this spec has only five fields while every other
	// task here uses six (sec min hour day month week) — confirm
	// "0 0/10 * * * *" was intended.
	borrowWindControlSystem := toolbox.NewTask("borrowWindControlSystem", "0 0/10 * * *", tick.BorrowWindControlSystem)
	toolbox.AddTask("borrowWindControlSystem", borrowWindControlSystem)
	// Start the scheduler.
	toolbox.StartTask()
	fmt.Println("定时器服务开启")
	// Run the market spider once immediately at startup.
	tick.CoinGlobalSoider()
}
|
package webdav
import (
"encoding/xml"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/julienschmidt/httprouter"
"github.com/sanato/sanato-lib/storage"
"net/http"
"time"
)
// propfind handles the WebDAV PROPFIND method: authenticate, stat the
// requested resource (including children when the Depth header is "1"),
// and reply with a 207 Multi-Status XML document.
func (api *API) propfind(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
	authRes, err := api.basicAuth(r)
	if err != nil {
		logrus.Error(err)
		// RFC 7617: the Basic challenge parameter is `realm` with a
		// double-quoted value; the previous `Real='...'` form was
		// malformed and clients may not re-prompt for credentials.
		w.Header().Set("WWW-Authenticate", `Basic realm="WhiteDAV credentials"`)
		http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
		return
	}
	resource := p.ByName("path")
	if resource == "" {
		resource = "/"
	}
	logrus.Info(fmt.Sprintf("api:webdav user:%s op:propfind path:%s", authRes.Username, resource))
	// Depth: 1 means "the resource and its immediate children".
	var children bool
	depth := r.Header.Get("Depth")
	if depth == "1" {
		children = true
	}
	meta, err := api.storageProvider.Stat(resource, children)
	if err != nil {
		if storage.IsNotExistError(err) {
			logrus.Error(err)
			http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
			return
		}
		logrus.Error(err)
		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
		return
	}
	responses := getPropFindFromMeta(meta)
	responsesXML, err := xml.Marshal(&responses)
	if err != nil {
		logrus.Error(err)
		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
		return
	}
	// 207 Multi-Status with a hand-built d:multistatus envelope.
	w.Header().Set("Content-Type", "text/xml; charset=utf-8")
	w.WriteHeader(207)
	w.Write([]byte(`<?xml version="1.0" encoding="UTF-8"?><d:multistatus xmlns:d="DAV:">`))
	w.Write(responsesXML)
	w.Write([]byte(`</d:multistatus>`))
}
// getPropFindFromMeta flattens a resource and its (optional) children
// into the list of PROPFIND response entries, parent first.
func getPropFindFromMeta(meta *storage.MetaData) []ResponseXML {
	responses := make([]ResponseXML, 0, len(meta.Children)+1)
	responses = append(responses, getResponseFromMeta(meta))
	// range over an empty/nil slice is a no-op, so the previous
	// len(meta.Children) > 0 guard was redundant.
	for _, child := range meta.Children {
		responses = append(responses, getResponseFromMeta(child))
	}
	return responses
}
// getResponseFromMeta builds one d:response entry for a resource:
// content length, last-modified (RFC 1123), etag, and content type;
// collections additionally get a d:resourcetype of d:collection and an
// inode/directory content type.
func getResponseFromMeta(meta *storage.MetaData) ResponseXML {
	propList := []PropertyXML{}
	// Fixed typos in the original local names (lasModifiedString,
	// getContentLegnth); behavior is unchanged.
	lastModified := time.Unix(meta.Modified, 0).Format(time.RFC1123)
	getContentLength := PropertyXML{xml.Name{"", "d:getcontentlength"}, "", []byte(fmt.Sprintf("%d", meta.Size))}
	getLastModified := PropertyXML{xml.Name{"", "d:getlastmodified"}, "", []byte(lastModified)}
	getETag := PropertyXML{xml.Name{"", "d:getetag"}, "", []byte(meta.ETag)}
	getContentType := PropertyXML{xml.Name{"", "d:getcontenttype"}, "", []byte(meta.MimeType)}
	if meta.IsCol {
		getResourceType := PropertyXML{xml.Name{"", "d:resourcetype"}, "", []byte("<d:collection/>")}
		getContentType.InnerXML = []byte("inode/directory")
		propList = append(propList, getResourceType)
	}
	propList = append(propList, getContentLength, getLastModified, getETag, getContentType)
	propStat := PropStatXML{
		Prop: propList,
		Status: "HTTP/1.1 200 OK",
	}
	return ResponseXML{
		Href: "/webdav" + meta.Path,
		Propstat: []PropStatXML{propStat},
	}
}
// ResponseXML is a single d:response element inside the d:multistatus
// PROPFIND body; see
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response.
type ResponseXML struct {
	XMLName xml.Name `xml:"d:response"`
	Href string `xml:"d:href"`
	Propstat []PropStatXML `xml:"d:propstat"`
	Status string `xml:"d:status,omitempty"`
	Error *ErrorXML `xml:"d:error"`
	ResponseDescription string `xml:"d:responsedescription,omitempty"`
}

// PropStatXML groups properties that share one status code.
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
type PropStatXML struct {
	// Prop requires DAV: to be the default namespace in the enclosing
	// XML. This is due to the standard encoding/xml package currently
	// not honoring namespace declarations inside a xmltag with a
	// parent element for anonymous slice elements.
	// Use of multistatusWriter takes care of this.
	Prop []PropertyXML `xml:"d:prop>_ignored_"`
	Status string `xml:"d:status"`
	Error *ErrorXML `xml:"d:error"`
	ResponseDescription string `xml:"d:responsedescription,omitempty"`
}

// PropertyXML represents a single DAV resource property as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties
type PropertyXML struct {
	// XMLName is the fully qualified name that identifies this property.
	XMLName xml.Name
	// Lang is an optional xml:lang attribute.
	Lang string `xml:"xml:lang,attr,omitempty"`
	// InnerXML contains the XML representation of the property value.
	// See http://www.webdav.org/specs/rfc4918.html#property_values
	//
	// Property values of complex type or mixed-content must have fully
	// expanded XML namespaces or be self-contained with according
	// XML namespace declarations. They must not rely on any XML
	// namespace declarations within the scope of the XML document,
	// even including the DAV: namespace.
	InnerXML []byte `xml:",innerxml"`
}

// ErrorXML carries a DAV error condition.
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error
type ErrorXML struct {
	XMLName xml.Name `xml:"d:error"`
	InnerXML []byte `xml:",innerxml"`
}
|
package gosqrl
import (
"bytes"
"encoding/base64"
)
var (
	// twoEqBytes is the two-character base64 padding used when
	// re-padding inputs of length ≡ 2 (mod 4).
	twoEqBytes = []byte("==")
)
// B64TruncatedEncode base64 URL encodes data without trailing '='
// padding bytes.
func B64TruncatedEncode(data []byte) []byte {
	// RawURLEncoding is exactly URL-safe base64 without padding, so the
	// previous manual '=' trimming is unnecessary.
	out := make([]byte, base64.RawURLEncoding.EncodedLen(len(data)))
	base64.RawURLEncoding.Encode(out, data)
	return out
}
// B64TruncatedDecode base64 URL decodes input whose trailing '='
// padding has been removed; already-padded input (length ≡ 0 mod 4) is
// decoded as-is.
func B64TruncatedDecode(b64 []byte) ([]byte, error) {
	// Re-pad into a fresh buffer: the previous append(b64, ...) could
	// write '=' bytes into the caller's backing array when the input
	// slice had spare capacity.
	padded := b64
	switch len(b64) % 4 {
	case 2:
		padded = append(append(make([]byte, 0, len(b64)+2), b64...), '=', '=')
	case 3:
		padded = append(append(make([]byte, 0, len(b64)+1), b64...), '=')
	}
	data := make([]byte, base64.URLEncoding.DecodedLen(len(padded)))
	n, err := base64.URLEncoding.Decode(data, padded)
	return data[:n], err
}
|
package main
import (
"github.com/funkygao/gobench/util"
"testing"
)
// main runs the recover micro-benchmark via testing.Benchmark and
// prints the result.
func main() {
	b := testing.Benchmark(benchmarkRecover)
	util.ShowBenchResult("recover", b)
}
// benchmarkRecover measures the cost of a bare recover() call.
// NOTE(review): recover returns nil unless called directly from a
// deferred function, so this measures only the no-op call overhead —
// confirm that is the intent.
func benchmarkRecover(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		recover()
	}
}
|
package nsp
import (
"context"
"errors"
"fmt"
"net"
"strconv"
"sync"
"time"
"github.com/golang/protobuf/ptypes/empty"
nspAPI "github.com/nordix/meridio/api/nsp"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
)
// NetworkServicePlateformService is the gRPC NSP service: it tracks
// registered targets and streams register/unregister events to Monitor
// subscribers.
type NetworkServicePlateformService struct {
	Listener net.Listener
	Server *grpc.Server
	Port int
	// targets maps target IP -> *nspAPI.Target.
	targets sync.Map // map[string]*nspAPI.Target
	// monitorStreams maps an open Monitor stream -> alive flag.
	monitorStreams sync.Map // map[nspAPI.NetworkServicePlateformService_MonitorServer]bool
}

// targetExists reports whether a target with the same IP is registered.
func (nsps *NetworkServicePlateformService) targetExists(target *nspAPI.Target) bool {
	_, exists := nsps.targets.Load(target.Ip)
	return exists
}
// addTarget registers target keyed by IP, marking it Status_Register
// and broadcasting the event to monitor streams.
// NOTE(review): monitors are notified before the target is stored, so a
// concurrent GetTargets may briefly miss an announced target — confirm
// this ordering is intended.
func (nsps *NetworkServicePlateformService) addTarget(target *nspAPI.Target) error {
	if nsps.targetExists(target) {
		return errors.New("target already exists")
	}
	logrus.Infof("Add Target: %v", target)
	target.Status = nspAPI.Status_Register
	nsps.notifyMonitorStreams(target)
	nsps.targets.Store(target.Ip, target)
	return nil
}

// removeTarget unregisters the stored target with the same IP, marking
// it Status_Unregister and broadcasting the event before deletion.
func (nsps *NetworkServicePlateformService) removeTarget(target *nspAPI.Target) error {
	t, exists := nsps.targets.Load(target.Ip)
	if !exists {
		return errors.New("target is not existing")
	}
	// Use the stored instance rather than the caller's copy.
	target = t.(*nspAPI.Target)
	logrus.Infof("Remove Target: %v", target)
	target.Status = nspAPI.Status_Unregister
	nsps.notifyMonitorStreams(target)
	nsps.targets.Delete(target.Ip)
	return nil
}
// getTargetSlice snapshots the registered targets into a slice.
func (nsps *NetworkServicePlateformService) getTargetSlice() []*nspAPI.Target {
	targets := []*nspAPI.Target{}
	nsps.targets.Range(func(key interface{}, value interface{}) bool {
		targets = append(targets, value.(*nspAPI.Target))
		return true
	})
	return targets
}

// notifyMonitorStreams sends target to every registered monitor stream.
func (nsps *NetworkServicePlateformService) notifyMonitorStreams(target *nspAPI.Target) {
	nsps.monitorStreams.Range(func(key interface{}, value interface{}) bool {
		nsps.notifyMonitorStream(key.(nspAPI.NetworkServicePlateformService_MonitorServer), target)
		return true
	})
}

// notifyMonitorStream sends target on one stream, skipping streams
// already marked dead and marking a stream dead when Send fails (it is
// reaped by Monitor).
func (nsps *NetworkServicePlateformService) notifyMonitorStream(stream nspAPI.NetworkServicePlateformService_MonitorServer, target *nspAPI.Target) {
	if !nsps.streamAlive(stream) {
		return
	}
	err := stream.Send(target)
	if err != nil {
		nsps.monitorStreams.Store(stream, false)
	}
}
// Register adds the target to the platform and notifies monitors.
func (nsps *NetworkServicePlateformService) Register(ctx context.Context, target *nspAPI.Target) (*empty.Empty, error) {
	return &empty.Empty{}, nsps.addTarget(target)
}

// Unregister removes the target from the platform and notifies monitors.
func (nsps *NetworkServicePlateformService) Unregister(ctx context.Context, target *nspAPI.Target) (*empty.Empty, error) {
	return &empty.Empty{}, nsps.removeTarget(target)
}
// Monitor streams the current target set to the subscriber, then keeps
// the stream registered until a Send fails, at which point the stream
// is reaped.
// NOTE(review): liveness is polled once per second rather than tied to
// the stream's context cancellation — confirm this is acceptable.
func (nsps *NetworkServicePlateformService) Monitor(empty *empty.Empty, stream nspAPI.NetworkServicePlateformService_MonitorServer) error {
	nsps.monitorStreams.Store(stream, true)
	// Replay the current target set to the new subscriber.
	nsps.targets.Range(func(key interface{}, value interface{}) bool {
		nsps.notifyMonitorStream(stream, value.(*nspAPI.Target))
		return true
	})
	for nsps.streamAlive(stream) {
		time.Sleep(1 * time.Second)
	}
	nsps.monitorStreams.Delete(stream)
	return nil
}

// streamAlive reports whether stream is registered and still marked alive.
func (nsps *NetworkServicePlateformService) streamAlive(stream nspAPI.NetworkServicePlateformService_MonitorServer) bool {
	value, ok := nsps.monitorStreams.Load(stream)
	return ok && value.(bool)
}
// GetTargets returns a snapshot of all registered targets.
func (nsps *NetworkServicePlateformService) GetTargets(ctx context.Context, target *empty.Empty) (*nspAPI.GetTargetsResponse, error) {
	response := &nspAPI.GetTargetsResponse{
		Targets: nsps.getTargetSlice(),
	}
	return response, nil
}

// Start serves gRPC on the configured listener; it blocks until the
// server stops and logs any serve error.
func (nsps *NetworkServicePlateformService) Start() {
	logrus.Infof("NSP Service: Start the service (port: %v)", nsps.Port)
	if err := nsps.Server.Serve(nsps.Listener); err != nil {
		logrus.Errorf("NSP Service: failed to serve: %v", err)
	}
}
// NewNetworkServicePlateformService listens on TCP port `port` on the
// IPv6 wildcard address, registers the NSP service on a fresh gRPC
// server, and returns it; call Start to begin serving.
func NewNetworkServicePlateformService(port int) (*NetworkServicePlateformService, error) {
	// NOTE(review): Sprintf("%s", strconv.Itoa(port)) is roundabout
	// (fmt.Sprintf("[::]:%d", port) would do), but simplifying would
	// orphan the fmt or strconv import; left as-is.
	lis, err := net.Listen("tcp", fmt.Sprintf("[::]:%s", strconv.Itoa(port)))
	if err != nil {
		logrus.Errorf("NSP Service: failed to listen: %v", err)
		return nil, err
	}
	s := grpc.NewServer()
	networkServicePlateformService := &NetworkServicePlateformService{
		Listener: lis,
		Server: s,
		Port: port,
	}
	nspAPI.RegisterNetworkServicePlateformServiceServer(s, networkServicePlateformService)
	return networkServicePlateformService, nil
}
|
package cmd
import (
"fmt"
"strings"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// newXFlagsCmd builds the `xflags` subcommand, which prints the linker
// X-flags for the current build metadata.
func newXFlagsCmd() (cmd *cobra.Command) {
	c := &cobra.Command{
		Use: "xflags",
		Short: cmdXFlagsShort,
		Long: cmdXFlagsLong,
		Example: cmdXFlagsExample,
		Args: cobra.NoArgs,
		Run: cmdXFlagsRun,
		DisableAutoGenTag: true,
	}
	c.Flags().StringP("build", "b", "0", "Sets the BuildNumber flag value")
	c.Flags().StringP("extra", "e", "", "Sets the BuildExtra flag value")
	return c
}
// cmdXFlagsRun resolves the build/extra flag values, assembles the
// build metadata, and prints the space-joined X-flags. Any failure is
// fatal.
func cmdXFlagsRun(cobraCmd *cobra.Command, _ []string) {
	flags := cobraCmd.Flags()
	build, err := flags.GetString("build")
	if err != nil {
		log.Fatal(err)
	}
	extra, err := flags.GetString("extra")
	if err != nil {
		log.Fatal(err)
	}
	meta, err := getBuild("", build, extra)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(strings.Join(meta.XFlags(), " "))
}
|
package bridge
import (
"bytes"
"github.com/sujit-baniya/smpp/pdu"
"github.com/sujit-baniya/smpp/sms"
)
// ToDeliverSM converts an SMS deliver TPDU into an SMPP DeliverSM PDU,
// carrying over addressing, ESM class flags, and the (possibly
// UDH-prefixed) user data.
func ToDeliverSM(deliver *sms.Deliver) (sm *pdu.DeliverSM, err error) {
	var message pdu.ShortMessage
	if deliver.Flags.UDHIndicator {
		// A non-nil UDHeader makes ReadFrom parse a user data header.
		message.UDHeader = pdu.UserDataHeader{}
	}
	if _, err = message.ReadFrom(bytes.NewReader(deliver.UserData)); err != nil {
		// Previously a partially built PDU was returned alongside the
		// error; return nil so callers cannot use a half-parsed message.
		return nil, err
	}
	sm = &pdu.DeliverSM{
		SourceAddr: pdu.Address{
			TON: deliver.OriginatingAddress.TON,
			NPI: deliver.OriginatingAddress.NPI,
			No: deliver.OriginatingAddress.No,
		},
		ESMClass: pdu.ESMClass{
			MessageType: deliver.Flags.MessageType.Type(),
			UDHIndicator: deliver.Flags.UDHIndicator,
			ReplyPath: deliver.Flags.ReplyPath,
		},
		ProtocolID: deliver.ProtocolIdentifier,
		Message: message,
	}
	return
}
|
package graphql
import (
"context"
"errors"
"github.com/nomkhonwaan/myblog/pkg/auth"
"github.com/samsarahq/thunder/graphql"
"net/http"
)
// AuthorizedID is a context.Context key where an authorized ID value stored
// NOTE(review): a plain string context key risks collisions with other
// packages; an unexported struct key is the usual fix, but this is
// exported API so it is left unchanged.
const AuthorizedID = "authID"

var (
	// protectedResources lists the top-level GraphQL fields that require
	// an authenticated caller.
	protectedResources = map[string]bool{
		"myPosts": true,
		"createPost": true,
		"updatePostTitle": true,
		"updatePostStatus": true,
		"updatePostContent": true,
		"updatePostCategories": true,
		"updatePostTags": true,
		"updatePostFeaturedImage": true,
		"updatePostAttachments": true,
	}
)
// VerifyAuthorityMiddleware looks on the request header for the
// authorization token: queries selecting a protected resource without
// an authorized user are rejected with 401 text; otherwise the
// authorized ID is stored on the context for downstream resolvers.
func VerifyAuthorityMiddleware(input *graphql.ComputationInput, next graphql.MiddlewareNextFunc) *graphql.ComputationOutput {
	authID := auth.GetAuthorizedUserID(input.Ctx)
	for _, sel := range input.ParsedQuery.Selections {
		if !protectedResources[sel.Name] {
			continue
		}
		if authID == nil {
			return &graphql.ComputationOutput{
				Error: errors.New(http.StatusText(http.StatusUnauthorized)),
			}
		}
	}
	input.Ctx = context.WithValue(input.Ctx, AuthorizedID, authID)
	return next(input)
}
|
package utils
import (
"app-auth/db"
"context"
"fmt"
"log"
"github.com/mongodb/mongo-go-driver/bson"
)
// Package-level values — presumably kept addressable for APIs that take
// *string / *bool pointers; confirm with callers.
var AndEmptyString = ""
var AndTrue = true
var AndFalse = false
// RemoveIndex returns s without the element at index.
// The previous implementation appended in place, overwriting elements
// of the caller's backing array; this version builds a fresh slice so
// the input is left untouched.
func RemoveIndex(s []string, index int) []string {
	out := make([]string, 0, len(s)-1)
	out = append(out, s[:index]...)
	return append(out, s[index+1:]...)
}
// FindIndex returns the position of element in s, or -1 when absent.
func FindIndex(s []string, element string) int {
	for i := range s {
		if s[i] == element {
			return i
		}
	}
	return -1
}
// DestroyRecord deletes the record with the given id from the team
// collection, reporting whether a delete was performed. An empty id is
// a no-op. Database errors are now returned to the caller instead of
// being logged and swallowed.
// NOTE(review): collectionType is unused — the delete always targets
// TeamCollection; confirm the intent.
func DestroyRecord(collectionType, recordId string) (bool, error) {
	if recordId == "" {
		return false, nil
	}
	filter := bson.D{{"id", recordId}}
	res, err := db.TeamCollection.DeleteOne(context.Background(), filter)
	if err != nil {
		log.Println(err)
		// The signature always promised an error; surface it.
		return false, err
	}
	fmt.Println(res)
	return true, nil
}
|
package middlewares
import (
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
)
// SetLogger installs the request-logging middleware with a bracketed
// single-line log format.
func SetLogger(e *echo.Echo) {
	cfg := middleware.LoggerConfig{
		Format: `[${time_rfc3339}][remote_ip:${remote_ip}][status: ${status}][method: ${method}][url: ${host}${path}]` + "\n",
	}
	e.Use(middleware.LoggerWithConfig(cfg))
}
|
package shamir
import (
"math/rand"
"reflect"
"github.com/renproject/secp256k1"
"github.com/renproject/surge"
)
// VShareSize is the size of a verifiable share in bytes: the share plus
// its marshalled decommitment field element.
const VShareSize = ShareSize + secp256k1.FnSizeMarshalled

// VerifiableShares is an alias for a slice of VerifiableShare(s).
type VerifiableShares []VerifiableShare

// SizeHint implements the surge.SizeHinter interface.
// The encoding is a u32 length prefix followed by fixed-size shares.
func (vshares VerifiableShares) SizeHint() int { return surge.SizeHintU32 + VShareSize*len(vshares) }
// Marshal implements the surge.Marshaler interface.
// Wire format: u32 element count, then each share in order.
func (vshares VerifiableShares) Marshal(buf []byte, rem int) ([]byte, int, error) {
	buf, rem, err := surge.MarshalU32(uint32(len(vshares)), buf, rem)
	if err != nil {
		return buf, rem, err
	}
	for i := range vshares {
		buf, rem, err = vshares[i].Marshal(buf, rem)
		if err != nil {
			return buf, rem, err
		}
	}
	return buf, rem, nil
}

// Unmarshal implements the surge.Unmarshaler interface.
func (vshares *VerifiableShares) Unmarshal(buf []byte, rem int) ([]byte, int, error) {
	var l uint32
	// UnmarshalLen validates the length prefix against the remaining
	// memory quota using the per-element size.
	buf, rem, err := surge.UnmarshalLen(&l, VShareSize, buf, rem)
	if err != nil {
		return buf, rem, err
	}
	// Reuse the existing backing array when present; otherwise allocate
	// one with the right capacity.
	if *vshares == nil {
		*vshares = make(VerifiableShares, 0, l)
	}
	*vshares = (*vshares)[:0]
	for i := uint32(0); i < l; i++ {
		*vshares = append(*vshares, VerifiableShare{})
		buf, rem, err = (*vshares)[i].Unmarshal(buf, rem)
		if err != nil {
			return buf, rem, err
		}
	}
	return buf, rem, nil
}
// Shares returns the underlying (unverified) shares.
func (vshares VerifiableShares) Shares() Shares {
	out := make(Shares, len(vshares))
	for i := range vshares {
		out[i] = vshares[i].Share
	}
	return out
}
// A VerifiableShare is a Share but with additional information that allows it
// to be verified as correct for a given commitment to a sharing.
type VerifiableShare struct {
	Share Share // The underlying Shamir share.
	Decommitment secp256k1.Fn // Decommitment (blinding) value for this share.
}
// Generate implements the quick.Generator interface. It returns a share built
// from uniformly random field elements; both parameters are ignored since the
// type is fixed-size.
func (vs VerifiableShare) Generate(_ *rand.Rand, _ int) reflect.Value {
	return reflect.ValueOf(
		NewVerifiableShare(
			NewShare(secp256k1.RandomFn(), secp256k1.RandomFn()),
			secp256k1.RandomFn(),
		),
	)
}
// NewVerifiableShare constructs a new VerifiableShare from the given Share and
// decommitment value. This function allows the manual construction of a
// VerifiableShare, and should be only used if such fine grained control is
// needed. In general, shares should be constructed by using a VSSharer.
func NewVerifiableShare(share Share, r secp256k1.Fn) VerifiableShare {
	return VerifiableShare{Share: share, Decommitment: r}
}
// Eq returns true if the two verifiable shares are equal, and false otherwise.
func (vs *VerifiableShare) Eq(other *VerifiableShare) bool {
	if !vs.Share.Eq(&other.Share) {
		return false
	}
	return vs.Decommitment.Eq(&other.Decommitment)
}
// SizeHint implements the surge.SizeHinter interface.
func (vs VerifiableShare) SizeHint() int { return vs.Share.SizeHint() + vs.Decommitment.SizeHint() }
// Marshal implements the surge.Marshaler interface. The share is written
// first, followed by the decommitment value.
func (vs VerifiableShare) Marshal(buf []byte, rem int) ([]byte, int, error) {
	buf, rem, err := vs.Share.Marshal(buf, rem)
	if err != nil {
		return buf, rem, err
	}
	buf, rem, err = vs.Decommitment.Marshal(buf, rem)
	return buf, rem, err
}
// Unmarshal implements the surge.Unmarshaler interface. It reads the share
// and then the decommitment value, mirroring Marshal.
func (vs *VerifiableShare) Unmarshal(buf []byte, rem int) ([]byte, int, error) {
	buf, rem, err := vs.Share.Unmarshal(buf, rem)
	if err != nil {
		return buf, rem, err
	}
	return vs.Decommitment.Unmarshal(buf, rem)
}
// Add computes the addition of the two input shares and stores the result in
// the caller. This is defined as adding the respective normal (unverifiable)
// shares and adding the respective decommitment values. In general, the
// resulting share will be a share with secret value equal to the sum of the
// two secrets corresponding to the (respective sharings of the) input shares.
func (vs *VerifiableShare) Add(a, b *VerifiableShare) {
	vs.Share.Add(&a.Share, &b.Share)
	vs.Decommitment.Add(&a.Decommitment, &b.Decommitment)
}
// AddConstant computes the addition of the input share and the constant and
// stores the result in the caller. This is defined as adding the constant to
// the normal (unverifiable) share and leaving the decommitment value
// unchanged. In general, the resulting share will be a share with secret value
// equal to the sum of the secret corresponding to the input share and the
// constant.
func (vs *VerifiableShare) AddConstant(other *VerifiableShare, c *secp256k1.Fn) {
	// The decommitment is carried over unchanged; only the share changes.
	vs.Decommitment = other.Decommitment
	vs.Share.AddConstant(&other.Share, c)
}
// Scale computes the scaling of the input share by given scale factor and
// stores the result in the caller. This is defined as scaling the normal
// (unverifiable) share by the scaling factor and multiplying the decommitment
// value also by the scaling factor. In general, the resulting share will be a
// share with secret value equal to the scale factor multiplied by the secret
// corresponding to the (sharing of the) input share.
func (vs *VerifiableShare) Scale(other *VerifiableShare, scale *secp256k1.Fn) {
	vs.Share.Scale(&other.Share, scale)
	vs.Decommitment.Mul(&other.Decommitment, scale)
}
// A Commitment is used to verify that a sharing has been performed correctly.
// It holds one curve point per coefficient of the sharing polynomial.
type Commitment []secp256k1.Point
// Generate implements the quick.Generator interface. It returns a commitment
// of random length (bounded by size) filled with random curve points.
func (c Commitment) Generate(rand *rand.Rand, size int) reflect.Value {
	// rand.Intn panics for non-positive arguments, so clamp; a non-positive
	// size yields an empty commitment.
	n := 0
	if size > 0 {
		n = rand.Intn(size)
	}
	com := make(Commitment, n)
	for i := range com {
		com[i] = secp256k1.RandomPoint()
	}
	return reflect.ValueOf(com)
}
// Eq returns true if the two commitments are equal (each curve point is
// equal), and false otherwise.
func (c Commitment) Eq(other Commitment) bool {
	if len(c) != len(other) {
		return false
	}
	for i := 0; i < len(c); i++ {
		if !c[i].Eq(&other[i]) {
			return false
		}
	}
	return true
}
// Append a point to the commitment. This grows the commitment (and hence the
// implied reconstruction threshold) by one.
func (c *Commitment) Append(p secp256k1.Point) {
	*c = append(*c, p)
}
// Len returns the number of curve points in the commitment. This is equal to
// the reconstruction threshold of the associated verifiable sharing.
func (c Commitment) Len() int {
	return len(c)
}
// Set the calling commitment to be equal to the given commitment.
func (c *Commitment) Set(other Commitment) {
	// Reallocate only when the receiver is too short to hold the points.
	if len(*c) < len(other) {
		*c = NewCommitmentWithCapacity(len(other))
	}
	// Match lengths, then copy pointwise.
	*c = (*c)[:len(other)]
	for i := range *c {
		(*c)[i] = other[i]
	}
}
// SizeHint implements the surge.SizeHinter interface. The hint is a u32
// length prefix followed by the fixed-size marshalled points.
func (c Commitment) SizeHint() int {
	return surge.SizeHintU32 + secp256k1.PointSizeMarshalled*len(c)
}
// Marshal implements the surge.Marshaler interface.
func (c Commitment) Marshal(buf []byte, rem int) ([]byte, int, error) {
	// Length prefix first, then each curve point in order.
	buf, rem, err := surge.MarshalU32(uint32(len(c)), buf, rem)
	if err != nil {
		return buf, rem, err
	}
	for i := range c {
		buf, rem, err = c[i].Marshal(buf, rem)
		if err != nil {
			return buf, rem, err
		}
	}
	return buf, rem, nil
}
// Unmarshal implements the surge.Unmarshaler interface.
func (c *Commitment) Unmarshal(buf []byte, rem int) ([]byte, int, error) {
	var l uint32
	// Read the u32 length prefix, checked against the remaining memory quota
	// using the per-element size.
	buf, rem, err := surge.UnmarshalLen(&l, secp256k1.PointSize, buf, rem)
	if err != nil {
		return buf, rem, err
	}
	if *c == nil {
		*c = make([]secp256k1.Point, 0, l)
	}
	// Truncate (keeping capacity) and decode the points one at a time.
	*c = (*c)[:0]
	for i := uint32(0); i < l; i++ {
		*c = append(*c, secp256k1.Point{})
		buf, rem, err = (*c)[i].Unmarshal(buf, rem)
		if err != nil {
			return buf, rem, err
		}
	}
	return buf, rem, nil
}
// NewCommitmentWithCapacity creates a new Commitment with the given capacity.
// This capacity represents the maximum reconstruction threshold, k, that this
// commitment can be used for. The returned commitment has length zero.
func NewCommitmentWithCapacity(k int) Commitment {
	return make(Commitment, 0, k)
}
// Add takes two input commitments and stores in the caller the commitment that
// represents the addition of these two commitments. That is, the new
// commitment can be used to verify the correctness of the sharing defined by
// adding the two corresponding sharings for the input commitments. For
// example, if `a_i` is a valid share for the commitment `a`, and `b_i` is a
// valid share for the commitment `b`, then `a_i + b_i` will be a valid share
// for the newly constructed commitment.
//
// Panics: If the destination commitment does not have capacity at least as big
// as the greater of the capacities of the two inputs, then this function will
// panic.
func (c *Commitment) Add(a, b Commitment) {
	var smaller, larger []secp256k1.Point
	if len(a) > len(b) {
		smaller, larger = b, a
	} else {
		smaller, larger = a, b
	}
	// Resize the receiver to the longer input (panics on insufficient
	// capacity, see function comment).
	*c = (*c)[:len(larger)]
	// Pointwise sum where both inputs have coefficients...
	for i := range smaller {
		(*c)[i].Add(&smaller[i], &larger[i])
	}
	// ...and the longer input's tail is copied through unchanged.
	copy((*c)[len(smaller):], larger[len(smaller):])
}
// AddConstant takes an input commitment and a constant and stores in the
// caller the commitment that represents the addition of this commitment and
// the constant. That is, the new commitment can be used to verify the
// correctness of the sharing defined by adding the constant to the
// corresponding sharing for the input commitment. For example, if `a_i` is a
// valid share for the commitment `a`, and `c` is a constant, then
// `a_i.AddConstant(c)` will be a valid share for the newly constructed
// commitment.
//
// Panics: If the given commitment is empty, indexing its first point panics.
func (c *Commitment) AddConstant(other Commitment, constant *secp256k1.Fn) {
	c.Set(other)
	// Only the constant-term coefficient changes: add g^constant to it.
	point := secp256k1.Point{}
	point.BaseExp(constant)
	(*c)[0].Add(&(*c)[0], &point)
}
// Scale takes an input commitment and stores in the caller the commitment that
// represents the scaled input commitment. That is, the new commitment can be
// used to verify the correctness of the sharing defined by scaling the
// original sharing. For example, if `a_i` is a valid share for the commitment
// `other`, then `scale * a_i` will be a valid sharing for the newly
// constructed commitment.
//
// Panics: If the destination commitment does not have capacity at least as big
// as the input commitment, then this function will panic.
func (c *Commitment) Scale(other Commitment, scale *secp256k1.Fn) {
	*c = (*c)[:len(other)]
	// Scale every coefficient point by the same factor.
	for i := range *c {
		(*c)[i].Scale(&other[i], scale)
	}
}
// Evaluates the sharing polynomial at the given index "in the exponent",
// using Horner's rule starting from the highest-order coefficient.
// Panics if the commitment is empty.
func (c *Commitment) evaluate(eval *secp256k1.Point, index *secp256k1.Fn) {
	*eval = (*c)[len(*c)-1]
	for i := len(*c) - 2; i >= 0; i-- {
		eval.Scale(eval, index)
		eval.Add(eval, &(*c)[i])
	}
}
// IsValid returns true when the given verifiable share is valid with regard to
// the given commitment, and false otherwise.
func IsValid(h secp256k1.Point, c *Commitment, vshare *VerifiableShare) bool {
	var gPow, hPow, eval secp256k1.Point
	// Left-hand side: g^value * h^decommitment.
	gPow.BaseExp(&vshare.Share.Value)
	hPow.Scale(&h, &vshare.Decommitment)
	gPow.Add(&gPow, &hPow)
	// Right-hand side: the committed polynomial evaluated at the share index.
	c.evaluate(&eval, &vshare.Share.Index)
	return gPow.Eq(&eval)
}
// VShareSecret creates verifiable Shamir shares for the given secret at the
// given threshold, and stores the shares and the commitment in the given
// destinations. In the returned Shares, there will be one share for each index
// in the indices that were used to construct the Sharer.
//
// Panics: This function will panic if the destination shares slice has a
// capacity less than n (the number of indices), or if the destination
// commitment has a capacity less than k.
func VShareSecret(
	vshares *VerifiableShares,
	c *Commitment,
	indices []secp256k1.Fn,
	h secp256k1.Point,
	secret secp256k1.Fn,
	k int,
) error {
	n := len(indices)
	shares := make(Shares, n)
	coeffs := make([]secp256k1.Fn, k)
	err := ShareAndGetCoeffs(&shares, coeffs, indices, secret, k)
	if err != nil {
		return err
	}
	// coeffs now holds the coefficients of the sharing polynomial; commit to
	// each of them in the exponent (base g).
	*c = (*c)[:k]
	for i, coeff := range coeffs {
		(*c)[i].BaseExp(&coeff)
	}
	// Re-randomize coeffs to define the decommitment polynomial, and evaluate
	// it at each index to obtain the per-share decommitment values.
	setRandomCoeffs(coeffs, secp256k1.RandomFn(), k)
	for i, ind := range indices {
		(*vshares)[i].Share = shares[i]
		polyEval(&(*vshares)[i].Decommitment, &ind, coeffs)
	}
	// Finish the computation of the commitments
	var hPow secp256k1.Point
	for i, coeff := range coeffs {
		hPow.Scale(&h, &coeff)
		(*c)[i].Add(&(*c)[i], &hPow)
	}
	return nil
}
|
package controllers
import "lenslocked.com/views"
// NewStatic creates a struct with the static pages, each rendered with the
// shared "bootstrap" layout.
func NewStatic() *Static {
	pages := Static{
		HomeView:     views.NewFiles("bootstrap", "static/home"),
		ContactView:  views.NewFiles("bootstrap", "static/contact"),
		FAQView:      views.NewFiles("bootstrap", "static/faq"),
		NotFoundView: views.NewFiles("bootstrap", "static/notfound"),
	}
	return &pages
}
// Static contains the views for the static pages.
type Static struct {
	HomeView *views.View // landing page
	ContactView *views.View // contact page
	FAQView *views.View // frequently asked questions
	NotFoundView *views.View // 404 page
}
|
package api
import (
"net/http"
"reflect"
"strings"
"github.com/labstack/echo"
)
// OKRequest writes a minimal JSON success response: {"stat": "OK"}.
func OKRequest(c echo.Context) error {
	payload := map[string]string{
		"stat": "OK",
	}
	return c.JSON(http.StatusOK, payload)
}
// OKRequestWith writes a JSON success response carrying the given object in
// addition to stat "OK". The object is keyed by its (dereferenced) type name
// with the first letter lower-cased, e.g. *UserProfile -> "userProfile".
// Anonymous types have an empty name and are emitted under the "" key.
func OKRequestWith(c echo.Context, o interface{}) error {
	m := map[string]interface{}{
		"stat": "OK",
	}
	on := reflect.Indirect(reflect.ValueOf(o)).Type().Name()
	// Lower-case the first rune only. Working on runes (rather than slicing
	// the first byte) keeps multi-byte type names valid UTF-8.
	if r := []rune(on); len(r) > 0 {
		on = strings.ToLower(string(r[:1])) + string(r[1:])
	}
	m[on] = o
	return c.JSON(http.StatusOK, m)
}
// BadRequest writes a 400 JSON error response carrying the given message.
func BadRequest(c echo.Context, message string) error {
	return c.JSON(http.StatusBadRequest, map[string]interface{}{
		"stat":    "ERROR",
		"message": message,
	})
}
|
package main
import (
"fmt"
"strings"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
"github.com/yanyiwu/gojieba"
)
// db is the shared gorm database handle used by the store-lookup helpers.
var db *gorm.DB
// 到資料庫撈全部資料 ("fetch all records") returns every StoreModel row.
func 到資料庫撈全部資料() (
	店家資料 []StoreModel, err error,
) {
	店家資料 = []StoreModel{}
	// Query the database for all rows.
	err = db.New().Find(&店家資料).Error
	if err != nil {
		return
	}
	return
}
// 到資料庫找店家資料 ("find store record") looks up a single store whose name
// contains the (filtered) query string, and increments its search counter on
// a hit. A missing row is reported via 有資料 == false with a nil error.
// NOTE(review): the column is upper-cased (UPPER(`StoreName`)) but the LIKE
// pattern is not — confirm whether the pattern should also be upper-cased or
// whether the column collation is already case-insensitive.
func 到資料庫找店家資料(店名 string) (
	店家資料 StoreModel,
	有資料 bool, err error,
) {
	店名 = 先過濾店名文字(店名)
	// Query the database for the first matching row.
	err = db.New().Where("UPPER(`StoreName`) LIKE ?", "%"+店名+"%").First(&店家資料).Error
	if err != nil {
		if gorm.IsRecordNotFoundError(err) {
			// "Not found" is not an error for callers; signal via 有資料.
			err = nil
			return
		}
		return
	}
	// Record the hit: bump the search counter and persist it.
	店家資料.Search++
	err = db.Save(&店家資料).Error
	if err != nil {
		return
	}
	有資料 = true
	return
}
// 先過濾店名文字 ("filter store-name text") strips common query phrases such
// as "我想找" / "在哪裡" from the input: it segments the text with jieba and
// drops any segment found in the filter dictionary.
// NOTE(review): a new gojieba instance is created and freed on every call,
// which is expensive — consider hoisting it to package scope.
func 先過濾店名文字(店名 string) string {
	x := gojieba.NewJieba()
	defer x.Free()
	// Dictionary of phrases to filter out (values are unused; it is a set).
	dict := map[string]int{
		"我想找": 0,
		"我想去": 0,
		"我要找": 1,
		"我要去": 1,
		"找": 2,
		"在哪裡": 3,
		"在哪": 4,
	}
	// Register each phrase so jieba segments it as a single word.
	for k := range dict {
		x.AddWord(k)
	}
	words := x.Cut(店名, true)
	// Keep only the segments that are not in the filter dictionary.
	s := []string{}
	for _, word := range words {
		if _, isInDict := dict[word]; !isInDict {
			s = append(s, word)
		}
	}
	店名 = strings.Join(s, "")
	// Debug output: segmentation result and the filtered name.
	fmt.Println("精确模式:", strings.Join(words, "/"))
	fmt.Println("過濾後:", 店名)
	return 店名
}
// StoreModel is the gorm model for a store record.
type StoreModel struct {
	No string `json:"id" gorm:"column:No;type:varchar(50);PRIMARY_KEY;NOT NULL;"`
	StoreName string `json:"store_name" gorm:"column:StoreName;type:varchar(50);NOT NULL;"`
	SalesType string `json:"store_type" gorm:"column:SalesType;type:varchar(50);"`
	Price string `json:"price" gorm:"column:Price;type:varchar(50);"`
	Floor string `json:"floor" gorm:"column:Floor;type:varchar(5);NOT NULL"`
	Tel string `json:"tel" gorm:"column:tel;type:varchar(50);"`
	// NOTE(review): the column tag "Business hours" contains a space —
	// confirm it matches the actual schema column name.
	BusinessHours string `json:"work_time" gorm:"column:Business hours;type:varchar(50);"`
	Details string `json:"detail" gorm:"column:Details;type:longtext;"`
	Search int `json:"search" gorm:"column:search;type:int(11);NOT NULL"` // lookup hit counter
}
// TableName returns the database table backing StoreModel.
func (StoreModel) TableName() string {
	return "StoreDetails"
}
|
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package applycache
import (
"strconv"
"strings"
"testing"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/mock"
"github.com/stretchr/testify/require"
)
// TestApplyCache exercises the apply cache's memory-quota eviction: the quota
// is 100 bytes and each entry below is asserted to cost exactly 100 bytes, so
// every successful Set evicts the previously cached entry.
func TestApplyCache(t *testing.T) {
	ctx := mock.NewContext()
	ctx.GetSessionVars().MemQuotaApplyCache = 100
	applyCache, err := NewApplyCache(ctx)
	require.NoError(t, err)
	fields := []*types.FieldType{types.NewFieldType(mysql.TypeLonglong)}
	value := make([]*chunk.List, 3)
	key := make([][]byte, 3)
	// Build three distinct (key, value) pairs, each costing 100 bytes.
	for i := 0; i < 3; i++ {
		value[i] = chunk.NewList(fields, 1, 1)
		srcChunk := chunk.NewChunkWithCapacity(fields, 1)
		srcChunk.AppendInt64(0, int64(i))
		srcRow := srcChunk.GetRow(0)
		value[i].AppendRow(srcRow)
		key[i] = []byte(strings.Repeat(strconv.Itoa(i), 100))
		// TODO: *chunk.List.GetMemTracker().BytesConsumed() is not accurate, fix it later.
		require.Equal(t, int64(100), applyCacheKVMem(key[i], value[i]))
	}
	// key[0] fills the cache and is immediately readable.
	ok, err := applyCache.Set(key[0], value[0])
	require.NoError(t, err)
	require.True(t, ok)
	result, err := applyCache.Get(key[0])
	require.NoError(t, err)
	require.NotNil(t, result)
	// key[1] evicts key[0] (quota would otherwise be exceeded).
	ok, err = applyCache.Set(key[1], value[1])
	require.NoError(t, err)
	require.True(t, ok)
	result, err = applyCache.Get(key[1])
	require.NoError(t, err)
	require.NotNil(t, result)
	// key[2] likewise evicts key[1].
	ok, err = applyCache.Set(key[2], value[2])
	require.NoError(t, err)
	require.True(t, ok)
	result, err = applyCache.Get(key[2])
	require.NoError(t, err)
	require.NotNil(t, result)
	// Both key[0] and key[1] are not in the cache
	result, err = applyCache.Get(key[0])
	require.NoError(t, err)
	require.Nil(t, result)
	result, err = applyCache.Get(key[1])
	require.NoError(t, err)
	require.Nil(t, result)
}
|
package main
import (
"log"
"os"
"text/template"
)
// tpl is the parsed template, loaded once in init.
var tpl *template.Template
// sages is a named person with a short saying.
type sages struct {
	Name string
	Motto string
}
// car describes a vehicle by maker, model, and door count.
type car struct {
	Manufacturer string
	Model string
	Doors int
}
// type items struct {
// Wisdom []sages
// Transport []car
// }
// init parses examp.gohtml; template.Must panics if the file is missing or
// malformed, aborting startup early.
func init() {
	tpl = template.Must(template.ParseFiles("examp.gohtml"))
}
// main renders examp.gohtml to stdout with a small collection of sages and
// cars wrapped in an anonymous struct.
func main() {
	s1 := sages{
		Name:  "Budha",
		Motto: "belief of no beliefs",
	}
	s2 := sages{
		Name:  "Martin Luthur",
		Motto: "Hatred does not heal hatred",
	}
	s3 := sages{
		Name:  "Jesus",
		Motto: "love all",
	}
	c1 := car{
		Manufacturer: "Ford",
		Model:        "F150",
		Doors:        2,
	}
	c2 := car{
		Manufacturer: "Toyota",
		Model:        "Corrolla",
		Doors:        4,
	}
	wisdom := []sages{s1, s2, s3}
	transport := []car{c1, c2}
	data := struct {
		Wisdom    []sages
		Transport []car
	}{
		Wisdom:    wisdom,
		Transport: transport,
	}
	if err := tpl.Execute(os.Stdout, data); err != nil {
		log.Fatalln(err)
	}
}
|
package main
import (
"strings"
)
// CalcElemStats computes and caches elem's Complexity and Difficulty from its
// parents' stats, recursing into parents first. Elements already present in
// g.Finished are skipped; element names are case-insensitive (lower-cased).
// NOTE(review): an element is only added to g.Finished *after* its parents
// are processed, so a cyclic parent chain would recurse without bound —
// confirm the element graph is guaranteed acyclic.
func (g *Guild) CalcElemStats(elem string) {
	elem = strings.ToLower(elem)
	_, exists := g.Finished[elem]
	if exists {
		return
	}
	el, exists := g.Elements[elem]
	if !exists {
		// Unknown element: mark done so it is not revisited.
		g.Finished[elem] = empty{}
		return
	}
	if len(el.Parents) == 0 {
		// Parentless (base) element keeps its zero-value stats.
		g.Finished[elem] = empty{}
		return
	}
	// Make sure every parent's stats are computed before reading them.
	for _, par := range el.Parents {
		g.CalcElemStats(par)
	}
	// unique becomes true when the parents are not all the same element.
	unique := false
	first := el.Parents[0]
	maxDiff := 0
	maxComp := 0
	for _, par := range el.Parents {
		if par != first {
			unique = true
		}
		parEl, exists := g.Elements[par]
		if exists {
			if parEl.Complexity > maxComp {
				maxComp = parEl.Complexity
			}
			if parEl.Difficulty > maxDiff {
				maxDiff = parEl.Difficulty
			}
		}
	}
	// Complexity is one more than the deepest parent; difficulty only grows
	// when distinct parents are combined.
	el.Complexity = maxComp + 1
	el.Difficulty = maxDiff
	if unique {
		el.Difficulty++
	}
	g.Finished[elem] = empty{}
	g.Elements[elem] = el
}
// recalcStats recomputes element stats for every guild: it resets each
// guild's Finished set, pre-seeds it with the starter elements (which keep
// zero stats), then walks all elements.
func recalcStats() {
	for id, gld := range glds {
		gld.Finished = make(map[string]empty)
		for _, elem := range starters {
			gld.Finished[strings.ToLower(elem)] = empty{}
		}
		for _, elem := range gld.Elements {
			gld.CalcElemStats(elem.Name)
		}
		// Write the updated guild back into the map.
		glds[id] = gld
	}
}
|
package main
import (
"fmt"
)
// main demonstrates a buffered channel: two sends complete without any
// receiver, the values are then drained in FIFO order, and finally the
// channel's dynamic type is printed.
func main() {
	ch := make(chan int, 2)
	ch <- 55
	ch <- 66
	fmt.Println(<-ch)
	fmt.Println(<-ch)
	fmt.Printf("%T\t", ch)
}
|
package windows
// tagPOINT mirrors the Win32 POINT structure: a screen coordinate pair.
type tagPOINT struct {
	X int32
	Y int32
}
// tagMSG mirrors the Win32 MSG structure describing a queued window message.
// The field types (HWND, UINT, ...) are aliases declared elsewhere in this
// package.
type tagMSG struct {
	Hwnd HWND
	Message UINT
	WParam WPARAM
	LParam LPARAM
	Time DWORD
	Pt POINT
	LPrivate DWORD
}
// tagCWPSTRUCT mirrors the Win32 CWPSTRUCT passed to WH_CALLWNDPROC hooks.
type tagCWPSTRUCT struct {
	LParam LPARAM
	WParam WPARAM
	Message UINT
	Hwnd HWND
}
// tagKBDLLHOOKSTRUCT mirrors the Win32 KBDLLHOOKSTRUCT passed to low-level
// keyboard hooks.
type tagKBDLLHOOKSTRUCT struct {
	VkCode DWORD
	ScanCode DWORD
	Flags DWORD
	Time DWORD
	DwExtraInfo ULONG_PTR
}
|
package tsing
import (
"errors"
"net/http"
"path/filepath"
"runtime"
"strconv"
"strings"
)
// Event carries the information delivered to an EventHandler.
type Event struct {
	Status int // HTTP status code
	Message error // message (error)
	Source *_Source // origin of the event
	Trace []string // call trace
	ResponseWriter http.ResponseWriter
	Request *http.Request
}
// _Source records where an event originated.
type _Source struct {
	Func string // function name
	File string // file name
	Line int // line number
}
// EventHandler is the callback invoked for every dispatched event.
type EventHandler func(*Event)
// reset re-initializes a pooled Event for a new request/response pair.
func (e *Event) reset(resp http.ResponseWriter, req *http.Request) {
	e.Message = nil
	e.Source = nil
	e.Status = 0
	e.Trace = nil
	e.ResponseWriter = resp
	e.Request = req
}
// handlerErrorEvent reports an error returned by a handler to the configured
// EventHandler, optionally attaching a full call trace.
func (engine *Engine) handlerErrorEvent(resp http.ResponseWriter, req *http.Request, source *_Source, err error) {
	// Nothing to report without an error. Also guard against a nil
	// EventHandler, as the other event dispatchers in this file do, to avoid
	// calling a nil function below.
	if err == nil || engine.Config.EventHandler == nil {
		return
	}
	// Take an event from the pool.
	event := engine.eventPool.Get().(*Event)
	event.reset(resp, req)
	event.Status = 500
	event.Message = err
	event.Source = source
	if engine.Config.EventTrace {
		for skip := 0; ; skip++ {
			funcPtr, file, line, ok := runtime.Caller(skip)
			if !ok {
				break
			}
			// Shorten to a path relative to the configured root.
			if engine.Config.EventShortPath {
				file = strings.TrimPrefix(filepath.Clean(file), filepath.Clean(engine.Config.RootPath))
			}
			event.Trace = append(event.Trace, file+":"+strconv.Itoa(line)+":"+runtime.FuncForPC(funcPtr).Name())
		}
	}
	engine.Config.EventHandler(event)
	// Return the event to the pool.
	engine.eventPool.Put(event)
}
// contextSourceHandler is the event dispatcher triggered by a context's
// Source(); it records precise origin information for the event.
func (engine *Engine) contextSourceHandler(resp http.ResponseWriter, req *http.Request, err error) {
	if err == nil || engine.Config.EventHandler == nil || !engine.Config.EventHandlerError {
		return
	}
	// Take an event from the pool.
	event := engine.eventPool.Get().(*Event)
	event.reset(resp, req)
	event.Status = 500
	event.Message = err
	// If source recording is enabled. (Was engine.engine.Config.EventSource,
	// which is inconsistent with every other dispatcher in this file.)
	if engine.Config.EventSource {
		if funcPtr, file, line, ok := runtime.Caller(2); ok {
			// Use a short path relative to the configured root.
			if engine.Config.EventShortPath {
				file = strings.TrimPrefix(file, engine.Config.RootPath)
			}
			if event.Source != nil {
				event.Source.File = file
				event.Source.Line = line
				event.Source.Func = runtime.FuncForPC(funcPtr).Name()
			} else {
				var source _Source
				source.File = file
				source.Line = line
				source.Func = runtime.FuncForPC(funcPtr).Name()
				event.Source = &source
			}
		}
	}
	// If tracing is enabled, record the whole call stack.
	if engine.Config.EventTrace {
		for skip := 0; ; skip++ {
			funcPtr, file, line, ok := runtime.Caller(skip)
			if !ok {
				break
			}
			// Use a short path relative to the configured root.
			if engine.Config.EventShortPath {
				file = strings.TrimPrefix(file, engine.Config.RootPath)
			}
			event.Trace = append(event.Trace, file+":"+strconv.Itoa(line)+":"+runtime.FuncForPC(funcPtr).Name())
		}
	}
	engine.Config.EventHandler(event)
	engine.eventPool.Put(event)
}
// panicEvent handles a panic recovered from a handler, converting the
// recovered value into an Event and dispatching it.
func (engine *Engine) panicEvent(resp http.ResponseWriter, req *http.Request, err interface{}) {
	if !engine.Config.Recover && engine.Config.EventHandler == nil {
		return
	}
	// Take an event from the pool.
	event := engine.eventPool.Get().(*Event)
	event.reset(resp, req)
	event.Status = 500
	switch t := err.(type) {
	case string:
		event.Message = errors.New(t)
	case error:
		event.Message = t
	default:
		event.Message = errors.New("未知错误消息类型")
	}
	// If source recording is enabled.
	if engine.Config.EventSource {
		funcPtr, file, line, ok := runtime.Caller(3)
		if ok {
			// Use a short path relative to the configured root.
			if engine.Config.EventShortPath {
				file = strings.TrimPrefix(file, engine.Config.RootPath)
			}
			if event.Source != nil {
				event.Source.File = file
				event.Source.Line = line
				event.Source.Func = runtime.FuncForPC(funcPtr).Name()
			} else {
				var source _Source
				source.File = file
				source.Line = line
				source.Func = runtime.FuncForPC(funcPtr).Name()
				event.Source = &source
			}
		}
	}
	// If tracing is enabled, record the whole call stack.
	if engine.Config.EventTrace {
		for skip := 0; ; skip++ {
			_, file, line, ok := runtime.Caller(skip)
			if !ok {
				break
			}
			// Use a short path relative to the configured root.
			if engine.Config.EventShortPath {
				file = strings.TrimPrefix(file, engine.Config.RootPath)
			}
			event.Trace = append(event.Trace, file+":"+strconv.Itoa(line))
		}
	}
	// The guard at the top admits Recover==true with a nil handler, so check
	// again before calling to avoid a nil-function panic inside the panic
	// handler itself.
	if engine.Config.EventHandler != nil {
		engine.Config.EventHandler(event)
	}
	// Return the event to the pool.
	engine.eventPool.Put(event)
}
// notFoundEvent dispatches a 404 Not Found event to the configured handler.
func (engine *Engine) notFoundEvent(resp http.ResponseWriter, req *http.Request) {
	if engine.Config.EventHandler == nil {
		return
	}
	// Take an event from the pool.
	event := engine.eventPool.Get().(*Event)
	event.reset(resp, req)
	event.Status = http.StatusNotFound
	event.Message = errors.New(http.StatusText(http.StatusNotFound))
	engine.Config.EventHandler(event)
	engine.eventPool.Put(event)
}
// methodNotAllowedEvent dispatches a 405 Method Not Allowed event to the
// configured handler.
func (engine *Engine) methodNotAllowedEvent(resp http.ResponseWriter, req *http.Request) {
	if engine.Config.EventHandler == nil {
		return
	}
	// Take an event from the pool.
	event := engine.eventPool.Get().(*Event)
	event.reset(resp, req)
	event.Status = http.StatusMethodNotAllowed
	event.Message = errors.New(http.StatusText(http.StatusMethodNotAllowed))
	engine.Config.EventHandler(event)
	engine.eventPool.Put(event)
}
|
package util
// Response status codes and messages shared by API handlers.
const (
	SUCCESS_CODE = "00" // operation succeeded
	SUCCESS_MESSAGE = "success"
	SERVER_ERROR_CODE = "-91" // internal server error
)
|
package main
import (
"crypto/sha256"
"encoding/base64"
"fmt"
"time"
)
// main prints the URL-safe base64 SHA-256 digest of today's date string.
func main() {
	fmt.Println(Sha2256Days())
}
// Sha2256 returns the SHA-256 digest of data.
func Sha2256(data []byte) [32]byte {
	digest := sha256.Sum256(data)
	return digest
}
// Sha2256Days returns the URL-safe base64 encoding of the SHA-256 digest of
// today's date string at day precision ("20060102").
func Sha2256Days() string {
	digest := Sha2256([]byte(TodayString(3)))
	return Base64E(digest[:])
}
// Base64E encodes data using URL-safe base64 (with padding).
func Base64E(data []byte) string {
	encoded := base64.URLEncoding.EncodeToString(data)
	return encoded
}
// TodayString formats the current time at a precision selected by level:
// 1=year, 2=month, 3=day, 4=hour, 5=minute; any other value yields second
// precision ("20060102150405").
func TodayString(level int) string {
	layouts := map[int]string{
		1: "2006",
		2: "200601",
		3: "20060102",
		4: "2006010215",
		5: "200601021504",
	}
	layout, ok := layouts[level]
	if !ok {
		layout = "20060102150405"
	}
	return time.Now().Format(layout)
}
|
package registry
// Blob describes a piece of registry content by its media type and size.
type Blob struct {
	// MediaType describes the type of the content. All text based formats are
	// encoded as utf-8.
	MediaType string
	// Size in bytes of content.
	Size int64
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.