text
stringlengths 11
4.05M
|
|---|
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
// +build wasm
package wasmclient
import "github.com/iotaledger/wasp/packages/vm/wasmlib"
// hostGetBytes copies the value for (objId, keyId, typeId) into the buffer at
// `value` (up to `size` bytes) and returns the actual value length; a negative
// result indicates the key was not found (see Exists/GetBytes below).
//go:wasm-module wasplib
//export hostGetBytes
func hostGetBytes(objId int32, keyId int32, typeId int32, value *byte, size int32) int32
// hostGetKeyId maps a key to its numeric host-side id; the sign/offset of
// `size` encodes whether the key originated from bytes or from a string
// (see GetKeyIdFromBytes/GetKeyIdFromString below).
//go:wasm-module wasplib
//export hostGetKeyId
func hostGetKeyId(key *byte, size int32) int32
// hostGetObjectId resolves the id of the sub-object keyId/typeId within objId.
//go:wasm-module wasplib
//export hostGetObjectId
func hostGetObjectId(objId int32, keyId int32, typeId int32) int32
// hostSetBytes stores `size` bytes from `value` under (objId, keyId, typeId).
//go:wasm-module wasplib
//export hostSetBytes
func hostSetBytes(objId int32, keyId int32, typeId int32, value *byte, size int32)
// \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\
// implements wasmlib.ScHost interface
// WasmVmHost is a stateless bridge from wasmlib to the host FFI declared above.
type WasmVmHost struct{}
func ConnectWasmHost() {
wasmlib.ConnectHost(WasmVmHost{})
}
// Exists reports whether a value is stored under (objId, keyId, typeId).
func (w WasmVmHost) Exists(objId int32, keyId int32, typeId int32) bool {
	// A size of -1 asks the host only to test for presence; the host answers
	// with a negative size when the key is absent (or on error). This removes
	// the need for a separate hostExists function.
	result := hostGetBytes(objId, keyId, typeId, nil, -1)
	return result >= 0
}
// GetBytes fetches the value stored under (objId, keyId, typeId) from the host.
func (w WasmVmHost) GetBytes(objId int32, keyId int32, typeId int32) []byte {
	// Ask the host for the value length first (size 0 means "length only").
	length := hostGetBytes(objId, keyId, typeId, nil, 0)
	if length <= 0 {
		return []byte(nil)
	}
	// Allocate the destination in Wasm memory and have the host fill it.
	data := make([]byte, length)
	hostGetBytes(objId, keyId, typeId, &data[0], length)
	return data
}
// GetKeyIdFromBytes maps a raw-bytes key to its host-side key id.
func (w WasmVmHost) GetKeyIdFromBytes(bytes []byte) int32 {
	// A negative size tells the host this key came from raw bytes.
	// Taking &bytes[0] of an empty slice would panic, so pass nil then.
	if len(bytes) == 0 {
		return hostGetKeyId(nil, -1)
	}
	return hostGetKeyId(&bytes[0], -int32(len(bytes))-1)
}
// GetKeyIdFromString maps a string key to its host-side key id.
func (w WasmVmHost) GetKeyIdFromString(key string) int32 {
	encoded := []byte(key)
	// A non-negative size tells the host this key came from a string.
	// Taking &encoded[0] of an empty slice would panic, so pass nil then.
	if len(encoded) == 0 {
		return hostGetKeyId(nil, 0)
	}
	return hostGetKeyId(&encoded[0], int32(len(encoded)))
}
// GetObjectId resolves the id of the sub-object identified by keyId/typeId
// inside container objId, delegating straight to the host.
func (w WasmVmHost) GetObjectId(objId int32, keyId int32, typeId int32) int32 {
	return hostGetObjectId(objId, keyId, typeId)
}
// SetBytes stores value under (objId, keyId, typeId) on the host.
func (w WasmVmHost) SetBytes(objId int32, keyId int32, typeId int32, value []byte) {
	size := int32(len(value))
	// Taking &value[0] of an empty slice would panic, so pass nil then.
	var ptr *byte
	if size != 0 {
		ptr = &value[0]
	}
	hostSetBytes(objId, keyId, typeId, ptr, size)
}
|
package main
import (
"fmt"
)
// Celsius is a temperature on the Celsius scale.
type Celsius float64
// Huashi ("华氏", Fahrenheit) is a temperature on the Fahrenheit scale.
type Huashi float64
// main demonstrates Celsius<->Fahrenheit conversion for normal body temperature.
func main() {
	var a Celsius = 37.5
	// BUG FIX: Celsius implements String() as "%g°C", so appending a literal
	// "C" after %v printed the unit twice ("37.5°CC"). %v alone suffices.
	fmt.Printf("%v=%vF\n", a, CToF(a))
	var b Huashi = CToF(a)
	fmt.Printf("%vF=%v\n", b, FToC(b))
	// BUG FIX: terminate the final line with a newline.
	fmt.Printf("Normal Body Temperature is %v\n", a)
}
// CToF converts a Celsius temperature to Fahrenheit.
func CToF(c Celsius) Huashi {
	f := float64(c)*9.0/5.0 + 32.0
	return Huashi(f)
}
// FToC converts a Fahrenheit temperature to Celsius.
func FToC(f Huashi) Celsius {
	c := (float64(f) - 32.0) * 5.0 / 9.0
	return Celsius(c)
}
// String implements fmt.Stringer so %v prints the value with its °C unit.
// Note: formatting c with %v inside this method would recurse into String()
// and overflow the stack; %g formats the underlying float directly.
func (c Celsius) String() string {
	return fmt.Sprintf("%g°C", c)
}
|
package service
import (
"github.com/feng/future/go-kit/agfun/agfun-server/dao"
"github.com/feng/future/go-kit/agfun/agfun-server/entity"
// "github.com/sirupsen/logrus"
"common-utilities/encrypt"
"common-utilities/utilities"
"github.com/feng/future/go-kit/agfun/agfun-server/protocol"
"github.com/feng/future/go-kit/agfun/agfun-server/store"
)
// CreateAccount creates a new user account (创建账户) from the request's
// account name and password, persisting it through the dao layer.
func (app *AppSvc) CreateAccount(req protocol.CreateAccountReq) (protocol.Resp, error) {
	var resp protocol.Resp
	userAccount := entity.UserAccount{
		Account: req.Account,
		Pwd:     req.Pwd,
	}
	// BUG FIX: a persistence failure (duplicate account, DB outage) is an
	// expected runtime error — report it to the caller instead of panicking.
	if err := dao.CreateAccount(&userAccount); err != nil {
		return resp.Failed(err.Error()), err
	}
	return resp.Success("success", protocol.CreateAccountResp{}), nil
}
// Account returns the account information (获取账户信息) belonging to the
// access token carried by the request, as a trimmed-down view.
func (app *AppSvc) Account(req protocol.AccountReq) (protocol.Resp, error) {
	var resp protocol.Resp
	// Resolve the token to a user id; 0 means the token is unknown.
	userID := store.GetUserId(req.Accesstoken)
	if userID == 0 {
		return resp.Failed("no this user"), nil
	}
	myAccount, findErr := dao.AccountById(userID)
	if findErr != nil {
		return resp.Failed("no this user"), findErr
	}
	// Expose only the presentable fields, never the stored password.
	view := protocol.AccountResp{
		UserAccount: entity.UserAccount{
			Name:      myAccount.Name,
			BankCard:  myAccount.BankCard,
			WeChat:    myAccount.WeChat,
			Telephone: myAccount.Telephone,
		},
	}
	return resp.Success("success", view), nil
}
// UpdateAccount updates an existing account (更新账户).
// Not implemented yet: the draft below is kept for reference and the method
// currently panics unconditionally — callers must not invoke it in production.
func (app *AppSvc) UpdateAccount(req protocol.UpdateAccountReq) (protocol.Resp, error) {
	// var resp protocol.UpdateAccountResp
	// var err error
	// userAccount := entity.UserAccount{
	// }
	// if err := dao.UpdateAccount(req.Account, req.); err != nil {
	// code = 11100
	// msg = err.Error()
	// return code, msg
	// }
	// code = 0
	// msg = "success"
	// return code, msg
	panic("todo")
}
//DeleteAccount 删除账户
// func (app *AppSvc) DeleteAccount(account string) (int, string) {
// return 0, ""
// }
// Login authenticates an account/password pair (登录). On success it creates
// and caches a fresh access token for the user and returns it to the caller.
func (app *AppSvc) Login(req protocol.LoginReq) (protocol.Resp, error) {
	var resp protocol.Resp
	var loginResp protocol.LoginResp
	myAccount, err := dao.Account(req.Account)
	if err != nil {
		// BUG FIX: the original panicked with a different, always-nil `err`
		// variable; report the lookup failure to the caller instead.
		return resp.Failed("用户名或密码错误"), err
	}
	// NOTE(review): passwords appear to be compared in plain text here —
	// confirm whether Pwd is hashed upstream; otherwise hash before storing.
	if myAccount.ID == 0 || req.Pwd != myAccount.Pwd {
		return resp.Failed("用户名或密码错误"), nil
	}
	accessToken := encrypt.SHA1(utilities.GetRandomStr(32) + req.Pwd)
	store.CacheUser(accessToken, myAccount.ID)
	loginResp.AccessToken = accessToken
	return resp.Success("success", loginResp), nil
}
|
package utils
import (
"net/url"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestURLPathFullClean verifies that URLPathFullClean returns the cleaned
// path (dot-segments resolved, escapes normalized) plus the query string.
func TestURLPathFullClean(t *testing.T) {
	testCases := []struct {
		name     string
		have     string
		expected string
	}{
		{"ShouldReturnFullPathSingleSlash", "https://example.com/", "/"},
		{"ShouldReturnFullPathSingleSlashWithQuery", "https://example.com/?query=1&alt=2", "/?query=1&alt=2"},
		{"ShouldReturnFullPathNormal", "https://example.com/test", "/test"},
		{"ShouldReturnFullPathNormalWithSlashSuffix", "https://example.com/test/", "/test/"},
		{"ShouldReturnFullPathNormalWithSlashSuffixAndQuery", "https://example.com/test/?query=1&alt=2", "/test/?query=1&alt=2"},
		{"ShouldReturnFullPathWithQuery", "https://example.com/test?query=1&alt=2", "/test?query=1&alt=2"},
		{"ShouldReturnCleanedPath", "https://example.com/five/../test?query=1&alt=2", "/test?query=1&alt=2"},
		{"ShouldReturnCleanedPathEscaped", "https://example.com/five/..%2ftest?query=1&alt=2", "/test?query=1&alt=2"},
		{"ShouldReturnCleanedPathEscapedExtra", "https://example.com/five/..%2ftest?query=1&alt=2", "/test?query=1&alt=2"},
		{"ShouldReturnCleanedPathEscapedExtraSurrounding", "https://example.com/five/%2f..%2f/test?query=1&alt=2", "/test?query=1&alt=2"},
		{"ShouldReturnCleanedPathEscapedPeriods", "https://example.com/five/%2f%2e%2e%2f/test?query=1&alt=2", "/test?query=1&alt=2"},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			parsed, err := url.ParseRequestURI(tc.have)
			require.NoError(t, err)
			assert.Equal(t, tc.expected, URLPathFullClean(parsed))
		})
	}
}
// isURLSafe parses requestURI and reports whether redirecting to it is safe
// for the given protected domain. The parse error is deliberately discarded:
// every caller in this file passes a well-formed constant URI.
func isURLSafe(requestURI string, domain string) bool { //nolint:unparam
	target, _ := url.ParseRequestURI(requestURI)
	return IsURISafeRedirection(target, domain)
}
func TestIsRedirectionSafe_ShouldReturnTrueOnExactDomain(t *testing.T) {
assert.True(t, isURLSafe("https://example.com", "example.com"))
}
// Only https targets are considered safe; http and ftp must be rejected.
func TestIsRedirectionSafe_ShouldReturnFalseOnBadScheme(t *testing.T) {
	cases := []struct {
		uri  string
		want bool
	}{
		{"http://secure.example.com", false},
		{"ftp://secure.example.com", false},
		{"https://secure.example.com", true},
	}
	for _, c := range cases {
		assert.Equal(t, c.want, isURLSafe(c.uri, "example.com"))
	}
}
// Near-miss domains (extra/missing characters) must never be treated as safe.
func TestIsRedirectionSafe_ShouldReturnFalseOnBadDomain(t *testing.T) {
	for _, uri := range []string{
		"https://secure.example.com.c",
		"https://secure.example.comc",
		"https://secure.example.co",
	} {
		assert.False(t, isURLSafe(uri, "example.com"))
	}
}
// An empty domain can never be a suffix match, whatever the input is.
func TestHasDomainSuffix(t *testing.T) {
	for _, input := range []string{"abc", ""} {
		assert.False(t, HasDomainSuffix(input, ""))
	}
}
|
package name
import (
"regexp"
"strings"
)
// reference: https://gist.github.com/stoewer/fbe273b711e6a06315d19552dd4d33e6
// The gist's matchFirstCap uses `(.)([A-Z][a-z]+)`, where `.` matches any
// character, which makes separators such as `.` and `,` match as well.
// `[A-Za-z0-9]` is used instead so special characters are never captured.
var matchFirstCap = regexp.MustCompile(`([A-Za-z0-9])([A-Z][a-z]+)`)
var matchAllCap = regexp.MustCompile(`([a-z0-9])([A-Z])`)

// ToSnakeCase converts a camel/Pascal-case identifier to snake_case.
func ToSnakeCase(str string) string {
	// Insert "_" at word boundaries in two passes, then lowercase everything.
	withBoundaries := matchFirstCap.ReplaceAllString(str, "${1}_${2}")
	withBoundaries = matchAllCap.ReplaceAllString(withBoundaries, "${1}_${2}")
	return strings.ToLower(withBoundaries)
}
// link matches either the leading letter or an underscore-letter pair;
// both mark positions where a capitalized word should start.
var link = regexp.MustCompile(`(^[A-Za-z])|_([A-Za-z])`)

// ToCamelCase converts a snake_case identifier to CamelCase.
func ToCamelCase(str string) string {
	return link.ReplaceAllStringFunc(str, func(match string) string {
		// match is "x" or "_x": drop the underscore, capitalize the letter.
		return strings.ToUpper(strings.TrimPrefix(match, "_"))
	})
}
|
package main
import (
"fmt"
"io/ioutil"
"math"
"strconv"
"strings"
)
// main reads the two wire descriptions from input.txt, traces both wires,
// finds their crossings and prints the answers to both puzzle parts.
func main() {
	raw, err := ioutil.ReadFile("input.txt")
	if err != nil {
		panic(err)
	}
	lines := strings.Split(strings.TrimSpace(string(raw)), "\n")
	first := wire(lines[0])
	second := wire(lines[1])
	crossings := intersect(first, second)
	part1(first, second, crossings)
	part2(first, second, crossings)
}
// part1 prints the Manhattan distance from the origin to the closest crossing.
func part1(w1, w2, ins []point) {
	best := math.MaxInt32
	origin := point{0, 0}
	for _, crossing := range ins {
		if d := dist(crossing, origin); d < best {
			best = d
		}
	}
	fmt.Println(best)
}
// part2 prints the fewest combined signal-delay steps over all crossings.
func part2(w1, w2, ins []point) {
	best := math.MaxInt32
	for _, crossing := range ins {
		total := stepsTo(crossing, w1) + stepsTo(crossing, w2)
		if total < best {
			best = total
		}
	}
	fmt.Println(best)
}
// point is a 2D grid coordinate.
type point struct{ x, y int }
// stepsTo returns the number of wire steps taken to first reach p; w holds
// every visited cell in order, starting one step away from the origin, hence
// the +1. Panics if p is not on the wire (callers only pass crossings).
func stepsTo(p point, w []point) int {
	for idx := range w {
		if eq(w[idx], p) {
			return idx + 1
		}
	}
	panic("not here")
}
// dist returns the Manhattan distance between p1 and p2.
func dist(p1, p2 point) int {
	dx := abs(p1.x - p2.x)
	dy := abs(p1.y - p2.y)
	return dx + dy
}
// abs returns the absolute value of i.
func abs(i int) int {
	if i >= 0 {
		return i
	}
	return -i
}
// eq reports whether two points occupy the same grid cell.
// point has only comparable fields, so struct equality suffices.
func eq(p1, p2 point) bool {
	return p1 == p2
}
// hash produces a unique string map key for a point.
func hash(pt point) string {
	return fmt.Sprintf("x:%d,y:%d", pt.x, pt.y)
}
// intersect returns every point of w2 that also lies on w1
// (duplicates in w2 are kept, matching the original behaviour).
func intersect(w1, w2 []point) []point {
	seen := make(map[string]bool, len(w1))
	for _, p := range w1 {
		seen[hash(p)] = true
	}
	shared := []point{}
	for _, p := range w2 {
		// missing keys yield false, so a single lookup suffices
		if seen[hash(p)] {
			shared = append(shared, p)
		}
	}
	return shared
}
// wire traces a comma-separated path description such as "R8,U5,L5,D3" and
// returns every grid cell the wire visits, in visit order (origin excluded).
// Panics on a malformed distance or an unknown direction letter.
func wire(line string) []point {
	ret := []point{}
	cur := point{0, 0}
	for _, ins := range strings.Split(line, ",") {
		dir := string(ins[0])
		dist, err := strconv.Atoi(ins[1:])
		if err != nil {
			panic(err)
		}
		// Walk one cell at a time so every intermediate cell is recorded.
		// BUG FIX: the original assigned a stale `n` to `cur` after the inner
		// loop, which reset `cur` to the zero point whenever a segment had
		// length 0; advancing `cur` per step keeps it correct in all cases.
		for i := 0; i < dist; i++ {
			switch dir {
			case "R":
				cur.x++
			case "L":
				cur.x--
			case "U":
				cur.y++
			case "D":
				cur.y--
			default:
				panic("Unknown dir")
			}
			ret = append(ret, cur)
		}
	}
	return ret
}
|
package produce
import (
"sub_account_service/number_server/routers/produce/api"
"github.com/gin-gonic/gin"
)
// InitRouter builds the gin engine for the produce service, wiring the
// order-creation endpoint plus the default logging/recovery middleware.
func InitRouter() *gin.Engine {
	// BUG FIX: the mode must be selected before gin.New() so the engine is
	// created in release mode (the original set it after construction, which
	// left debug-mode defaults and the debug banner in place).
	gin.SetMode(gin.ReleaseMode)
	r := gin.New()
	r.Use(gin.Logger())
	r.Use(gin.Recovery())
	r.POST("/addOrder", api.AddOrder)
	return r
}
|
package core
import (
"fmt"
"strings"
"testing"
"time"
icid "github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peerstore"
"github.com/segmentio/ksuid"
"github.com/textileio/go-textile/ipfs"
"github.com/textileio/go-textile/pb"
"github.com/textileio/go-textile/schema/textile"
)
// cafeVars holds the shared fixtures for the cafe test sequence in this file.
// The tests are order-dependent: setup, token handling, registration,
// request handling, then teardown.
var cafeVars = struct {
	nodePath string // repo path for the client peer
	cafePath string // repo path for the cafe peer
	cafeApiPort string // port the cafe's HTTP API listens on
	node *Textile // client peer, created in TestCore_SetupCafes
	cafe *Textile // cafe peer, created in TestCore_SetupCafes
	token string // most recently created cafe access token
}{
	nodePath: "./testdata/.textile3",
	cafePath: "./testdata/.textile4",
	cafeApiPort: "5000",
}
// TestCore_SetupCafes starts the two test peers: a plain node and a cafe
// peer with its HTTP API open. Later tests in this file reuse both.
func TestCore_SetupCafes(t *testing.T) {
	var err error
	cafeVars.node, err = CreateAndStartPeer(InitConfig{
		RepoPath: cafeVars.nodePath,
		Debug: true,
	}, true)
	if err != nil {
		t.Fatal(err)
	}
	// The cafe peer gets its own swarm port and an open cafe API so the
	// node can register against it over HTTP.
	cafeVars.cafe, err = CreateAndStartPeer(InitConfig{
		RepoPath: cafeVars.cafePath,
		Debug: true,
		SwarmPorts: "4001",
		CafeApiAddr: "0.0.0.0:" + cafeVars.cafeApiPort,
		CafeURL: "http://127.0.0.1:" + cafeVars.cafeApiPort,
		CafeOpen: true,
	}, true)
	if err != nil {
		t.Fatal(err)
	}
}
// TestTextile_CafeTokens creates an access token on the cafe and verifies it
// is persisted and that validation accepts it while rejecting a bogus token.
func TestTextile_CafeTokens(t *testing.T) {
	var err error
	cafeVars.token, err = cafeVars.cafe.CreateCafeToken("", true)
	if err != nil {
		t.Fatalf("error creating cafe token: %s", err)
	}
	if len(cafeVars.token) == 0 {
		t.Fatal("invalid token created")
	}
	// the lookup error is deliberately ignored; the length check suffices
	tokens, _ := cafeVars.cafe.CafeTokens()
	if len(tokens) < 1 {
		t.Fatal("token database not updated (should be length 1)")
	}
	ok, err := cafeVars.cafe.ValidateCafeToken("blah")
	if err == nil || ok {
		t.Fatal("expected token comparison with 'blah' to be invalid")
	}
	ok, err = cafeVars.cafe.ValidateCafeToken(cafeVars.token)
	if err != nil || !ok {
		t.Fatal("expected token comparison to be valid")
	}
}
// TestTextile_RemoveCafeToken deletes the token created by the previous test
// and verifies the token database is empty afterwards.
func TestTextile_RemoveCafeToken(t *testing.T) {
	err := cafeVars.cafe.RemoveCafeToken(cafeVars.token)
	if err != nil {
		// FIX: repaired the garbled failure message ("expected be remove token cleanly")
		t.Fatal("expected token to be removed cleanly")
	}
	// the lookup error is deliberately ignored; the length check suffices
	tokens, _ := cafeVars.cafe.CafeTokens()
	if len(tokens) > 0 {
		t.Fatal("token database not updated (should be zero length)")
	}
}
// TestCore_RegisterCafe registers the node with the cafe using a freshly
// created token, then seeds the node with test content and flushes its cafe
// request queue so TestCore_HandleCafeRequests has work to observe.
func TestCore_RegisterCafe(t *testing.T) {
	token, err := cafeVars.cafe.CreateCafeToken("", true)
	if err != nil {
		t.Fatal(err)
	}
	ok, err := cafeVars.cafe.ValidateCafeToken(token)
	if !ok || err != nil {
		t.Fatal(err)
	}
	// register with cafe
	cafeID := cafeVars.cafe.Ipfs().Identity
	// make the cafe's addresses known to the node so it can dial directly
	cafeVars.node.Ipfs().Peerstore.AddAddrs(
		cafeID, cafeVars.cafe.Ipfs().PeerHost.Addrs(), peerstore.PermanentAddrTTL)
	_, err = cafeVars.node.RegisterCafe(cafeID.Pretty(), token)
	if err != nil {
		t.Fatalf("register node1 w/ node2 failed: %s", err)
	}
	// add some data
	err = addTestData(cafeVars.node)
	if err != nil {
		t.Fatal(err)
	}
	cafeVars.node.FlushCafes()
}
// TestCore_HandleCafeRequests waits for the node's cafe request queue to
// drain, then verifies the cafe pinned every block and file-data CID the
// node produced, and finally checks that recursive unpinning works.
func TestCore_HandleCafeRequests(t *testing.T) {
	waitOnRequests(time.Second * 60)
	n := cafeVars.node
	c := cafeVars.cafe
	// ensure all requests have been deleted
	cnt := n.datastore.CafeRequests().Count(-1)
	ncnt := n.datastore.CafeRequests().Count(0)
	if ncnt != 0 {
		t.Fatalf("expected all requests to be handled, got %d total, %d new", cnt, ncnt)
	}
	// check if blocks are pinned
	var blocks []string
	var datas []string
	list := n.Blocks("", -1, "")
	for _, b := range list.Items {
		blocks = append(blocks, b.Id)
		// FILES blocks additionally carry a data CID that must be pinned
		if b.Type == pb.Block_FILES {
			datas = append(datas, b.Data)
		}
	}
	missingBlockPins, err := ipfs.NotPinned(c.Ipfs(), blocks)
	if err != nil {
		t.Fatal(err)
	}
	if len(missingBlockPins) != 0 {
		var strs []string
		for _, id := range missingBlockPins {
			strs = append(strs, id.Hash().B58String())
		}
		t.Fatalf("blocks not pinned: %s", strings.Join(strs, ", "))
	}
	// check if datas are pinned
	missingDataPins, err := ipfs.NotPinned(c.Ipfs(), datas)
	if err != nil {
		t.Fatal(err)
	}
	if len(missingDataPins) != 0 {
		var strs []string
		for _, id := range missingDataPins {
			strs = append(strs, id.Hash().B58String())
		}
		t.Fatalf("datas not pinned: %s", strings.Join(strs, ", "))
	}
	// try unpinning data
	if len(datas) > 0 {
		dec, err := icid.Decode(datas[0])
		if err != nil {
			t.Fatal(err)
		}
		err = ipfs.UnpinCid(c.Ipfs(), dec, true)
		if err != nil {
			t.Fatal(err)
		}
		// the unpinned CID should now show up as not-pinned
		not, err := ipfs.NotPinned(c.Ipfs(), []string{datas[0]})
		if err != nil {
			t.Fatal(err)
		}
		if len(not) == 0 || not[0].Hash().B58String() != datas[0] {
			t.Fatal("data was not recursively unpinned")
		}
	}
}
// TestCore_TeardownCafes stops both peers and releases the shared fixtures.
// Stop errors are deliberately ignored: teardown is best-effort.
func TestCore_TeardownCafes(t *testing.T) {
	_ = cafeVars.node.Stop()
	_ = cafeVars.cafe.Stop()
	cafeVars.node = nil
	cafeVars.cafe = nil
}
// addTestData seeds the given node with a private test thread containing
// images, messages, a comment and a like, so the cafe sync has a realistic
// mix of block types to process. Returns the first error encountered.
func addTestData(n *Textile) error {
	thrd, err := addTestThread(n, &pb.AddThreadConfig{
		Key: ksuid.New().String(),
		Name: "test",
		Schema: &pb.AddThreadConfig_Schema{
			Json: textile.Blob,
		},
		Type: pb.Thread_PRIVATE,
		Sharing: pb.Thread_INVITE_ONLY,
	})
	if err != nil {
		return err
	}
	_, err = addData(n, []string{"../mill/testdata/image.jpeg"}, thrd, "hi")
	if err != nil {
		return err
	}
	_, err = thrd.AddMessage("", "hi")
	if err != nil {
		return err
	}
	files, err := addData(n, []string{"../mill/testdata/image.png"}, thrd, "hi")
	if err != nil {
		return err
	}
	// comment on the last files block
	_, err = thrd.AddComment(files.Block, "nice")
	if err != nil {
		return err
	}
	// a multi-file add, then a like on its block
	files, err = addData(n, []string{"../mill/testdata/image.jpeg", "../mill/testdata/image.png"}, thrd, "hi")
	if err != nil {
		return err
	}
	_, err = thrd.AddLike(files.Block)
	if err != nil {
		return err
	}
	_, err = thrd.AddMessage("", "bye")
	if err != nil {
		return err
	}
	return nil
}
// waitOnRequests polls once per second until the node's cafe request queue
// is empty, giving up once the supplied total duration has elapsed.
func waitOnRequests(total time.Duration) {
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for waited := time.Duration(0); waited < total; waited += time.Second {
		<-tick.C
		pending := cafeVars.node.datastore.CafeRequests().Count(-1)
		if pending == 0 {
			return
		}
		fmt.Printf("waiting on %d requests to complete\n", pending)
	}
}
|
// Copyright 2020 The Amadeus Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kafkaconnect
import (
"context"
"encoding/json"
e "errors"
"fmt"
"strconv"
"sync"
"time"
kafkaconnectv1alpha1 "github.com/amadeusitgroup/kubernetes-kafka-connect-operator/pkg/apis/kafkaconnect/v1alpha1"
"github.com/amadeusitgroup/kubernetes-kafka-connect-operator/pkg/kafkaconnectclient"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// Annotation keys used to remember the last applied (encoded) child resources
// on the KafkaConnect object, so later reconciles can detect drift.
const (
	encodedDeploymentAnnotation = "kafkaconnect/encoded-deployment"
	encodedServiceAnnotation = "kafkaconnect/encoded-service"
	encodedIngressAnnotation = "kafkaconnect/encoded-ingress"
)
// Utils reconciles a KafkaConnect object
type Utils struct {
	// This client, initialized using mgr.Client() above, is a split client
	// that reads objects from the cache and writes to the apiserver
	ReconcileKafkaConnect
	// specUpdated is set by initConnectorTaskMax whenever a connector's
	// TasksMax had to be defaulted, signalling that the spec itself must be
	// written back before continuing with status reconciliation.
	specUpdated bool
}
// initConnectorTaskMax init a taskmax for each connector.
// It runs as one goroutine per connector (see updateDefaultTaskMax) and marks
// wg done on every exit path. A spec that already carries TasksMax is left
// untouched; otherwise "tasks.max" is read from the connector's config, and
// on any failure the error is recorded in connectorStatus with TasksMax
// defaulted to 0.
func (utils *Utils) initConnectorTaskMax(wg *sync.WaitGroup, spec *kafkaconnectv1alpha1.KafkaConnectorConfig, connectorStatus *kafkaconnectv1alpha1.KafkaConnectorStatus, instance *kafkaconnectv1alpha1.KafkaConnect) {
	defer wg.Done()
	if spec.TasksMax != nil {
		return
	}
	nb := int32(0)
	// NOTE(review): this flag is written concurrently by several goroutines
	// without synchronization. All writers store `true`, but it is still a
	// data race under -race; consider sync/atomic or a mutex.
	utils.specUpdated = true
	//get config bytes from the url
	config, err := kafkaconnectclient.GetKafkaConnectConfig(utils.corev1Itf.ConfigMaps(instance.Namespace), *spec)
	// if there is error
	if err != nil {
		spec.TasksMax = &nb
		utils.eventRecorder.Eventf(instance, v1.EventTypeWarning, "UnableToGetConfig", "cannot get config for connector %s", spec.Name)
		connectorStatus.Error = fmt.Sprintf("cannot get config for %s", spec.Name)
		klog.Error(err, "cannot get config")
		return
	}
	//read tasks.max
	taskNb := config["tasks.max"]
	// if string
	if nbStr, ok := taskNb.(string); ok {
		nbInt := 0
		if nbInt, err = strconv.Atoi(nbStr); err != nil {
			spec.TasksMax = &nb
			utils.eventRecorder.Eventf(instance, v1.EventTypeWarning, "UnableToGetTasksMax", "cannot get tasks.max from config for connector %s", spec.Name)
			connectorStatus.Error = fmt.Sprintf("cannot get int value of tasks.max from config for %s", spec.Name)
			klog.Error(err, "cannot get int value of tasks.max from config")
			return
		}
		nb = int32(nbInt)
	} else if nbInt, ok := taskNb.(int); ok {
		// NOTE(review): JSON-decoded numbers usually arrive as float64, not
		// int — confirm GetKafkaConnectConfig's decoding before relying on
		// this branch; a float64 value would silently leave nb at 0.
		nb = int32(nbInt)
	}
	if nb > 0 {
		spec.TasksMax = &nb
	}
}
// getTotalTaskNb sums the TasksMax of every configured connector.
// Assumes updateDefaultTaskMax ran first so no TasksMax pointer is nil.
func (utils *Utils) getTotalTaskNb(instance *kafkaconnectv1alpha1.KafkaConnect) int32 {
	var total int32
	for _, cfg := range instance.Spec.KafkaConnectorsSpec.Configs {
		total += *cfg.TasksMax
	}
	return total
}
// checkUpdatingStatus records whether a change is in flight; when an update
// finishes (changed flips to false) it also stamps the last scale time.
func (utils *Utils) checkUpdatingStatus(instance *kafkaconnectv1alpha1.KafkaConnect, changed bool) {
	switch {
	case changed && !instance.Status.Updating:
		instance.Status.Updating = true
	case !changed && instance.Status.Updating:
		instance.Status.Updating = false
		instance.Status.LastScaleTime = metav1.Time{Time: time.Now()}
	}
}
// updateDefaultTaskMax fills in a default TasksMax for every connector that
// does not declare one, querying each connector's config concurrently and
// blocking until all lookups finish.
func (utils *Utils) updateDefaultTaskMax(instance *kafkaconnectv1alpha1.KafkaConnect) {
	configs := instance.Spec.KafkaConnectorsSpec.Configs
	var wg sync.WaitGroup
	wg.Add(len(configs))
	for i := range configs {
		go utils.initConnectorTaskMax(&wg, &configs[i], &instance.Status.KafkaConnectorStatus[i], instance)
	}
	wg.Wait()
}
// CheckGlobalStatus checks the whole reconciled state of a KafkaConnect
// instance: it initialises status on first sight, defaults connector task
// counts, applies/updates the owned deployment, service and ingress, syncs
// connector configs through the KafkaConnect REST API, and finally persists
// the updated status. PUT errors against the REST API are collected and the
// first one is returned last so every connector gets a sync attempt.
func (utils *Utils) CheckGlobalStatus(instance *kafkaconnectv1alpha1.KafkaConnect) error {
	oriInstance := instance.DeepCopy()
	// if no status, create new status from config and update
	if instance.Status == nil {
		instance.Status = &kafkaconnectv1alpha1.KafkaConnectStatus{
			Updating: true,
			KafkaConnectorStatus: make([]kafkaconnectv1alpha1.KafkaConnectorStatus, len(instance.Spec.KafkaConnectorsSpec.Configs)),
			LastScaleTime: metav1.Time{Time: time.Now()},
			PodNb: 0,
		}
		for i, configSpec := range instance.Spec.KafkaConnectorsSpec.Configs {
			instance.Status.KafkaConnectorStatus[i] = kafkaconnectv1alpha1.KafkaConnectorStatus{
				Name: configSpec.Name,
				TaskNb: int32(0),
				Error: "",
			}
		}
	}
	utils.updateDefaultTaskMax(instance)
	if utils.specUpdated {
		klog.Info("task max updated")
		err := utils.client.Update(context.TODO(), instance)
		if err != nil {
			klog.Errorf("cannot update the kafka connect object %s:%s, %s",
				instance.Namespace, instance.Name, err.Error())
		}
		return err
	}
	// if status exist, check deployement config, service and ingress object
	depIsUpdating := false
	changed := false
	var foundDep *appsv1.Deployment
	var foundSvc *corev1.Service
	if dep, foundDeployment, err := utils.checkDeployment(instance); err != nil {
		return err
	} else if dep != nil {
		changed = true
		depIsUpdating = true
		klog.Infof("update dep %s/%s", dep.Namespace, dep.Name)
		if err = controllerutil.SetControllerReference(instance, dep, utils.scheme); err != nil {
			utils.eventRecorder.Eventf(instance, v1.EventTypeWarning, "FailedSetOwnerReference", "failed to set owner reference for deployment %s", err.Error())
			return err
		}
		if dep.Annotations == nil {
			dep.Annotations = make(map[string]string)
		}
		exist := foundDeployment != nil
		if err = utils.apply(dep, exist); err != nil {
			utils.eventRecorder.Eventf(instance, v1.EventTypeWarning, "FailedApplyDeployment", "failed to apply deployment %s", err.Error())
			return err
		}
		utils.eventRecorder.Eventf(instance, v1.EventTypeNormal, "SuccessedApplyDeployment", "successed create deployment %s/%s", dep.Namespace, dep.Name)
	} else if instance.Status.PodNb > *foundDeployment.Spec.Replicas {
		// NOTE(review): dep == nil is assumed to imply foundDeployment != nil
		// (checkDeployment's contract) — the deref above panics otherwise.
		depIsUpdating = true
		changed = true
		err = utils.ScaleDeployment(instance, foundDeployment)
		if err != nil {
			utilruntime.HandleError(err)
			return err
		}
		utils.eventRecorder.Eventf(instance, v1.EventTypeNormal, "ScaleUpDeployment", "deployment exist already need to scale up %s/%s", foundDeployment.Namespace, foundDeployment.Name)
		foundDep = foundDeployment
	} else {
		utils.eventRecorder.Eventf(instance, v1.EventTypeNormal, "DeploymentExist", "deployment exist already %s/%s", foundDeployment.Namespace, foundDeployment.Name)
		foundDep = foundDeployment
	}
	if svc, foundService, err := utils.checkService(instance); err != nil {
		return err
	} else if svc != nil {
		changed = true
		klog.Infof("update svc %s/%s", svc.Namespace, svc.Name)
		if err = controllerutil.SetControllerReference(instance, svc, utils.scheme); err != nil {
			utils.eventRecorder.Eventf(instance, v1.EventTypeWarning, "FailedSetOwnerReference", "failed to set owner reference for service %s", err.Error())
			return err
		}
		exist := (foundService != nil)
		if err = utils.apply(svc, exist); err != nil {
			utils.eventRecorder.Eventf(instance, v1.EventTypeWarning, "FailedApplyService", "failed to apply service %s", err.Error())
			return err
		}
		utils.eventRecorder.Eventf(instance, v1.EventTypeNormal, "SuccessedApplyService", "successed create service %s/%s", svc.Namespace, svc.Name)
	} else {
		utils.eventRecorder.Eventf(instance, v1.EventTypeNormal, "ServiceExist", "service exist already %s/%s", foundService.Namespace, foundService.Name)
		foundSvc = foundService
	}
	if ing, exist, err := utils.checkIngress(instance); err != nil {
		return err
	} else if ing != nil {
		changed = true
		klog.Infof("update ing %s/%s", ing.Namespace, ing.Name)
		if err = controllerutil.SetControllerReference(instance, ing, utils.scheme); err != nil {
			utils.eventRecorder.Eventf(instance, v1.EventTypeWarning, "FailedSetOwnerReference", "failed to set owner reference for ingress %s", err.Error())
			return err
		}
		if err = utils.apply(ing, exist); err != nil {
			utils.eventRecorder.Eventf(instance, v1.EventTypeWarning, "FailedApplyIngress", "failed to apply ingress %s", err.Error())
			return err
		}
		utils.eventRecorder.Eventf(instance, v1.EventTypeNormal, "SuccessedApplyIngress", "successed create ingress %s/%s", ing.Namespace, ing.Name)
	} else {
		utils.eventRecorder.Eventf(instance, v1.EventTypeNormal, "IngressExist", "ingress exist already %s/%s-ingress", instance.Namespace, instance.Name)
	}
	var firstPutError error
	// only talk to the REST API once the deployment is stable and reachable
	if !depIsUpdating && foundDep != nil && *foundDep.Spec.Replicas > 0 && foundSvc != nil && deploymentComplete(foundDep, &(foundDep.Status)) {
		klog.Infof("dep complete %s/%s, will check config", foundDep.Namespace, foundDep.Name)
		kcNamespacedName := types.NamespacedName{Namespace: instance.Namespace, Name: instance.Name}
		newConnectorStatus := make([]kafkaconnectv1alpha1.KafkaConnectorStatus, len(instance.Spec.KafkaConnectorsSpec.Configs))
		oldConnectorStatus := instance.Status.KafkaConnectorStatus
		var connectors []string
		if !instance.Spec.KafkaConnectorsSpec.KeepUnknownConnectors {
			c, err := utils.kcc.GetAllConnectors(kcNamespacedName, instance.Spec.KafkaConnectRestAPIPort)
			if err != nil {
				klog.Error("unable to connect to kafka connect rest api ", err)
				utils.eventRecorder.Eventf(instance, v1.EventTypeWarning, "UnableToConnectRestAPI", "unable to connect to kafka connect rest api %s", err.Error())
				return err
			}
			connectors = c
		}
		for i, config := range instance.Spec.KafkaConnectorsSpec.Configs {
			// carry over any previous status entry for this connector
			foundOldStatus := false
			for _, cStatus := range oldConnectorStatus {
				if cStatus.Name == config.Name {
					foundOldStatus = true
					newConnectorStatus[i] = cStatus
					break
				}
			}
			if !foundOldStatus {
				newConnectorStatus[i] = kafkaconnectv1alpha1.KafkaConnectorStatus{
					Name: config.Name,
					TaskNb: int32(0),
					Error: "",
				}
			}
			// this connector is known to the spec: drop it from the deletion candidates
			if !instance.Spec.KafkaConnectorsSpec.KeepUnknownConnectors && connectors != nil {
				for idx := range connectors {
					if connectors[idx] == config.Name {
						connectors[idx] = connectors[len(connectors)-1]
						connectors[len(connectors)-1] = ""
						connectors = connectors[:len(connectors)-1]
						break
					}
				}
			}
			//get existing config from kafka connect cluster
			utils.eventRecorder.Eventf(instance, v1.EventTypeNormal, "CheckingConnectorConfig", "checking connector config for %s", config.Name)
			if *config.TasksMax == 0 && newConnectorStatus[i].Error != "" {
				//TODO should i delete old connector????????
				utils.eventRecorder.Eventf(instance, v1.EventTypeWarning, "WrongTaskMax", "wrong value tasksmax for %s", config.Name)
				continue
			}
			foundConfig, err := utils.kcc.GetKafkaConnectConfig(config.Name, instance.Spec.KafkaConnectRestAPIPort, kcNamespacedName)
			if err != nil {
				return err
			}
			if errorCode, ok := foundConfig["error_code"]; ok {
				klog.Warning("KafkaConnect REST Api not available error code ", errorCode)
				if message, ok := foundConfig["message"]; ok {
					klog.Warning("error message : ", message)
				}
				klog.Warningf("KafkaConnect REST Api not available error code %v", errorCode)
				utils.eventRecorder.Eventf(instance, v1.EventTypeWarning, "RESTApiUnavailable", "KafkaConnect REST Api not available error code %v", errorCode)
				//return error so retry later
				return e.New("RESTApi not available")
			}
			// get config from url
			expectedConfig, err := kafkaconnectclient.GetKafkaConnectConfig(utils.corev1Itf.ConfigMaps(instance.Namespace), config)
			if err != nil {
				return err
			}
			// replace tasks max with the nb from status. this value is updated by autoscaler
			expectedConfig["tasks.max"] = strconv.Itoa(int(*config.TasksMax))
			expectedConfig["name"] = config.Name
			if !apiequality.Semantic.DeepEqual(foundConfig, expectedConfig) {
				changed = true
				utils.eventRecorder.Eventf(instance, v1.EventTypeNormal, "UpdatingConnector", "updating connector %s", config.Name)
				klog.Infof("config is diff, will update config new : %+v", expectedConfig)
				klog.Infof("config is diff, will update config old : %+v", foundConfig)
				expectedConfigString, err := json.Marshal(expectedConfig)
				if err != nil {
					klog.Error("unable to marshal config ", err)
				} else if err = utils.kcc.PutKafkaConnectConfig(config.Name, instance.Spec.KafkaConnectRestAPIPort, kcNamespacedName, expectedConfigString); err != nil {
					// remember the first PUT failure but keep syncing the rest
					newConnectorStatus[i].Error = err.Error()
					firstPutError = err
				} else {
					newConnectorStatus[i].TaskNb = *config.TasksMax
				}
			} else {
				utils.eventRecorder.Eventf(instance, v1.EventTypeNormal, "SameConnector", "same connector config %s", config.Name)
				klog.Info("config is same, won't update config")
				newConnectorStatus[i].TaskNb = *config.TasksMax
			}
		}
		// whatever is left in connectors is unknown to the spec: delete it
		if !instance.Spec.KafkaConnectorsSpec.KeepUnknownConnectors && connectors != nil {
			for _, c := range connectors {
				err := utils.kcc.DeleteConnector(c, instance.Spec.KafkaConnectRestAPIPort, kcNamespacedName)
				if err != nil {
					klog.Errorf("unable to delete connector %s with error %s", c, err.Error())
					utils.eventRecorder.Eventf(instance, v1.EventTypeWarning, "CannotDelConnector", "unable to delete connector %s with error %s", c, err.Error())
					//Do not return with error, will retry later
				}
			}
		}
		instance.Status.KafkaConnectorStatus = newConnectorStatus
	}
	// BUG FIX: the nil check on foundDep must come before deploymentComplete,
	// which dereferences it — the original evaluated deploymentComplete first
	// and panicked with a nil pointer whenever foundDep was nil.
	if !changed && foundDep != nil && deploymentComplete(foundDep, &(foundDep.Status)) && instance.Status.PodNb < *foundDep.Spec.Replicas {
		changed = true
		err := utils.ScaleDeployment(instance, foundDep)
		if err != nil {
			utilruntime.HandleError(err)
			return err
		}
		utils.eventRecorder.Eventf(instance, v1.EventTypeNormal, "ScaleUpDeployment", "deployment exist already need to scale up %s/%s", foundDep.Namespace, foundDep.Name)
	}
	if !changed && foundDep != nil && deploymentComplete(foundDep, &(foundDep.Status)) {
		utils.checkUpdatingStatus(instance, false)
	} else {
		utils.checkUpdatingStatus(instance, true)
	}
	currentInstance := &kafkaconnectv1alpha1.KafkaConnect{}
	err := utils.client.Get(context.TODO(), types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, currentInstance)
	if err != nil {
		if errors.IsNotFound(err) {
			// Request object not found, could have been deleted after reconcile request.
			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
			// Return and don't requeue
			return nil
		}
		// Error reading the object - requeue the request.
		return err
	}
	//the instance has been updated during this reconcil,
	//we will do nothing and wait for the next reconcil
	if !apiequality.Semantic.DeepEqual(oriInstance, currentInstance) {
		klog.Warningf("instance %s:%s has been updated during the reconcilation, will apply the status to the new instance",
			currentInstance.Namespace, currentInstance.Name)
		klog.Warningf("the old instance is : %+v", oriInstance)
		klog.Warningf("the new instance is : %+v", currentInstance)
		currentInstance.Status = instance.Status
		if err := utils.client.Status().Update(context.TODO(), currentInstance); err != nil {
			utilruntime.HandleError(err)
		}
	} else if !apiequality.Semantic.DeepEqual(oriInstance.Status, instance.Status) {
		if err := utils.client.Status().Update(context.TODO(), instance); err != nil {
			utilruntime.HandleError(err)
		}
	}
	return firstPutError
}
// deploymentComplete reports whether the deployment has fully rolled out:
// every replica updated, present and available, with the observed status
// generation caught up to the spec generation.
func deploymentComplete(deployment *appsv1.Deployment, newStatus *appsv1.DeploymentStatus) bool {
	want := *(deployment.Spec.Replicas)
	return newStatus.UpdatedReplicas == want &&
		newStatus.Replicas == want &&
		newStatus.AvailableReplicas == want &&
		newStatus.ObservedGeneration >= deployment.Generation
}
// apply persists obj to the cluster: an update when the object already
// exists, a create otherwise. A nil obj is a no-op.
func (utils *Utils) apply(obj runtime.Object, exist bool) error {
	if obj == nil {
		return nil
	}
	ctx := context.TODO()
	if exist {
		return utils.client.Update(ctx, obj)
	}
	return utils.client.Create(ctx, obj)
}
// checkDeployment check if we need to create a new deployment or not, if only replicas is different, just scale old one
func (utils *Utils) checkDeployment(instance *kafkaconnectv1alpha1.KafkaConnect) (*appsv1.Deployment, *appsv1.Deployment, error) {
	// Returns (expected, found, err):
	//   expected non-nil -> a create (found == nil) or update (found != nil) is required
	//   expected nil     -> the existing deployment (found) is content-wise up to date
	foundDeployment := &appsv1.Deployment{}
	podLabels := GeneratePodLabels(*instance)
	podTemplate := corev1.PodTemplateSpec{
		Spec: instance.Spec.PodSpec,
		ObjectMeta: metav1.ObjectMeta{
			Name: instance.Name,
			Namespace: instance.Namespace,
			Labels: podLabels,
		},
	}
	// derive the desired pod count from the total task count and tasks-per-pod
	tasksTotal := utils.getTotalTaskNb(instance)
	tpp := int32(1)
	if instance.Spec.KafkaConnectorsSpec.TaskPerPod != nil {
		tpp = *instance.Spec.KafkaConnectorsSpec.TaskPerPod
	}
	// only 1 pod if tpp <= 0
	instance.Status.PodNb = int32(1)
	if tpp > 0 {
		// ceiling division: tasksTotal / tpp rounded up
		instance.Status.PodNb = tasksTotal / tpp
		if tasksTotal%tpp > 0 {
			instance.Status.PodNb = instance.Status.PodNb + 1
		}
	} else if instance.Spec.KafkaConnectorsSpec.InitDeploymentReplicas != nil && *instance.Spec.KafkaConnectorsSpec.InitDeploymentReplicas > 0 {
		instance.Status.PodNb = int32(1)
	} else {
		klog.Warning("taskPerPod is less than 0 and initPodReplicas is not validate value, we create deployment with 1 pod")
		utils.eventRecorder.Event(instance, v1.EventTypeWarning, "UnableToCalculReplicas", "taskPerPod is less than 0 and initPodReplicas is not validate value, we create deployment with 1 pod")
	}
	expectedDeployment := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: instance.Name,
			Namespace: instance.Namespace,
			Labels: instance.Labels,
		},
		Spec: appsv1.DeploymentSpec{
			// Replicas is intentionally left unset here so the annotation
			// snapshot computed below does not depend on scaling; it is
			// filled in just before returning a create/update.
			//Replicas: &instance.Status.PodNb,
			Selector: &metav1.LabelSelector{
				MatchLabels: podLabels,
			},
			Template: podTemplate,
		},
	}
	exist := true
	err := utils.client.Get(context.TODO(), types.NamespacedName{Name: expectedDeployment.Name, Namespace: expectedDeployment.Namespace}, foundDeployment)
	//if no deployment or deployement is different
	needUpdateDeployment := false
	if err != nil {
		if errors.IsNotFound(err) {
			needUpdateDeployment = true
			exist = false
		} else {
			return nil, nil, err
		}
	}
	if !needUpdateDeployment {
		// compare against the JSON snapshot stored in the annotation
		annotationDeployment := &appsv1.Deployment{}
		needUpdateDeployment = !checkSame(&foundDeployment.ObjectMeta, expectedDeployment, annotationDeployment, encodedDeploymentAnnotation)
	}
	if needUpdateDeployment {
		// snapshot the desired state so future reconciles can detect drift
		err = udpateObjectAnnotation(expectedDeployment, &(expectedDeployment.ObjectMeta), encodedDeploymentAnnotation)
		if err != nil {
			return nil, nil, err
		}
		expectedDeployment.Spec.Replicas = &instance.Status.PodNb
		if exist {
			return expectedDeployment, foundDeployment, nil
		}
		return expectedDeployment, nil, nil
	}
	// deployment content is up to date
	if tpp <= 0 {
		// manual scaling mode: report the live replica count in the status
		instance.Status.PodNb = *foundDeployment.Spec.Replicas
		return nil, foundDeployment, nil
	}
	return nil, foundDeployment, nil
}
// ScaleDeployment will scale dep according to instance.Status.PodNb
func (utils *Utils) ScaleDeployment(instance *kafkaconnectv1alpha1.KafkaConnect, dep *appsv1.Deployment) error {
	//if replicas is different we need to only scale
	if instance.Status.PodNb != *dep.Spec.Replicas {
		// resolve the REST mapping so the generic scale client can address
		// the deployment's scale subresource
		gvk := dep.GroupVersionKind()
		mapping, err := utils.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
		if err != nil {
			klog.Error(err)
			utils.eventRecorder.Eventf(dep, v1.EventTypeWarning, "UnableToFindRESTMapping", "cannot find RESTMapping for %s", gvk.String())
			return err
		}
		gr := mapping.Resource.GroupResource()
		// read-modify-write of the scale subresource
		scale, err := utils.scaleClient.Scales(dep.Namespace).Get(gr, dep.Name)
		if err != nil {
			klog.Error(err)
			utils.eventRecorder.Eventf(dep, v1.EventTypeWarning, "UnableToGetScale", "cannot get Scale for deployement %s/%s", dep.GetNamespace(), dep.GetName())
			return err
		}
		scale.Spec.Replicas = instance.Status.PodNb
		scale, err = utils.scaleClient.Scales(dep.Namespace).Update(gr, scale)
		if err != nil {
			klog.Error(err)
			utils.eventRecorder.Eventf(dep, v1.EventTypeWarning, "UnableToUpdateScale", "cannot update Scale for deployement %s/%s", dep.GetNamespace(), dep.GetName())
			return err
		}
	}
	return nil
}
// GeneratePodLabels builds the label set used to select the pods that
// belong to the given KafkaConnect instance.
func GeneratePodLabels(instance kafkaconnectv1alpha1.KafkaConnect) map[string]string {
	return map[string]string{
		"name": instance.Name,
		"type": "kafkaConnect",
	}
}
// GetSvcName derives the headless service name from the KafkaConnect name.
func GetSvcName(kcName string) string {
	return kcName + "-service"
}
// checkService check if we need to create a new service or not
func (utils *Utils) checkService(instance *kafkaconnectv1alpha1.KafkaConnect) (*corev1.Service, *corev1.Service, error) {
	// Returns (expected, found, err) with the same contract as
	// checkDeployment: non-nil expected means create (found == nil) or
	// update (found != nil) is required.
	foundService := &corev1.Service{}
	port := instance.Spec.KafkaConnectRestAPIPort
	// generate pod label
	podLabels := GeneratePodLabels(*instance)
	expectedService := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: GetSvcName(instance.Name),
			Namespace: instance.Namespace,
			Labels: map[string]string{
				"app": "kafkaconnect",
				"type": "service",
			},
		},
		Spec: corev1.ServiceSpec{
			Selector: podLabels,
			// headless service: pods are addressed individually through DNS
			ClusterIP: corev1.ClusterIPNone,
			Ports: []corev1.ServicePort{
				{
					Protocol: corev1.ProtocolTCP,
					Name: "kafkaconnect-rest",
					Port: port,
					TargetPort: intstr.FromInt(int(port)),
				},
			},
		},
	}
	exist := true
	err := utils.client.Get(context.TODO(), types.NamespacedName{Name: expectedService.Name, Namespace: expectedService.Namespace}, foundService)
	//if no service or service is different
	needUpdateService := false
	if err != nil {
		if errors.IsNotFound(err) {
			needUpdateService = true
			exist = false
		} else {
			return nil, nil, err
		}
	}
	if !needUpdateService {
		// compare against the JSON snapshot stored in the annotation
		annotationService := &corev1.Service{}
		needUpdateService = !checkSame(&foundService.ObjectMeta, expectedService, annotationService, encodedServiceAnnotation)
	}
	if needUpdateService {
		err = udpateObjectAnnotation(expectedService, &(expectedService.ObjectMeta), encodedServiceAnnotation)
		if err != nil {
			return nil, nil, err
		}
		if exist {
			// keep the live ResourceVersion so the update is not rejected
			// as a conflict by the API server
			expectedService.ResourceVersion = foundService.ResourceVersion
			return expectedService, foundService, nil
		}
		return expectedService, nil, nil
	}
	return nil, foundService, nil
}
// checkIngress checks whether the ingress for the instance must be created
// or updated. It returns the desired ingress (nil when nothing needs to
// change) and whether an ingress object already exists in the cluster.
func (utils *Utils) checkIngress(instance *kafkaconnectv1alpha1.KafkaConnect) (*extensions.Ingress, bool, error) {
	// no ingress requested for this instance
	if instance.Spec.IngressSpec == nil {
		return nil, true, nil
	}
	// shared backend for both ingress styles; use GetSvcName for
	// consistency with checkService instead of re-formatting the name.
	// NOTE(review): the backend port is hard coded to 8083 while the
	// service exposes instance.Spec.KafkaConnectRestAPIPort — confirm these
	// always match.
	backend := extensions.IngressBackend{
		ServiceName: GetSvcName(instance.Name),
		ServicePort: intstr.FromInt(8083),
	}
	var rule extensions.IngressRule
	spec := *instance.Spec.IngressSpec
	if spec.Style != nil && *spec.Style == kafkaconnectv1alpha1.DomainStyle {
		// domain style: a dedicated host per instance, <name>.<parentDomain>
		rule = extensions.IngressRule{
			Host: fmt.Sprintf("%s.%s", instance.Name, instance.Spec.IngressSpec.ParentDomain),
			IngressRuleValue: extensions.IngressRuleValue{
				HTTP: &extensions.HTTPIngressRuleValue{
					Paths: []extensions.HTTPIngressPath{
						{Backend: backend},
					},
				},
			},
		}
	} else {
		// path style: shared host, a dedicated path per instance, /<name>
		rule = extensions.IngressRule{
			IngressRuleValue: extensions.IngressRuleValue{
				HTTP: &extensions.HTTPIngressRuleValue{
					Paths: []extensions.HTTPIngressPath{
						{
							Path:    fmt.Sprintf("/%s", instance.Name),
							Backend: backend,
						},
					},
				},
			},
		}
	}
	expectedIngress := &extensions.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-ingress", instance.Name),
			Namespace: instance.Namespace,
			Labels: map[string]string{
				"app":  "kafkaconnect",
				"type": "service",
			},
		},
		Spec: extensions.IngressSpec{
			Rules: []extensions.IngressRule{rule},
		},
	}
	klog.Info("check ingress")
	foundIngress := &extensions.Ingress{}
	err := utils.client.Get(context.TODO(), types.NamespacedName{Name: expectedIngress.Name, Namespace: expectedIngress.Namespace}, foundIngress)
	//if no ingress or ingress is different
	exist := true
	needUpdateIngress := false
	if err != nil {
		if errors.IsNotFound(err) {
			klog.Info("ingress not found, create new")
			needUpdateIngress = true
			exist = false
		} else {
			return nil, false, err
		}
	}
	if !needUpdateIngress {
		klog.Info("ingress found compare annotation")
		// compare against the JSON snapshot stored in the annotation
		annotationIngress := &extensions.Ingress{}
		needUpdateIngress = !checkSame(&foundIngress.ObjectMeta, expectedIngress, annotationIngress, encodedIngressAnnotation)
	}
	if needUpdateIngress {
		klog.Info("need to update")
		err = udpateObjectAnnotation(expectedIngress, &(expectedIngress.ObjectMeta), encodedIngressAnnotation)
		if err != nil {
			return nil, false, err
		}
		return expectedIngress, exist, nil
	}
	return nil, true, nil
}
// checkSame compares the desired object against the JSON snapshot stored in
// the live object's annotations under annotationName. jsonEncodedObj is a
// zero-valued instance of the same type used as the decode target. It
// returns true only when the annotation is present, decodes cleanly and is
// semantically equal to expectedObject.
func checkSame(annotationObj *metav1.ObjectMeta, expectedObject interface{}, jsonEncodedObj interface{}, annotationName string) bool {
	annotation, ok := annotationObj.Annotations[annotationName]
	if !ok {
		return false
	}
	// BUG FIX: the decode error was silently ignored; a corrupt snapshot
	// must be treated as "different" so the object gets re-applied.
	if err := json.Unmarshal([]byte(annotation), jsonEncodedObj); err != nil {
		return false
	}
	return apiequality.Semantic.DeepEqual(jsonEncodedObj, expectedObject)
}
// udpateObjectAnnotation stores a JSON snapshot of expectedObject in the
// object's annotations under annotationName, so later reconciliations can
// detect drift via checkSame. (The function name typo is kept because other
// code in this file calls it.)
func udpateObjectAnnotation(expectedObject interface{}, meta *metav1.ObjectMeta, annotationName string) error {
	jsonEncoded, err := json.Marshal(expectedObject)
	if err != nil {
		return err
	}
	// BUG FIX: the map was unconditionally replaced, destroying any
	// annotations set by other actors; only initialize when absent.
	if meta.Annotations == nil {
		meta.Annotations = map[string]string{}
	}
	meta.Annotations[annotationName] = string(jsonEncoded)
	return nil
}
|
package rd_test
import (
"testing"
"time"
"github.com/nenad/rd"
"github.com/stretchr/testify/assert"
)
// TestToken_ExpiredAuthorization verifies that a token obtained in the past
// (one hour ago) is reported as no longer valid by IsValid.
func TestToken_ExpiredAuthorization(t *testing.T) {
	token := rd.Token{
		AccessToken: "ACCESS",
		RefreshToken: "REFRESH",
		// obtained 3600s in the past — presumably beyond the token lifetime
		ObtainedAt: time.Now().Add(-3600 * time.Second),
	}
	assert.False(t, token.IsValid())
}
|
package apis
import (
"net/http"
"gopkg.in/gin-gonic/gin.v1"
."taskweb/models"
"taskweb/core"
)
// GetTbPerformancesApi handles requests for the performance records and
// responds with the full list as JSON under the "msg" key.
func GetTbPerformancesApi(c *gin.Context){
	tbPerformances, err := GetTbPerformances()
	if err != nil {
		// BUG FIX: Fatalln terminated the whole server process on a single
		// failed query; log the error and answer with a 500 instead.
		core.Logger.Println(err)
		c.JSON(http.StatusInternalServerError, gin.H{
			"msg": err.Error(),
		})
		return
	}
	if tbPerformances == nil {
		// keep the JSON response an empty array rather than null
		tbPerformances = make([]TbPerformance, 0)
	}
	c.JSON(http.StatusOK, gin.H{
		"msg": tbPerformances,
	})
}
|
package main
import (
"fmt"
"sync"
"github.com/satori/go.uuid"
)
// PGM is an in-memory property-graph model with global occurrence counters
// and unique-name indexes for vertices and edges.
type PGM struct{
	Vertices map[uuid.UUID]*Vertex
	Edges map[uuid.UUID]*Edge
	storageMutex sync.Mutex // guards maps and counters during upserts
	TotalEntityCount float64 // vertices + edges ever created
	TotalVertexCount float64
	TotalEdgeCount float64
	UniqueVertexNameMap map[string]uuid.UUID // UniqueName -> vertex id
	UniqueEdgeNameMap map[string]uuid.UUID // UniqueName -> edge id
}
// Entity carries the fields shared by vertices and edges.
type Entity struct {
	Id uuid.UUID
	Name string
	UniqueName string // deduplication key used by the unique-name maps
	Label string
	Count float64 // number of times this entity has been upserted
	Properties map[string]interface{}
}
// Vertex is a graph node together with the edges incident to it.
type Vertex struct {
	Entity
	Edges map[uuid.UUID]*Edge
}
// Edge links VertexA to VertexB.
type Edge struct {
	Entity
	VertexA *Vertex
	VertexB *Vertex
	Directionality string
}
// GetEdgeById looks up an edge by id; the boolean reports presence.
func (p *PGM) GetEdgeById(id uuid.UUID) (*Edge, bool) {
	edge, found := p.Edges[id]
	return edge, found
}
// GetVertexById looks up a vertex by id; the boolean reports presence.
func (p *PGM) GetVertexById(id uuid.UUID) (*Vertex, bool){
	vertex, found := p.Vertices[id]
	return vertex, found
}
// Increment bumps the entity's occurrence count by one.
func (e *Entity) Increment(){
	e.Count++
}
// UpdateProperties merges props into the entity's property map (existing
// keys win), applies overwriteProps to keys that already exist, and
// increments the occurrence count.
func (e *Entity) UpdateProperties(props, overwriteProps map[string]interface{}){
	if e.Properties == nil {
		// writing to a nil map panics; allocate lazily
		e.Properties = make(map[string]interface{})
	}
	//favor already existing properties
	for k, v := range props{
		if _, ok := e.Properties[k]; !ok {
			e.Properties[k] = v
		}
	}
	//overwrite any explicitly overwritten properties
	for k, v := range overwriteProps{
		if _, ok := e.Properties[k]; ok {
			e.Properties[k] = v
		}
	}
	// BUG FIX: the merged map was previously clobbered here by
	// `e.Properties = props`, discarding the merge performed above.
	e.Increment()
}
// UpsertVertex inserts vb when its UniqueName is unseen, otherwise merges
// vb's properties into the existing vertex and bumps its count. The stored
// vertex is returned in both cases.
func (p *PGM) UpsertVertex(vb *Vertex) *Vertex{
	p.storageMutex.Lock()
	defer p.storageMutex.Unlock()
	var _vid uuid.UUID
	if vid,ok := p.UniqueVertexNameMap[vb.UniqueName]; ok{
		_vid = vid
		// existing vertex: merge properties and increment its count
		p.Vertices[vid].UpdateProperties(vb.Properties, map[string]interface{}{})
	}else{
		//new
		_vid = uuid.NewV4()
		vb.Id = _vid
		vb.Count = 1.0
		vb.Edges = make(map[uuid.UUID]*Edge)
		p.Vertices[vb.Id] = vb
		//unique name to id
		p.UniqueVertexNameMap[vb.UniqueName] = _vid
		p.TotalVertexCount += 1.0
		p.TotalEntityCount += 1.0
	}
	return p.Vertices[_vid]
}
// UpsertEdge inserts ea when its UniqueName is unseen, otherwise merges
// ea's properties into the existing edge and bumps its count. The stored
// edge is returned in both cases.
func (p *PGM) UpsertEdge(ea *Edge) *Edge{
	p.storageMutex.Lock()
	defer p.storageMutex.Unlock()
	var _eid uuid.UUID
	if eid,ok := p.UniqueEdgeNameMap[ea.UniqueName]; ok{
		_eid = eid
		//update properties
		p.Edges[eid].UpdateProperties(ea.Properties, map[string]interface{}{})
	}else{
		//new
		_eid = uuid.NewV4()
		ea.Id = _eid
		p.Edges[ea.Id] = ea
		ea.Count = 1.0
		//unique name to id
		p.UniqueEdgeNameMap[ea.UniqueName] = _eid
		p.TotalEdgeCount += 1.0
		p.TotalEntityCount += 1.0
	}
	return p.Edges[_eid]
}
// UpsertEdge attaches e to the vertex if it is not registered yet and
// returns the edge stored under e.Id (the existing one when present).
func (v *Vertex) UpsertEdge(e *Edge) *Edge {
	if existing, ok := v.Edges[e.Id]; ok {
		return existing
	}
	// BUG FIX: the original returned the map's zero value (nil) after
	// inserting a new edge; return the inserted edge instead.
	v.Edges[e.Id] = e
	return e
}
// NewPgm returns an empty property-graph model with all maps initialized.
func NewPgm() *PGM{
	return &PGM{
		Vertices:            make(map[uuid.UUID]*Vertex),
		Edges:               make(map[uuid.UUID]*Edge),
		UniqueVertexNameMap: make(map[string]uuid.UUID),
		UniqueEdgeNameMap:   make(map[string]uuid.UUID),
	}
}
// main builds a small word-adjacency graph from a toy corpus and prints
// occurrence probabilities for the word "what" and its outgoing edges.
func main(){
	// toy corpus: each inner slice is one "sentence" of tokens
	test := [][]string{
		[]string{"what","is","thought","eric","baum"},
		[]string{"what","is","happiness","eric","whitegate"},
		[]string{"what","do","happiness","and","fear", "have", "in", "common"},
		[]string{"on","the","nature","of","things","whitegate"},
		[]string{"what","is","the","meaning","of","this"},
	}
	//insert data into graph
	pgm := NewPgm()
	for _,sentence := range test {
		// consume tokens pairwise: (w0,w1), (w2,w3), ... — odd trailing
		// tokens are skipped by the loop bound
		for word_idx := 0; word_idx <len(sentence)-1; word_idx+=2{
			va := &Vertex{Entity{UniqueName: sentence[word_idx]},nil}
			va = pgm.UpsertVertex(va)
			vb := &Vertex{Entity{UniqueName: sentence[word_idx+1]},nil}
			vb = pgm.UpsertVertex(vb)
			// the edge key "first_second" encodes the pair ordering
			e := &Edge{Entity{UniqueName: sentence[word_idx]+"_"+sentence[word_idx+1]},va,vb,"yo"}
			e = pgm.UpsertEdge(e)
			va.UpsertEdge(e)
			vb.UpsertEdge(e)
		}
	}
	//check that probabilities are correct
	what_v := pgm.Vertices[pgm.UniqueVertexNameMap["what"]]
	fmt.Printf("probability of global %s: %v\n", what_v.UniqueName, what_v.Count/pgm.TotalVertexCount)
	for _,e := range what_v.Edges {
		fmt.Printf("probability of global %s: %v\n", e.UniqueName, e.Count/pgm.TotalEdgeCount)
		// conditional probability: this edge's count over the total count
		// of all edges attached to "what"
		edge_sum := 0.0
		for _,_e := range what_v.Edges{
			edge_sum += _e.Count
		}
		fmt.Printf("probability of %s coming after %s: %v\n", e.VertexB.UniqueName,e.VertexA.UniqueName, e.Count/edge_sum)
	}
}
|
// Copyright 2016 IBM Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package checker
import (
"time"
"encoding/json"
"github.com/Sirupsen/logrus"
"github.com/amalgam8/sidecar/config"
"github.com/amalgam8/sidecar/router/clients"
"github.com/amalgam8/sidecar/router/nginx"
)
// Poller performs a periodic poll on Controller for changes to the NGINX config
type Poller interface {
	Start() error
	Stop() error
}
// poller is the default Poller implementation, driven by a time.Ticker.
type poller struct {
	ticker *time.Ticker
	controller clients.Controller
	nginx nginx.Nginx
	config *config.Config
	version *time.Time // time of the last successful config update; nil until then
}
// NewPoller creates a Poller that fetches NGINX configuration from the
// given Controller client and applies it through nginx.
func NewPoller(config *config.Config, rc clients.Controller, nginx nginx.Nginx) Poller {
	p := new(poller)
	p.controller = rc
	p.config = config
	p.nginx = nginx
	return p
}
// Start begins periodic polling of Controller for the latest configuration. This is a blocking operation.
func (p *poller) Start() error {
	// Stop existing ticker if necessary
	if p.ticker != nil {
		if err := p.Stop(); err != nil {
			logrus.WithError(err).Error("Could not stop existing periodic poll")
			return err
		}
	}
	// Create new ticker
	p.ticker = time.NewTicker(p.config.Controller.Poll)
	// Do initial poll
	if err := p.poll(); err != nil {
		logrus.WithError(err).Error("Poll failed")
	}
	// Start periodic poll.
	// NOTE(review): Stop() stops the ticker but does not close its channel,
	// so this range never terminates — confirm Start is expected to block
	// for the process lifetime.
	for range p.ticker.C { // idiom fix: was `for _ = range`
		if err := p.poll(); err != nil {
			logrus.WithError(err).Error("Poll failed")
		}
	}
	return nil
}
// poll obtains the latest NGINX config from Controller and updates NGINX to use it
func (p *poller) poll() error {
	// Get latest config from Controller; p.version makes this a delta query
	conf, err := p.controller.GetNGINXConfig(p.version)
	if err != nil {
		logrus.WithError(err).Error("Call to Controller failed")
		return err
	}
	if conf == nil {
		//TODO no new rules to update, do we need to do anything else?
		return nil
	}
	// BUG FIX: the Marshal error was ignored, which could hand nil bytes to
	// NGINX after a failed serialization
	confBytes, err := json.Marshal(conf)
	if err != nil {
		logrus.WithError(err).Error("Could not marshal NGINX config")
		return err
	}
	// Update our existing NGINX config
	if err := p.nginx.Update(confBytes); err != nil {
		logrus.WithError(err).Error("Could not update NGINX config")
		return err
	}
	// record the time of this successful update for the next delta request
	t := time.Now()
	p.version = &t
	return nil
}
// Stop halts the periodic poll of Controller
func (p *poller) Stop() error {
	if p.ticker == nil {
		return nil
	}
	p.ticker.Stop()
	p.ticker = nil
	return nil
}
|
/*
Copyright 2018 Pressinfra SRL.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wordpress
import (
"context"
gosync "sync"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/source"
wordpressv1alpha1 "github.com/presslabs/wordpress-operator/pkg/apis/wordpress/v1alpha1"
"github.com/presslabs/wordpress-operator/pkg/controller/wordpress/sync"
)
// log is the package logger, scoped under this controller's name.
var log = logf.Log.WithName(controllerName)
const controllerName = "wordpress-controller"
const (
	// event types recorded on Wordpress objects
	eventNormal = "Normal"
	eventWarning = "Warning"
)
// rtMap records, for every reconciled Wordpress (by namespaced name), the
// WordpressRuntime it references, so runtime changes can be mapped back to
// the Wordpress objects that must be re-reconciled.
var rtMap struct {
	lock gosync.RWMutex
	m map[types.NamespacedName]string
}
// Add creates a new Wordpress Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
	// delegated to add so tests can inject a custom reconcile.Reconciler
	return add(mgr, newReconciler(mgr))
}
// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
	// the recorder is registered under the controller name so emitted events
	// can be traced back to this controller
	return &ReconcileWordpress{Client: mgr.GetClient(), scheme: mgr.GetScheme(), recorder: mgr.GetRecorder(controllerName)}
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller
	c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	// Watch for changes to Wordpress
	err = c.Watch(&source.Kind{Type: &wordpressv1alpha1.Wordpress{}}, &handler.EnqueueRequestForObject{})
	if err != nil {
		return err
	}
	// Watch for changes to WordpressRuntime.
	// A runtime carries no owner reference to the Wordpress objects using
	// it, so rtMap is consulted to enqueue every Wordpress that references
	// the changed runtime.
	err = c.Watch(&source.Kind{Type: &wordpressv1alpha1.WordpressRuntime{}}, &handler.EnqueueRequestsFromMapFunc{
		ToRequests: handler.ToRequestsFunc(func(rt handler.MapObject) []reconcile.Request {
			rtMap.lock.RLock()
			defer rtMap.lock.RUnlock()
			var reconciles = []reconcile.Request{}
			for key, runtime := range rtMap.m {
				if runtime == rt.Meta.GetName() {
					reconciles = append(reconciles, reconcile.Request{NamespacedName: key})
				}
			}
			return reconciles
		}),
	})
	if err != nil {
		return err
	}
	// Watch for Deployment changes owned by a Wordpress
	err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{
		IsController: true,
		OwnerType: &wordpressv1alpha1.Wordpress{},
	})
	if err != nil {
		return err
	}
	// Watch for Service changes owned by a Wordpress
	err = c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForOwner{
		IsController: true,
		OwnerType: &wordpressv1alpha1.Wordpress{},
	})
	if err != nil {
		return err
	}
	// TODO(calind): watch for PVC, CronJobs, Jobs and Ingresses
	return nil
}
// compile-time assertion that ReconcileWordpress satisfies reconcile.Reconciler
var _ reconcile.Reconciler = &ReconcileWordpress{}
// ReconcileWordpress reconciles a Wordpress object
type ReconcileWordpress struct {
	client.Client
	scheme *runtime.Scheme
	recorder record.EventRecorder
}
// Reconcile reads that state of the cluster for a Wordpress object and makes changes based on the state read
// and what is in the Wordpress.Spec
//
// Automatically generate RBAC rules to allow the Controller to read and write Deployments
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=wordpress.presslabs.org,resources=wordpresses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=wordpress.presslabs.org,resources=wordpressruntimes,verbs=get;list;watch;create;update;patch;delete
func (r *ReconcileWordpress) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	// Fetch the Wordpress instance
	wp := &wordpressv1alpha1.Wordpress{}
	err := r.Get(context.TODO(), request.NamespacedName, wp)
	if err != nil {
		if errors.IsNotFound(err) {
			// Object not found, return. Created objects are automatically garbage collected.
			// For additional cleanup logic use finalizers.
			// NOTE(review): the rtMap entry for this object is never removed,
			// so deleted Wordpress resources keep being enqueued on runtime
			// changes — confirm whether cleanup belongs here.
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		return reconcile.Result{}, err
	}
	// Add Wordpress to the runtimes map
	rtMap.lock.Lock()
	rtMap.m[request.NamespacedName] = wp.Spec.Runtime
	rtMap.lock.Unlock()
	wp.SetDefaults()
	// the referenced WordpressRuntime must exist for reconciliation to proceed
	rt := &wordpressv1alpha1.WordpressRuntime{}
	err = r.Get(context.TODO(), types.NamespacedName{Name: wp.Spec.Runtime}, rt)
	if err != nil {
		return reconcile.Result{}, err
	}
	rt.SetDefaults()
	// syncers that always run
	syncers := []sync.Interface{
		sync.NewDeploymentSyncer(wp, rt, r.scheme),
		sync.NewServiceSyncer(wp, rt, r.scheme),
		sync.NewIngressSyncer(wp, rt, r.scheme),
		sync.NewWPCronSyncer(wp, rt, r.scheme),
		sync.NewDBUpgradeJobSyncer(wp, rt, r.scheme),
	}
	// volume specs on the Wordpress override the runtime's defaults; PVC
	// syncers only run when a PersistentVolumeClaim is actually requested
	volSpec := rt.Spec.WebrootVolumeSpec
	if wp.Spec.WebrootVolumeSpec != nil {
		volSpec = wp.Spec.WebrootVolumeSpec
	}
	if volSpec.PersistentVolumeClaim != nil {
		syncers = append(syncers, sync.NewWebrootPVCSyncer(wp, rt, r.scheme))
	}
	volSpec = rt.Spec.MediaVolumeSpec
	if wp.Spec.MediaVolumeSpec != nil {
		volSpec = wp.Spec.MediaVolumeSpec
	}
	if volSpec != nil && volSpec.PersistentVolumeClaim != nil {
		syncers = append(syncers, sync.NewMediaPVCSyncer(wp, rt, r.scheme))
	}
	return reconcile.Result{}, r.sync(wp, syncers)
}
// sync runs every syncer in order, creating or updating the object each one
// manages, records an event per performed operation, and stops at the first
// error.
func (r *ReconcileWordpress) sync(wp *wordpressv1alpha1.Wordpress, syncers []sync.Interface) error {
	for _, s := range syncers {
		key := s.GetKey()
		existing := s.GetExistingObjectPlaceholder()
		// CreateOrUpdate mutates `existing` through the syncer's transform s.T
		op, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, key, existing, s.T)
		reason := string(s.GetErrorEventReason(err))
		log.Info(string(op), "key", key.String(), "kind", existing.GetObjectKind().GroupVersionKind().Kind)
		if err != nil {
			r.recorder.Eventf(wp, eventWarning, reason, "%T %s/%s failed syncing: %s", existing, key.Namespace, key.Name, err)
			return err
		}
		// only record an event when something actually changed
		if op != controllerutil.OperationNoop {
			r.recorder.Eventf(wp, eventNormal, reason, "%T %s/%s %s successfully", existing, key.Namespace, key.Name, op)
		}
	}
	return nil
}
// init prepares the package-level Wordpress->runtime map before any
// reconciler or watch handler touches it.
func init() {
	rtMap.m = make(map[types.NamespacedName]string)
}
|
package main
import (
"fmt"
"testing"
)
// TestKthSmallest currently exercises nothing: it only prints the integer
// quotient of 1/2 (which is 0) and makes no assertions.
func TestKthSmallest(t *testing.T) {
	quotient := 1 / 2
	fmt.Println(quotient)
}
|
package abnf
import (
"github.com/lioneagle/goutil/src/mem"
)
// Context carries the state of one parse run: the arena used for node
// allocation, the input bytes, the current read position and the cached
// input length.
type Context struct {
	allocator *mem.ArenaAllocator // arena for parse-result allocations
	parseSrc  []byte // input being parsed
	parsePos  Pos // current read position within parseSrc
	srcLen    Pos // cached len(parseSrc)
}
// NewContext builds a parse context over src using the given allocator.
func NewContext(allocator *mem.ArenaAllocator, src []byte) *Context {
	ctx := &Context{}
	ctx.allocator = allocator
	ctx.SetParseSrc(src)
	return ctx
}
// SetParseSrc replaces the input buffer and refreshes the cached length.
func (c *Context) SetParseSrc(src []byte) {
	c.parseSrc = src
	c.srcLen = Pos(len(src))
}
// SetParsePos moves the current read position to pos.
func (c *Context) SetParsePos(pos Pos) {
	c.parsePos = pos
}
// SetAllocator replaces the arena allocator used by this context.
func (c *Context) SetAllocator(allocator *mem.ArenaAllocator) {
	c.allocator = allocator
}
|
// Copyright 2021 The image-cloner Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package docker
import (
"context"
"encoding/json"
"errors"
"io"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/jsonmessage"
"k8s.io/klog/v2"
)
// ImagePull pulls image and streams the daemon's progress messages through
// watch, surfacing any in-stream error.
func (d *docker) ImagePull(ctx context.Context, image string) error {
	// BUG FIX: honor the caller's ctx instead of context.Background() so
	// pulls can be cancelled or time limited
	res, err := d.client.ImagePull(ctx, image, types.ImagePullOptions{})
	if res != nil {
		// the progress stream must be closed even on error
		defer res.Close()
	}
	if err != nil {
		return err
	}
	return d.watch(res)
}
// ImagePush pushes image using the configured registry credentials and
// streams the daemon's progress messages through watch.
func (d *docker) ImagePush(ctx context.Context, image string) error {
	// BUG FIX: honor the caller's ctx instead of context.Background() so
	// pushes can be cancelled or time limited
	res, err := d.client.ImagePush(ctx, image,
		types.ImagePushOptions{
			RegistryAuth: d.registryAuth,
		})
	if res != nil {
		// the progress stream must be closed even on error
		defer res.Close()
	}
	if err != nil {
		return err
	}
	return d.watch(res)
}
// ImageTag tags the local image src with the new reference dst.
func (d *docker) ImageTag(ctx context.Context, src, dst string) error {
	// BUG FIX: honor the caller's ctx instead of context.Background()
	if err := d.client.ImageTag(ctx, src, dst); err != nil {
		return err
	}
	klog.Infof("[info]: '%s' successfully tagged as '%s'\n", src, dst)
	return nil
}
// watch decodes the JSON progress stream produced by the Docker daemon,
// logging each distinct status line and converting in-stream errors into Go
// errors. It returns nil when the stream ends cleanly (EOF).
func (d *docker) watch(in io.Reader) error {
	dec := json.NewDecoder(in)
	status := ""
	for {
		var jm jsonmessage.JSONMessage
		if err := dec.Decode(&jm); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// the daemon can report failures in-band inside the stream
		if jm.Error != nil {
			return jm.Error
		}
		if len(jm.ErrorMessage) > 0 {
			return errors.New(jm.ErrorMessage)
		}
		// only log a status when it differs from the previous one, to avoid
		// flooding the log with repeated progress lines
		if jm.Status != "" && !strings.EqualFold(status, jm.Status) {
			klog.Infof("[info]: %v\n", jm.Status)
			status = jm.Status
		}
	}
	return nil
}
|
package main
import (
"cm_liveme_im/libs/bytes"
"cm_liveme_im/libs/debug"
"cm_liveme_im/libs/define"
"cm_liveme_im/libs/proto"
itime "cm_liveme_im/libs/time"
"cm_liveme_im/libs/tokenbucket"
"sync"
"time"
log "github.com/thinkboy/log4go"
)
const (
	// initial capacity hint for the room registry map
	roomMapCup = 100
)
// roomBucket is the process-wide registry of live rooms.
var roomBucket *RoomBucket
// RoomRoutineTimeout is the idle period after which a room's push goroutine
// shuts down and the room is removed from the registry.
var RoomRoutineTimeout time.Duration = 180 * time.Second //if no msg in 180s , close routine
// RoomBucket maps room ids to their Room and tracks how many rooms have
// been created.
type RoomBucket struct {
	roomNum int // count of rooms ever created; also used to shard timers
	rooms map[string]*Room // roomId -> room
	rwLock sync.RWMutex // guards rooms and roomNum
	options RoomOptions
	round *Round
}
// InitRoomBucket initializes the global room registry with the given timer
// round and per-room options.
func InitRoomBucket(r *Round, options RoomOptions) {
	b := new(RoomBucket)
	b.rooms = make(map[string]*Room, roomMapCup)
	b.options = options
	b.round = r
	roomBucket = b
}
// Get returns the room registered under roomId, creating and registering a
// new one on first use.
func (this *RoomBucket) Get(roomId string) (r *Room) {
	// BUG FIX: the map was written while holding only the read lock, which
	// is a data race; take the write lock for the whole check-then-create
	// sequence.
	this.rwLock.Lock()
	room, ok := this.rooms[roomId]
	if !ok {
		room = NewRoom(roomId, this.round.Timer(this.roomNum), this.options)
		this.rooms[roomId] = room
		this.roomNum++
		debug.D.Debug("new roomId:%s num:%d", roomId, this.roomNum)
	}
	this.rwLock.Unlock()
	return room
}
// RoomOptions configures batching and rate limiting for a room.
type RoomOptions struct {
	BatchNum int // max messages merged into one broadcast
	SignalTime time.Duration // flush interval for partially filled batches
	RoomMsgLimitFrequency int64 // max messages per second accepted per room
}
// Room fans incoming protos into a per-room goroutine (pushproc) that
// batches and broadcasts them.
type Room struct {
	id string
	rLock sync.RWMutex
	proto chan *proto.Proto // queue consumed by pushproc; nil is the close sentinel
	buck *tokenbucket.Bucket // per-room rate limiter
	timeouter *time.Timer // idle timer that tears the room down
}
var (
	// roomReadyProto is the sentinel the flush timer injects to ask
	// pushproc to broadcast whatever is currently buffered.
	roomReadyProto = &proto.Proto{Operation: define.OP_ROOM_READY}
)
// NewRoom new a room struct, store channel room info.
func NewRoom(id string, t *itime.Timer, options RoomOptions) (r *Room) {
	r = new(Room)
	r.id = id
	// buffered to 2x batch size so pushers rarely block
	r.proto = make(chan *proto.Proto, options.BatchNum*2)
	var (
		freq int64 = options.RoomMsgLimitFrequency
		// NOTE(review): freq == 0 would divide by zero here — confirm the
		// configuration is validated upstream
		duration time.Duration = time.Duration(1e9 / freq)
	)
	r.buck = tokenbucket.NewBucket(freq, duration)
	log.Info("token bucket is ok(%d/s)", freq)
	// pushproc owns batching and broadcasting for this room
	go r.pushproc(t, uint16(options.BatchNum), options.SignalTime)
	return
}
// CanIGo reports whether the room's token bucket grants the requested
// number of tokens.
func (r *Room) CanIGo(token int64) bool {
	return r.buck.Take(token) != 0
}
// Close asks the room's pushproc goroutine to exit by sending the nil
// sentinel message.
func (r *Room) Close() {
	r.proto <- nil
}
// Push push msg to the room, if chan full discard it.
func (r *Room) Push(ver uint16, operation uint32, msg []byte) (err error) {
	// rate limit first: drop silently when over the per-room frequency
	if !r.CanIGo(1) {
		log.Warn("room msg is too fast , discard it")
		return
	}
	p := &proto.Proto{Ver: ver, Operation: operation, Body: msg}
	select {
	case r.proto <- p:
		// queued
	default:
		// channel full: report instead of blocking the caller
		err = ErrRoomFull
	}
	return
}
// EPush ensure push msg to the room.
func (r *Room) EPush(ver uint16, operation uint32, msg []byte) {
	// rate limit first: drop silently when over the per-room frequency
	if !r.CanIGo(1) {
		log.Warn("room msg is too fast , discard it")
		return
	}
	// blocking send: the message is guaranteed to be queued
	r.proto <- &proto.Proto{Ver: ver, Operation: operation, Body: msg}
}
// pushproc merge proto and push msgs in batch.
// It broadcasts buffered messages when `batch` messages are merged or after
// `sigTime` of inactivity, and exits when a nil proto is received (Close)
// or when no message arrives within RoomRoutineTimeout.
func (r *Room) pushproc(timer *itime.Timer, batch uint16, sigTime time.Duration) {
	r.timeouter = time.NewTimer(RoomRoutineTimeout)
	defer r.buck.Close()
	var (
		n    uint16
		last time.Time
		p    *proto.Proto
		td   *itime.TimerData
		buf  = bytes.NewWriterSize(Conf.MaxBufferSize)
	)
	log.Info("start room: %s goroutine", r.id)
	// periodic signal so partially filled batches still get flushed
	td = timer.Add(sigTime, func() {
		select {
		case r.proto <- roomReadyProto:
		default:
		}
	})
LOOP:
	for {
		debug.D.Debug(" in room %s", r.id)
		select {
		case p = <-r.proto:
			r.timeouter.Reset(RoomRoutineTimeout)
			if p == nil {
				// BUG FIX: a plain `break` here only left the select, so
				// the nil sentinel sent by Close never terminated this
				// goroutine; break out of the for loop instead.
				break LOOP
			} else if p != roomReadyProto {
				debug.D.Debug(" in room %s merge msg", r.id)
				// merge buffer ignore error, always nil
				p.WriteTo(buf)
				if n++; n == 1 {
					last = time.Now()
					timer.Set(td, sigTime)
					continue
				} else if n < batch {
					if sigTime > time.Now().Sub(last) {
						continue
					}
				}
			} else {
				// flush signal: skip the broadcast when nothing is buffered
				if n == 0 {
					debug.D.Debug(" in room %s new loop", r.id)
					continue
				}
				debug.D.Debug(" in room %s merge msg ok", r.id)
			}
			broadcastRoomBytes(r.id, buf.Buffer(), n)
			n = 0
			// TODO use reset buffer
			// after push to room channel, renew a buffer, let old buffer gc
			buf = bytes.NewWriterSize(buf.Size())
		case <-r.timeouter.C:
			break LOOP
			//r.Close()
		}
	}
	timer.Del(td)
	r.timeouter.Stop()
	// BUG FIX: the registry map is read concurrently by RoomBucket.Get, so
	// it must not be mutated without the bucket's write lock
	roomBucket.rwLock.Lock()
	delete(roomBucket.rooms, r.id)
	roomBucket.rwLock.Unlock()
	log.Info("room: %s goroutine exit", r.id)
}
|
package runner
// This file contains the implementation for the storage sub system that will
// be used by the runner to retrieve storage from cloud providers or localized storage
import (
"fmt"
"io"
"net/url"
"path/filepath"
"strings"
"time"
"github.com/go-stack/stack"
"github.com/karlmutch/errors"
)
// Storage abstracts the artifact backends (S3/minio, Google Storage, local
// file system) used by the runner.
type Storage interface {
	// Retrieve contents of the named storage object and optionally unpack it into the
	// user specified output directory
	//
	Fetch(name string, unpack bool, output string, tap io.Writer, timeout time.Duration) (warnings []errors.Error, err errors.Error)
	// File upload, deduplication is implemented outside of this interface
	//
	Deposit(src string, dest string, timeout time.Duration) (warnings []errors.Error, err errors.Error)
	// Hash can be used to retrive the hash of the contents of the file. The hash is
	// retrieved not computed and so is a lightweight operation common to both S3 and Google Storage.
	// The hash on some storage platforms is not a plain MD5 but uses multiple hashes from file
	// segments to increase the speed of hashing and also to reflect the multipart download
	// processing that was used for the file, for a full explanation please see
	// https://stackoverflow.com/questions/12186993/what-is-the-algorithm-to-compute-the-amazon-s3-etag-for-a-file-larger-than-5gb
	//
	Hash(name string, timeout time.Duration) (hash string, err errors.Error)
	Close()
}
// StoreOpts bundles everything NewStorage needs to select and configure a
// storage backend.
type StoreOpts struct {
	Art *Artifact // artifact whose Qualified URI selects the backend
	ProjectID string
	Group string
	Creds string // The credentials file name
	Env map[string]string // environment passed through to the backend
	Validate bool
	Timeout time.Duration
}
// NewStorage instantiates a Storage implementation selected by the scheme of
// the artifact URI in the supplied options.  gs://, s3://, and file:// are
// currently recognized; anything else yields an error.
//
func NewStorage(spec *StoreOpts) (stor Storage, err errors.Error) {
	if spec == nil {
		// The original wrapped a nil err here which produced an empty message;
		// create a fresh error instead
		return nil, errors.New("empty specification supplied").With("stack", stack.Trace().TrimRuntime())
	}
	if spec.Art == nil {
		// Guard the spec.Art.Qualified dereference below
		return nil, errors.New("specification is missing an artifact").With("stack", stack.Trace().TrimRuntime())
	}
	uri, errGo := url.ParseRequestURI(spec.Art.Qualified)
	if errGo != nil {
		return nil, errors.Wrap(errGo).With("stack", stack.Trace().TrimRuntime())
	}
	switch uri.Scheme {
	case "gs":
		return NewGSstorage(spec.ProjectID, spec.Creds, spec.Env, spec.Art.Bucket, spec.Validate, spec.Timeout)
	case "s3":
		// Derive missing key/bucket values from the URI path segments
		uriPath := strings.Split(uri.EscapedPath(), "/")
		if len(spec.Art.Key) == 0 {
			spec.Art.Key = strings.Join(uriPath[2:], "/")
		}
		if len(spec.Art.Bucket) == 0 {
			spec.Art.Bucket = uriPath[1]
		}
		if len(uri.Host) == 0 {
			return nil, errors.New("S3/minio endpoint lacks a scheme, or the host name was not specified").With("stack", stack.Trace().TrimRuntime())
		}
		// NOTE(review): uri.Scheme is always "s3" inside this case, so useSSL
		// is always false here -- confirm how TLS endpoints are intended to be
		// selected (endpoint scheme, query parameter, ...)
		useSSL := uri.Scheme == "https"
		return NewS3storage(spec.ProjectID, spec.Creds, spec.Env, uri.Host,
			spec.Art.Bucket, spec.Art.Key, spec.Validate, spec.Timeout, useSSL)
	case "file":
		return NewLocalStorage()
	default:
		return nil, errors.New(fmt.Sprintf("unknown, or unsupported URI scheme %s, s3 or gs expected", uri.Scheme)).With("stack", stack.Trace().TrimRuntime())
	}
}
// IsTar is used to test the extension to see if the presence of tar can be
// found, covering both plain .tar files and the common compressed variants.
//
func IsTar(name string) bool {
	switch {
	case strings.Contains(name, ".tar."):
		// This also matches .tar.gz, .tar.bz2, .tar.bzip2 etc., which made
		// the original dedicated suffix checks for those forms unreachable
		return true
	case strings.HasSuffix(name, ".tgz"),
		strings.HasSuffix(name, ".tar"),
		strings.HasSuffix(name, ".tbz2"),
		strings.HasSuffix(name, ".tbz"):
		return true
	}
	return false
}
// MimeFromExt maps a file name extension onto a mime type.  Extensions that
// are not one of the known archive/compression types fall through to content
// sniffing via DetectFileType, with application/octet-stream used as the
// default when sniffing fails.
//
func MimeFromExt(name string) (fileType string, err errors.Error) {
	switch filepath.Ext(name) {
	case ".gzip", ".gz":
		return "application/x-gzip", nil
	case ".zip":
		return "application/zip", nil
	case ".tgz": // Non standard extension as a result of studioml python code
		return "application/bzip2", nil
	case ".tb2", ".tbz", ".tbz2", ".bzip2", ".bz2": // Standard bzip2 extensions
		return "application/bzip2", nil
	case ".tar":
		return "application/tar", nil
	}
	detected, errGo := DetectFileType(name)
	if errGo != nil {
		// Fill in a default value even if there is an error
		return "application/octet-stream", errGo
	}
	return detected, nil
}
|
package pilot
import (
"github.com/julienschmidt/httprouter"
)
// apiServer bundles the HTTP routing state for the pilot API.
type apiServer struct {
	/*
		db *database
	*/
	// router dispatches incoming HTTP requests.
	router *httprouter.Router
}
// setup builds an HTTP router for the pilot API.
//
// NOTE(review): the handle for GET /payments is nil -- httprouter panics when
// a nil handle is registered, so this looks like scaffolding; confirm a real
// handler is intended.  The router is also local to this function and is never
// attached to apiServer or served.
func setup() {
	r := httprouter.New()
	r.GET("/payments", nil)
}
|
package consumer
import (
"encoding/json"
"fmt"
"github.com/Shopify/sarama"
"github.com/radyatamaa/loyalti-go-echo/src/api/host/Config"
"github.com/radyatamaa/loyalti-go-echo/src/domain/model"
"github.com/radyatamaa/loyalti-go-echo/src/domain/repository"
"os"
"os/signal"
"strings"
//"time"
)
// consumeOutlet starts one goroutine per topic that consumes outlet events
// from the first partition of each topic and applies them to the outlet
// repository, returning channels intended for message/error fan-in.
//
// NOTE(review): messages are handled entirely inside the per-topic goroutines
// and are never forwarded to the returned consumers channel -- confirm
// whether the caller is expected to receive them.
func consumeOutlet(topics []string, master sarama.Consumer) (chan *sarama.ConsumerMessage, chan *sarama.ConsumerError) {
	consumers := make(chan *sarama.ConsumerMessage)
	errors := make(chan *sarama.ConsumerError)
	fmt.Println("Kafka Outlet is Ready")
	for _, topic := range topics {
		// Skip Kafka's internal offsets topic
		if strings.Contains(topic, "__consumer_offsets") {
			continue
		}
		partitions, err := master.Partitions(topic)
		// The original discarded this error, which could lead to an index
		// panic on an empty partition list below
		if err != nil || len(partitions) == 0 {
			fmt.Printf("Topic %v Partitions: %v err: %v\n", topic, partitions, err)
			continue
		}
		// this only consumes partition no 1, you would probably want to consume all partitions
		consumer, err := master.ConsumePartition(topic, partitions[0], sarama.OffsetNewest)
		if nil != err {
			fmt.Printf("Topic %v Partitions: %v", topic, partitions)
			panic(err)
		}
		go func(topic string, consumer sarama.PartitionConsumer) {
			for {
				select {
				case consumerError := <-consumer.Errors():
					errors <- consumerError
					fmt.Println("consumerError: ", consumerError.Err)
				case msg := <-consumer.Messages():
					// Deserialize the payload and dispatch on the topic name
					outlet := model.Outlet{}
					switch msg.Topic {
					case "create-outlet-topic":
						err := json.Unmarshal([]byte(msg.Value), &outlet)
						if err != nil {
							fmt.Println(err.Error())
							os.Exit(1)
						}
						repository.CreateOutlet(&outlet)
						fmt.Println(string(msg.Value))
						fmt.Println("Outlet berhasil dibuat")
					case "update-outlet-topic":
						err := json.Unmarshal([]byte(msg.Value), &outlet)
						if err != nil {
							fmt.Println(err.Error())
							os.Exit(1)
						}
						repository.UpdateOutlet(&outlet)
						fmt.Println("Outlet berhasil diupdate")
					case "delete-outlet-topic":
						err := json.Unmarshal([]byte(msg.Value), &outlet)
						if err != nil {
							fmt.Println(err.Error())
							os.Exit(1)
						}
						repository.DeleteOutlet(&outlet)
						// Fixed copy-pasted log text that reported an update
						// on the delete path
						fmt.Println("Outlet berhasil dihapus")
					}
				}
			}
		}(topic, consumer)
	}
	return consumers, errors
}
// NewOutletConsumer connects to the Kafka brokers, subscribes to every topic
// on the cluster via consumeOutlet, and blocks handling outlet messages until
// a consumer error arrives or the process is interrupted.
func NewOutletConsumer() {
	brokers := []string{"11.11.5.146:9092"}
	kafkaConfig := Config.GetKafkaConfig("", "")
	master, err := sarama.NewConsumer(brokers, kafkaConfig)
	if err != nil {
		panic(err)
	}
	// The deferred close is the single owner of consumer shutdown; the
	// original also called master.Close() explicitly before returning,
	// causing a double close in this defer.
	defer func() {
		if err := master.Close(); err != nil {
			panic(err)
		}
	}()
	// The original discarded the error from Topics()
	topics, err := master.Topics()
	if err != nil {
		panic(err)
	}
	consumer, errors := consumeOutlet(topics, master)
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	// Count how many messages were processed
	msgCount := 0
	// doneCh signals that the run should finish
	doneCh := make(chan struct{})
	go func() {
		for {
			select {
			case msg := <-consumer:
				msgCount++
				fmt.Println("Received messages", string(msg.Key), string(msg.Value))
			case consumerError := <-errors:
				msgCount++
				fmt.Println("Received consumerError ", string(consumerError.Topic), string(consumerError.Partition), consumerError.Err)
				doneCh <- struct{}{}
			case <-signals:
				fmt.Println("Interrupt is detected")
				doneCh <- struct{}{}
			}
		}
	}()
	<-doneCh
	fmt.Println("Processed", msgCount, "messages")
}
|
package simplettl
import (
"sync"
"time"
)
// entry - typical element of cache
type entry struct {
	// value is the stored payload.
	value interface{}
	// expiry is the absolute deadline after which the entry is stale.  Add
	// always populates it, so a nil expiry never satisfies a Get.
	expiry *time.Time
}
// Cache - simple implementation of cache
// More information: https://en.wikipedia.org/wiki/Time_to_live
type Cache struct {
	// timeTTL is the sweep interval used by the cleanup goroutine in NewCache.
	timeTTL time.Duration
	// cache maps keys to live entries; guarded by lock.
	cache map[string]*entry
	// lock guards concurrent access to the cache map.
	lock *sync.RWMutex
}
// NewCache - initialization of new cache.
// To avoid mistakes the minimal sweep interval is clamped to 1 second (the
// previous comment claimed 1 minute, but the code below enforces time.Second).
// For simplification the key is a string and the cache has no stop method, so
// the sweeper goroutine started here runs for the lifetime of the process.
func NewCache(interval time.Duration) *Cache {
	if interval < time.Second {
		interval = time.Second
	}
	cache := &Cache{
		timeTTL: interval,
		cache:   make(map[string]*entry),
		lock:    &sync.RWMutex{},
	}
	// Background sweeper: once per tick, drop every entry whose expiry has
	// passed.  Get also filters expired entries, so entries awaiting a sweep
	// are never served.
	go func() {
		ticker := time.NewTicker(cache.timeTTL)
		for {
			// wait of ticker
			now := <-ticker.C
			// remove entry outside TTL
			cache.lock.Lock()
			for id, entry := range cache.cache {
				if entry.expiry != nil && entry.expiry.Before(now) {
					delete(cache.cache, id)
				}
			}
			cache.lock.Unlock()
		}
	}()
	return cache
}
// Count reports how many entries the cache map currently holds, including
// expired entries that the sweeper goroutine has not yet removed.
func (c *Cache) Count() int {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return len(c.cache)
}
// Get looks up key and returns its value together with true when the entry
// exists and has not yet expired; otherwise it returns nil and false.
func (c *Cache) Get(key string) (interface{}, bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	if e, ok := c.cache[key]; ok && e.expiry != nil && e.expiry.After(time.Now()) {
		return e.value, true
	}
	return nil, false
}
// Add stores value under key with the supplied time to live, replacing any
// previous entry for that key.
func (c *Cache) Add(key string, value interface{}, ttl time.Duration) {
	deadline := time.Now().Add(ttl)
	c.lock.Lock()
	defer c.lock.Unlock()
	c.cache[key] = &entry{value: value, expiry: &deadline}
}
// GetKeys returns every key currently present in the cache map, as
// interface{} values in no particular order.
func (c *Cache) GetKeys() []interface{} {
	c.lock.RLock()
	defer c.lock.RUnlock()
	keys := make([]interface{}, 0, len(c.cache))
	for k := range c.cache {
		keys = append(keys, k)
	}
	return keys
}
|
// Benchmark for memcache servers.
//
// Supports simultaneous benchmarking of multiple servers.
package main
import (
"flag"
"fmt"
memcache_org "github.com/bradfitz/gomemcache/memcache"
memcache_new "github.com/valyala/ybc/libs/go/memcache"
"github.com/vharitonsky/iniflags"
"log"
"math/rand"
"runtime"
"strings"
"sync"
"time"
)
var (
numCpu = runtime.NumCPU()
defaultConnectionsCount = numCpu
defaultMaxProcs = numCpu
defaultWorkersCount = 1024 * numCpu
defaultMaxPendingRequestsCount = defaultWorkersCount
)
var (
clientType = flag.String("clientType", "new", "Client type. May be 'new' or 'original'.\n"+
"'original' is https://github.com/bradfitz/gomemcache/tree/master/memcache,\n"+
"'new' is https://github.com/valyala/ybc/tree/master/libs/go/memcache")
connectionsCount = flag.Int("connectionsCount", defaultConnectionsCount, "The number of TCP connections to memcache server. Makes sense only for clientType=new")
getRatio = flag.Float64("getRatio", 0.9, "Ratio of 'get' requests for workerMode=GetSet.\n"+
"0.0 means 'no get requests'. 1.0 means 'no set requests'")
goMaxProcs = flag.Int("goMaxProcs", defaultMaxProcs, "The maximum number of simultaneous worker threads in go")
itemsCount = flag.Int("itemsCount", 500*1000, "The number of items in working set")
ioTimeout = flag.Duration("ioTimeout", time.Second*10, "Timeout for IO operations")
keySize = flag.Int("keySize", 16, "Key size in bytes")
maxPendingRequestsCount = flag.Int("maxPendingRequestsCount", defaultMaxPendingRequestsCount, "Maximum number of pending requests. Makes sense only for clientType=new")
maxResponseTime = flag.Duration("maxResponseTime", time.Millisecond*20, "Maximum response time shown on response time histogram")
osReadBufferSize = flag.Int("osReadBufferSize", 224*1024, "The size of read buffer in bytes in OS. Makes sense only for clientType=new")
osWriteBufferSize = flag.Int("osWriteBufferSize", 224*1024, "The size of write buffer in bytes in OS. Makes sense only for clientType=new")
requestsCount = flag.Int("requestsCount", 1000*1000, "The number of requests to send to memcache")
readBufferSize = flag.Int("readBufferSize", 56*1024, "The size of read buffer in bytes. Makes sense only for clientType=new")
responseTimeHistogramSize = flag.Int("responseTimeHistogramSize", 10, "The size of response time histogram")
serverAddrs = flag.String("serverAddrs", "localhost:11211", "Comma-delimited addresses of memcache servers to test")
valueSize = flag.Int("valueSize", 90, "Value size in bytes")
workerMode = flag.String("workerMode", "GetMiss", "Worker mode. May be 'GetMiss', 'GetHit', 'Set', 'GetSet'")
workersCount = flag.Int("workersCount", defaultWorkersCount, "The number of workers to send requests to memcache")
writeBufferSize = flag.Int("writeBufferSize", 56*1024, "The size of write buffer in bytes. Makes sense only for clientType=new")
)
var (
key, value []byte
)
// Stats accumulates measurements for a single worker; one instance per worker
// is kept and the instances are merged by printStats after the run.
type Stats struct {
	// responseTimeHistogram buckets successful request latencies between 0
	// and *maxResponseTime across *responseTimeHistogramSize buckets.
	responseTimeHistogram []uint32
	totalResponseTime time.Duration
	minResponseTime time.Duration
	maxResponseTime time.Duration
	cacheMissCount uint32
	cacheHitCount uint32
	errorsCount uint32
}
// updateResponseTimeHistogram folds the latency of a single request, measured
// from startTime until now, into the worker's stats: the running total, the
// min/max, and the bucketed histogram.
func updateResponseTimeHistogram(stats *Stats, startTime time.Time) {
	buckets := *responseTimeHistogramSize
	elapsed := time.Since(startTime)
	stats.totalResponseTime += elapsed
	if elapsed < stats.minResponseTime {
		stats.minResponseTime = elapsed
	}
	if elapsed > stats.maxResponseTime {
		stats.maxResponseTime = elapsed
	}
	// Scale elapsed onto [0, buckets) relative to maxResponseTime, clamping
	// out-of-range samples into the first/last bucket
	bucket := int(float64(elapsed) / float64(*maxResponseTime) * float64(buckets))
	if bucket > buckets-1 {
		bucket = buckets - 1
	} else if bucket < 0 {
		bucket = 0
	}
	stats.responseTimeHistogram[bucket]++
}
func dashBar(percent float64) string {
return strings.Repeat("#", int(percent/100.0*60.0))
}
// printStats merges the per-worker Stats slices into one aggregate and prints
// the latency histogram plus summary counters for the completed benchmark.
func printStats(stats []Stats, startTime *time.Time) {
	fmt.Printf("done\n")
	testDuration := time.Since(*startTime)
	n := *responseTimeHistogramSize
	var totalStats Stats
	totalStats.responseTimeHistogram = make([]uint32, n)
	// Seed the minimum with a huge sentinel so any observed value replaces it
	totalStats.minResponseTime = time.Hour * 24 * 365
	for i := 0; i < *workersCount; i++ {
		s := &stats[i]
		for j := 0; j < n; j++ {
			totalStats.responseTimeHistogram[j] += s.responseTimeHistogram[j]
		}
		totalStats.totalResponseTime += s.totalResponseTime
		if totalStats.minResponseTime > s.minResponseTime {
			totalStats.minResponseTime = s.minResponseTime
		}
		if totalStats.maxResponseTime < s.maxResponseTime {
			totalStats.maxResponseTime = s.maxResponseTime
		}
		totalStats.cacheMissCount += s.cacheMissCount
		totalStats.cacheHitCount += s.cacheHitCount
		totalStats.errorsCount += s.errorsCount
	}
	// Successful requests are exactly those recorded in the histogram
	var totalRequestsCount uint32
	for i := 0; i < n; i++ {
		totalRequestsCount += totalStats.responseTimeHistogram[i]
	}
	if totalRequestsCount == 0 {
		fmt.Printf("There are no successful requests performed\n")
		return
	}
	var avgResponseTime time.Duration
	if totalRequestsCount > 0 {
		avgResponseTime = totalStats.totalResponseTime / time.Duration(totalRequestsCount)
	}
	var requestsPerSecond float64
	if testDuration > 0 {
		requestsPerSecond = float64(totalRequestsCount) / (float64(testDuration) / float64(time.Second))
	}
	fmt.Printf("Response time histogram\n")
	interval := *maxResponseTime / time.Duration(n)
	for i := 0; i < n; i++ {
		startDuration := interval * time.Duration(i)
		endDuration := interval * time.Duration(i+1)
		// The final bucket is open-ended; label it with a huge upper bound
		if i == n-1 {
			endDuration = time.Hour
		}
		percent := float64(totalStats.responseTimeHistogram[i]) / float64(totalRequestsCount)
		percent *= 100.0
		fmt.Printf("%6.6s -%6.6s: %8.3f%% %s\n", startDuration, endDuration, percent, dashBar(percent))
	}
	fmt.Printf("Requests per second: %10.0f\n", requestsPerSecond)
	fmt.Printf("Test duration: %10s\n", testDuration)
	fmt.Printf("Avg response time: %10s\n", avgResponseTime)
	fmt.Printf("Min response time: %10s\n", totalStats.minResponseTime)
	fmt.Printf("Max response time: %10s\n", totalStats.maxResponseTime)
	fmt.Printf("Cache miss count: %10d\n", totalStats.cacheMissCount)
	fmt.Printf("Cache hit count: %10d\n", totalStats.cacheHitCount)
	hitMissrequestsCount := totalStats.cacheMissCount + totalStats.cacheHitCount
	cacheMissRatio := 0.0
	if hitMissrequestsCount > 0 {
		cacheMissRatio = float64(totalStats.cacheMissCount) / float64(hitMissrequestsCount)
	}
	fmt.Printf("Cache miss ratio: %10.3f%%\n", cacheMissRatio*100.0)
	fmt.Printf("Errors count: %10d\n", totalStats.errorsCount)
}
// workerGetMissOrg issues get requests for keys that are guaranteed to be
// absent, using the original gomemcache client.  Expected misses are counted
// as cache misses; any other outcome counts as an error.
func workerGetMissOrg(client *memcache_org.Client, wg *sync.WaitGroup, ch <-chan int, stats *Stats) {
	defer wg.Done()
	for range ch {
		missKey := fmt.Sprintf("miss_%s_%d", key, rand.Intn(*itemsCount))
		begin := time.Now()
		_, err := client.Get(missKey)
		if err != memcache_org.ErrCacheMiss {
			stats.errorsCount++
			continue
		}
		stats.cacheMissCount++
		updateResponseTimeHistogram(stats, begin)
	}
}
// workerGetMissNew issues get requests for keys that are guaranteed to be
// absent, using the new ybc memcache client.  Expected misses are counted as
// cache misses; any other outcome counts as an error.
func workerGetMissNew(client memcache_new.Cacher, wg *sync.WaitGroup, ch <-chan int, stats *Stats) {
	defer wg.Done()
	var item memcache_new.Item
	for range ch {
		item.Key = []byte(fmt.Sprintf("miss_%s_%d", key, rand.Intn(*itemsCount)))
		begin := time.Now()
		if err := client.Get(&item); err != memcache_new.ErrCacheMiss {
			stats.errorsCount++
			continue
		}
		stats.cacheMissCount++
		updateResponseTimeHistogram(stats, begin)
	}
}
// workerGetHitOrg reads keys from the precreated working set with the
// original client, counting hits, misses, and errors separately.
func workerGetHitOrg(client *memcache_org.Client, wg *sync.WaitGroup, ch <-chan int, stats *Stats) {
	defer wg.Done()
	for range ch {
		hitKey := fmt.Sprintf("%s_%d", key, rand.Intn(*itemsCount))
		begin := time.Now()
		switch _, err := client.Get(hitKey); {
		case err == memcache_org.ErrCacheMiss:
			stats.cacheMissCount++
		case err != nil:
			stats.errorsCount++
		default:
			stats.cacheHitCount++
			updateResponseTimeHistogram(stats, begin)
		}
	}
}
// workerGetHitNew reads keys from the precreated working set with the new
// client, counting hits, misses, and errors separately.
func workerGetHitNew(client memcache_new.Cacher, wg *sync.WaitGroup, ch <-chan int, stats *Stats) {
	defer wg.Done()
	var item memcache_new.Item
	for range ch {
		item.Key = []byte(fmt.Sprintf("%s_%d", key, rand.Intn(*itemsCount)))
		begin := time.Now()
		switch err := client.Get(&item); {
		case err == memcache_new.ErrCacheMiss:
			stats.cacheMissCount++
		case err != nil:
			stats.errorsCount++
		default:
			stats.cacheHitCount++
			updateResponseTimeHistogram(stats, begin)
		}
	}
}
// workerSetOrg writes the shared value into random working set keys using the
// original client, counting failed sets as errors.
func workerSetOrg(client *memcache_org.Client, wg *sync.WaitGroup, ch <-chan int, stats *Stats) {
	defer wg.Done()
	item := memcache_org.Item{Value: value}
	for range ch {
		item.Key = fmt.Sprintf("%s_%d", key, rand.Intn(*itemsCount))
		begin := time.Now()
		if err := client.Set(&item); err != nil {
			stats.errorsCount++
			continue
		}
		updateResponseTimeHistogram(stats, begin)
	}
}
// workerSetNew writes the shared value into random working set keys using the
// new client, counting failed sets as errors.
func workerSetNew(client memcache_new.Cacher, wg *sync.WaitGroup, ch <-chan int, stats *Stats) {
	defer wg.Done()
	item := memcache_new.Item{Value: value}
	for range ch {
		item.Key = []byte(fmt.Sprintf("%s_%d", key, rand.Intn(*itemsCount)))
		begin := time.Now()
		if err := client.Set(&item); err != nil {
			stats.errorsCount++
			continue
		}
		updateResponseTimeHistogram(stats, begin)
	}
}
// workerGetSetOrg mixes get and set requests against the working set using
// the original client; the getRatio flag controls the fraction of gets.
func workerGetSetOrg(client *memcache_org.Client, wg *sync.WaitGroup, ch <-chan int, stats *Stats) {
	defer wg.Done()
	var item memcache_org.Item
	for range ch {
		item.Key = fmt.Sprintf("%s_%d", key, rand.Intn(*itemsCount))
		begin := time.Now()
		if rand.Float64() >= *getRatio {
			// Set path
			item.Value = value
			if err := client.Set(&item); err != nil {
				stats.errorsCount++
				continue
			}
			updateResponseTimeHistogram(stats, begin)
			continue
		}
		// Get path
		_, err := client.Get(item.Key)
		if err == memcache_org.ErrCacheMiss {
			stats.cacheMissCount++
			continue
		}
		if err != nil {
			stats.errorsCount++
			continue
		}
		stats.cacheHitCount++
		updateResponseTimeHistogram(stats, begin)
	}
}
// workerGetSetNew mixes get and set requests against the working set using
// the new client; the getRatio flag controls the fraction of gets.
func workerGetSetNew(client memcache_new.Cacher, wg *sync.WaitGroup, ch <-chan int, stats *Stats) {
	defer wg.Done()
	var item memcache_new.Item
	for range ch {
		item.Key = []byte(fmt.Sprintf("%s_%d", key, rand.Intn(*itemsCount)))
		begin := time.Now()
		if rand.Float64() >= *getRatio {
			// Set path
			item.Value = value
			if err := client.Set(&item); err != nil {
				stats.errorsCount++
				continue
			}
			updateResponseTimeHistogram(stats, begin)
			continue
		}
		// Get path
		err := client.Get(&item)
		if err == memcache_new.ErrCacheMiss {
			stats.cacheMissCount++
			continue
		}
		if err != nil {
			stats.errorsCount++
			continue
		}
		stats.cacheHitCount++
		updateResponseTimeHistogram(stats, begin)
	}
}
// keyChars is the alphabet used when generating printable random keys.
var keyChars = []byte("1234567890qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM-+,./<>?;':\"[]{}=_()*&^%$#@!\\|`~")

// getRandomKey returns size random printable bytes drawn from keyChars.
func getRandomKey(size int) []byte {
	buf := make([]byte, size)
	for i := range buf {
		// rand.Intn replaces the previous rand.Int()%len modulo idiom
		buf[i] = keyChars[rand.Intn(len(keyChars))]
	}
	return buf
}
// getRandomValue returns size random bytes; unlike getRandomKey the bytes are
// not restricted to printable characters.
func getRandomValue(size int) []byte {
	out := make([]byte, size)
	for i := range out {
		out[i] = byte(rand.Int())
	}
	return out
}
// precreateItemsOrg fills the cache with the working set using the original
// client, splitting the creation work across *workersCount goroutines.
//
// NOTE(review): n is the integer quotient itemsCount/workersCount, so any
// remainder items are never created, and n is 0 when workersCount exceeds
// itemsCount -- confirm this is acceptable for the GetHit/GetSet modes.
func precreateItemsOrg(client *memcache_org.Client) {
	n := *itemsCount / *workersCount
	// workerFunc creates the n consecutive items beginning at index start.
	workerFunc := func(wg *sync.WaitGroup, start int) {
		defer wg.Done()
		item := memcache_org.Item{
			Value: value,
		}
		for i := start; i < start+n; i++ {
			item.Key = fmt.Sprintf("%s_%d", key, i)
			if err := client.Set(&item); err != nil {
				log.Fatalf("Error in Client.Set(): [%s]", err)
			}
		}
	}
	var wg sync.WaitGroup
	// Block until every precreation goroutine has finished before returning.
	defer wg.Wait()
	for i := 0; i < *workersCount; i++ {
		wg.Add(1)
		go workerFunc(&wg, i*n)
	}
}
// precreateItemsNew fills the cache with the full working set using the new
// client's fire-and-forget SetNowait call, so no per-item error checking is
// performed.
func precreateItemsNew(client memcache_new.Cacher) {
	item := memcache_new.Item{Value: value}
	for idx := 0; idx < *itemsCount; idx++ {
		item.Key = []byte(fmt.Sprintf("%s_%d", key, idx))
		client.SetNowait(&item)
	}
}
// getWorkerOrg builds the worker function for the original gomemcache client,
// performing any working-set precreation required by the selected worker
// mode, and fatally exits on an unknown mode.
func getWorkerOrg(serverAddrs_ []string) func(wg *sync.WaitGroup, ch chan int, stats *Stats) {
	client := memcache_org.New(serverAddrs_...)
	client.Timeout = *ioTimeout
	var worker func(*memcache_org.Client, *sync.WaitGroup, <-chan int, *Stats)
	switch *workerMode {
	case "GetHit":
		precreateItemsOrg(client)
		worker = workerGetHitOrg
	case "GetMiss":
		worker = workerGetMissOrg
	case "Set":
		worker = workerSetOrg
	case "GetSet":
		precreateItemsOrg(client)
		worker = workerGetSetOrg
	default:
		log.Fatalf("Unknown workerMode=[%s]", *workerMode)
	}
	return func(wg *sync.WaitGroup, ch chan int, stats *Stats) {
		worker(client, wg, ch, stats)
	}
}
// getWorkerNew builds the worker function for the new ybc memcache client,
// choosing a single-server or static distributed client from the address list
// and performing any working-set precreation required by the worker mode.
func getWorkerNew(serverAddrs_ []string) func(wg *sync.WaitGroup, ch chan int, stats *Stats) {
	config := memcache_new.ClientConfig{
		ConnectionsCount: *connectionsCount,
		MaxPendingRequestsCount: *maxPendingRequestsCount,
		ReadBufferSize: *readBufferSize,
		WriteBufferSize: *writeBufferSize,
		OSReadBufferSize: *osReadBufferSize,
		OSWriteBufferSize: *osWriteBufferSize,
	}
	var client memcache_new.Cacher
	// One address: plain client.  Two or more: static distributed client.
	if len(serverAddrs_) < 2 {
		client = &memcache_new.Client{
			// NOTE(review): this uses the raw serverAddrs flag rather than
			// serverAddrs_[0]; the two are identical when a single address is
			// supplied (strings.Split of a comma-free string), but
			// serverAddrs_[0] would be clearer -- confirm.
			ServerAddr: *serverAddrs,
			ClientConfig: config,
		}
		client.Start()
	} else {
		c := &memcache_new.DistributedClient{
			ClientConfig: config,
		}
		c.StartStatic(serverAddrs_)
		client = c
	}
	worker := workerGetMissNew
	switch *workerMode {
	case "GetHit":
		precreateItemsNew(client)
		worker = workerGetHitNew
	case "GetMiss":
		// Remove any leftover value so the miss path really misses
		client.Delete(key)
		worker = workerGetMissNew
	case "Set":
		worker = workerSetNew
	case "GetSet":
		precreateItemsNew(client)
		worker = workerGetSetNew
	default:
		log.Fatalf("Unknown workerMode=[%s]", *workerMode)
	}
	return func(wg *sync.WaitGroup, ch chan int, stats *Stats) {
		worker(client, wg, ch, stats)
	}
}
// main drives the benchmark: parse flags, build the worker for the selected
// client type, fan requestsCount request tokens out to workersCount workers,
// then print the aggregated stats once the workers drain the channel.
func main() {
	iniflags.Parse()
	fmt.Printf("Config:\n")
	flag.VisitAll(func(f *flag.Flag) {
		fmt.Printf("%s=%v\n", f.Name, f.Value)
	})
	fmt.Printf("\n")
	rand.Seed(time.Now().UnixNano())
	runtime.GOMAXPROCS(*goMaxProcs)
	serverAddrs_ := strings.Split(*serverAddrs, ",")
	fmt.Printf("Preparing...")
	key = getRandomKey(*keySize)
	value = getRandomValue(*valueSize)
	stats := make([]Stats, *workersCount)
	for i := 0; i < *workersCount; i++ {
		stats[i].responseTimeHistogram = make([]uint32, *responseTimeHistogramSize)
		// Sentinel minimum so the first recorded sample always replaces it
		stats[i].minResponseTime = time.Hour * 24 * 365
	}
	var startTime time.Time
	// Deferred before wg.Wait below, so by LIFO ordering the workers finish
	// first and the stats are printed last
	defer printStats(stats, &startTime)
	var worker func(wg *sync.WaitGroup, ch chan int, stats *Stats)
	switch *clientType {
	case "original":
		worker = getWorkerOrg(serverAddrs_)
	case "new":
		worker = getWorkerNew(serverAddrs_)
	default:
		log.Fatalf("Unknown clientType=[%s]. Expected 'new' or 'original'", *clientType)
	}
	fmt.Printf("done\n")
	fmt.Printf("starting...")
	startTime = time.Now()
	ch := make(chan int, 1000000)
	var wg sync.WaitGroup
	defer wg.Wait()
	for i := 0; i < *workersCount; i++ {
		wg.Add(1)
		go worker(&wg, ch, &stats[i])
	}
	// One token per request; closing the channel lets the workers exit
	for i := 0; i < *requestsCount; i++ {
		ch <- i
	}
	close(ch)
}
|
package daily_notifications
import (
"testing"
"time"
)
// TestParsingStringTime exercises stringTimeToDayOffset with malformed inputs
// and a table of valid HH:MM strings mapped to day-offset seconds.
func TestParsingStringTime(t *testing.T) {
	for _, bad := range []string{"00", ""} {
		assertParsedTimeError(t, bad)
	}
	cases := []struct {
		in   string
		want int
	}{
		{"00:00", 0},
		{"00:01", 60},
		{"01:00", 3600},
		{"23:59", 23*60*60 + 59*60},
	}
	for _, c := range cases {
		assertParsedTime(t, c.in, c.want)
	}
}
// TestCheckingNotificationWasSentToday verifies CheckWasSentToday against a
// fixed "now": never activated, activated exactly at now, and activated one
// second after now.
func TestCheckingNotificationWasSentToday(t *testing.T) {
	notification := DailyNotificationConfig{
		UserID: 0,
		NotificationTimestamp: 3600 * 12,
		LastTimeActivated: 0,
	}
	curTime := time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)
	// Never activated (LastTimeActivated == 0) must report not sent
	wasSent := notification.CheckWasSentToday(curTime)
	if wasSent != false {
		t.Error("wrong CheckWasSentToday() value, expected false")
	}
	// Activated exactly at curTime must report sent
	notification.LastTimeActivated = curTime.Unix()
	wasSent = notification.CheckWasSentToday(curTime)
	if wasSent != true {
		t.Error("wrong CheckWasSentToday() value, expected true")
	}
	// Activated after curTime (same day) must still report sent
	notification.LastTimeActivated = curTime.Add(time.Second).Unix()
	wasSent = notification.CheckWasSentToday(curTime)
	if wasSent != true {
		t.Error("wrong CheckWasSentToday() value, expected true")
	}
}
// TestCheckingNotificationTimeToSend verifies CheckIsTimeToSend for a noon
// notification (NotificationTimestamp 12h into the day): before noon, exactly
// at noon, and at the end of the day.
func TestCheckingNotificationTimeToSend(t *testing.T) {
	notification := DailyNotificationConfig{
		UserID: 0,
		NotificationTimestamp: 3600 * 12,
		LastTimeActivated: 0,
	}
	// Midnight is before the scheduled noon slot
	curTime := time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)
	timeToSend := notification.CheckIsTimeToSend(curTime)
	if timeToSend != false {
		t.Error("wrong CheckIsTimeToSend() value, expected false")
	}
	// Exactly noon is inside the send window
	curTime = time.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
	timeToSend = notification.CheckIsTimeToSend(curTime)
	if timeToSend != true {
		t.Error("wrong CheckIsTimeToSend() value, expected true")
	}
	// The end of day is still past the scheduled slot
	curTime = time.Date(2015, 1, 1, 23, 59, 59, 59, time.UTC)
	timeToSend = notification.CheckIsTimeToSend(curTime)
	if timeToSend != true {
		t.Error("wrong CheckIsTimeToSend() value, expected true")
	}
}
// assertParsedTime parses testTime and fails the test when parsing errors out
// or the resulting day offset differs from expectedResult.
func assertParsedTime(t *testing.T, testTime string, expectedResult int) {
	got, err := stringTimeToDayOffset(testTime)
	if err != nil {
		t.Errorf("error parsing string time '%v': error %v", testTime, err)
	}
	if got != expectedResult {
		t.Errorf("error parsing string time '%v': got result %v, expected %v", testTime, got, expectedResult)
	}
}
// assertParsedTimeError parses testTime and fails the test when parsing
// unexpectedly succeeds.
func assertParsedTimeError(t *testing.T, testTime string) {
	got, err := stringTimeToDayOffset(testTime)
	if err == nil {
		t.Errorf("error testing parsing time '%v': expected error, got result %v", testTime, got)
	}
}
|
package models
import (
"api/utils"
"fmt"
)
// Fetchsingers returns every artist row found in the artist table.  Rows that
// fail to scan are logged and skipped rather than aborting the whole fetch.
func Fetchsingers() (artists []Artist, err error) {
	var artist Artist
	db, err := utils.Connecttodb()
	if err != nil {
		fmt.Println("unable to connect todb")
		return
	}
	query := "select * from artist"
	rows, err := db.Query(query)
	if err != nil {
		fmt.Println("unable to fetch singers")
		return
	}
	// Release the result set; the original leaked it
	defer rows.Close()
	for rows.Next() {
		if err = rows.Scan(&artist.Id, &artist.StageName, &artist.FullName, &artist.DateOfBirth,
			&artist.Nationality, &artist.CreatedAt); err != nil {
			fmt.Println("Unable to scan because ", err)
			continue
		}
		artists = append(artists, artist)
	}
	// Surface any iteration error instead of a stale, already-skipped scan
	// error the original could return
	err = rows.Err()
	return
}
// Create_artist inserts a new artist row and returns the generated id.
func Create_artist(artist NewArtist) (id int64, err error) {
	query := "insert into artist (stage_name, full_name, date_of_birth, nationality)" +
		"values (?,?,?,?)"
	db, err := utils.Connecttodb()
	if err != nil {
		fmt.Println("unable to connect todb")
		return
	}
	row, err := db.Exec(query, artist.StageName, artist.FullName, artist.DateOfBirth, artist.Nationality)
	if err != nil {
		fmt.Println(err)
		return
	}
	// The original discarded this error, silently returning id 0
	id, err = row.LastInsertId()
	if err != nil {
		fmt.Println(err)
	}
	return
}
// Fetchsinger looks an artist up by stage name.  The zero-valued Artist is
// returned when the connection or scan fails; a missing row surfaces as
// sql.ErrNoRows from Scan and is logged like any other error.
func Fetchsinger(singer string) (artist Artist) {
	db, err := utils.Connecttodb()
	if err != nil {
		fmt.Println("unable to connect todb")
		return
	}
	query := "select * from artist where stage_name = ?;"
	// QueryRow never returns nil, so the nil check the original performed
	// here was dead code; errors are reported by Scan below instead
	row := db.QueryRow(query, singer)
	err = row.Scan(&artist.Id, &artist.StageName, &artist.FullName, &artist.DateOfBirth,
		&artist.Nationality, &artist.CreatedAt)
	if err != nil {
		fmt.Println(err)
		return
	}
	return
}
|
package main
import (
"iv-code-challenge/api/db"
"iv-code-challenge/api/services"
"iv-code-challenge/api/server"
"log"
)
// main wires the submissions API together: a mongodb session, the submission
// service on top of a copy of it, and the HTTP server which blocks serving.
func main() {
	// The original declared `var err error` redundantly before this :=
	session, err := db.NewSession()
	if err != nil {
		log.Fatalln("unable to connect to mongodb")
	}
	ss := services.NewSubmissionService(session.Copy(), "submissions-api", "submissions")
	s := server.NewServer(ss)
	s.Start()
}
|
package bid
import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/gookit/gcli/v3"
dcli "github.com/ovrclk/akash/x/deployment/client/cli"
"github.com/ovrclk/akash/x/market/client/cli"
"github.com/ovrclk/akash/x/market/types"
"github.com/ovrclk/akcmd/client"
"github.com/ovrclk/akcmd/flags"
)
// TxCmd returns the parent "bid" command; it prints help when invoked
// directly and exposes the create and close subcommands.
func TxCmd() *gcli.Command {
	return &gcli.Command{
		Name: "bid",
		Desc: "Bid subcommands",
		Func: func(cmd *gcli.Command, args []string) error {
			cmd.ShowHelp()
			return nil
		},
		Subs: []*gcli.Command{CreateCMD(), CloseCMD()},
	}
}
// createOpts holds the flag-bound options for the bid create command.
var createOpts = struct {
	price string
}{}

// CreateCMD returns the "create" command used to place a market bid built
// from the price, order id, and deposit flags.
func CreateCMD() *gcli.Command {
	run := func(cmd *gcli.Command, args []string) error {
		clientCtx, err := client.GetClientTxContext()
		if err != nil {
			return err
		}
		coins, err := sdk.ParseCoinNormalized(createOpts.price)
		if err != nil {
			return err
		}
		id, err := flags.OrderIDFromFlags(dcli.WithProvider(clientCtx.FromAddress))
		if err != nil {
			return err
		}
		deposit, err := flags.DepositFromFlags()
		if err != nil {
			return err
		}
		msg := &types.MsgCreateBid{
			Order:    id,
			Provider: clientCtx.GetFromAddress().String(),
			Price:    coins,
			Deposit:  deposit,
		}
		if err := msg.ValidateBasic(); err != nil {
			return err
		}
		return client.BroadcastTX(clientCtx, msg)
	}
	return &gcli.Command{
		Name: "create",
		Desc: "Create a market bid",
		Config: func(cmd *gcli.Command) {
			flags.AddTxFlagsToCmd(cmd)
			flags.AddOrderIDFlags(cmd)
			cmd.StrOpt(&createOpts.price, "price", "", "", "Bid Price")
			flags.AddDepositFlags(cmd, cli.DefaultDeposit)
		},
		Func: run,
	}
}
// CloseCMD returns the "close" command used to withdraw an existing market
// bid identified by the bid id flags.
func CloseCMD() *gcli.Command {
	run := func(cmd *gcli.Command, args []string) error {
		clientCtx, err := client.GetClientTxContext()
		if err != nil {
			return err
		}
		id, err := flags.BidIDFromFlags(dcli.WithProvider(clientCtx.FromAddress))
		if err != nil {
			return err
		}
		msg := &types.MsgCloseBid{BidID: id}
		if err := msg.ValidateBasic(); err != nil {
			return err
		}
		return client.BroadcastTX(clientCtx, msg)
	}
	return &gcli.Command{
		Name: "close",
		Desc: "Close a market bid",
		Config: func(cmd *gcli.Command) {
			flags.AddTxFlagsToCmd(cmd)
			flags.AddBidIDFlags(cmd)
		},
		Func: run,
	}
}
|
package scraper
import (
"fmt"
"regexp"
"strconv"
"strings"
"github.com/PuerkitoBio/goquery"
"github.com/gocolly/colly"
"github.com/yevhenshymotiuk/ekatalog-scraper/items"
)
// removeSpaces strips every non-breaking space (U+00A0) from s.
func removeSpaces(s string) string {
	return strings.Replace(s, "\u00a0", "", -1)
}
// trimCapacitySuffix drops the trailing "\u00a0ГБ" (non-breaking space plus
// Russian "GB") unit marker from a capacity string, when present.
func trimCapacitySuffix(s string) string {
	const suffix = "\u00a0ГБ"
	if strings.HasSuffix(s, suffix) {
		return s[:len(s)-len(suffix)]
	}
	return s
}
// productNameRegexp extracts the resolved product name from an ek.ua item
// URL.  Compiled once at package init rather than on every call, which the
// original did.
var productNameRegexp = regexp.MustCompilePOSIX(
	`https:\/\/ek\.ua\/(([[:alnum:]]|\-)+)\.htm`,
)

// specificationsURL converts a product page URL into the URL of its
// table-view specifications page.  The URL is assumed to match the ek.ua
// item pattern; a non-matching URL panics, as before.
func specificationsURL(URL string) string {
	groups := productNameRegexp.FindStringSubmatch(URL)
	return fmt.Sprintf(
		"https://ek.ua/ek-item.php?resolved_name_=%s&view_=tbl",
		groups[1],
	)
}
// scrapeCategory visits the product page and extracts the breadcrumb category
// text from the ".path_lnk" element, returning it with any visit error.
func scrapeCategory(URL string) (string, error) {
	collector := colly.NewCollector()
	category := ""
	collector.OnHTML(".path_lnk", func(e *colly.HTMLElement) {
		category = e.DOM.Text()
	})
	err := collector.Visit(URL)
	return category, err
}
// scrapeLaptop extracts a single laptop modification from one ".conf-tr"
// configuration table row: CPU series/model, RAM and drive capacities, GPU
// model, drive type, and the price range.  The zero Laptop is returned with
// the error when a numeric field fails to parse.
//
// NOTE(review): the Russian title attributes used as selectors tie this code
// to the ek.ua page markup -- confirm they are stable across the site.
func scrapeLaptop(row *colly.HTMLElement) (items.Laptop, error) {
	var (
		laptop items.Laptop
		price  items.Price
	)
	// RAM capacity, stripped of its "\u00a0ГБ" unit suffix
	ramCapacity, err := strconv.Atoi(
		trimCapacitySuffix(
			row.DOM.Find(
				".conf-td span[title='Объем оперативной памяти']",
			).Text(),
		),
	)
	if err != nil {
		return laptop, err
	}
	// Drive capacity, same unit handling as RAM
	driveCapacity, err := strconv.Atoi(
		trimCapacitySuffix(
			row.DOM.Find(".conf-td span[title='Емкость накопителя']").Text(),
		),
	)
	if err != nil {
		return laptop, err
	}
	pricesNode := row.DOM.Find(".price-int")
	pricesSeparator := ".."
	switch {
	// Modification record contains both minimal and maximal price
	case strings.Contains(pricesNode.Text(), pricesSeparator):
		var minPrice, maxPrice int
		// Collect the two price spans, dropping non-breaking spaces used as
		// thousands separators
		priceTexts := pricesNode.Find(
			"span",
		).Map(
			func(_ int, s *goquery.Selection) string {
				return strings.Replace(
					strings.TrimSpace(s.Text()),
					"\u00a0",
					"",
					-1,
				)
			},
		)
		minPrice, err = strconv.Atoi(priceTexts[0])
		if err != nil {
			return laptop, err
		}
		maxPrice, err = strconv.Atoi(priceTexts[1])
		if err != nil {
			return laptop, err
		}
		price = items.Price{
			Min: minPrice,
			Max: maxPrice,
		}
	// Modification record contains only minimal price
	case strings.Contains(pricesNode.Text(), "грн"):
		minPrice, err := strconv.Atoi(
			strings.Replace(
				strings.TrimSpace(pricesNode.Find("span").Text()),
				"\u00a0",
				"",
				-1,
			),
		)
		if err != nil {
			return laptop, err
		}
		price = items.Price{
			Min: minPrice,
		}
	// Modification doesn't have price
	default:
		price = items.Price{}
	}
	// Assemble the laptop from the remaining text-only fields
	laptop = items.Laptop{
		CPU: items.CPU{
			Series: strings.TrimSpace(
				row.DOM.Find(
					".conf-td span[title='Серия процессора']",
				).Text(),
			),
			Model: strings.TrimSpace(
				row.DOM.Find(
					".conf-td span[title='Модель процессора']",
				).Text(),
			),
		},
		RAM: items.RAM{
			Capacity: ramCapacity,
		},
		GPU: items.GPU{
			Model: strings.TrimSpace(
				row.DOM.Find(
					".conf-td span[title='Модель видеокарты']",
				).Text(),
			),
		},
		Drive: items.Drive{
			Type: strings.TrimSpace(
				row.DOM.Find(".conf-td span[title='Тип накопителя']").Text(),
			),
			Capacity: driveCapacity,
		},
		Price: price,
	}
	return laptop, nil
}
// scrapeSmartphone extracts a smartphone modification from a specifications
// table: CPU and GPU models, RAM/drive capacities, drive type, and the
// low/high price range.  The zero Smartphone is returned with the error when
// a numeric field fails to parse.
//
// NOTE(review): the cell positions 7, 10, 11, 12, and 13 within the ".op3"
// selection are hard-coded; goquery's Get panics when an index is out of
// range, and FirstChild may be nil for an empty cell -- confirm the table
// layout is stable before relying on this in production.
func scrapeSmartphone(table *colly.HTMLElement) (items.Smartphone, error) {
	var smartphone items.Smartphone
	tds := table.DOM.Find(".op3")
	// RAM capacity from cell 11, stripped of its "\u00a0ГБ" unit suffix
	ramCapacity, err := strconv.Atoi(
		trimCapacitySuffix(tds.Get(11).FirstChild.Data),
	)
	if err != nil {
		return smartphone, err
	}
	// Drive capacity from cell 12, same unit handling
	driveCapacity, err := strconv.Atoi(
		trimCapacitySuffix(tds.Get(12).FirstChild.Data),
	)
	if err != nil {
		return smartphone, err
	}
	// Price bounds come from schema.org lowPrice/highPrice annotations, with
	// non-breaking thousands separators removed
	minPrice, err := strconv.Atoi(
		removeSpaces(
			table.DOM.Find("span[itemprop='lowPrice']").First().Text(),
		),
	)
	if err != nil {
		return smartphone, err
	}
	maxPrice, err := strconv.Atoi(
		removeSpaces(
			table.DOM.Find("span[itemprop='highPrice']").First().Text(),
		),
	)
	if err != nil {
		return smartphone, err
	}
	smartphone = items.Smartphone{
		CPU: items.CPU{
			Model: strings.TrimSpace(tds.Get(7).FirstChild.Data),
		},
		RAM: items.RAM{
			Capacity: ramCapacity,
		},
		GPU: items.GPU{
			Model: strings.TrimSpace(tds.Get(10).FirstChild.Data),
		},
		Drive: items.Drive{
			Type: tds.Get(13).FirstChild.Data,
			Capacity: driveCapacity,
		},
		Price: items.Price{
			Min: minPrice,
			Max: maxPrice,
		},
	}
	return smartphone, nil
}
// scrapeProduct scrapes a single product page, dispatching to the laptop row
// scraper for the "Ноутбуки" category and to the smartphone specifications
// table scraper otherwise.
// TODO: Add other categories (not only Laptops)
func scrapeProduct(URL string) (product items.Product, err error) {
	var (
		name          string
		modifications []items.ModificationType
	)
	category, err := scrapeCategory(URL)
	if err != nil {
		// The original dropped this error by overwriting err with the later
		// Visit result
		return product, err
	}
	c := colly.NewCollector()
	c.OnHTML("#top-page-title b.ib", func(e *colly.HTMLElement) {
		name = e.DOM.Text()
	})
	switch category {
	case "Ноутбуки":
		// Find row which correspond to modification
		c.OnHTML(".conf-tr", func(e *colly.HTMLElement) {
			laptop := items.Laptop{}
			laptop, err = scrapeLaptop(e)
			modifications = append(modifications, laptop)
		})
	default:
		URL = specificationsURL(URL)
		c.OnHTML(".common-table-div", func(e *colly.HTMLElement) {
			smartphone := items.Smartphone{}
			smartphone, err = scrapeSmartphone(e)
			modifications = append(modifications, smartphone)
		})
	}
	// NOTE(review): any per-row error captured in err by the callbacks above
	// is overwritten by this Visit result -- confirm whether row failures
	// should abort the scrape instead.
	err = c.Visit(URL)
	product = items.Product{Name: name, Modifications: modifications}
	return
}
// ScrapeProducts scrapes products by URLs. On the first failure it returns
// the products scraped so far together with the error.
func ScrapeProducts(URLs []string) ([]items.Product, error) {
	scraped := make([]items.Product, 0, len(URLs))
	for _, u := range URLs {
		product, err := scrapeProduct(u)
		if err != nil {
			return scraped, err
		}
		scraped = append(scraped, product)
	}
	return scraped, nil
}
|
package main
import (
"fmt"
"github.com/mmcdole/gofeed"
"time"
"os"
"github.com/anaskhan96/soup"
)
// getItems fetches the RSS/Atom feed at url and returns the entries
// published at or after the `after` cutoff.
func getItems(url string, after time.Time) (items []gofeed.Item, err error) {
	fp := gofeed.NewParser()
	feed, err := fp.ParseURL(url)
	if err != nil {
		return items, err
	}
	for _, item := range feed.Items {
		// Bug fix: PublishedParsed is a pointer and is nil when the entry
		// has no parseable date; dereferencing it panicked. Skip such
		// entries as well as old ones.
		if item.PublishedParsed == nil || item.PublishedParsed.Before(after) {
			continue
		}
		items = append(items, *item)
	}
	return items, nil
}
// getSites scrapes the craigslist site directory and returns the href of
// every regional site link. On a fetch failure the process exits.
func getSites() (links []string) {
	resp, err := soup.Get("https://www.craigslist.org/about/sites")
	if err != nil {
		// Bug fix: previously the process exited with no diagnostic at all.
		fmt.Fprintln(os.Stderr, "fetching craigslist site list:", err)
		os.Exit(1)
	}
	doc := soup.HTMLParse(resp)
	for _, link := range doc.Find("div", "class", "box").FindAll("a") {
		links = append(links, link.Attrs()["href"])
	}
	return links
}
// genUrls builds one RSS search URL for every (subcategory, term) pair,
// rooted at the given site url.
func genUrls(url string, subs []string, terms []string) (urls []string) {
	for _, sub := range subs {
		base := url + "search/" + sub + "?format=rss&query="
		for _, term := range terms {
			urls = append(urls, base+term)
		}
	}
	return urls
}
// main fans out one goroutine per generated search URL (bounded by
// `concurrency`), funnels matching feed items into `hits`, and prints them
// from a single consumer goroutine.
func main() {
	sites := getSites()
	hoursAgo := 48
	concurrency := 10
	subs := []string{
		"crs",
		"crg",
	}
	terms := []string{
		"need+logo",
	}
	cutoff := time.Now().Add(-time.Hour * time.Duration(hoursAgo))
	hits := make(chan gofeed.Item)
	done := make(chan int)
	throttle := make(chan int, concurrency)
	// printed is closed once the printer goroutine has drained `hits`.
	// Bug fix: without it, main could exit before the final results were
	// printed (the printer goroutine was never waited on).
	printed := make(chan struct{})
	go func() {
		defer close(printed)
		for link := range hits {
			fmt.Println(link.Link)
			//fmt.Println(link.Description)
			fmt.Println(link.PublishedParsed)
		}
	}()
	urls := []string{}
	for _, site := range sites {
		urls = append(urls, genUrls(site, subs, terms)...)
	}
	for i, url := range urls {
		throttle <- 1
		fmt.Println(fmt.Sprintf("[%d of %d] ", i+1, len(urls)), "Searching "+url)
		go func(url string) {
			defer func() {
				<-throttle
				done <- 1
			}()
			items, err := getItems(
				url,
				cutoff,
			)
			if err != nil {
				fmt.Println(err)
				return
			}
			for _, item := range items {
				hits <- item
			}
		}(url)
	}
	for i := 0; i < len(urls); i++ {
		<-done
	}
	close(hits)
	// Wait for the printer to finish before announcing completion.
	<-printed
	fmt.Println("Done")
}
|
package network
import (
"fmt"
"net"
linuxproc "github.com/c9s/goprocinfo/linux"
)
// Address is a single IP address paired with its netmask, both rendered
// as strings.
type Address struct {
	IP string
	Mask string
}

// Iface aggregates a network interface's identity, addresses, MTU, and
// the kernel counters read from /proc/net/dev.
type Iface struct {
	Name string
	Mac string
	Addrv4 []*Address
	Addrv6 []*Address
	MTU int
	Stat *linuxproc.NetworkStat
}

// procNetDevPath is the procfs file holding per-interface counters.
const procNetDevPath = "/proc/net/dev"
// NewIface builds an Iface for the named interface; it returns an error
// when the name is not listed in /proc/net/dev.
func NewIface(name string) (*Iface, error) {
	stats, err := getNetworkStats()
	if err != nil {
		return nil, err
	}
	stat, ok := stats[name]
	if !ok {
		return nil, fmt.Errorf("interface %v does not exist", name)
	}
	return newIfaceByStat(stat)
}
// NewIfaces builds an Iface for every interface in /proc/net/dev.
// Interfaces that cannot be resolved are skipped silently (unchanged
// behavior from before).
func NewIfaces() ([]*Iface, error) {
	stats, err := getNetworkStats()
	if err != nil {
		return nil, err
	}
	ifaces := make([]*Iface, 0, len(stats))
	// Idiom fix: range directly over the values instead of the redundant
	// `for name, _ := range` followed by a second map lookup.
	for _, stat := range stats {
		iface, err := newIfaceByStat(stat)
		if err != nil {
			continue
		}
		ifaces = append(ifaces, iface)
	}
	return ifaces, nil
}
// getNetworkStats reads /proc/net/dev and indexes the per-interface
// counters by interface name.
func getNetworkStats() (map[string]*linuxproc.NetworkStat, error) {
	stats, err := linuxproc.ReadNetworkStat(procNetDevPath)
	if err != nil {
		return nil, err
	}
	byName := make(map[string]*linuxproc.NetworkStat, len(stats))
	for i := range stats {
		byName[stats[i].Iface] = &stats[i]
	}
	return byName, nil
}
// newIfaceByStat resolves the OS interface named in s and combines its
// addresses, MAC, and MTU with the /proc/net/dev counters into an Iface.
// Loopback and unrecognized addresses are skipped.
func newIfaceByStat(s *linuxproc.NetworkStat) (*Iface, error) {
	iface, err := net.InterfaceByName(s.Iface)
	if err != nil {
		return nil, err
	}
	addrs, err := iface.Addrs()
	if err != nil {
		return nil, err
	}
	addrv4 := make([]*Address, 0)
	addrv6 := make([]*Address, 0)
	for _, addr := range addrs {
		var ip net.IP
		var mask net.IPMask
		switch v := addr.(type) {
		case *net.IPNet:
			ip = v.IP
			mask = v.Mask
		case *net.IPAddr:
			ip = v.IP
			// NOTE(review): DefaultMask is defined only for IPv4 and
			// returns nil for IPv6 addresses; a nil mask reaching
			// ipv6MaskString below would panic — confirm whether Addrs()
			// can yield an IPv6 *net.IPAddr here.
			mask = ip.DefaultMask()
		}
		// Address types other than the two above leave ip nil.
		if ip == nil || ip.IsLoopback() {
			continue
		}
		if ip.To4() != nil {
			addrv4 = append(addrv4, &Address{
				IP: ip.String(),
				Mask: ipv4MaskString(mask),
			})
		} else {
			addrv6 = append(addrv6, &Address{
				IP: ip.String(),
				Mask: ipv6MaskString(mask),
			})
		}
	}
	return &Iface{
		Name: s.Iface,
		Mac: iface.HardwareAddr.String(),
		Addrv4: addrv4,
		Addrv6: addrv6,
		MTU: iface.MTU,
		Stat: s,
	}, nil
}
func ipv4MaskString(mask net.IPMask) string {
const maxIPv4StringLen = len("255.255.255.255")
b := make([]byte, maxIPv4StringLen)
n := ubtoa(b, 0, mask[0])
b[n] = '.'
n++
n += ubtoa(b, n, mask[1])
b[n] = '.'
n++
n += ubtoa(b, n, mask[2])
b[n] = '.'
n++
n += ubtoa(b, n, mask[3])
return string(b[:n])
}
// ipv6MaskString renders a 16-byte mask in canonical IPv6 text form,
// compressing the longest run of zero 16-bit groups to "::" (the same
// formatting rules net.IP.String applies). It indexes mask[0..15]
// directly, so a shorter (or nil) mask panics.
func ipv6MaskString(mask net.IPMask) string {
	const (
		IPv6len = 16
		maxLen = len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")
	)
	p := mask
	// Find longest run of zeros.
	// e0/e1 delimit (byte offsets) the longest run of zero 16-bit groups.
	e0 := -1
	e1 := -1
	for i := 0; i < IPv6len; i += 2 {
		j := i
		for j < IPv6len && p[j] == 0 && p[j+1] == 0 {
			j += 2
		}
		if j > i && j-i > e1-e0 {
			e0 = i
			e1 = j
			i = j
		}
	}
	// The symbol "::" MUST NOT be used to shorten just one 16 bit 0 field.
	if e1-e0 <= 2 {
		e0 = -1
		e1 = -1
	}
	b := make([]byte, 0, maxLen)
	// Print with possible :: in place of run of zeros
	for i := 0; i < IPv6len; i += 2 {
		if i == e0 {
			b = append(b, ':', ':')
			i = e1
			if i >= IPv6len {
				break
			}
		} else if i > 0 {
			b = append(b, ':')
		}
		// Each 16-bit group is two mask bytes, big-endian.
		b = appendHex(b, (uint32(p[i])<<8)|uint32(p[i+1]))
	}
	return string(b)
}
// ubtoa writes the decimal representation of v into dst starting at
// dst[start] and returns the number of bytes written (1-3).
func ubtoa(dst []byte, start int, v byte) int {
	switch {
	case v < 10:
		dst[start] = '0' + v
		return 1
	case v < 100:
		dst[start] = '0' + v/10
		dst[start+1] = '0' + v%10
		return 2
	default:
		dst[start] = '0' + v/100
		dst[start+1] = '0' + (v/10)%10
		dst[start+2] = '0' + v%10
		return 3
	}
}
// appendHex appends the lowercase hexadecimal form of i to dst, without
// leading zero digits ("0" when i is zero), and returns the extended slice.
func appendHex(dst []byte, i uint32) []byte {
	const digits = "0123456789abcdef"
	if i == 0 {
		return append(dst, '0')
	}
	for shift := 28; shift >= 0; shift -= 4 {
		if rest := i >> uint(shift); rest > 0 {
			dst = append(dst, digits[rest&0xf])
		}
	}
	return dst
}
|
package proxy
import (
"context"
"net/http"
"github.com/rs/xid"
log "github.com/sirupsen/logrus"
)
// Middleware adds additional behavior to the request-processing cycle.
// A Middleware returns a new http.Handler that wraps (and eventually
// calls) the http.Handler passed to it as `next`.
type Middleware func(next http.Handler) http.Handler

// key is an unexported context-key type so values stored by this package
// cannot collide with context keys from other packages.
type key int

// requestIDKey is the context key under which the request ID is stored.
var requestIDKey key = 0

// newRequestID generates a new globally-unique request identifier.
func newRequestID() string {
	return xid.New().String()
}
// GetRequestID returns the request ID stored in ctx by the tracing
// middleware, or "unknown" when none is present.
func GetRequestID(ctx context.Context) string {
	if requestID, ok := ctx.Value(requestIDKey).(string); ok {
		return requestID
	}
	// TODO: generate new ID?
	return "unknown"
}
// applyMiddlewares wraps router with the given middlewares.
// The first middleware in the list becomes the outermost wrapper, i.e.
// the first one to receive an incoming request.
func applyMiddlewares(router http.Handler, middlewares []Middleware) http.Handler {
	// Wrap in reverse so list order equals invocation order.
	for i := len(middlewares) - 1; i >= 0; i-- {
		router = middlewares[i](router)
	}
	return router
}
// tracing ensures every request carries an X-Request-Id header (reusing an
// incoming one when present) and stores that ID in the request context.
func tracing(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		id := r.Header.Get("X-Request-Id")
		if id == "" {
			id = newRequestID()
		}
		r.Header.Set("X-Request-Id", id)
		ctx := context.WithValue(r.Context(), requestIDKey, id)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// logging logs the method, path, remote address, and user agent after the
// request has been served, tagged with the request ID from the context.
func logging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			// TODO: log response code
			// TODO: log the target redirect
			id := GetRequestID(r.Context())
			log.Infof("[ID:%s] %s %s, %s, %s", id, r.Method, r.URL.Path, r.RemoteAddr, r.UserAgent())
		}()
		next.ServeHTTP(w, r)
	})
}
|
package main
import (
"fmt"
"sort"
)
// main reads a weighted data set from stdin — the number of distinct
// values, the values, then one frequency per value — expands it, and
// prints the interquartile range (q3 - q1).
//
// Bug fixes: the original declared `count` and computed `q2` without ever
// reading them (both are "declared and not used" compile errors in Go),
// and duplicated the identical q1/q3 computation across the even/odd
// median branches.
func main() {
	var numEls int
	fmt.Scanf("%d", &numEls)
	nums := make([]int, numEls)
	for i := 0; i < numEls; i++ {
		fmt.Scanf("%d", &nums[i])
	}
	var numbers []int
	for i := 0; i < numEls; i++ {
		var freq int
		fmt.Scanf("%d", &freq)
		for j := 0; j < freq; j++ {
			numbers = append(numbers, nums[i])
		}
	}
	sort.Ints(numbers)
	fmt.Println(interquartileRange(numbers))
}

// interquartileRange returns q3-q1 of an already-sorted slice, using the
// original integer-midpoint convention: with half = len/2, the quartiles
// are averages of adjacent elements when half is even, single elements
// otherwise. Panics on slices with fewer than two elements (as before).
func interquartileRange(numbers []int) int {
	half := len(numbers) / 2
	var q1, q3 int
	if half%2 == 0 {
		q1 = (numbers[half/2] + numbers[half/2-1]) / 2
		q3 = (numbers[half/2*3] + numbers[half/2*3-1]) / 2
	} else {
		q1 = numbers[half/2]
		q3 = numbers[half/2*3]
	}
	return q3 - q1
}
|
package sound
import (
"errors"
"fmt"
"github.com/rmcsoft/hasp/events"
)
// SoundCapturedEventData is the payload carried by a SoundCaptured event.
type SoundCapturedEventData struct {
	AudioData *AudioData
}

const (
	// SoundCapturedEventName identifies events emitted when an audio
	// capture completes.
	SoundCapturedEventName = "SoundCaptured"
)
// NewSoundCapturedEvent creates a SoundCaptured event wrapping audioData.
func NewSoundCapturedEvent(audioData *AudioData) *events.Event {
	payload := SoundCapturedEventData{AudioData: audioData}
	return &events.Event{
		Name: SoundCapturedEventName,
		Args: []interface{}{payload},
	}
}
// GetSoundCapturedEventData extracts the SoundCapturedEventData payload
// from a SoundCaptured or HotWordWithDataDetected event.
func GetSoundCapturedEventData(event *events.Event) (SoundCapturedEventData, error) {
	if event.Name != SoundCapturedEventName && event.Name != HotWordWithDataDetectedEventName {
		return SoundCapturedEventData{},
			fmt.Errorf("The event must be named %s or %s", SoundCapturedEventName, HotWordWithDataDetectedEventName)
	}
	if len(event.Args) != 1 {
		// Bug fix: the message previously read "Event does not data".
		return SoundCapturedEventData{},
			errors.New("Event does not contain data")
	}
	data, ok := event.Args[0].(SoundCapturedEventData)
	if !ok {
		return SoundCapturedEventData{},
			errors.New("Event does not contain samples")
	}
	return data, nil
}
|
package service
import (
"sixedu/model"
// "fmt"
)
// AuthService provides user registration operations.
type AuthService struct {}

// Register creates a user with the given attributes and persists it.
// NOTE(review): the result of user.Save() is discarded and true is
// returned unconditionally — confirm whether Save can fail and whether
// that failure should be surfaced to the caller.
func (a *AuthService) Register(username,password string,age int, sex string) bool {
	user := model.NewUser()
	user.SetUsername(username)
	user.SetPassword(password)
	user.SetAge(age)
	user.SetSex(sex)
	user.Save()
	// fmt.Println("service.auth.Register.users:",user)
	return true
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcetracker
import (
"math"
"testing"
"github.com/stretchr/testify/require"
"k8s.io/utils/pointer"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/multicluster"
)
// TestResourceTreePrintOption_getWidthForDetails checks the width budget
// left for the details column: unlimited when MaxWidth is unset, and
// MaxWidth minus the other columns (50+applyTimeWidth − 10 − 10 = 30)
// otherwise; columns too wide for the budget fall back to unlimited.
func TestResourceTreePrintOption_getWidthForDetails(t *testing.T) {
	r := require.New(t)
	options := &ResourceTreePrintOptions{}
	r.Equal(math.MaxInt, options._getWidthForDetails(nil))
	options.MaxWidth = pointer.Int(50 + applyTimeWidth)
	r.Equal(30, options._getWidthForDetails([]int{10, 10}))
	r.Equal(math.MaxInt, options._getWidthForDetails([]int{20, 20}))
}

// TestResourceTreePrintOptions_wrapDetails checks that detail text is
// split on tabs/newlines and hard-wrapped to the requested width.
func TestResourceTreePrintOptions_wrapDetails(t *testing.T) {
	r := require.New(t)
	options := &ResourceTreePrintOptions{}
	detail := "test-key: test-val\ttest-data: test-val\ntest-next-line: text-next-value test-long-key: test long long long long value test-append: test-append-val"
	r.Equal(
		[]string{
			"test-key: test-val test-data: test-val",
			"test-next-line: text-next-value",
			"test-long-key: test long long long long ",
			"value test-append: test-append-val",
		},
		options._wrapDetails(detail, 40))
}

// TestBuildResourceRow checks that an empty cluster name is normalized to
// the local cluster while explicit cluster names pass through unchanged.
func TestBuildResourceRow(t *testing.T) {
	r := require.New(t)
	cases := map[string]struct {
		Cluster string
		ResourceRowStatus string
		ExpectedCluster string
		ExpectedResourceRowStatus string
	}{
		"localCluster": {
			Cluster: "",
			ResourceRowStatus: resourceRowStatusUpdated,
			ExpectedCluster: multicluster.ClusterLocalName,
			ExpectedResourceRowStatus: resourceRowStatusUpdated,
		},
		"remoteCluster": {
			Cluster: "remoteCluster",
			ResourceRowStatus: resourceRowStatusUpdated,
			ExpectedCluster: "remoteCluster",
			ExpectedResourceRowStatus: resourceRowStatusUpdated,
		},
	}
	for name, c := range cases {
		mr := v1beta1.ManagedResource{
			ClusterObjectReference: common.ClusterObjectReference{
				Cluster: c.Cluster,
			},
		}
		rr := buildResourceRow(mr, c.ResourceRowStatus)
		r.Equal(c.ExpectedCluster, rr.mr.Cluster, name)
		r.Equal(c.ExpectedResourceRowStatus, rr.status, name)
	}
}
|
package graphql
import (
"encoding"
"fmt"
"reflect"
"strings"
)
// TAG is the struct-tag key used to derive GraphQL field names.
const TAG = "json"

// can't take recursive slice type
// e.g
// type Person struct{
//	Friends []Person
// }
// it will throw panic stack-overflow
//
// BindFields reflects over obj (struct or pointer to struct) and builds a
// GraphQL field map keyed by each field's json tag.
func BindFields(obj interface{}) Fields {
	t := reflect.TypeOf(obj)
	v := reflect.ValueOf(obj)
	fields := make(map[string]*Field)
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
		v = v.Elem()
	}
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		tag := extractTag(field.Tag)
		// A "-" tag means "skip this field", as in encoding/json.
		if tag == "-" {
			continue
		}
		fieldType := field.Type
		if fieldType.Kind() == reflect.Ptr {
			fieldType = fieldType.Elem()
		}
		var graphType Output
		if fieldType.Kind() == reflect.Struct {
			itf := v.Field(i).Interface()
			// TextMarshaler structs (e.g. time.Time) are exposed as strings.
			if _, ok := itf.(encoding.TextMarshaler); ok {
				fieldType = reflect.TypeOf("")
				goto nonStruct
			}
			structFields := BindFields(itf)
			// An untagged struct field is flattened into the parent's
			// field set; a tagged one becomes a nested object type.
			if tag == "" {
				fields = appendFields(fields, structFields)
				continue
			} else {
				graphType = NewObject(ObjectConfig{
					Name: tag,
					Fields: structFields,
				})
			}
		}
	nonStruct:
		if tag == "" {
			continue
		}
		if graphType == nil {
			graphType = getGraphType(fieldType)
		}
		fields[tag] = &Field{
			Type: graphType,
			// tag is declared per loop iteration, so each resolver closes
			// over its own field name.
			Resolve: func(p ResolveParams) (interface{}, error) {
				return extractValue(tag, p.Source), nil
			},
		}
	}
	return fields
}
// getGraphType maps a Go kind to the corresponding GraphQL scalar or list
// type. Unhandled kinds (including unsigned integers) fall back to String.
func getGraphType(tipe reflect.Type) Output {
	kind := tipe.Kind()
	switch kind {
	case reflect.String:
		return String
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		// Bug fix: Int16 was missing from this list, so int16 fields were
		// silently mapped to String.
		return Int
	case reflect.Float32, reflect.Float64:
		return Float
	case reflect.Bool:
		return Boolean
	case reflect.Slice:
		return getGraphList(tipe)
	}
	return String
}
// getGraphList maps a slice type to a GraphQL List of the matching scalar;
// slices of structs are bound recursively through BindFields.
func getGraphList(tipe reflect.Type) *List {
	if tipe.Kind() == reflect.Slice {
		switch tipe.Elem().Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			// Bug fix: Int16 was missing, so a []int16 fell through to the
			// struct-binding path below, where BindFields would call
			// NumField on a non-struct and panic.
			return NewList(Int)
		case reflect.Bool:
			return NewList(Boolean)
		case reflect.Float32, reflect.Float64:
			return NewList(Float)
		case reflect.String:
			return NewList(String)
		}
	}
	// finally bind object
	t := reflect.New(tipe.Elem())
	name := strings.Replace(fmt.Sprint(tipe.Elem()), ".", "_", -1)
	obj := NewObject(ObjectConfig{
		Name:   name,
		Fields: BindFields(t.Elem().Interface()),
	})
	return NewList(obj)
}
// appendFields merges origin into dest (overwriting duplicate keys) and
// returns dest.
func appendFields(dest, origin Fields) Fields {
	for name, field := range origin {
		dest[name] = field
	}
	return dest
}
// extractValue walks obj's fields depth-first and returns the value of
// the first field whose json tag equals originTag, or nil when no field
// matches.
func extractValue(originTag string, obj interface{}) interface{} {
	val := reflect.Indirect(reflect.ValueOf(obj))
	for j := 0; j < val.NumField(); j++ {
		field := val.Type().Field(j)
		found := originTag == extractTag(field.Tag)
		if field.Type.Kind() == reflect.Struct {
			itf := val.Field(j).Interface()
			// A matching TextMarshaler struct is returned as its text form.
			if str, ok := itf.(encoding.TextMarshaler); ok && found {
				byt, _ := str.MarshalText()
				return string(byt)
			}
			// NOTE(review): the nested struct is searched even when this
			// field itself matched, so a same-tagged nested field shadows
			// the outer struct value — confirm this precedence is intended.
			res := extractValue(originTag, itf)
			if res != nil {
				return res
			}
		}
		if found {
			return reflect.Indirect(val.Field(j)).Interface()
		}
	}
	return nil
}
// extractTag returns the name portion of the json struct tag, i.e. the
// text before the first comma ("" when the tag is absent).
func extractTag(tag reflect.StructTag) string {
	value := tag.Get(TAG)
	if value == "" {
		return value
	}
	if comma := strings.Index(value, ","); comma >= 0 {
		return value[:comma]
	}
	return value
}
// BindArg is a lazy way of binding args: it builds a FieldConfigArgument
// for the struct fields of obj whose json tags appear in tags.
func BindArg(obj interface{}, tags ...string) FieldConfigArgument {
	value := reflect.Indirect(reflect.ValueOf(obj))
	config := make(FieldConfigArgument)
	structType := value.Type()
	for i := 0; i < value.NumField(); i++ {
		structField := structType.Field(i)
		fieldTag := extractTag(structField.Tag)
		if !inArray(tags, fieldTag) {
			continue
		}
		config[fieldTag] = &ArgumentConfig{
			Type: getGraphType(structField.Type),
		}
	}
	return config
}
// inArray reports whether item is deeply equal to some element of slice.
// It panics when slice is not actually a slice value.
func inArray(slice interface{}, item interface{}) bool {
	values := reflect.ValueOf(slice)
	if values.Kind() != reflect.Slice {
		panic("inArray() given a non-slice type")
	}
	for i := 0; i < values.Len(); i++ {
		if reflect.DeepEqual(item, values.Index(i).Interface()) {
			return true
		}
	}
	return false
}
|
package server
import (
"errors"
"net/http"
"github.com/Tanibox/tania-core/src/tasks/domain"
"github.com/Tanibox/tania-core/src/tasks/storage"
"github.com/gofrs/uuid"
"github.com/labstack/echo/v4"
)
// SaveToTaskReadModel projects a task domain event onto the TaskRead read
// model and persists the result. TaskCreated builds a fresh record; every
// other event loads the existing record by UID and mutates one aspect.
//
// Refactor: the nine "fetch, mutate one field, reassign" cases were
// copy-pasted; they now share a single fetch-and-mutate closure.
func (s *TaskServer) SaveToTaskReadModel(event interface{}) error {
	taskRead := &storage.TaskRead{}
	// applyToExisting loads the read model for uid, applies mutate to it,
	// and makes it the record that will be saved below.
	applyToExisting := func(uid uuid.UUID, mutate func(*storage.TaskRead)) error {
		existing, err := s.getTaskReadFromID(uid)
		if err != nil {
			return err
		}
		mutate(existing)
		taskRead = existing
		return nil
	}
	var err error
	switch e := event.(type) {
	case domain.TaskCreated:
		taskRead.Title = e.Title
		taskRead.UID = e.UID
		taskRead.Description = e.Description
		taskRead.CreatedDate = e.CreatedDate
		taskRead.DueDate = e.DueDate
		taskRead.Priority = e.Priority
		taskRead.Status = e.Status
		taskRead.Domain = e.Domain
		taskRead.DomainDetails = e.DomainDetails
		taskRead.Category = e.Category
		taskRead.IsDue = e.IsDue
		taskRead.AssetID = e.AssetID
	case domain.TaskTitleChanged:
		err = applyToExisting(e.UID, func(t *storage.TaskRead) { t.Title = e.Title })
	case domain.TaskDescriptionChanged:
		err = applyToExisting(e.UID, func(t *storage.TaskRead) { t.Description = e.Description })
	case domain.TaskPriorityChanged:
		err = applyToExisting(e.UID, func(t *storage.TaskRead) { t.Priority = e.Priority })
	case domain.TaskDueDateChanged:
		err = applyToExisting(e.UID, func(t *storage.TaskRead) { t.DueDate = e.DueDate })
	case domain.TaskCategoryChanged:
		err = applyToExisting(e.UID, func(t *storage.TaskRead) { t.Category = e.Category })
	case domain.TaskDetailsChanged:
		err = applyToExisting(e.UID, func(t *storage.TaskRead) { t.DomainDetails = e.DomainDetails })
	case domain.TaskCompleted:
		err = applyToExisting(e.UID, func(t *storage.TaskRead) {
			t.CompletedDate = e.CompletedDate
			t.Status = domain.TaskStatusCompleted
		})
	case domain.TaskCancelled:
		err = applyToExisting(e.UID, func(t *storage.TaskRead) {
			t.CancelledDate = e.CancelledDate
			t.Status = domain.TaskStatusCancelled
		})
	case domain.TaskDue:
		err = applyToExisting(e.UID, func(t *storage.TaskRead) { t.IsDue = true })
	default:
		return errors.New("unknown task event")
	}
	if err != nil {
		return err
	}
	if err := <-s.TaskReadRepo.Save(taskRead); err != nil {
		return err
	}
	return nil
}
// getTaskReadFromID fetches the TaskRead with the given UID from the read
// query side.
func (s *TaskServer) getTaskReadFromID(uid uuid.UUID) (*storage.TaskRead, error) {
	readResult := <-s.TaskReadQuery.FindByID(uid)
	taskReadFromRepo, ok := readResult.Result.(storage.TaskRead)
	// Bug fix: the type assertion must be checked before inspecting the
	// value. Previously the zero-value TaskRead produced by a failed
	// assertion was misreported as "task not found" instead of an internal
	// error.
	if !ok {
		return &storage.TaskRead{}, echo.NewHTTPError(http.StatusBadRequest, "Internal server error")
	}
	if taskReadFromRepo.UID != uid {
		return &storage.TaskRead{}, domain.TaskError{Code: domain.TaskErrorTaskNotFoundCode}
	}
	return &taskReadFromRepo, nil
}
|
package main
import (
"encoding/json"
"strings"
"testing"
)
// TestCorrectMarshaling decodes a JSON document into ImageUpdate and
// checks the URL/ImageUUID/UserUUID fields.
// NOTE(review): despite its name this test exercises json.Unmarshal while
// its sibling below exercises json.Marshal — the two names appear
// swapped. ImageScale is set in the input but never asserted.
func TestCorrectMarshaling(t *testing.T) {
	testJSONString := `
{
"ImageUUID": "yolo",
"userUUID": "user",
"url": "url",
"imageScale" : "ORIGINAL"
}
`
	var imageUpdate ImageUpdate
	err := json.Unmarshal([]byte(testJSONString), &imageUpdate)
	if err != nil {
		t.Errorf("failed to unmarshal json string: %v", err)
	}
	if imageUpdate.URL != "url" {
		t.Errorf("url did not have expected value 'url': %v", imageUpdate.URL)
	}
	if imageUpdate.ImageUUID != "yolo" {
		t.Errorf("imageUUID did not have expected value 'yolo': %v", imageUpdate.ImageUUID)
	}
	if imageUpdate.UserUUID != "user" {
		t.Errorf("userUUID did not have expected value 'user': %v", imageUpdate.UserUUID)
	}
}

// TestCorrectUnmarshalling encodes an ImageUpdate to JSON and compares it
// (case-insensitively, to ignore tag casing) against the expected string.
// NOTE(review): this actually exercises json.Marshal — see note above.
func TestCorrectUnmarshalling(t *testing.T) {
	testImageUpdate := ImageUpdate{ImageUUID: "yolo", UserUUID: "user", URL: "url", ImageScale: "ORIGINAL"}
	byteArr, err := json.Marshal(testImageUpdate)
	if err != nil {
		t.Errorf("failed to marshal imageUpdate to JSON string: %v", err)
	}
	testJSONString := `{"imageUUID":"yolo","userUUID":"user","url":"url","imageScale":"ORIGINAL"}`
	actualOutput := strings.ToLower(string(byteArr))
	expectedOutput := strings.ToLower(testJSONString)
	if actualOutput != expectedOutput {
		t.Errorf("output does not equal expected output (case insensitive): %s vs %s", actualOutput, expectedOutput)
	}
}
|
package trident
import (
"errors"
pf "trident.li/pitchfork/lib"
)
// TriUser extends the generic pitchfork user with nomination queries.
type TriUser interface {
	pf.PfUser
	// IsNominator reports whether nom_name nominated this user.
	IsNominator(ctx pf.PfCtx, nom_name string) (ok bool)
	// BestNominator returns the preferred nominator for recovery mails.
	BestNominator(ctx pf.PfCtx) (nom_name string, err error)
}

// TriUserS implements TriUser by embedding the stock pitchfork user.
type TriUserS struct {
	pf.PfUser `pfset:"self" pfget:"self"`
}

// NewTriUser returns a fresh TriUserS wrapping an empty pitchfork user.
func NewTriUser() pf.PfUser {
	return &TriUserS{PfUser: pf.NewPfUser(nil, nil)}
}
// IsNominator reports whether nom_name has a positive vouch on record for
// this user from a member with a PGP key on file.
func (user *TriUserS) IsNominator(ctx pf.PfCtx, nom_name string) (ok bool) {
	cnt := 0
	// Bug fixes: the original query contained two WHERE clauses (invalid
	// SQL), so the scan always failed, cnt stayed 0, and every candidate
	// was accepted; the result test was also inverted (cnt == 0).
	q := "SELECT COUNT(*) " +
		"FROM member_vouch mv " +
		"JOIN member_email me ON mv.vouchor = me.member " +
		"WHERE mv.vouchee = $1 " +
		"AND mv.vouchor = $2 " +
		"AND mv.positive " +
		"AND me.pgpkey_id IS NOT NULL"
	err := pf.DB.QueryRow(q, user.GetUserName(), nom_name).Scan(&cnt)
	if err != nil {
		return false
	}
	return cnt > 0
}
// BestNominator returns the earliest-entered positive vouchor for this
// user that has a PGP key on file, or a scan error when none exists.
func (user *TriUserS) BestNominator(ctx pf.PfCtx) (nom_name string, err error) {
	query := "SELECT vouchor " +
		"FROM member_vouch mv " +
		"JOIN member_email me ON mv.vouchor = me.member " +
		"WHERE vouchee = $1 " +
		"AND mv.positive " +
		"AND me.pgpkey_id IS NOT NULL " +
		"ORDER BY mv.entered " +
		"LIMIT 1"
	err = pf.DB.QueryRow(query, user.GetUserName()).Scan(&nom_name)
	return
}
// user_pw_send generates a recovery password split into two halves: one
// half is mailed to the selected user, the other to nom_name. The
// concatenation (user half + nominator half) is stored as the user's
// recovery token. The caller must have the target user selected in ctx.
func user_pw_send(ctx pf.PfCtx, is_reset bool, nom_name string) (err error) {
	var user_email pf.PfUserEmail
	var nom_email pf.PfUserEmail
	var pw pf.PfPass
	var user_portion string
	var nom_portion string
	theuser := ctx.SelectedUser()
	username := theuser.GetUserName()
	/* Make sure the name is mostly sane */
	nom_name, err = pf.Chk_ident("UserName", nom_name)
	if err != nil {
		return
	}
	if nom_name == username {
		err = errors.New("Nominator cannot be the same as the user")
		return
	}
	// Temporarily select the nominator to resolve their primary email.
	err = ctx.SelectUser(nom_name, pf.PERM_USER_NOMINATE)
	if err != nil {
		return
	}
	nom_user := ctx.SelectedUser()
	nom_email, err = nom_user.GetPriEmail(ctx, false)
	if err != nil {
		return
	}
	/* Reselect the user, this is the one it is all about */
	err = ctx.SelectUser(username, pf.PERM_USER)
	if err != nil {
		return
	}
	user_email, err = theuser.GetPriEmail(ctx, true)
	if err != nil {
		return
	}
	// Each portion is an independently generated 16-character password.
	user_portion, err = pw.GenPass(16)
	if err != nil {
		return
	}
	err = Mail_PassResetUser(ctx, user_email, is_reset, nom_email, user_portion)
	if err != nil {
		return
	}
	nom_portion, err = pw.GenPass(16)
	if err != nil {
		return
	}
	err = Mail_PassResetNominator(ctx, nom_email, is_reset, user_email, nom_portion)
	if err != nil {
		return
	}
	// The stored token is the concatenation of both mailed halves.
	err = theuser.SetRecoverToken(ctx, user_portion+nom_portion)
	return
}
// user_pw_reset is the CLI handler for password recovery. args are
// [username, optional nominator]; when no nominator is given the best
// available one is used.
func user_pw_reset(ctx pf.PfCtx, args []string) (err error) {
	username := args[0]
	nom_name := ""
	err = ctx.SelectUser(username, pf.PERM_USER_SELF)
	if err != nil {
		return
	}
	tctx := TriGetCtx(ctx)
	user := tctx.TriSelectedUser()
	/*
	 * Note that when the user does not have a valid nominator
	 * the password can't be reset either
	 */
	if len(args) >= 2 {
		nom_name = args[1]
		if !user.IsNominator(ctx, nom_name) {
			err = errors.New(nom_name + " is not a nominator for this user")
			return
		}
	} else {
		nom_name, err = user.BestNominator(ctx)
		if err != nil {
			err = errors.New("No nominator with valid PGP key")
			return
		}
	}
	/* Send out the new password */
	err = user_pw_send(ctx, true, nom_name)
	if err == nil {
		ctx.OutLn("Recovery Passwords sent to user and " + nom_name)
	}
	return
}
// user_nominate creates a new user from the nomination arguments:
// [username, email, bio_info, affiliation, descr].
func user_nominate(ctx pf.PfCtx, args []string) (err error) {
	return pf.User_new(ctx, args[0], args[1], args[2], args[3], args[4])
}
// user_merge re-points all vouches given and received by u_old to u_new
// inside a transaction, then delegates to the generic user merge (which
// receives any accumulated error).
func user_merge(ctx pf.PfCtx, args []string) (err error) {
	u_new := args[0]
	u_old := args[1]
	err = pf.DB.TxBegin(ctx)
	if err != nil {
		return err
	}
	q := "UPDATE member_vouch " +
		"SET vouchor = $1 " +
		"WHERE vouchor = $2"
	err = pf.DB.Exec(ctx,
		"Update Vouches $2 to $1",
		-1, q,
		u_new, u_old)
	if err == nil {
		// Bug fix: the original concatenation produced
		// "SET vouchee = $1WHERE vouchee = $2" — the missing space made
		// the statement invalid SQL.
		q = "UPDATE member_vouch " +
			"SET vouchee = $1 " +
			"WHERE vouchee = $2"
		err = pf.DB.Exec(ctx,
			"Update Vouchee $2 to $1",
			-1, q,
			u_new, u_old)
	}
	return pf.User_merge(ctx, u_new, u_old, err)
}
// user_pw_menu registers the password-recovery CLI entries on menu.
func user_pw_menu(ctx pf.PfCtx, menu *pf.PfMenu) {
	entries := []pf.PfMEntry{
		{"reset", user_pw_reset, 1, 2, []string{"username", "nominator"}, pf.PERM_USER, "Send a recovery password split between the user and a nominator"},
	}
	menu.Add(entries...)
}
// user_menu registers the user-nomination CLI entry on menu.
func user_menu(ctx pf.PfCtx, menu *pf.PfMenu) {
	entries := []pf.PfMEntry{
		{"nominate", user_nominate, 5, 5, []string{"username", "email", "bio_info", "affiliation", "descr"}, pf.PERM_USER, "Nominate New User"},
	}
	menu.Add(entries...)
}
|
package virtual_security
import (
"reflect"
"testing"
"time"
)
// testClock is a hand-rolled iClock stub: each method returns the canned
// value stored in the corresponding field; getStockSession additionally
// records the arguments it received.
type testClock struct {
	iClock
	now1 time.Time
	getStockSession1 Session
	getStockSessionHistory []time.Time
	getSession1 Session
	getBusinessDay1 time.Time
}

func (t *testClock) now() time.Time { return t.now1 }
func (t *testClock) getStockSession(now time.Time) Session {
	t.getStockSessionHistory = append(t.getStockSessionHistory, now)
	return t.getStockSession1
}
func (t *testClock) getSession(ExchangeType, time.Time) Session { return t.getSession1 }
func (t *testClock) getBusinessDay(ExchangeType, time.Time) time.Time { return t.getBusinessDay1 }

// Test_newClock verifies that newClock returns a zero-value clock.
// NOTE(review): t.Parallel() is conventionally the first statement of a
// parallel test; here it runs after the body in all three tests below.
func Test_newClock(t *testing.T) {
	want := &clock{}
	got := newClock()
	t.Parallel()
	if !reflect.DeepEqual(want, got) {
		t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), want, got)
	}
}

// Test_clock_Now1 checks that clock.now() is not earlier than a timestamp
// taken just before the call.
func Test_clock_Now1(t *testing.T) {
	want := time.Now()
	got := (&clock{}).now()
	t.Parallel()
	if got.Before(want) {
		t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), want, got)
	}
}

// Test_clock_Now2 checks that clock.now() is not later than a timestamp
// taken just after the call.
func Test_clock_Now2(t *testing.T) {
	got := (&clock{}).now()
	want := time.Now()
	t.Parallel()
	if got.After(want) {
		t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), want, got)
	}
}
// Test_clock_GetStockSession maps wall-clock times to trading sessions:
// morning 09:00-11:30, afternoon 12:30-15:00, unspecified otherwise.
// NOTE(review): the case at 11:30:05 is named "...なら morning" but expects
// SessionUnspecified — the name, not the expectation, looks wrong.
func Test_clock_GetStockSession(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		arg time.Time
		want Session
	}{
		{name: "前場前 なら unspecified", arg: time.Date(0, 1, 1, 8, 59, 59, 0, time.Local), want: SessionUnspecified},
		{name: "前場開始時刻 なら morning", arg: time.Date(0, 1, 1, 9, 0, 0, 0, time.Local), want: SessionMorning},
		{name: "前場中 なら morning", arg: time.Date(0, 1, 1, 10, 0, 0, 0, time.Local), want: SessionMorning},
		{name: "前場終了時刻 なら morning", arg: time.Date(0, 1, 1, 11, 30, 0, 0, time.Local), want: SessionMorning},
		{name: "前場終了後 なら morning", arg: time.Date(0, 1, 1, 11, 30, 5, 0, time.Local), want: SessionUnspecified},
		{name: "前場後・後場前 なら unspecified", arg: time.Date(0, 1, 1, 12, 0, 0, 0, time.Local), want: SessionUnspecified},
		{name: "後場開始時刻 なら afternoon", arg: time.Date(0, 1, 1, 12, 30, 0, 0, time.Local), want: SessionAfternoon},
		{name: "後場中 なら afternoon", arg: time.Date(0, 1, 1, 13, 0, 0, 0, time.Local), want: SessionAfternoon},
		{name: "後場終了時刻 なら afternoon", arg: time.Date(0, 1, 1, 15, 0, 0, 0, time.Local), want: SessionAfternoon},
		{name: "後場終了後 なら unspecified", arg: time.Date(0, 1, 1, 15, 0, 5, 0, time.Local), want: SessionUnspecified},
		{name: "後場後 なら unspecified", arg: time.Date(0, 1, 1, 15, 0, 6, 0, time.Local), want: SessionUnspecified},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			clock := &clock{}
			got := clock.getStockSession(test.arg)
			if !reflect.DeepEqual(test.want, got) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), test.want, got)
			}
		})
	}
}

// Test_clock_GetBusinessDay verifies that the business day is the
// truncated (midnight) date of the argument, with the zero value passed
// through unchanged.
func Test_clock_GetBusinessDay(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		arg1 ExchangeType
		arg2 time.Time
		want time.Time
	}{
		{name: "引数がゼロ値ならそのまま返す", arg1: ExchangeTypeUnspecified, arg2: time.Time{}, want: time.Time{}},
		{name: "現物なら年月日をそのまま営業日にして返す",
			arg1: ExchangeTypeStock,
			arg2: time.Date(2021, 6, 29, 16, 29, 0, 0, time.Local),
			want: time.Date(2021, 6, 29, 0, 0, 0, 0, time.Local)},
		{name: "信用なら年月日をそのまま営業日にして返す",
			arg1: ExchangeTypeMargin,
			arg2: time.Date(2021, 6, 29, 16, 29, 0, 0, time.Local),
			want: time.Date(2021, 6, 29, 0, 0, 0, 0, time.Local)},
		{name: "上記以外は年月日をそのまま返す",
			arg1: ExchangeTypeUnspecified,
			arg2: time.Date(2021, 6, 29, 16, 29, 0, 0, time.Local),
			want: time.Date(2021, 6, 29, 0, 0, 0, 0, time.Local)},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			c := &clock{}
			got := c.getBusinessDay(test.arg1, test.arg2)
			if !reflect.DeepEqual(test.want, got) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), test.want, got)
			}
		})
	}
}

// Test_clock_GetSession verifies session resolution per exchange type:
// zero time or unspecified exchange yield SessionUnspecified; stock and
// margin exchanges map times to morning/afternoon/unspecified.
func Test_clock_GetSession(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		arg1 ExchangeType
		arg2 time.Time
		want Session
	}{
		{name: "日時がゼロ値なら未指定",
			arg1: ExchangeTypeStock,
			arg2: time.Time{},
			want: SessionUnspecified},
		{name: "ExchangeTypeが未指定なら引数も未指定",
			arg1: ExchangeTypeUnspecified,
			arg2: time.Date(2021, 6, 30, 10, 0, 0, 0, time.Local),
			want: SessionUnspecified},
		{name: "現物で前場の時間なら前場",
			arg1: ExchangeTypeStock,
			arg2: time.Date(2021, 6, 30, 10, 0, 0, 0, time.Local),
			want: SessionMorning},
		{name: "現物で後場の時間なら後場",
			arg1: ExchangeTypeStock,
			arg2: time.Date(2021, 6, 30, 13, 0, 0, 0, time.Local),
			want: SessionAfternoon},
		{name: "現物で上記以外の時間なら未指定",
			arg1: ExchangeTypeStock,
			arg2: time.Date(2021, 6, 30, 12, 0, 0, 0, time.Local),
			want: SessionUnspecified},
		{name: "信用で前場の時間なら前場",
			arg1: ExchangeTypeMargin,
			arg2: time.Date(2021, 6, 30, 10, 0, 0, 0, time.Local),
			want: SessionMorning},
		{name: "信用で後場の時間なら後場",
			arg1: ExchangeTypeMargin,
			arg2: time.Date(2021, 6, 30, 13, 0, 0, 0, time.Local),
			want: SessionAfternoon},
		{name: "信用で上記以外の時間なら未指定",
			arg1: ExchangeTypeMargin,
			arg2: time.Date(2021, 6, 30, 12, 0, 0, 0, time.Local),
			want: SessionUnspecified},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			clock := &clock{}
			got := clock.getSession(test.arg1, test.arg2)
			if !reflect.DeepEqual(test.want, got) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), test.want, got)
			}
		})
	}
}
|
package heap
import (
"fmt"
)
// Heap holds a min-heap of ints backed by a slice.
type Heap struct {
	arr []int
}

// New creates a new empty heap.
func New() *Heap {
	return &Heap{
		arr: make([]int, 0),
	}
}

// Insert adds a new element to the heap and sifts it up to restore the
// min-heap property.
func (h *Heap) Insert(data int) {
	h.arr = append(h.arr, data)
	h.siftUp(len(h.arr) - 1)
}

// siftUp moves the element at index i toward the root until its parent is
// no longer larger.
func (h *Heap) siftUp(i int) {
	for i != 0 && h.arr[parent(i)] > h.arr[i] {
		h.arr[parent(i)], h.arr[i] = h.arr[i], h.arr[parent(i)]
		i = parent(i)
	}
}

// DecreaseKey lowers the value stored at index key to val and restores
// the heap property. It returns an error for out-of-range keys.
func (h *Heap) DecreaseKey(key, val int) error {
	// Bug fix: negative keys previously passed the bounds check and
	// caused an index-out-of-range panic.
	if key < 0 || key >= len(h.arr) {
		return fmt.Errorf("Key is beyond the length")
	}
	h.arr[key] = val
	h.siftUp(key)
	return nil
}

// DeleteKey removes the element at index key by sinking it to -1 and
// extracting the minimum. NOTE: reliable only while the heap holds no
// values below -1 (unchanged limitation of the original design).
func (h *Heap) DeleteKey(key int) error {
	// Bug fix: the DecreaseKey error was silently discarded, and negative
	// keys panicked.
	if err := h.DecreaseKey(key, -1); err != nil {
		return err
	}
	h.ExtractMin()
	return nil
}

// ExtractMin removes and returns the minimum element. It returns 0 when
// the heap is empty (indistinguishable from a stored zero).
func (h *Heap) ExtractMin() int {
	if len(h.arr) == 0 {
		return 0
	}
	min := h.arr[0]
	h.arr[0] = h.arr[len(h.arr)-1]
	h.arr = h.arr[:len(h.arr)-1]
	minHeapify(0, len(h.arr), &h.arr)
	return min
}

// minHeapify restores the min-heap property for the subtree rooted at
// current within the first n elements of arr.
func minHeapify(current int, n int, arr *[]int) {
	left := 2*current + 1
	right := 2*current + 2
	smallest := current
	if left < n && (*arr)[left] < (*arr)[smallest] {
		smallest = left
	}
	if right < n && (*arr)[right] < (*arr)[smallest] {
		smallest = right
	}
	if smallest != current {
		(*arr)[current], (*arr)[smallest] = (*arr)[smallest], (*arr)[current]
		minHeapify(smallest, n, arr)
	}
}

// parent returns the parent index of element i.
func parent(i int) int {
	return (i - 1) / 2
}
|
package main
import (
"fmt"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
	// treeCommand carries the parsed CLI state for "podman image tree".
	treeCommand cliconfig.TreeValues
	// treeDescription serves as both the short and long help text.
	treeDescription = "Prints layer hierarchy of an image in a tree format"
	// _treeCommand is the cobra command; its RunE copies global option
	// state into treeCommand and delegates to treeCmd below.
	_treeCommand = &cobra.Command{
		Use:   "tree [flags] IMAGE",
		Short: treeDescription,
		Long:  treeDescription,
		RunE: func(cmd *cobra.Command, args []string) error {
			treeCommand.InputArgs = args
			treeCommand.GlobalFlags = MainGlobalOpts
			treeCommand.Remote = remoteclient
			return treeCmd(&treeCommand)
		},
		Example: "podman image tree alpine:latest",
	}
)
// init wires the cobra command to its value struct and registers the
// --whatrequires flag.
func init() {
	treeCommand.Command = _treeCommand
	treeCommand.SetUsageTemplate(UsageTemplate())
	treeCommand.Flags().BoolVar(&treeCommand.WhatRequires, "whatrequires", false, "Show all child images and layers of the specified image")
}
// treeCmd prints the layer hierarchy of the single image named on the
// command line. Exactly one image argument is required.
func treeCmd(c *cliconfig.TreeValues) error {
	args := c.InputArgs
	switch {
	case len(args) == 0:
		return errors.Errorf("an image name must be specified")
	case len(args) > 1:
		return errors.Errorf("you must provide at most 1 argument")
	}
	runtime, err := adapter.GetRuntime(getContext(), &c.PodmanCommand)
	if err != nil {
		return errors.Wrapf(err, "error creating libpod runtime")
	}
	defer runtime.DeferredShutdown(false)
	tree, err := runtime.ImageTree(args[0], c.WhatRequires)
	if err != nil {
		return err
	}
	fmt.Print(tree)
	return nil
}
|
package main
import (
"fmt"
"time"
)
// main races two goroutines and prints whichever message arrives first.
func main() {
	c1 := make(chan string)
	c2 := make(chan string)
	go speed1(c1)
	go speed2(c2)
	fmt.Println("The first to arrive is:")
	// With no default branch, select blocks until one case is ready.
	select {
	case msg := <-c1:
		fmt.Println(msg)
	case msg := <-c2:
		fmt.Println(msg)
	}
}
// speed1 waits one second, then reports its name on ch.
func speed1(ch chan string) {
	time.Sleep(time.Second)
	ch <- "speed 1"
}
// speed2 waits one second, then reports its name on ch.
func speed2(ch chan string) {
	time.Sleep(time.Second)
	ch <- "speed 2"
}
|
package gogit
import (
"encoding/json"
"fmt"
"github.com/NavenduDuari/goinfo/gogit/utils"
)
// getCodeFrequency fetches the code-frequency stats for every repository
// of userName. Fetches run concurrently; each one delivers its raw JSON
// payload on the shared utils.RawInfo channel, and the collector loop
// below receives exactly one message per repo.
func getCodeFrequency(userName string) []utils.CodeFreqStruct {
	repos := getRepos(userName)
	fmt.Println("Repos => ", len(repos))
	var codeFreqs []utils.CodeFreqStruct
	go func() {
		for _, repo := range repos {
			codeFrequencyURL := getCodeFrequencyURL(userName, repo.Name)
			fmt.Println("LOC URL: ", codeFrequencyURL)
			go getInfo(codeFrequencyURL)
		}
	}()
	for i := 1; i <= len(repos); i++ {
		rawCodeFreq, ok := <-utils.RawInfo
		if !ok {
			continue
		}
		var codeFreq utils.CodeFreqStruct
		if err := json.Unmarshal([]byte(rawCodeFreq), &codeFreq); err != nil {
			// Skip malformed payloads; previously the error was discarded
			// and a zero-value entry was appended anyway.
			fmt.Println("bad code frequency payload:", err)
			continue
		}
		codeFreqs = append(codeFreqs, codeFreq)
	}
	return codeFreqs
}
// GetLOC returns the total lines of code across all of userName's repos,
// summing columns 1 and 2 of each weekly code-frequency row (presumably
// additions and deletions — confirm against the upstream API).
func GetLOC(userName string) int64 {
	var totalLOC int64
	for _, codeFreq := range getCodeFrequency(userName) {
		for _, weeklyArr := range codeFreq {
			// Guard the indexing: the old len(weeklyArr) == 0 check still
			// allowed a panic on rows with fewer than 3 entries.
			if len(weeklyArr) < 3 {
				continue
			}
			totalLOC += weeklyArr[1] + weeklyArr[2]
		}
	}
	return totalLOC
}
|
package kubemq_queue
import (
"fmt"
"time"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/pkg/errors"
"github.com/batchcorp/plumber-schemas/build/go/protos/records"
"github.com/batchcorp/plumber/printer"
)
// DisplayMessage validates a Read record and prints its KubeMQ fields as
// a pretty table on STDOUT.
func (k *KubeMQ) DisplayMessage(cliOpts *opts.CLIOptions, msg *records.ReadRecord) error {
	if err := validateReadRecord(msg); err != nil {
		return errors.Wrap(err, "unable to validate read record")
	}
	rec := msg.GetKubemq()
	props := [][]string{
		{"Message ID", rec.Id},
		{"Client ID", rec.ClientId},
		{"Channel", rec.Channel},
		{"Sequence", fmt.Sprintf("%d", rec.Sequence)},
	}
	ts := time.Unix(msg.ReceivedAtUnixTsUtc, 0)
	printer.PrintTable(cliOpts, msg.Num, ts, msg.Payload, props)
	return nil
}
// DisplayError will parse an Error record and print (pretty) output to STDOUT.
// It delegates to the shared default error printer and never fails.
func (k *KubeMQ) DisplayError(msg *records.ErrorRecord) error {
	printer.DefaultDisplayError(msg)
	return nil
}
// validateReadRecord rejects nil records and records without a KubeMQ
// payload before they reach the display code.
func validateReadRecord(msg *records.ReadRecord) error {
	switch {
	case msg == nil:
		return errors.New("msg cannot be nil")
	case msg.GetKubemq() == nil:
		return errors.New("KubeMQ message cannot be nil")
	}
	return nil
}
|
package main
import db "test/database"
// main builds the HTTP router and serves on :8000, closing the shared
// database handle when the server stops.
// NOTE(review): db.SqlDB is assumed to be initialized by the database
// package before main runs — confirm, otherwise Close may fail on a nil
// handle. router.Run's error is also ignored, so a failed bind is silent.
func main() {
	defer db.SqlDB.Close()
	router := initRouter()
	router.Run(":8000")
}
|
package command
import (
"TskSch/msgQ"
"github.com/garyburd/redigo/redis"
"net/http"
"fmt"
"io/ioutil"
"runtime/debug"
"TskSch/mailer"
)
// Search asks the manager at managerPath for the command belonging to the
// id popped from the message queue. On connection failure it alerts via
// mail and returns ""; on an empty or unreadable reply it re-queues the id
// for a later attempt. Otherwise the command text is returned.
func Search(c redis.Conn, cmd_id string, managerPath string, host string, name string) string {
	s := "http://" + managerPath + "/askCommand?cmdId=" + cmd_id + ":" + host + "&agentName=" + name
	res, err := http.Get(s)
	if err != nil {
		fmt.Println("CAN'T CONNECT TO SCHEDULER TO GET THE TASK_CMD OF GIVEN TASK ID")
		mailer.Mail("GOSERVE: Unable to connect to the MANAGER", "Unable to establish connection with the Scheduer to get the command \n\n"+err.Error()+"\n\nStack Trace: --------------------\n\n\n"+string(debug.Stack()))
		return ""
	}
	// Close the body so the underlying connection can be reused; it was
	// previously leaked on every call.
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil || len(body) == 0 {
		// Nothing usable came back: push the id back for a retry. The read
		// error used to be silently discarded.
		msgQ.Push(c, cmd_id)
		return ""
	}
	return string(body)
}
|
package _34_Find_First_and_Last_Position_of_Element_in_Sorted_Array
import (
	"fmt"
	"reflect"
	"testing"
)
// TestSearchRange checks searchRange on LeetCode 34's sample input: the
// target 8 occupies indices 3..4. The old test only printed the result
// and asserted nothing.
func TestSearchRange(t *testing.T) {
	nums := []int{5, 7, 7, 8, 8, 10}
	target := 8
	got := searchRange(nums, target)
	fmt.Println(got)
	if want := []int{3, 4}; !reflect.DeepEqual(got, want) {
		t.Errorf("searchRange(%v, %d) = %v, want %v", nums, target, got, want)
	}
}
// TestFindFirst checks that the first occurrence of 8 is at index 3.
// The old test only printed the result and asserted nothing.
func TestFindFirst(t *testing.T) {
	nums := []int{5, 7, 7, 8, 8, 10}
	target := 8
	got := FindFirst(nums, target, 0, len(nums)-1, -1)
	fmt.Println(got)
	if got != 3 {
		t.Errorf("FindFirst = %d, want 3", got)
	}
}
// TestFindLast checks that the last occurrence of 8 is at index 4.
// The old test only printed the result and asserted nothing.
func TestFindLast(t *testing.T) {
	nums := []int{5, 7, 7, 8, 8, 10}
	target := 8
	got := FindLast(nums, target, 0, len(nums)-1, -1)
	fmt.Println(got)
	if got != 4 {
		t.Errorf("FindLast = %d, want 4", got)
	}
}
|
package tgo
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
)
// configGet loads the JSON config file for name into data. Config file
// lookup priority: mount_configs > configs (see getConfigPath).
//
// NOTE(review): on open/decode failure this assigns defaultData to the
// local parameter `data`, which does NOT propagate to the caller's value
// — callers never actually observe the default. Confirm the intent
// before relying on defaultData.
func configGet(name string, data interface{}, defaultData interface{}) (err error) {
	absPath := getConfigPath(name)
	var file *os.File
	file, err = os.Open(absPath)
	if err != nil {
		UtilLogError(fmt.Sprintf("open %s config file failed:%s", name, err.Error()))
		data = defaultData
		return
	} else {
		defer file.Close()
		decoder := json.NewDecoder(file)
		err = decoder.Decode(data)
		if err != nil {
			// Log the decode failure.
			UtilLogError(fmt.Sprintf("decode %s config error:%s", name, err.Error()))
			data = defaultData
			return
		}
	}
	return
}
// getConfigPath resolves the absolute path of the JSON config file for
// name, preferring mount_configs/<name>.json when it exists and falling
// back to configs/<name>.json otherwise.
func getConfigPath(name string) (absPath string) {
	mounted := fmt.Sprintf("mount_configs/%s.json", name)
	if _, statErr := os.Stat(mounted); statErr != nil && os.IsNotExist(statErr) {
		absPath, _ = filepath.Abs(fmt.Sprintf("configs/%s.json", name))
		return
	}
	absPath, _ = filepath.Abs(mounted)
	return
}
// configPathExist reports whether a config file for name exists in
// mount_configs or, failing that, in configs. A stat error other than
// "not exist" on the mounted path is treated as present, matching the
// lookup behavior of getConfigPath.
func configPathExist(name string) bool {
	mounted := fmt.Sprintf("mount_configs/%s.json", name)
	if _, statErr := os.Stat(mounted); statErr == nil || !os.IsNotExist(statErr) {
		return true
	}
	fallback := fmt.Sprintf("configs/%s.json", name)
	if _, statErr := os.Stat(fallback); statErr != nil && os.IsNotExist(statErr) {
		return false
	}
	return true
}
// ConfigReload refreshes all cached configuration sections: app, code and
// db caches are cleared (they reload lazily), while the cache section is
// reloaded eagerly.
func ConfigReload() {
	configAppClear()
	configCacheReload()
	configCodeClear()
	configDbClear()
}
|
package main
import (
"bytes"
"go/format"
"regexp"
"strings"
"testing"
)
// TestGenerator_ReadFile parses test/file1.go and verifies the parsed
// package metadata, the single Pill struct, and every field's
// name/type/tag and parsed tag conditions.
func TestGenerator_ReadFile(t *testing.T) {
	g := newGenerator()
	g.ReadFile("test/file1.go")
	p := g.Package
	if p.Name != "main" || p.Dir != "test" || p.File != "file1.go" || len(p.Structs) != 1 {
		t.Errorf("invalid package: %v", p)
	} else {
		t.Logf("valid package: %v", p)
	}
	s := p.Structs[0]
	if s.Name != "Pill" || len(s.Fields) != 3 {
		t.Errorf("invalid struct: %v", s)
	} else {
		t.Logf("valid struct: %v", s)
	}
	// Expected field metadata, in declaration order.
	fieldSamples := []struct {
		Name  string
		Type  string
		Tag   string
		Conds map[string]string
	}{
		{"Name", "string", "required", map[string]string{"required": ""}},
		{"Color", "int64", "required", map[string]string{"required": ""}},
		{"Amount", "uint8", "min=1,max=100,default=1", map[string]string{"min": "1", "max": "100", "default": "1"}},
	}
	for i, sample := range fieldSamples {
		f := s.Fields[i]
		if f.Name != sample.Name || f.Type != sample.Type || f.Tag != sample.Tag {
			t.Errorf("invalid field: %v", f)
		} else {
			t.Logf("valid field: %v", f)
		}
		// Every parsed condition must appear in the sample with the same value.
		for _, c := range f.Conds {
			if v, ok := sample.Conds[c.Name]; !ok || c.Value != v {
				t.Errorf("invalid cond: %v", c)
			} else {
				t.Logf("valid cond: %v", c)
			}
		}
	}
}
// TestGenerator_Generate generates the validation code for the Pill
// struct in test/file1.go and compares it with the expected output
// (both sides normalized through go/format).
func TestGenerator_Generate(t *testing.T) {
	g := newGenerator()
	g.ReadFile("test/file1.go")
	src, e := g.Generate([]string{"Pill"})
	if e != nil {
		t.Error(e)
	}
	t.Logf("%s", src)
	expect, _ := format.Source([]byte(`
// Code generated by "argumenter -type Pill"; DO NOT EDIT
package main
import "errors"
func (p *Pill) Valid() error {
if p.Name == "" {
return errors.New("Name must not \"\"")
}
if p.Color == 0 {
return errors.New("Color must not 0")
}
if p.Amount < 1 {
return errors.New("Amount must greater than or equal 1")
}
if p.Amount > 100 {
return errors.New("Amount must less than or equal 100")
}
if p.Amount == 0 {
p.Amount = 1
}
return nil
}
`))
	// bytes.Equal is the idiomatic equality check; bytes.Compare is meant
	// for ordering.
	if !bytes.Equal(src, expect) {
		t.Errorf("not match: %s, %s", string(src), string(expect))
	}
}
func TestGenerator_SelectStructs(t *testing.T) {
p := packageDecl{
Structs: []structDecl{
structDecl{Name: "Dog"},
structDecl{Name: "Cat"},
structDecl{Name: "Bird"},
},
}
g := &generator{Package: p}
ss := g.SelectStructs([]string{"Cat"})
if len(ss) != 1 || ss[0].Name != "Cat" {
t.Error("error", ss)
}
ss = g.SelectStructs([]string{"Cat", "Bird"})
if len(ss) != 2 || ss[0].Name != "Cat" || ss[1].Name != "Bird" {
t.Error("error", ss)
}
}
func TestStructDecl_Generate(t *testing.T) {
s := structDecl{
Name: "MyStruct",
Fields: []fieldDecl{
newFieldDecl("N", "int", "default=1,min=0,max=1"),
newFieldDecl("S", "string", "required"),
newFieldDecl("B", "bool", "required"),
},
}
w := new(bytes.Buffer)
e := s.Generate(w)
if e != nil {
t.Error("error: ", e)
} else {
t.Logf("%v", w.String())
}
}
// TestFieldDecl_Generate checks the generated validation snippet for a
// range of field types and tag conditions, comparing whitespace-normalized
// output against the expected one-liners.
func TestFieldDecl_Generate(t *testing.T) {
	samples := []struct {
		Name, Type, Tag, Out string
	}{
		{"N", "int", "default=1", `if self.N == 0 { self.N = 1 }`},
		{"N", "uint", "default=1", `if self.N == 0 { self.N = 1 }`},
		{"N", "float32", "default=1", `if self.N == 0 { self.N = 1 }`},
		{"B", "bool", "default=true", `if self.B == false { self.B = true }`},
		{"S", "string", "default=hello", `if self.S == "" { self.S = "hello" }`},
		{"F", "func()", "default=func(){}", `if self.F == nil { self.F = func(){} }`},
		{"S", "[]int", "default=[]int{}", `if self.S == nil { self.S = []int{} }`},
		{"M", "map[int]bool", "default=make(map[int]bool)", `if self.M == nil { self.M = make(map[int]bool) }`},
		{"D", "*Dog", "default=&Dog{}", `if self.D == nil { self.D = &Dog{} }`},
		{"I", "interface{}", "default=0", `if self.I == nil { self.I = 0 }`},
		{"A", "[2]int", `default=[2]int{}`, `if self.A == nil { self.A = [2]int{} }`},
		{"List", "[]int", "required", `if self.List == nil { return errors.New("List must not nil") }`},
		{"List", "[]int", "zero", `if self.List != nil { return errors.New("List must nil") }`},
		{"N", "int", "min=0", `if self.N < 0 { return errors.New("N must greater than or equal 0") }`},
		{"N", "int", "max=100", `if self.N > 100 { return errors.New("N must less than or equal 100") }`},
		{"N", "int", "gt=0", `if self.N <= 0 { return errors.New("N must greater than 0") }`},
		{"N", "int", "lt=100", `if self.N >= 100 { return errors.New("N must less than 100") }`},
		{"List", "[]int", "len=2", `if len(self.List) != 2 { return errors.New("List length must 2") }`},
		{"List", "[]int", "lenmin=1", `if len(self.List) < 1 { return errors.New("List length must greater than or equal 1") }`},
		{"List", "[]int", "lenmax=10", `if len(self.List) > 10 { return errors.New("List length must less than or equal 10") }`},
	}
	// Compile once: the old code recompiled this regexp on every sample.
	re := regexp.MustCompile(`[\s\n]+`)
	w := new(bytes.Buffer)
	for _, sample := range samples {
		f := newFieldDecl(sample.Name, sample.Type, sample.Tag)
		e := f.Generate(w, "self")
		if e != nil {
			t.Errorf("error: %v", e)
		}
		out := re.ReplaceAllString(w.String(), " ")
		out = strings.Trim(out, " ")
		if out != sample.Out {
			t.Errorf("not match:\nEXPECT:\t%v\nOUT:\t%v", sample.Out, out)
		} else {
			t.Logf("match: %v", out)
		}
		w.Reset()
	}
}
|
package main
import (
"log"
"context"
"bufio"
"fmt"
"os"
"strings"
pb "../proto"
"google.golang.org/grpc"
)
// conectarNodo dials the node at ip:port without TLS and returns the gRPC
// client connection, aborting the process if the dial setup fails.
func conectarNodo(ip string, port string) *grpc.ClientConn {
	log.Printf("Intentando iniciar conexión con " + ip + ":" + port)
	host := ip + ":" + port
	// The previous version declared `var conn *grpc.ClientConn` first,
	// which was redundant with the := below.
	conn, err := grpc.Dial(host, grpc.WithInsecure())
	if err != nil {
		log.Fatalf("did not connect: %s", err)
	}
	return conn
}
// main connects to the local node, prints the node's reported state, then
// reads commands from stdin in an endless loop; only "get" is recognized
// so far, anything else prints usage.
func main() {
	log.Printf("= INICIANDO CLIENTE =\n")
	conn := conectarNodo("127.0.0.1", "9000")
	c := pb.NewServicioNodoClient(conn)
	//log.Printf("Conectado al nodo " + ip + ":" + port)
	estado, err := c.ObtenerEstado(context.Background(), new(pb.Vacio))
	if err != nil {
		log.Fatalf("Error al llamar a ObtenerEstado(): %s", err)
	}
	log.Printf("Estado del nodo seleccionado: " + estado.Estado)
	// Command loop: prompt, read a line, strip the newline, dispatch on
	// the first word.
	reader := bufio.NewReader(os.Stdin)
	for {
		fmt.Print("-> ")
		text, _ := reader.ReadString('\n')
		// convert CRLF to LF
		text = strings.Replace(text, "\n", "", -1)
		words := strings.Split(text, " ")
		if strings.Compare("get", words[0]) == 0 {
			fmt.Println("get")
		} else {
			fmt.Println("Usage:\n get")
		}
	}
}
|
// Copyright 2015-2016 Sevki <s@sevki.org>. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cc
import (
"fmt"
"strings"
"bldy.build/build/executor"
"bldy.build/build/racy"
"path/filepath"
)
// CLib describes a C/C++ static library build target. The struct tags map
// each field onto both buck-style (cxx_library) and bazel-style
// (cc_library) rule attribute names; the build:"path"/ext tags presumably
// drive path expansion for file lists.
type CLib struct {
	Name            string   `cxx_library:"name" cc_library:"name"`
	Sources         []string `cxx_library:"srcs" cc_library:"srcs" build:"path" ext:".c,.S,.cpp,.cc"`
	Dependencies    []string `cxx_library:"deps" cc_library:"deps"`
	Includes        []string `cxx_library:"headers" cc_library:"includes" build:"path" ext:".h,.c,.S"`
	Headers         []string `cxx_library:"exported_headers" cc_library:"hdrs" build:"path" ext:".h,.c,.S"`
	CompilerOptions []string `cxx_library:"compiler_flags" cc_library:"copts"`
	LinkerOptions   []string `cxx_library:"linker_flags" cc_library:"linkopts"`
	LinkStatic      bool     `cxx_library:"linkstatic" cc_library:"linkstatic"`
	AlwaysLink      bool     `cxx_library:"alwayslink" cc_library:"alwayslink"`
}
// Hash returns a build-cache hash for the target: compiler version, name,
// link mode, all flags, and the contents of the source and include files
// (filtered to .h/.S/.c extensions).
func (cl *CLib) Hash() []byte {
	r := racy.New(
		racy.AllowExtension(".h"),
		racy.AllowExtension(".S"),
		racy.AllowExtension(".c"),
	)
	r.HashStrings(CCVersion, cl.Name, "clib")
	if cl.LinkStatic {
		r.HashStrings("static")
	}
	r.HashStrings(cl.CompilerOptions...)
	r.HashStrings(cl.LinkerOptions...)
	r.HashFiles(cl.Sources...)
	// cl.Includes is already []string; the old []string(...) conversion
	// was redundant.
	r.HashFiles(cl.Includes...)
	return r.Sum(nil)
}
// Build compiles every source file to an object file and then archives
// the objects into <name>.a.
func (cl *CLib) Build(e *executor.Executor) error {
	params := []string{"-c"}
	params = append(params, cl.CompilerOptions...)
	params = append(params, cl.LinkerOptions...)
	params = append(params, includes(cl.Includes)...)
	params = append(params, cl.Sources...)
	if err := e.Exec(Compiler(), CCENV, params); err != nil {
		// Return the error directly: the old fmt.Errorf(err.Error()) lost
		// the error type and misinterpreted any '%' in the message.
		return err
	}
	libName := fmt.Sprintf("%s.a", cl.Name)
	params = []string{"-rs", libName}
	params = append(params, cl.LinkerOptions...)
	// This is done under the assumption that each src file put in this thing
	// here will come out as a .o file.
	for _, f := range cl.Sources {
		_, filename := filepath.Split(f)
		// TrimSuffix+Ext is safe for extensionless names; the old
		// strings.LastIndex slice panicked when there was no dot.
		stem := strings.TrimSuffix(filename, filepath.Ext(filename))
		params = append(params, stem+".o")
	}
	if err := e.Exec(Archiver(), CCENV, params); err != nil {
		return err
	}
	return nil
}
// Installs maps the install destination of the archive to its build name:
// always-link libraries go to the top level, everything else under lib/.
func (cl *CLib) Installs() map[string]string {
	libName := fmt.Sprintf("%s.a", cl.Name)
	target := filepath.Join("lib", libName)
	if cl.AlwaysLink {
		target = libName
	}
	return map[string]string{target: libName}
}
// GetName returns the target's name.
func (cl *CLib) GetName() string {
	return cl.Name
}

// GetDependencies returns the labels of the targets this library depends on.
func (cl *CLib) GetDependencies() []string {
	return cl.Dependencies
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"strings"
)
// badPairs are substrings that immediately disqualify a string;
// vowels is the set counted by hasAtLeastThreeVowels.
var badPairs = []string{"ab", "cd", "pq", "xy"}
var vowels = []string{"a", "e", "i", "o", "u"}

// main counts the nice strings in ../input and prints the total.
func main() {
	file, err := os.Open("../input")
	if err != nil {
		log.Fatalln("Cannot read file", err)
	}
	defer file.Close()
	nice := 0
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		if isNiceString(scanner.Text()) {
			nice++
		}
	}
	fmt.Println(nice)
}

// isNiceString reports whether s contains no forbidden pair, has at least
// one doubled letter, and has at least three vowels.
func isNiceString(s string) bool {
	return !contains(s, badPairs...) && hasDoubleLetter(s) && hasAtLeastThreeVowels(s)
}

// contains reports whether s contains any of the given substrings.
func contains(s string, tokens ...string) bool {
	for _, tok := range tokens {
		if strings.Contains(s, tok) {
			return true
		}
	}
	return false
}

// hasDoubleLetter reports whether any character of s appears somewhere
// in s immediately doubled (e.g. "xx").
func hasDoubleLetter(s string) bool {
	for _, r := range s {
		if strings.Contains(s, string(r)+string(r)) {
			return true
		}
	}
	return false
}

// hasAtLeastThreeVowels reports whether s contains three or more vowels,
// counting repeats.
func hasAtLeastThreeVowels(s string) bool {
	count := 0
	for _, r := range s {
		for _, v := range vowels {
			if v == string(r) {
				count++
				break
			}
		}
		if count == 3 {
			return true
		}
	}
	return false
}
|
package task
import (
"bankBigData/BankServerJournal/db/query"
"bankBigData/BankServerJournal/entity"
"bankBigData/BankServerJournal/excel"
"bankBigData/_public/util"
"gitee.com/johng/gf/g"
"gitee.com/johng/gf/g/util/gconv"
"github.com/360EntSecGroup-Skylar/excelize"
)
// iStartPage is the next user page to export; it is advanced by
// writeInfoFile, so this package-level state makes the export non-reentrant.
var iStartPage = 2

const infoSheetName = "Sheet1" // target worksheet
const infoRowNum = 35          // number of fields written per user

// infoColNum is the current output position (starts at 6); advanced by
// iWriteInfoFileLine after each user is written.
var infoColNum = 6
// PageUser returns page `page` of loan users, 100 per page.
// NOTE(review): for page > 1 the offset is bumped by one extra record,
// which skips one user at every page boundary — confirm this is intended.
func PageUser(page int) g.List {
	limit := 100
	offset := (page - 1) * limit
	if page > 1 {
		offset += 1
	}
	allUser, _ := db_query_loan.QueryPageUserInfo_Pt(offset, limit)
	return allUser
}
// writeInfoFile writes one page of users into the workbook, then
// recursively fetches and writes the next page (tracked by the
// package-level iStartPage counter) until an empty page ends the export.
// It returns true once the export has finished.
func writeInfoFile(xlsx *excelize.File, pageUser g.List) bool {
	if len(pageUser) < 1 {
		return true
	}
	for _, v := range pageUser {
		item := entity.Customer_info{}
		// gconv.Struct returns nil on successful conversion.
		if ok := gconv.Struct(v, &item); ok == nil {
			iWriteInfoFileLine(item, xlsx)
		}
	}
	iStartPage += 1
	pageUser = PageUser(iStartPage)
	return writeInfoFile(xlsx, pageUser)
}
// iWriteInfoFileLine writes one user's 35 fields into the sheet at the
// position tracked by the package-level infoColNum, then advances it.
func iWriteInfoFileLine(userInfo entity.Customer_info, xlsx *excelize.File) {
	lineData := [infoRowNum]interface{}{}
	tempUserInfo, _ := db_query_loan.GetUserInfo_buss(userInfo.Id)
	// Data entered by the account manager overrides the user's own data.
	if tempUserInfo.CustomerId != 0 {
		userInfo = tempUserInfo
		userInfo.Id = userInfo.CustomerId
	}
	// Sequence number, restarting from 1 at the initial column.
	lineData[0] = (infoColNum - 6) + 1
	iLoadUserInfoToLineArr(&lineData, userInfo)
	iLoadUserInfoJobToLineArr(&lineData, userInfo)
	iLoadUserLoanToLineArr(&lineData, userInfo)
	iLoadUserBankToLineArr(&lineData, userInfo)
	iLoadUserFilingToLineArr(&lineData, userInfo)
	for y := 0; y < infoRowNum; y++ {
		excel.ModifyExcelCellByAxis(xlsx, infoSheetName, excel.ChangIndexToAxis(infoColNum, y), lineData[y])
	}
	infoColNum += 1
}
// iLoadUserInfoToLineArr fills the basic-profile cells (indices 1-9) and
// the political-status cells (18-20). Index 3 is not written here.
func iLoadUserInfoToLineArr(lineData *[infoRowNum]interface{}, userInfo entity.Customer_info) {
	// Basic information.
	lineData[1] = userInfo.Name
	lineData[2] = userInfo.Gender
	lineData[4] = userInfo.PhoneNumber
	lineData[5] = userInfo.IdentityCard
	lineData[6] = userInfo.City
	lineData[7] = userInfo.County
	lineData[8] = userInfo.Town
	lineData[9] = userInfo.Village
	// Political status: -1 renders as "no" (否), 1 as "yes" (是),
	// anything else as empty.
	IsPartyMember := ""
	if userInfo.IsPartyMember == -1 {
		IsPartyMember = "否"
	} else if userInfo.IsPartyMember == 1 {
		IsPartyMember = "是"
	}
	PartyMemberTime := ""
	if userInfo.PartyMemberTime != 0 {
		// todo format the date — currently always written as empty string
	}
	lineData[18] = IsPartyMember
	lineData[19] = PartyMemberTime
	lineData[20] = userInfo.PartyMemberAddr
}
// iLoadUserInfoJobToLineArr fills the employment cells (10-17), preferring
// the manager-entered record over the user's own when one exists
// (CustomerId != 0). Index 15 is deliberately blanked.
func iLoadUserInfoJobToLineArr(lineData *[infoRowNum]interface{}, userInfo entity.Customer_info) {
	newData, _ := db_query_loan.GetUserInfo_buss_job(userInfo.Id)
	if newData.CustomerId == 0 {
		newData, _ = db_query_loan.GetUserInfoJob(userInfo.Id)
	}
	lineData[10] = newData.Province
	lineData[11] = newData.City
	lineData[12] = newData.County
	lineData[13] = newData.Town
	lineData[14] = newData.Village
	lineData[15] = ""
	lineData[16] = newData.Industry
	lineData[17] = newData.CorporateName
}
// iLoadUserBankToLineArr fills the local credit-union cells (30-32). The
// department name is written either as the parent bank (when it has no
// parent, ParentId == 0) or as the child bank; the other cell stays empty.
func iLoadUserBankToLineArr(lineData *[infoRowNum]interface{}, userInfo entity.Customer_info) {
	data, _ := db_query_loan.GetOrgDepartmentInfo(userInfo.Id)
	sonBank := data.DepartmentName
	parentBank := data.DepartmentName
	if data.ParentId == 0 {
		sonBank = ""
	} else {
		parentBank = ""
	}
	lineData[30] = parentBank
	lineData[31] = sonBank
	lineData[32] = data.TelNumber
}
// iLoadUserFilingToLineArr fills the filing/credit-rating cells (21-24).
// The -1/1 flags render as 否/是; any other value renders as empty.
func iLoadUserFilingToLineArr(lineData *[infoRowNum]interface{}, userInfo entity.Customer_info) {
	data, _ := db_query_loan.GetPtCustomerInfoFiling(userInfo.Id)
	isFiling := ""
	switch data.IsFiling {
	case -1:
		isFiling = "否"
	case 1:
		isFiling = "是"
	}
	isRate := ""
	switch data.IsRate {
	case -1:
		isRate = "否"
	case 1:
		isRate = "是"
	}
	lineData[21] = isFiling
	lineData[22] = isRate
	lineData[23] = data.CreditRating
	lineData[24] = data.CreditLimit
}
// iLoadUserLoanToLineArr fills the loan cells (25-29) from the latest loan
// record, looked up by ID-card number when it matches util.DayReg.
// NOTE(review): the regexp is named DayReg yet is used to validate the
// IdentityCard field — confirm it is the intended pattern.
func iLoadUserLoanToLineArr(lineData *[infoRowNum]interface{}, userInfo entity.Customer_info) {
	data := entity.LoanInfo{}
	if util.DayReg.MatchString(userInfo.IdentityCard) {
		data, _ = db_query_loan.QueryByIdCardLast(userInfo.IdentityCard, "xd_col4,xd_col5,xd_col6,xd_col7,xd_col18")
	}
	lineData[25] = data.XdCol4
	lineData[26] = data.XdCol5
	lineData[27] = data.XdCol6
	lineData[28] = data.XdCol7
	lineData[29] = data.XdCol18
}
|
package main
import (
"encoding/json"
"fmt"
)
// response1 is marshalled with Go's default field-name keys (no tags).
type response1 struct {
	Page   int
	Fruits []string
}

// resposne2 (sic — typo for "response2"; the name is kept as-is since
// main below refers to it) uses json tags to control the key names.
type resposne2 struct {
	Page   int      `json:"page"`
	Fruits []string `json:"fruits"`
}
// main demonstrates JSON marshalling of tagged and untagged structs, then
// unmarshalling a JSON document into a tagged struct.
func main() {
	plain := &response1{
		Page:   1,
		Fruits: []string{"apple", "peach", "pear"},
	}
	out1, _ := json.Marshal(plain)
	fmt.Println(string(out1))
	tagged := &resposne2{
		Page:   1,
		Fruits: []string{"apple", "peach", "pear"},
	}
	out2, _ := json.Marshal(tagged)
	fmt.Println(string(out2))
	payload := `{"page": 1, "fruits": ["apple", "peach"]}`
	var decoded resposne2
	json.Unmarshal([]byte(payload), &decoded)
	fmt.Println(decoded)
	fmt.Printf("%+v\n", decoded)
}
|
package main
import (
"bufio"
"fmt"
"io/ioutil"
"os"
)
//好像需要go build操作
//文件写操作 os.OpenFile
//func OpenFile(name string,flg int,perm FileMode)(*File,error){ perm FileMode是文件权限
//}
//os.O_CREATE表示没有该文件会创建该文件,os.O_APPEND表示直接在以前的基础上面添加新内容 0644表示八进制的权限
//os.O_TRUNC 表示每次写都清理之前的文件
// writedem1 rewrites ./test.txt (truncating any previous content) with
// two demo lines via os.File's Write and WriteString.
func writedem1() {
	fileobj, err := os.OpenFile("./test.txt", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		fmt.Printf("open file failed!,err:%v", err)
		return
	}
	// defer guarantees the file is closed even if a write fails; the old
	// version closed manually and ignored every write error.
	defer fileobj.Close()
	if _, err := fileobj.Write([]byte("hello world!\n")); err != nil {
		fmt.Printf("write file failed!,err:%v", err)
		return
	}
	if _, err := fileobj.WriteString("你好 world!\n"); err != nil {
		fmt.Printf("write file failed!,err:%v", err)
	}
}
// writedemo2 rewrites ./test.txt through a bufio.Writer: writes land in
// the buffer and only reach the file on Flush.
func writedemo2() {
	fileobj, err := os.OpenFile("./test.txt", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		fmt.Printf("open file failed!,err:%v", err)
		return
	}
	defer fileobj.Close()
	writer := bufio.NewWriter(fileobj)
	// Check the buffered write and the flush; both errors were previously
	// silently discarded.
	if _, err := writer.WriteString("hello world! buifo进行写 \n"); err != nil {
		fmt.Printf("write file failed!,err:%v", err)
		return
	}
	if err := writer.Flush(); err != nil {
		fmt.Printf("write file failed!,err:%v", err)
	}
}
// writedemo3 writes the whole file in a single call via ioutil.WriteFile.
func writedemo3() {
	content := "通过ioutil进行写"
	if err := ioutil.WriteFile("./test.txt", []byte(content), 0666); err != nil {
		fmt.Printf("write file failed!,err:%v", err)
		return
	}
}
// main runs the third demo; the first two are kept for reference and can
// be enabled by uncommenting.
func main(){
	//writedem1()
	//writedemo2()
	writedemo3()
}
|
package trylock_test
import (
"testing"
"github.com/chappjc/trylock"
)
// TestExample exercises TryLock: the first acquisition succeeds, a second
// attempt on the held mutex fails, and after Unlock it succeeds again.
// NOTE: the "Output:" block below is only checked by `go test` for
// Example functions, not Test functions — here it is documentation of the
// expected t.Log values, nothing is verified automatically.
func TestExample(t *testing.T) {
	var mu trylock.Mutex
	t.Log(mu.TryLock())
	t.Log(mu.TryLock())
	mu.Unlock()
	t.Log(mu.TryLock())
	// Output:
	// true
	// false
	// true
}
|
/*
Package coinbase_api is a Go interface to the CoinBase.com
API. It may be used to design automated Bitcoin trading
systems.
Currently implemented endpoints:
* get account balance
* get receive address
* get exchange rate
* purchase bitcoins
* sell bitcoins
The global variable `ApiKey` is used to store your Coinbase API
key. If this is empty, attempts to make authenticated requests
will result in an ErrNotAuthenticated error being returned.
*/
/*
Copyright (c) 2013 Kyle Isom <kyle@tyrfingr.is>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package coinbase_api
|
package main //nolint:testpackage
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
)
const staticTestPath = "headlamp_testdata/static_files/"

// checkSpaGet issues a GET for url against a spaHandler rooted at
// staticTestPath with baseURL /headlamp, asserting a 200 status and that
// the body starts with wantPrefix. Shared by the three tests below, which
// previously duplicated this sequence verbatim.
func checkSpaGet(t *testing.T, url string, wantPrefix string) {
	t.Helper()
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		t.Fatal(err)
	}
	rr := httptest.NewRecorder()
	handler := spaHandler{staticPath: staticTestPath, indexPath: "index.html", baseURL: "/headlamp"}
	handler.ServeHTTP(rr, req)
	if status := rr.Code; status != http.StatusOK {
		t.Errorf("handler returned wrong status code: got %v want %v",
			status, http.StatusOK)
	}
	if !strings.HasPrefix(rr.Body.String(), wantPrefix) {
		t.Errorf("handler returned unexpected body: got :%v: want :%v:",
			rr.Body.String(), wantPrefix)
	}
}

// Is supposed to return the index.html if there is no static file.
func TestSpaHandlerMissing(t *testing.T) {
	checkSpaGet(t, "/headlampxxx", "The index.")
}

// Works with a baseURL to get the index.html.
func TestSpaHandlerBaseURL(t *testing.T) {
	checkSpaGet(t, "/headlamp/", "The index.")
}

// Works with a baseURL to get other files.
func TestSpaHandlerOtherFiles(t *testing.T) {
	checkSpaGet(t, "/headlamp/example.css", ".somecss")
}
|
package cron
import (
"github.com/spf13/cobra"
)
// NewCronWorkflowCommand builds the root "cron" command with every
// cron-workflow subcommand attached; running it bare just prints help.
func NewCronWorkflowCommand() *cobra.Command {
	command := &cobra.Command{
		Use:   "cron",
		Short: "manage cron workflows",
		Run: func(cmd *cobra.Command, args []string) {
			cmd.HelpFunc()(cmd, args)
		},
	}
	subcommands := []*cobra.Command{
		NewGetCommand(),
		NewListCommand(),
		NewCreateCommand(),
		NewDeleteCommand(),
		NewLintCommand(),
		NewSuspendCommand(),
		NewResumeCommand(),
	}
	for _, sub := range subcommands {
		command.AddCommand(sub)
	}
	return command
}
|
package models
import (
"github.com/astaxie/beego/orm"
)
// CameraOnlineStat maps the eag_CameraOnlineStat table (prefix registered
// in init): a snapshot of online/offline camera counts for the
// [BeforeTime, AfterTime] window.
type CameraOnlineStat struct {
	Id         int    `orm:"column(id);auto;pk"`
	BeforeTime string `orm:"column(beforetime);size(32)"`
	AfterTime  string `orm:"column(aftertime);size(32)"`
	OnlineNum  int    `orm:"column(onlinenumber)"`
	OfflineNum int    `orm:"column(offlinenumber)"`
}
// TableName returns the table name used by the beego ORM; combined with
// the "eag_" prefix registered in init. (Also fixes gofmt spacing.)
func (t *CameraOnlineStat) TableName() string {
	return "CameraOnlineStat"
}
// init registers the model with the beego ORM under the "eag_" table
// prefix. (Also fixes gofmt spacing.)
func init() {
	orm.RegisterModelWithPrefix("eag_", new(CameraOnlineStat))
}
// AddCamerOnlineStatData inserts a camera online/offline statistics row
// and returns the new row id. (The historical "Camer" typo in the name is
// kept so existing callers stay compatible; formatting gofmt'ed.)
func AddCamerOnlineStatData(beforetime string, aftertime string, onlinenumber int, offlinenumber int) (int64, error) {
	o := orm.NewOrm()
	v := &CameraOnlineStat{
		BeforeTime: beforetime,
		AfterTime:  aftertime,
		OnlineNum:  onlinenumber,
		OfflineNum: offlinenumber,
	}
	id, err := o.Insert(v)
	return id, err
}
|
package schema
import (
"github.com/facebook/ent/dialect"
"github.com/facebook/ent"
"github.com/facebook/ent/schema/field"
)
// UserAccount holds the schema definition for the UserAccount entity.
type UserAccount struct {
	ent.Schema
}

// Mixin of the UserAccount. No mixins are applied.
func (UserAccount) Mixin() []ent.Mixin {
	return []ent.Mixin{}
}

// Config maps the entity to the user_account table.
func (UserAccount) Config() ent.Config {
	return ent.Config{
		Table: "user_account",
	}
}

// Fields of the UserAccount. The three timestamp columns are stored as
// MySQL datetime; password and salt default to the empty string.
func (UserAccount) Fields() []ent.Field {
	return []ent.Field{
		field.Int64("id").Comment(`主键`),
		field.String("user_identity").Optional().Comment(`用户标识`),
		field.Int64("user").Optional().Comment(`用户账号`),
		field.String("password").Default("").Comment(`用户登录密码`),
		field.String("salt").Default("").Comment(`密码盐`),
		field.Time("created_at").Optional().SchemaType(map[string]string{dialect.MySQL: "datetime"}).Comment(`创建时间`),
		field.Time("updated_at").Optional().SchemaType(map[string]string{dialect.MySQL: "datetime"}).Comment(`更新时间`),
		field.Time("deleted_at").Optional().SchemaType(map[string]string{dialect.MySQL: "datetime"}).Comment(`删除时间`),
	}
}

// Edges of the UserAccount. The entity has no relations.
func (UserAccount) Edges() []ent.Edge {
	return nil
}
|
package main
import "fmt"
// main reads a grade and an average from stdin and prints 2*avg - grade.
func main() {
	var grade, avg int
	fmt.Scanf("%d", &grade)
	fmt.Scanf("%d", &avg)
	result := 2*avg - grade
	fmt.Println(result)
}
|
package mid
import (
	"errors"
	"net/http"

	"github.com/Yangshuting/golang_model/lib"
	"github.com/Yangshuting/golang_model/model"
	"github.com/Yangshuting/golang_model/storage"
	"github.com/labstack/echo"
	"gopkg.in/mgo.v2/bson"
)
// AuthMid is echo middleware that resolves the "session" query parameter
// to a user, stores it on the context, and only then invokes the next
// handler; unknown sessions get a 400 response.
func AuthMid(next echo.HandlerFunc) echo.HandlerFunc {
	return func(c echo.Context) error {
		user, err := auth(c, c.QueryParam("session"))
		if err != nil {
			return c.JSON(http.StatusBadRequest, lib.WXError(err.Error(), lib.STATUS_BAD_REQUEST))
		}
		lib.SetUser(c, user)
		return next(c)
	}
}
// auth looks the session token up in redis and loads the matching user.
// It returns an error when the session is unknown or when the stored id
// is not a valid ObjectId hex string — bson.ObjectIdHex panics on invalid
// input, so the guard prevents a request-crashing panic on bad data.
func auth(c echo.Context, session string) (*model.KuaiMaoUser, error) {
	cc := c.Get("cc").(*lib.Cusctx)
	uid, err := storage.GetRedis(session)
	if err != nil {
		return nil, err
	}
	if !bson.IsObjectIdHex(uid) {
		return nil, errors.New("invalid user id in session")
	}
	user := model.FindByID(cc, bson.ObjectIdHex(uid))
	return user, nil
}
|
package environment
import "github.com/go-playground/validator/v10"
// Sqlite holds SQLite connection settings read from the environment;
// SQLITE_DATABASE is required by the validate tag.
type Sqlite struct {
	DatabaseName string `envconfig:"SQLITE_DATABASE" validate:"required"`
}
// IsValid reports whether the configuration satisfies its validation
// tags (i.e. SQLITE_DATABASE is set).
func (s Sqlite) IsValid() bool {
	err := validator.New().Struct(s)
	return err == nil
}
|
package cli
import (
"flag"
"fmt"
"os"
"github.com/neil-berg/blockchain/blockchain"
)
// CLI is the command line interface shape; Chain is the blockchain that
// the addblock and printchain subcommands operate on.
type CLI struct {
	Chain *blockchain.Blockchain
}
// printUsage prints the CLI help for the supported subcommands.
func (cli *CLI) printUsage() {
	lines := []string{
		"Error parsing CLI commands. \nCLI usage:",
		"\taddblock --data <some data>",
		"\tprintchain",
	}
	for _, line := range lines {
		fmt.Println(line)
	}
}
// Run starts the CLI: it parses os.Args and dispatches to the addblock or
// printchain subcommand.
func (cli *CLI) Run() {
	addBlockCmd := flag.NewFlagSet("addblock", flag.ExitOnError)
	addBlockData := addBlockCmd.String("data", "", "String of the block data")
	printChainCmd := flag.NewFlagSet("printchain", flag.ExitOnError)
	if len(os.Args) == 1 {
		cli.printUsage()
		return
	}
	switch os.Args[1] {
	case "addblock":
		// ExitOnError: Parse exits the process itself on bad flags.
		addBlockCmd.Parse(os.Args[2:])
	case "printchain":
		printChainCmd.Parse(os.Args[2:])
	default:
		// NOTE(review): after printing usage, control falls through to the
		// Parsed() checks below instead of exiting — confirm intended.
		cli.printUsage()
		// os.Exit(1)
	}
	if addBlockCmd.Parsed() {
		if *addBlockData == "" {
			addBlockCmd.Usage()
			os.Exit(1)
		}
		cli.addBlock(*addBlockData)
	}
	if printChainCmd.Parsed() {
		cli.printChain()
	}
}
// addBlock appends a block carrying data to the chain and reports the
// outcome on stdout.
func (cli *CLI) addBlock(data string) {
	err := cli.Chain.AddBlock(data)
	if err != nil {
		fmt.Println("Failed to add block")
		// Without this return the success message was printed even on
		// failure.
		return
	}
	fmt.Printf("Successfully added %s", data)
}
// printChain walks the chain from the newest block back to genesis,
// printing each block's fields. The genesis block is recognized by an
// empty PrevHash, which terminates the loop after it is printed.
func (cli *CLI) printChain() {
	iterator := cli.Chain.GetNewIterator()
	for {
		block := iterator.Next()
		isGenesis := len(block.PrevHash) == 0
		if isGenesis {
			fmt.Println("======== GENESIS ========")
		} else {
			fmt.Println("======== BLOCK ========")
		}
		fmt.Printf("Timestamp:\t %v\n", block.Timestamp)
		fmt.Printf("Data:\t\t %s\n", block.Data)
		fmt.Printf("Hash:\t\t %x\n", block.Hash)
		fmt.Printf("Previous hash:\t %x\n", block.PrevHash)
		fmt.Printf("Nonce: \t\t %d\n", block.Nonce)
		if isGenesis {
			break
		}
	}
}
|
package main
import (
"context"
"database/sql"
"log"
"time"
_ "github.com/go-sql-driver/mysql"
)
// main opens the test database and inserts two Person rows inside one
// transaction, rolling back if either insert fails.
func main() {
	db, err := sql.Open("mysql", "root:root@tcp(db:3306)/testDB")
	if err != nil {
		log.Fatal("error in connecting to DB:", err)
	}
	//db.SetConnMaxLifetime()
	db.SetMaxOpenConns(100)
	defer db.Close()
	if err = db.Ping(); err != nil {
		log.Fatalf("error in db PING, err: %v", err)
	}
	ctx, cancelFn := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancelFn()
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		log.Fatalf("error in beginning transaction, err: %v", err)
	}
	if !execOrRollback(tx, `INSERT INTO Person(Name, Age) VALUES("dicky", 12)`, "INSERT INTO Person") {
		return
	}
	if !execOrRollback(tx, `INSERT INTO Person(Name, Age) VALUES("dicky", 13)`, "INSERT INTO Man") {
		return
	}
	// Don't roll back after a failed Commit: the transaction is already
	// finished, so Rollback would return ErrTxDone and the old code then
	// died on a spurious log.Fatalf.
	if err = tx.Commit(); err != nil {
		log.Printf("error committing transaction, err: %v", err)
	}
}

// execOrRollback runs query on tx. On failure it rolls the transaction
// back (fatal if even the rollback fails), logs the rollback using label,
// and returns false. This replaces two verbatim copies of the same
// insert-then-rollback sequence.
func execOrRollback(tx *sql.Tx, query string, label string) bool {
	if _, err := tx.Exec(query); err != nil {
		if errRollback := tx.Rollback(); errRollback != nil {
			log.Fatalf("error rollback, err: %v", errRollback)
		}
		log.Printf("rollback success for %s", label)
		return false
	}
	return true
}
|
package logger
/**
* Created by Zf_D on 2015-02-28
*/
import (
"fmt"
"log"
"os"
"runtime"
"strings"
"strconv"
"time"
"sync"
"io"
)
// Log levels, ascending in severity.
const (
	_        = iota // level 0 unused
	Lv_Debug        //1
	Lv_Info         //2
	Lv_Warn         //3
	Lv_Error        //4
)

// Size units (powers of 1024).
const (
	_  = int64(1) << (iota * 10)
	KB //1024
	MB //1048576
	GB //1073741824
	TB //1099511627776
)

// DateFormat is the timestamp layout used for backup file names
// (colon-free so the result is a valid file name).
const DateFormat = "2006-01-02 15-04-05"

var (
	projectPath                = GetProjectPath()  // project root path
	fileDir     string         = "log"             // log directory name
	fileName    string         = "log.log"         // log file name
	consoleFlag bool           = true              // also mirror output to the console
	logLevel    int            = Lv_Debug          // minimum level that is written
	backupType  int            = 1                 // 1 = rotate by size, 2 = rotate by time
	backupSize  int64          = 1 * MB            // size threshold for rotation
	logFile     *os.File                           // currently open log file
	logger      *log.Logger                        // underlying logger
	lock        *sync.RWMutex  = new(sync.RWMutex) // guards logger/logFile
)
// SetFilePath overrides the log directory and/or file name; empty arguments
// leave the current value untouched. The logger is reopened whenever either
// value changes so subsequent writes go to the new location.
func SetFilePath(arg_fileDir string, arg_fileName string) {
	changed := false
	if arg_fileDir != "" {
		fileDir = arg_fileDir
		changed = true
	}
	if arg_fileName != "" {
		fileName = arg_fileName
		changed = true
	}
	// bug fix: previously the logger was only reopened when the file name
	// changed, so a directory-only change kept writing to the old path.
	if changed {
		logger = getLogger()
	}
}
// SetLogLevel sets the minimum level that will be written; values outside
// the [Lv_Debug, Lv_Error] range are ignored.
func SetLogLevel(arg_logLevel int) {
	if arg_logLevel < Lv_Debug || arg_logLevel > Lv_Error {
		return
	}
	logLevel = arg_logLevel
}
// SetConsoleFlag toggles mirroring of log output to the console.
func SetConsoleFlag(arg_consoleFlag bool) {
	consoleFlag = arg_consoleFlag
}
// GetProjectPath returns the portion of the working directory that precedes
// the first "src" segment (GOPATH-style project root). When "src" does not
// occur, the whole working directory is returned.
func GetProjectPath() string {
	wd, _ := os.Getwd()
	return strings.Split(wd, "src")[0]
}
// getLogger creates the log directory if needed, opens (or creates) the log
// file in append mode, and returns a *log.Logger writing to it. The nil
// returns are unreachable in practice because log.Fatal exits the process.
// NOTE(review): paths are built with "\\" separators, i.e. Windows-specific;
// fileSize and backup build the same paths, so all three would have to move
// to filepath.Join together — confirm the target platform.
func getLogger() *log.Logger {
	logFileDir := projectPath + "\\" + fileDir
	logFilePath := logFileDir + "\\" + fileName
	// Ensure the log directory exists.
	err := os.MkdirAll(logFileDir, os.ModePerm)
	if err != nil {
		log.Fatal(err.Error())
		return nil
	}
	// Open or create the log file for appending.
	logFile, err = os.OpenFile(logFilePath, os.O_RDWR|os.O_APPEND|os.O_CREATE, os.ModePerm)
	if err != nil {
		log.Fatal(err.Error())
		return nil
	}
	return log.New(logFile, "", log.Ldate|log.Ltime|log.Lshortfile)
}
// console mirrors s to stdout, prefixed with a caller's file:line.
// NOTE(review): the fixed Caller(2) depth appears to attribute the line to
// output()'s caller (one of the exported wrappers) rather than user code —
// confirm the intended frame.
func console(s string) {
	if consoleFlag {
		_, file, line, _ := runtime.Caller(2)
		// Keep only the base file name.
		file = file[strings.LastIndex(file, "/")+1:]
		fmt.Println(file+":"+strconv.Itoa(line), s)
	}
}
// output formats and writes one log record when level passes the filter.
// It lazily (re)creates the logger under the package lock and mirrors the
// line to the console when enabled. Output depth 2 attributes the record to
// output's caller.
func output(level int, suffix string, v []interface{}) {
	if logLevel <= level {
		lock.Lock()
		defer lock.Unlock()
		if logger == nil {
			logger = getLogger()
		}
		// Build "[X] arg1 arg2 ..." from the variadic values.
		s := suffix
		for i := 0; i < len(v); i++ {
			s += " " + fmt.Sprintf("%v", v[i])
		}
		logger.Output(2, s)
		console(s)
	}
}
// Debug writes v at debug level.
func Debug(v ...interface{}) {
	output(Lv_Debug, "[D]", v)
}

// Info writes v at info level.
func Info(v ...interface{}) {
	output(Lv_Info, "[I]", v)
}

// Warn writes v at warn level.
func Warn(v ...interface{}) {
	output(Lv_Warn, "[W]", v)
}

// Error writes v at error level.
func Error(v ...interface{}) {
	output(Lv_Error, "[E]", v)
}
// SetSizeBackup switches to size-based rotation: every tempSec seconds the
// log file size is checked and the file is rotated once it reaches size
// bytes. A tempSec <= 0 only records the settings without starting the
// background checker. The checker exits if the mode is later switched.
func SetSizeBackup(tempSec int, size int64) {
	backupType = 1
	backupSize = size
	if tempSec <= 0 {
		return
	}
	go func() {
		ticker := time.NewTicker(time.Duration(tempSec) * time.Second)
		// bug fix: the ticker was never stopped, leaking its timer after
		// the goroutine returned.
		defer ticker.Stop()
		for range ticker.C {
			if backupType != 1 {
				// Mode switched to time-based backups; stop this checker.
				return
			}
			if fileSize() >= backupSize {
				backup()
			}
		}
	}()
}
// SetDailyBackup switches to time-based rotation: the log file is rotated
// every tempSec seconds. A tempSec <= 0 only records the mode without
// starting the background goroutine, which exits if the mode changes.
func SetDailyBackup(tempSec int) {
	backupType = 2
	if tempSec <= 0 {
		return
	}
	go func() {
		ticker := time.NewTicker(time.Duration(tempSec) * time.Second)
		// bug fix: the ticker was never stopped, leaking its timer after
		// the goroutine returned.
		defer ticker.Stop()
		for range ticker.C {
			if backupType != 2 {
				// Mode switched to size-based backups; stop this rotator.
				return
			}
			backup()
		}
	}()
}
// fileSize returns the current size of the log file in bytes, or 0 when the
// file cannot be stat'ed (e.g. nothing has been logged yet).
func fileSize() int64 {
	logFilePath := projectPath + "\\" + fileDir + "\\" + fileName
	fileInfo, err := os.Stat(logFilePath)
	if err != nil {
		// bug fix: log.Fatal here killed the whole process from the backup
		// ticker goroutine whenever the log file did not exist yet; treat a
		// missing/unreadable file as size 0 instead (the old "return 0"
		// was unreachable dead code after Fatal).
		log.Println(err.Error())
		return 0
	}
	return fileInfo.Size()
}
func copyFile(fromFilePath string, toFilePath string) {
fromFile, err := os.Open(fromFilePath)
if err != nil {
log.Fatal(err.Error())
}
defer fromFile.Close()
toFile, err := os.Create(toFilePath)
if err != nil {
log.Fatal(err.Error())
}
defer toFile.Close()
_, err = io.Copy(toFile, fromFile)
if err != nil {
log.Fatal(err.Error())
}
}
// backup rotates the current log file: it is copied into a "backup"
// subdirectory under a timestamped name, then truncated in place, and the
// package logger is re-created on the fresh file. Runs under the package
// lock so rotation does not race with output().
// NOTE(review): "\\" separators make this Windows-specific — see getLogger.
func backup() {
	lock.Lock()
	defer lock.Unlock()
	logFileDir := projectPath + "\\" + fileDir
	backupDir := logFileDir + "\\backup"
	logFilePath := logFileDir + "\\" + fileName
	backupFilePath := backupDir + "\\" + time.Now().Format(DateFormat) + ".log"
	// Ensure the backup directory exists.
	err := os.MkdirAll(backupDir, os.ModePerm)
	if err != nil {
		log.Fatal(err.Error())
	}
	// Close the current log file first.
	if logFile != nil {
		logFile.Close()
	}
	// Copy the old log aside.
	copyFile(logFilePath, backupFilePath)
	// Truncate and reopen the file for new writes.
	logFile, err = os.OpenFile(logFilePath, os.O_RDWR|os.O_TRUNC|os.O_APPEND|os.O_CREATE, os.ModePerm)
	if err != nil {
		log.Fatal(err.Error())
	}
	logger = log.New(logFile, "", log.Ldate|log.Ltime|log.Lshortfile)
}
|
package public
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/shopspring/decimal"
"github.com/tealeg/xlsx"
"strings"
"tpay_backend/merchantapi/internal/common"
"tpay_backend/model"
"tpay_backend/utils"
"tpay_backend/merchantapi/internal/svc"
"tpay_backend/merchantapi/internal/types"
"github.com/tal-tech/go-zero/core/logx"
)
// IdentifyBatchTransferFileLogic handles batch-transfer file recognition
// for the merchant API.
type IdentifyBatchTransferFileLogic struct {
	logx.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

// NewIdentifyBatchTransferFileLogic builds the logic with a context-bound logger.
func NewIdentifyBatchTransferFileLogic(ctx context.Context, svcCtx *svc.ServiceContext) IdentifyBatchTransferFileLogic {
	return IdentifyBatchTransferFileLogic{
		Logger: logx.WithContext(ctx),
		ctx:    ctx,
		svcCtx: svcCtx,
	}
}
// IdentifyBatchTransferFile validates an uploaded batch-transfer file for
// the given merchant, parses and aggregates its rows, and persists a batch
// order. On success the new batch number is returned.
func (l *IdentifyBatchTransferFileLogic) IdentifyBatchTransferFile(merchantId int64, req types.IdentifyBatchTransferFileRequest) (*types.IdentifyBatchTransferFileReply, error) {
	// 1. Confirm the uploaded file exists.
	fileInfo, err := model.NewUploadFileLogModel(l.svcCtx.DbEngine).FindOne(req.FileName)
	if err != nil {
		l.Errorf("查询文件信息失败,err:%v", err)
		return nil, common.NewCodeError(common.FileNotExist)
	}
	// The file must belong to this merchant account.
	if fileInfo.AccountId != merchantId || fileInfo.AccountType != model.UploadFileLogAccountTypeMerchant {
		l.Errorf("文件不是该商户的, fileInfo:%+v ; err:%v ", fileInfo, err)
		return nil, common.NewCodeError(common.FileNotExist)
	}
	// 2. Fetch, parse and validate the file rows.
	fileContentList, err := l.getBatchAnalysisData(req.FileName, merchantId)
	if err != nil {
		l.Errorf("处理数据发生错误, err:%v", err)
		return nil, err
	}
	// 3. Aggregate rows into a batch order.
	resultData, err := l.getBatchAnalysisResult(fileContentList)
	if err != nil {
		l.Errorf("处理数据发生错误, err:%v", err)
		return nil, err
	}
	resultData.MerchantId = merchantId
	// 4. Persist the batch order.
	if err := model.NewTransferBatchOrderModel(l.svcCtx.DbEngine).Insert(resultData); err != nil {
		l.Errorf("插入批量订单失败,err:%v", err)
		return nil, common.NewCodeError(common.SysDBAdd)
	}
	return &types.IdentifyBatchTransferFileReply{
		BatchNo: resultData.BatchNo,
	}, nil
}
// getBatchAnalysisData downloads the uploaded transfer spreadsheet from
// cloud storage and parses each data row into a model.FileContent,
// validating required columns. All invalid rows are collected and reported
// in a single error.
//
// Expected columns: 0 index (merchant's own numbering, ignored),
// 1 payee name, 2 card number, 3 bank name, 4 branch name,
// 5 payout channel code, 6 amount, 7 remark.
func (l *IdentifyBatchTransferFileLogic) getBatchAnalysisData(fileName string, merchantId int64) (datas []*model.FileContent, errs error) {
	l.Infof("数据分析开始 start ")
	// Fetch the raw file from cloud storage.
	bytes, err := l.svcCtx.CloudStorage.GetObject(fileName)
	if err != nil {
		l.Errorf("从云存储获取文件失败,FileName:%s, err:%v", fileName, err)
		return nil, common.NewCodeError(common.FileNotExist)
	}
	// Parse the spreadsheet.
	xlFile, err := xlsx.OpenBinary(bytes)
	if err != nil {
		l.Errorf("open failed: %s\n", err)
		return nil, common.NewCodeError(common.SysUnKnow)
	}
	// The merchant record supplies the currency used below.
	merchant, err := model.NewMerchantModel(l.svcCtx.DbEngine).FindOneById(merchantId)
	if err != nil {
		l.Errorf("查询商户信息出错, err:%v", err)
		return nil, common.NewCodeError(common.GetLoginUserInfoFailed)
	}
	// Currency config decides whether amounts are converted to minor units.
	currencyInfo, err := model.NewCurrencyModel(l.svcCtx.DbEngine).FindByCurrency(merchant.Currency)
	if err != nil {
		l.Errorf("查询商户币种出错,err:%v", err)
		return nil, common.NewCodeError(common.SysDBErr)
	}
	var errArgs []string
	for _, sheet := range xlFile.Sheets {
		for j, row := range sheet.Rows {
			if j == 0 { // skip the header row
				continue
			}
			if len(row.Cells) == 0 { // skip empty rows
				continue
			}
			data := &model.FileContent{
				Row:            fmt.Sprintf("%d", j), // row number
				ChannelCode:    "",                   // payout channel code
				AccountName:    "",                   // payee name
				CardNumber:     "",                   // payee card number
				BankName:       "",                   // bank name
				BankBranchName: "",                   // branch name
				Amount:         0,                    // amount
				Remark:         "",                   // remark
			}
			for k, cell := range row.Cells {
				text := cell.String()
				switch k {
				case 0: // merchant's own row index — not processed
				case 1: // payee name
					data.AccountName = text
				case 2: // card number
					data.CardNumber = text
				case 3: // bank name
					data.BankName = text
				case 4: // branch name
					data.BankBranchName = text
				case 5: // payout channel code
					data.ChannelCode = text
				case 6: // amount
					amount, err := decimal.NewFromString(text)
					if err != nil {
						// Leave Amount at 0; the row is flagged invalid below.
						l.Errorf("行号%v, 金额错误,err:%v", j, err)
						continue
					}
					// e.g. 2.35 -> 235 when the currency stores minor units
					if currencyInfo.IsDivideHundred == model.DivideHundred {
						amount = amount.Mul(decimal.NewFromInt(100))
					}
					data.Amount = amount.IntPart()
				case 7: // remark
					data.Remark = text
				}
				l.Infof("行号%v 第%v列, 内容: %s\n", j, k, text)
			}
			// A row is invalid when any required column is empty or the
			// amount is missing/zero. (bug fix: the previous field switch
			// plus separate amount check could append the same row number
			// to errArgs twice.)
			missingRequired := data.AccountName == "" ||
				data.CardNumber == "" ||
				data.BankName == "" ||
				data.ChannelCode == ""
			if missingRequired || data.Amount == 0 {
				errArgs = append(errArgs, fmt.Sprintf("第%v行", j))
			}
			datas = append(datas, data)
		}
	}
	if len(errArgs) != 0 {
		errTempStr := "文件%v有误,请修正后提交"
		return nil, errors.New(fmt.Sprintf(errTempStr, strings.Join(errArgs, "、")))
	}
	l.Infof("数据分析完 end ")
	return datas, nil
}
// getBatchAnalysisResult aggregates parsed file rows into a new
// TransferBatchOrder: the rows are serialized to JSON and the total row
// count and total amount are accumulated.
func (l *IdentifyBatchTransferFileLogic) getBatchAnalysisResult(fileContentList []*model.FileContent) (*model.TransferBatchOrder, error) {
	order := &model.TransferBatchOrder{
		BatchNo:     utils.GetUniqueId(),     // batch identifier
		TotalNumber: 0,                       // total row count, filled below
		TotalAmount: 0,                       // total amount, filled below
		Status:      model.BatchStatusInit,   // initial batch state
		FileContent: "",                      // JSON of the rows, filled below
		GenerateAll: model.GenerateAllUndone, // 1 = all orders generated, 2 = not yet
	}
	encoded, err := json.Marshal(fileContentList)
	if err != nil {
		l.Errorf("json编码文件内容失败, err:%v", err)
		return nil, err
	}
	order.FileContent = string(encoded)
	for _, row := range fileContentList {
		order.TotalNumber++
		order.TotalAmount += row.Amount
	}
	return order, nil
}
|
package todolist
// Store abstracts persistence for todo lists: pending and archived items,
// named backlog files, and import/export to external files. Exact file
// formats are defined by implementations.
type Store interface {
	// Initialize prepares the underlying storage.
	Initialize()
	// LoadPending returns the pending todos.
	LoadPending() ([]*Todo, error)
	// LoadArchived returns the archived todos.
	LoadArchived() ([]*Todo, error)
	// LoadBacklog returns the todos stored in the given backlog file.
	LoadBacklog(filepath string) ([]*Todo, error)
	// GetBacklogFilepath returns the path of the current backlog file.
	GetBacklogFilepath() string
	// AppendBacklog appends todos to the given backlog file.
	AppendBacklog(filepath string, todos []*Todo)
	// DeleteBacklog removes the given backlog file.
	DeleteBacklog(filepath string)
	// Save persists the given todos.
	Save(todos []*Todo)
	// Import reads todos from an external file.
	Import(filepath string) ([]*Todo, error)
	// Export writes todos to an external file.
	Export(filepath string, todos []*Todo)
}
|
package render
import (
"net/http"
)
// HtmlRender renders a Result as an HTML response.
type HtmlRender struct {
}

// Render writes result.data as text/html using the Result's status code.
// NOTE(review): result.data.(string) panics if data is not a string —
// confirm callers always supply strings. The Write error is ignored.
func (htmlRender HtmlRender) Render(write http.ResponseWriter, result Result) {
	write.Header().Set("Content-Type", "text/html")
	write.WriteHeader(result.code)
	write.Write([]byte(result.data.(string)))
}
|
package p2p
import (
"sync"
"testing"
)
// TestDispatcher verifies that Register makes a subscriber retrievable by
// its message type and that Deregister removes it again.
func TestDispatcher(t *testing.T) {
	dp := NewDispatcher()
	sb := NewSubscriber("", make(chan *Message, 128), false, "test")
	types := sb.MessageType()
	dp.Register(sb)
	mt, _ := dp.subscribersMap.Load(types)
	if mt == nil {
		t.Fatal("register fail")
	}
	dp.Deregister(sb)
	mt2, _ := dp.subscribersMap.Load(types)
	// NOTE(review): this assumes Deregister leaves an (empty) *sync.Map in
	// place for the type; if it deleted the key, mt2 would be nil and the
	// assertion below would panic — confirm Deregister's contract.
	s, _ := mt2.(*sync.Map).Load(sb)
	if s != nil {
		t.Fatal("deregister fail")
	}
}
|
package Observer
import (
"sync"
"testing"
"time"
)
// TestFib wires two observers to an event subject and notifies them with
// each Fibonacci value (observer-pattern smoke test; no assertions).
func TestFib(t *testing.T) {
	//for x:= range Fib(10){
	//	fmt.Println(x)
	//}
	n := eventSubject{Observers: sync.Map{}} // e.g. a followed account posting updates
	obs1 := eventObserver{ID: 1, Time: time.Now()}
	obs2 := eventObserver{ID: 2, Time: time.Now()}
	n.AddListener(obs1)
	n.AddListener(obs2)
	for x := range Fib(10) {
		n.Notify(Event{Data: x}) // broadcast each generated value to all listeners
	}
}
|
package scanner
import (
"bytes"
"fmt"
"path/filepath"
"strconv"
"strings"
"unicode/utf8"
"go/token"
"h12.io/gombi/scan"
)
// Scanner mode flags.
const (
	ScanComments    Mode = 1 << iota // return comments as COMMENT tokens
	dontInsertSemis                  // do not automatically insert semicolons - for testing only
)

// newlineValue is the literal used for auto-inserted semicolons.
var newlineValue = []byte{'\n'}
// Scanner tokenizes Go source, tracking line boundaries and the state
// needed for automatic semicolon insertion.
type Scanner struct {
	tokScanner scan.Scanner
	mode       Mode   // scanning mode
	src        []byte // source being scanned (BOM stripped)
	preSemi    bool   // last token may end a statement, so a semicolon may be inserted
	semiPos    int    // position an inserted semicolon would take
	file       *token.File // source file handle
	fileBase   int         // cache of file.Base()
	dir        string      // cache of the directory portion of file.Name()
	lineStart  int         // record the start position of each line
	errScanner scan.Scanner
	ErrorCount int          // number of errors encountered
	err        ErrorHandler // error reporting; or nil
}

// Mode is a bit set controlling scanner behavior.
type Mode uint

// ErrorHandler is called with the position and message of each error.
type ErrorHandler func(pos token.Position, msg string)
// Init prepares the Scanner to tokenize src, which must belong to file
// (their sizes must match or Init panics). Any previous state is reset.
func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
	if file.Size() != len(src) {
		panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
	}
	s.tokScanner = scan.Scanner{Matcher: getTokenMatcher()}
	s.errScanner = scan.Scanner{Matcher: getErrorMatcher()}
	// Strip a leading byte order mark before scanning.
	s.src = skipBOM(src)
	s.tokScanner.SetSource(s.src)
	s.errScanner.SetSource(s.src)
	s.file = file
	s.fileBase = s.file.Base()
	s.dir, _ = filepath.Split(file.Name())
	s.err = err
	s.mode = mode
	s.ErrorCount = 0
	s.preSemi = false
	s.semiPos = 0
}
// skipBOM returns buf with a leading UTF-8 byte order mark (U+FEFF)
// removed, if one is present.
func skipBOM(buf []byte) []byte {
	if r, n := utf8.DecodeRune(buf); n > 0 && r == 0xFEFF {
		return buf[n:]
	}
	return buf
}
// Scan returns the position, token and literal of the next token in the
// source, inserting semicolons where Go's automatic-semicolon rules apply
// (suppressed in dontInsertSemis mode). Comments are returned only when
// ScanComments is set. At end of input it returns token.EOF.
func (s *Scanner) Scan() (token.Pos, token.Token, string) {
	for s.tokScanner.Scan() {
		var val []byte
		t := s.tokScanner.Token()
		//fmt.Println(token.Token(t.ID), t, string(s.src[t.Lo:t.Hi]))
		switch t.ID {
		case tWhitespace:
			// Whitespace only advances the would-be semicolon position.
			s.semiPos = t.Hi + 1
			continue
		case tNewline:
			// A newline after a statement-ending token triggers insertion.
			if s.preSemi {
				s.preSemi = false
				if s.mode&dontInsertSemis == 0 {
					t.ID, t.Lo, val = tSemiColon, s.semiPos, newlineValue
					break
				}
			}
			s.file.AddLine(t.Hi)
			s.lineStart = t.Hi
			continue
		case tIdentifier, tInt, tFloat, tImag, tRune, tString, tReturn, tBreak, tContinue, tFallthrough:
			// Tokens that may legally end a statement: arm semicolon insertion.
			s.preSemi, s.semiPos = true, t.Hi+1
			val = s.src[t.Lo:t.Hi]
		case tRightParen, tRightBrack, tRightBrace, tInc, tDec:
			s.preSemi, s.semiPos = true, t.Hi+1
		case tLineComment, tLineCommentEOF, tLineCommentInfo:
			if s.preSemi {
				s.preSemi = false
				if s.mode&dontInsertSemis == 0 {
					// Emit the pending semicolon first and rescan the
					// comment on the next call.
					s.tokScanner.SetPos(t.Lo)
					t.ID, t.Lo, val = tSemiColon, s.semiPos, newlineValue
					break
				}
			}
			val = s.src[t.Lo:t.Hi]
			if t.ID == tLineCommentInfo && t.Lo == s.lineStart {
				// A //line directive at line start updates position info.
				s.interpretLineComment(val, t.Hi)
			}
			if val[len(val)-1] == '\n' {
				s.file.AddLine(t.Hi)
				s.lineStart = t.Hi
				val = val[:len(val)-1]
			}
			if s.mode&ScanComments == 0 {
				continue
			}
			if t.ID == tLineCommentEOF && t.Hi < len(s.src) {
				t, val = s.handleError(t.Lo, t.Hi)
				break
			}
			t.ID = tComment
			val = stripCR(val)
		case tGeneralCommentML:
			// Multi-line /* */ comment.
			if s.preSemi {
				s.preSemi = false
				if s.mode&dontInsertSemis == 0 {
					s.tokScanner.SetPos(t.Lo)
					t.ID, t.Lo, val = tSemiColon, s.semiPos, newlineValue
					break
				}
			}
			val = s.src[t.Lo:t.Hi]
			// Register every line boundary inside the comment.
			for i, c := range val {
				if c == '\n' {
					s.file.AddLine(t.Lo + i + 1)
				}
			}
			if s.mode&ScanComments == 0 {
				continue
			}
			t.ID = tComment
			val = stripCR(val)
		case tGeneralCommentSL:
			// Single-line /* */ comment: whether a semicolon is inserted
			// depends on what follows it on the same line.
			if s.preSemi {
				s.preSemi = false
				if s.mode&dontInsertSemis == 0 {
					t = t.Copy()
					for s.tokScanner.Scan() {
						nt := s.tokScanner.Token()
						switch nt.ID {
						case tWhitespace, tGeneralCommentSL:
							continue
						case tEOF, tNewline, tLineComment, tLineCommentEOF,
							tLineCommentInfo, tGeneralCommentML, eIncompleteComment:
							// Nothing but trivia until end of line:
							// insert the semicolon before the comment.
							s.tokScanner.SetPos(t.Lo)
							t.ID, t.Lo, val = tSemiColon, s.semiPos, newlineValue
							goto returnSemi
						default:
							// Real code follows: no semicolon needed.
							s.tokScanner.SetPos(t.Hi)
							goto returnComment
						}
					}
				returnSemi:
					break
				}
			}
		returnComment:
			if s.mode&ScanComments == 0 {
				continue
			}
			t.ID = tComment
			val = stripCR(s.src[t.Lo:t.Hi])
		case tInterpretedStringLit:
			s.preSemi, s.semiPos = true, t.Hi+1
			t.ID = tString
			val = s.src[t.Lo:t.Hi]
		case tRawStringLit:
			s.preSemi, s.semiPos = true, t.Hi+1
			t.ID = tString
			val = s.src[t.Lo:t.Hi]
			// Raw strings may span lines; register their line boundaries.
			for i, c := range val {
				if c == '\n' {
					s.file.AddLine(t.Lo + i + 1)
				}
			}
			val = stripCR(val)
		case tEOF:
			// A pending semicolon is still emitted before EOF.
			if s.preSemi {
				s.preSemi = false
				if s.mode&dontInsertSemis == 0 {
					s.tokScanner.SetPos(t.Lo)
					t.ID, t.Lo, val = tSemiColon, s.semiPos, newlineValue
				}
			}
		case tSemiColon:
			s.preSemi = false
			val = s.src[t.Lo:t.Hi]
		case eIncompleteComment:
			if s.preSemi {
				s.preSemi = false
				if s.mode&dontInsertSemis == 0 {
					s.tokScanner.SetPos(t.Lo)
					t.ID, t.Lo, val = tSemiColon, s.semiPos, newlineValue
					break
				}
			}
			t.ID = tComment
			val = s.src[t.Lo:t.Hi]
			s.error(t.Lo, "comment not terminated")
		case eOctalLit:
			t.ID = tInt
			val = s.src[t.Lo:t.Hi]
			s.error(t.Lo, "illegal octal number")
		case eHexLit:
			t.ID = tInt
			val = s.src[t.Lo:t.Hi]
			s.error(t.Lo, "illegal hexadecimal number")
		case eIllegal:
			t, val = s.handleError(t.Lo, t.Hi)
		default:
			// Operators and keywords; operators carry no literal value.
			s.preSemi = false
			if t.ID < firstOp || t.ID > lastOp {
				val = s.src[t.Lo:t.Hi]
			}
		}
		return token.Pos(s.fileBase + t.Lo), token.Token(t.ID), string(val)
	}
	return token.Pos(s.fileBase + len(s.src)), token.EOF, ""
}
// stripCR removes all '\r' bytes from b in place and returns the shortened
// slice (the backing array of b is reused and modified).
func stripCR(b []byte) []byte {
	w := 0
	for _, c := range b {
		if c == '\r' {
			continue
		}
		b[w] = c
		w++
	}
	return b[:w]
}
// interpretLineComment handles a "//line filename:line" directive, updating
// the token.File position table so subsequent positions are reported
// relative to the alternative file and line.
func (s *Scanner) interpretLineComment(text []byte, pos int) {
	// get filename and line number, if any
	if i := bytes.LastIndex(text, []byte{':'}); i > 0 {
		if line, err := strconv.Atoi(strings.TrimSpace(string(text[i+1:]))); err == nil && line > 0 {
			// valid //line filename:line comment
			filename := string(bytes.TrimSpace(text[len(commentInfoPrefix):i]))
			if filename != "" {
				filename = filepath.Clean(filename)
				if !filepath.IsAbs(filename) {
					// make filename relative to current directory
					filename = filepath.Join(s.dir, filename)
				}
			}
			// update scanner position
			s.file.AddLineInfo(pos, filename, line) // +len(text)+1 since comment applies to next line
		}
	}
}
// commentInfoPrefix is the marker introducing //line position directives.
var commentInfoPrefix = []byte("//line ")

// Error messages for malformed input bytes.
const (
	mBOM  = "illegal byte order mark"
	mNUL  = "illegal character NUL"
	mUTF8 = "illegal UTF-8 encoding"
)
// handleError rescans the source at pos with the error matcher to classify
// an illegal token, reports a diagnostic, and returns a best-effort token
// plus its literal so scanning can continue. The main scanner is
// repositioned just past the error token.
func (s *Scanner) handleError(pos, errPos int) (t *scan.Token, val []byte) {
	t = s.errScanner.Token()
	s.errScanner.SetPos(pos)
	s.errScanner.Scan()
	//fmt.Println(t.ID, string(s.src[t.Lo:t.Hi]))
	switch t.ID {
	case eBOM:
		t.ID = eIllegal
		s.error(errPos+1, mBOM)
	case eBOMInComment:
		t.ID = tComment
		s.error(errPos, mBOM)
	case eBOMInRune:
		t.ID = tRune
		s.error(errPos-2, mBOM)
	case eBOMInStr:
		t.ID = tString
		s.error(errPos-2, mBOM)
	case eNUL:
		t.ID = eIllegal
		// bug fix: this case previously reported mBOM ("illegal byte order
		// mark") for a NUL byte; mNUL is the correct message.
		s.error(errPos+1, mNUL)
	case eNULInStr:
		t.ID = tString
		s.error(errPos, mNUL)
	case eUTF8:
		t.ID = eIllegal
		s.error(pos, mUTF8)
	case eUTF8Rune:
		t.ID = tRune
		s.error(errPos, mUTF8)
	case eUTF8Str:
		t.ID = tString
		s.error(errPos, mUTF8)
	case eEscape:
		r := decodeRune(s.src[errPos:])
		t.ID = tRune
		s.error(errPos, fmt.Sprintf("illegal character %#U in escape sequence", r))
	case eBigU:
		t.ID = tRune
		s.error(errPos-1, "escape sequence is invalid Unicode code point")
	case eEscapeUnknown:
		t.ID = tRune
		s.error(errPos, "unknown escape sequence")
	case eIncompleteRune:
		t.ID = tRune
		s.error(pos, "rune literal not terminated")
	case eIncompleteEscape:
		t.ID = tRune
		s.error(errPos, "escape sequence not terminated")
	case eIncompleteStr:
		t.ID = tString
		s.error(pos, "string literal not terminated")
	case eIncompleteRawStr:
		t.ID = tString
		s.error(pos, "raw string literal not terminated")
	case eRune:
		t.ID = tRune
		s.error(pos, "illegal rune literal")
	default:
		// Unclassified byte: consume exactly one character.
		t.ID = eIllegal
		t.Hi = pos + 1
		s.error(pos, fmt.Sprintf("illegal character %#U", decodeRune(s.src[pos:])))
	}
	val = s.src[t.Lo:t.Hi]
	s.tokScanner.SetPos(t.Hi)
	return
}
// error reports msg at byte offset errPos via the installed handler (if
// any) and increments ErrorCount.
func (s *Scanner) error(errPos int, msg string) {
	s.ErrorCount++
	if s.err != nil {
		s.err(s.file.Position(token.Pos(s.fileBase+errPos)), msg)
	}
}
// decodeRune decodes the first UTF-8 rune in bs; on invalid encoding it
// falls back to the raw first byte widened to a rune. bs must be non-empty.
func decodeRune(bs []byte) rune {
	if r, _ := utf8.DecodeRune(bs); r != utf8.RuneError {
		return r
	}
	return rune(bs[0])
}
|
package postgres
import (
"github.com/google/uuid"
"github.com/neuronlabs/neuron-core/config"
"github.com/neuronlabs/neuron-core/repository"
"github.com/neuronlabs/neuron-postgres/internal"
"github.com/neuronlabs/neuron-postgres/log"
)
// Compile-time check that Factory implements repository.Factory.
var _ repository.Factory = &Factory{}

// Factory is the pq.Postgres factory.
type Factory struct {
	transactionContainer *transactionsContainer // shared transaction registry
}

// newFactory returns a Factory with a fresh transaction container.
func newFactory() *Factory {
	return &Factory{
		transactionContainer: newTXContainer(),
	}
}

// DriverName gets the Factory repository name.
// Implements repository.Postgres interface.
func (f *Factory) DriverName() string {
	return FactoryName
}

// New creates new PQ repository for the provided config 'cfg'.
func (f *Factory) New(cfg *config.Repository) (repository.Repository, error) {
	repoConfig, err := internal.RepositoryConfig(cfg)
	if err != nil {
		log.Debugf("Getting postgres repository config failed: %v", err)
		return nil, err
	}
	r := &Postgres{
		id:         uuid.New(),
		Config:     repoConfig,
		f:          f,
		coreConfig: cfg,
	}
	// Bind the repository to its transaction map in the shared container.
	id := f.transactionContainer.txMapID(cfg)
	r.txMapID = id
	return r, nil
}
|
package sc
import (
"context"
"fmt"
"net"
"time"
)
// Container tracks the DNS resolution state of a named container.
type Container struct {
	Name          string   // display name
	Hostname      string   // hostname to resolve
	Active        bool     // true when the last lookup returned usable IPs
	IPChange      bool     // true when the last lookup changed the address set
	Addresses     []net.IP // addresses from the most recent lookup
	LastAddresses []net.IP // addresses from the previous lookup
	Verbose       bool     // enables Print output
}

// New returns a Container for the given name and hostname, initially
// inactive with empty address lists.
func New(name, hostname string, verbose bool) Container {
	// Named fields (instead of positional values) keep this literal
	// correct if Container's fields are ever reordered or extended.
	return Container{
		Name:          name,
		Hostname:      hostname,
		Active:        false,
		IPChange:      false,
		Addresses:     []net.IP{},
		LastAddresses: []net.IP{},
		Verbose:       verbose,
	}
}
// Lookup resolves c.Hostname against the Docker embedded DNS server
// (127.0.0.11:53) and updates Active, Addresses, IPChange and
// LastAddresses accordingly.
func (c *Container) Lookup() {
	c.Active = false
	c.Addresses = []net.IP{}
	r := &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
			d := net.Dialer{
				Timeout: time.Millisecond * time.Duration(10000),
			}
			return d.DialContext(ctx, network, "127.0.0.11:53")
		},
	}
	ips, err := r.LookupIP(context.Background(), "ip4", c.Hostname)
	// bug fix: the old condition chained with "||", which marked the
	// container active whenever err == nil — even with zero addresses —
	// and made the remaining clauses unreachable. Active now requires a
	// successful lookup with at least one address that is not the
	// 0.0.0.0 placeholder.
	if err == nil && len(ips) > 0 &&
		!(len(ips) == 1 && ips[0].String() == "0.0.0.0") {
		c.Active = true
		c.Addresses = ips
	}
	c.IPChange = !Equal(c.LastAddresses, c.Addresses)
	if c.IPChange {
		c.LastAddresses = c.Addresses
	}
}
func Equal(last, current []net.IP) bool {
if len(last) != len(current) {
return false
} else {
for _, i := range current {
if !Contains(last, i) {
return false
}
}
return true
}
}
func Contains(ia []net.IP, ci net.IP) bool {
for _, i := range ia {
if i.String() == ci.String() {
return true
}
}
return false
}
// Print writes a human-readable summary of the container to stdout when
// verbose mode is on; IP addresses are listed only for active containers.
func (c Container) Print() {
	if c.Verbose {
		fmt.Println("-", c.Name)
		fmt.Println("| - Hostname:", c.Hostname)
		fmt.Println("| - Active:", c.Active)
		if c.Active {
			fmt.Println("| - IPs:")
			for _, ip := range c.Addresses {
				fmt.Println("| | ", ip.String())
			}
		}
	}
}
|
package master
import (
"github.com/OHopiak/fractal-load-balancer/core"
"github.com/labstack/echo/v4"
"net/http"
)
// RegisterWorker adds a worker reachable at ip:request.Port to the pool and
// returns an OK response carrying its assigned ID, or a FAILED status when
// the worker cannot be added.
func (m *Master) RegisterWorker(request *core.RegisterWorkerRequest, ip string) *core.RegisterWorkerResponse {
	worker, err := m.AddWorker(core.Host{
		IP:   ip,
		Port: request.Port,
	})
	if err != nil {
		// NOTE(review): the underlying error is dropped here — consider
		// logging it before returning the generic FAILED status.
		return &core.RegisterWorkerResponse{
			Status: "FAILED",
		}
	}
	return &core.RegisterWorkerResponse{
		Status:   "OK",
		WorkerId: worker.ID,
	}
}
// registerWorkerHandler is the HTTP endpoint wrapping RegisterWorker: it
// binds the request body and replies 201 Created with the registration
// result, using the request's real client IP as the worker address.
func (m *Master) registerWorkerHandler(c echo.Context) error {
	request := new(core.RegisterWorkerRequest)
	err := c.Bind(request)
	if err != nil {
		return err
	}
	return c.JSON(http.StatusCreated, m.RegisterWorker(request, c.RealIP()))
}
|
// defer,panic,recover使用
package main
import "fmt"
// main demonstrates defer/panic/recover behavior. The comments below show
// the output of each variant.
func main() {
	// Calling panicDefer() would print:
	// a
	// c
	// d
	// panic: 55
	// goroutine 1 [running]:
	// main.f()
	// /donnol/Project/Golang/src/jdscript.com/day_test/2017_02_23/main.go:40 +0xd1
	// main.panicDefer()
	// /donnol/Project/Golang/src/jdscript.com/day_test/2017_02_23/main.go:24 +0x3e
	// main.main()
	// /donnol/Project/Golang/src/jdscript.com/day_test/2017_02_23/main.go:7 +0x20
	// panicDefer() // the defer runs, but with no recover inside it the panic still propagates
	// panicDeferRecover prints:
	// a
	// c
	// 55
	// d
	// h
	panicDeferRecover() // its defer calls recover, so the program keeps running
	goOn()
}
// goOn prints "h" to show that execution resumed after a recovered panic.
func goOn() {
	fmt.Println("h")
}
// panicDefer calls f (which panics); its defer runs but does not call
// recover, so the panic propagates to the caller.
func panicDefer() {
	defer func() { // the defer must be registered before the panic to run at all
		fmt.Println("c")
		// if err:=recover();err!=nil{
		//	fmt.Println(err) // err is the value passed to panic, i.e. 55
		// }
		fmt.Println("d")
	}()
	f()
}
// panicDeferRecover calls f (which panics) but its defer invokes recover,
// so the panic is swallowed and the caller continues normally.
func panicDeferRecover() {
	defer func() { // the defer must be registered before the panic to run at all
		fmt.Println("c")
		if err := recover(); err != nil {
			fmt.Println(err) // err is the value passed to panic, i.e. 55
		}
		fmt.Println("d")
	}()
	f()
}
// f prints "a" and then panics with 55; the trailing prints are
// deliberately unreachable.
func f() {
	fmt.Println("a")
	panic(55)
	fmt.Println("b")
	fmt.Println("f")
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//6. ZigZag Conversion
//The string "PAYPALISHIRING" is written in a zigzag pattern on a given number of rows like this: (you may want to display this pattern in a fixed font for better legibility)
//P A H N
//A P L S I I G
//Y I R
//And then read line by line: "PAHNAPLSIIGYIR"
//Write the code that will take a string and make this conversion given a number of rows:
//string convert(string s, int numRows);
//Example 1:
//Input: s = "PAYPALISHIRING", numRows = 3
//Output: "PAHNAPLSIIGYIR"
//Example 2:
//Input: s = "PAYPALISHIRING", numRows = 4
//Output: "PINALSIGYAHRPI"
//Explanation:
//P I N
//A L S I G
//Y A H R
//P I
//func convert(s string, numRows int) string {
//}
// Time Is Money
|
// Copyright (c) 2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"io/ioutil"
"log"
"math/rand"
"os"
"os/exec"
"path/filepath"
"runtime"
"sync"
"time"
rpc "github.com/conformal/btcrpcclient"
"github.com/conformal/btcutil"
)
// ChainServer describes the arguments necessary to connect a btcwallet
// instance to a btcd websocket RPC server.
type ChainServer struct {
	connect  string // host:port of the RPC server
	user     string // RPC username
	pass     string // RPC password
	certPath string // path to the TLS certificate file
	keyPath  string // path to the TLS key file
	cert     []byte // raw certificate bytes read from certPath
}
// For now, hardcode a single already-running btcd connection that is used for
// each actor. This should be changed to start a new btcd with the --simnet
// flag, and each actor can connect to the spawned btcd process.
var defaultChainServer = ChainServer{
	connect: "localhost:18556", // local simnet btcd
	user:    "rpcuser",
	pass:    "rpcpass",
}
// Communication is consisted of the necessary primitives used
// for communication between the main goroutine and actors.
type Communication struct {
	upstream   chan btcutil.Address // addresses flowing from actors to main
	downstream chan btcutil.Address // addresses relayed back to actors
	stop       chan struct{}        // signaled to end the main relay loop
}

// connRetry is the number of attempts made to reach the btcd RPC server.
const connRetry = 15
// main spawns a local simnet btcd, connects an RPC client (with retries
// while btcd boots), starts a set of actors and a miner, and relays
// addresses between them until a stop signal arrives. Spawned processes
// are torn down on exit or panic.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	rand.Seed(int64(time.Now().Nanosecond()))
	var wg sync.WaitGroup
	// Number of actors
	var actorsAmount = 1
	actors := make([]*Actor, 0, actorsAmount)
	com := Communication{
		upstream:   make(chan btcutil.Address, actorsAmount),
		downstream: make(chan btcutil.Address, actorsAmount),
		stop:       make(chan struct{}, actorsAmount),
	}
	// TLS material for the RPC connection lives in the btcd home dir.
	btcdHomeDir := btcutil.AppDataDir("btcd", false)
	defaultChainServer.certPath = filepath.Join(btcdHomeDir, "rpc.cert")
	defaultChainServer.keyPath = filepath.Join(btcdHomeDir, "rpc.key")
	cert, err := ioutil.ReadFile(defaultChainServer.certPath)
	if err != nil {
		log.Fatalf("Cannot read certificate: %v", err)
	}
	defaultChainServer.cert = cert
	btcdArgs := []string{
		"--simnet",
		"-u" + defaultChainServer.user,
		"-P" + defaultChainServer.pass,
		"--rpccert=" + defaultChainServer.certPath,
		"--rpckey=" + defaultChainServer.keyPath,
		"--profile=",
	}
	log.Println("Starting btcd on simnet...")
	btcd := exec.Command("btcd", btcdArgs...)
	if err := btcd.Start(); err != nil {
		log.Fatalf("Couldn't start btcd: %v", err)
	}
	// Create and start RPC client, retrying with a linearly growing
	// backoff while btcd comes up.
	rpcConf := rpc.ConnConfig{
		Host:         defaultChainServer.connect,
		Endpoint:     "ws",
		User:         defaultChainServer.user,
		Pass:         defaultChainServer.pass,
		Certificates: defaultChainServer.cert,
	}
	var client *rpc.Client
	for i := 0; i < connRetry; i++ {
		if client, err = rpc.New(&rpcConf, nil); err != nil {
			time.Sleep(time.Duration(i) * 50 * time.Millisecond)
			continue
		}
		break
	}
	if client == nil {
		log.Printf("Cannot start btcd rpc client: %v", err)
		Kill(actors, btcd, wg)
		return
	}
	// If we panic somewhere, at least try to stop the spawned wallet
	// processes.
	defer func() {
		if r := recover(); r != nil {
			log.Println("Panic! Shuting down actors...")
			for _, a := range actors {
				func() {
					// Ignore any other panics that may
					// occur during panic handling.
					defer recover()
					a.Stop()
					a.Cleanup()
				}()
			}
			panic(r)
		}
	}()
	// Create actors.
	for i := 0; i < actorsAmount; i++ {
		a, err := NewActor(&defaultChainServer, uint16(18557+i))
		if err != nil {
			// NOTE(review): if NewActor fails, a may be nil and
			// a.args.port below would panic — confirm NewActor's contract.
			log.Printf("Cannot create actor on %s: %v", "localhost:"+a.args.port, err)
			continue
		}
		actors = append(actors, a)
	}
	// Start actors.
	for _, a := range actors {
		go func(a *Actor, com Communication) {
			if err := a.Start(os.Stderr, os.Stdout, com); err != nil {
				log.Printf("Cannot start actor on %s: %v", "localhost:"+a.args.port, err)
				// TODO: reslice actors when one actor cannot start
			}
		}(a, com)
	}
	// Each actor reports one address before mining can begin.
	addressTable := make([]btcutil.Address, actorsAmount)
	for i := 0; i < actorsAmount; i++ {
		addressTable[i] = <-com.upstream
	}
	// Start mining.
	miner, err := NewMiner(addressTable, com.stop)
	if err != nil && miner == nil { // Miner didn't start at all
		Kill(actors, btcd, wg)
		return
	} else if err != nil && miner != nil { // Miner started so we have to shut it down
		miner.Shutdown()
		Kill(actors, btcd, wg)
		return
	}
	// Add mining btcd listen interface as a node
	client.AddNode("localhost:18550", rpc.ANAdd)
	// Relay addresses between actors until a stop signal is received.
out:
	for {
		select {
		case addr := <-com.upstream:
			com.downstream <- addr
		case <-com.stop:
			break out
		}
	}
	// TODO: Collect statistics from the blockchain
	log.Println("Time to die")
	// Shutdown miner.
	miner.Shutdown()
	// Kill actors and initial btcd instance.
	Kill(actors, btcd, wg)
}
// Kill shuts down actors and the initial btcd process.
// NOTE(review): wg is a sync.WaitGroup passed by value, so each call
// operates on a private copy (go vet's copylocks flags this). It happens to
// work because Add/Done/Wait all occur on the same copy inside this
// function, but the parameter should become *sync.WaitGroup — that change
// requires updating every caller, so it is only noted here.
func Kill(actors []*Actor, btcd *exec.Cmd, wg sync.WaitGroup) {
	// Kill initial btcd instance.
	if err := btcd.Process.Kill(); err != nil {
		log.Printf("Cannot kill initial btcd process: %v", err)
	}
	btcd.Wait()
	// Stop and clean up every actor concurrently.
	for _, a := range actors {
		wg.Add(1)
		go func(a *Actor) {
			defer wg.Done()
			if err := a.Stop(); err != nil {
				log.Printf("Cannot stop actor on %s: %v", "localhost:"+a.args.port, err)
				return
			}
			if err := a.Cleanup(); err != nil {
				log.Printf("Cannot cleanup actor on %s directory: %v", "localhost:"+a.args.port, err)
				return
			}
			log.Printf("Actor on %s shutdown successfully", "localhost:"+a.args.port)
		}(a)
	}
	wg.Wait()
}
|
package command
import (
"context"
"time"
constant "github.com/angryronald/guestlist/internal/guest"
"github.com/angryronald/guestlist/internal/guest/domain/service/guest"
"github.com/angryronald/guestlist/internal/guest/public"
)
// GuestArrivedCommand encapsulate process for guest arrives in Command
type GuestArrivedCommand struct {
	service guest.ServiceInterface // domain service used for all guest operations
}

// NewGuestArrivedCommand build an Command for guest arrives
func NewGuestArrivedCommand(
	service guest.ServiceInterface,
) GuestArrivedCommand {
	return GuestArrivedCommand{
		service: service,
	}
}
// Execute marks a guest as arrived. It rejects guests that have already
// arrived, and refuses entry when the party brings more accompanying
// guests than reserved or than the available space allows. On success the
// actual party size and arrival time (UTC) are persisted.
func (c GuestArrivedCommand) Execute(ctx context.Context, request interface{}) (*public.GuestArrivedResponse, error) {
	// NOTE(review): this type assertion panics if request is not a
	// public.GuestArrivedRequest — confirm all callers pass that type.
	payload := request.(public.GuestArrivedRequest)
	guest, err := c.service.GetGuestByName(ctx, payload.Name)
	if err != nil {
		return nil, err
	}
	// A non-nil arrival time means the guest was already checked in.
	if guest.TimeArrived != nil {
		return nil, constant.ErrAlreadyArrived
	}
	totalAvailableSpace, err := c.service.GetAvailableSpace(ctx)
	if err != nil {
		return nil, err
	}
	// NOTE(review): the space check counts accompanying guests only; it is
	// unclear whether the arriving guest themselves should also consume a
	// seat — confirm the business rule.
	isAbleToProceed := true
	if payload.AccompanyingGuests > guest.AccompanyingGuests || totalAvailableSpace < payload.AccompanyingGuests {
		isAbleToProceed = false
	}
	if !isAbleToProceed {
		return nil, constant.ErrInsufficientSpace
	}
	// Record the party size actually brought (may differ from reservation).
	guest.ActualAccompanyingGuests = guest.AccompanyingGuests
	if guest.AccompanyingGuests != payload.AccompanyingGuests {
		guest.ActualAccompanyingGuests = payload.AccompanyingGuests
	}
	now := time.Now().UTC()
	guest.TimeArrived = &now
	guest, err = c.service.UpdateGuest(ctx, guest)
	if err != nil {
		return nil, err
	}
	return &public.GuestArrivedResponse{
		Name: guest.Name,
	}, nil
}
|
//author xinbing
//time 2018/9/4 15:42
package utilities
import (
"fmt"
"testing"
)
// TestGetRandomNumStr prints sample outputs of the random-string helpers
// (smoke test only; no assertions).
func TestGetRandomNumStr(t *testing.T) {
	fmt.Println(GetRandomStr(32))
	fmt.Println(GetRandomNumStr(32))
}
|
package melee
import (
"encoding/csv"
"github.com/realm/realm-server/items"
"github.com/realm/realm-server/items/weapons"
)
// resolveGrip resolves a given string to an EGrip.
// Unknown strings yield the zero EGrip (map miss).
func resolveGrip(str string) EGrip {
	return grips[str]
}
// ParseCSV parses a csv file into an array of melee items.
// Rows whose second column is empty act as section markers: they switch the
// current weapon class or grip, which then applies to the item rows that
// follow. Item-row columns: name, dmg(S), dmg(M), crit, damage type, item
// type, weight, price, description.
func ParseCSV(reader *csv.Reader) ([]Melee, error) {
	var weaponClass weapons.EWeaponClass
	var grip EGrip
	ret := []Melee{}
	rows, err := reader.ReadAll()
	if err != nil {
		return ret, err
	}
	for _, row := range rows[1:] { // rows[0] is the header
		if len(row[1]) == 0 {
			// Marker row: either a weapon-class or a grip header.
			if tempWC := weapons.ResolveWeaponClass(row[0]); len(tempWC) > 0 {
				weaponClass = tempWC
			} else if tempGrip := resolveGrip(row[0]); len(tempGrip) > 0 {
				grip = tempGrip
			}
		} else {
			ret = append(ret, Melee{
				Item: items.Item{
					Name:   row[0],
					Class:  string(weaponClass),
					Type:   row[5],
					Price:  items.ParseCurrencyFromString(row[7]),
					Weight: items.ParseWeightFromString(row[6]),
					Desc:   row[8],
				},
				DmgS:    items.ParseDieRollFromString(row[1]),
				DmgM:    items.ParseDieRollFromString(row[2]),
				DmgType: weapons.ResolveDamageType(row[4]),
				Crit:    items.ParseMultiplierFromString(row[3]),
				Grip:    grip,
			})
		}
	}
	return ret, nil
}
|
package tunnel
import (
"bytes"
"crypto/sha1"
"encoding/binary"
"fmt"
"io"
"net"
)
// sendMessage writes one framed message to conn.
//
// Wire format: 0x01 head byte, one flag byte, one has-data byte; when data is
// present the has-data byte is 1 and is followed by a little-endian uint32
// length and the payload itself.
//
// Fix: renamed the local "len", which shadowed the builtin.
func sendMessage(conn net.Conn, flag Flag, data []byte) (err error) {
	buf := bytes.NewBuffer(nil)
	buf.WriteByte(1) // protocol head marker
	buf.WriteByte(byte(flag))
	dl := uint32(len(data))
	if dl > 0 {
		lenBytes := make([]byte, 4)
		binary.LittleEndian.PutUint32(lenBytes, dl)
		buf.WriteByte(1) // has-data flag
		buf.Write(lenBytes)
		buf.Write(data)
	} else {
		buf.WriteByte(0) // no payload follows
	}
	// Writes to bytes.Buffer never fail, so the only error source is conn.
	_, err = io.Copy(conn, buf)
	return
}
// parseMessage reads one framed message (see sendMessage) from conn and
// returns its flag and optional payload.
//
// Fix: the original used conn.Read for fixed-size fields; on a stream Read
// may return fewer bytes than requested, silently corrupting the parse.
// io.ReadFull guarantees each field is read completely or an error returned.
// Also removed the shadowed locals "len" (builtin) and inner "buf".
func parseMessage(conn net.Conn) (flag Flag, data []byte, err error) {
	// check head
	head := make([]byte, 1)
	if _, err = io.ReadFull(conn, head); err != nil {
		return
	}
	if head[0] != 1 {
		err = fmt.Errorf("invalid head")
		return
	}
	// parse flag byte and has-data byte
	fh := make([]byte, 2)
	if _, err = io.ReadFull(conn, fh); err != nil {
		return
	}
	flag = Flag(fh[0])
	// parse data when the has-data byte is set
	if fh[1] == 1 {
		// 4-byte little-endian payload length.
		sizeBytes := make([]byte, 4)
		if _, err = io.ReadFull(conn, sizeBytes); err != nil {
			return
		}
		size := binary.LittleEndian.Uint32(sizeBytes)
		payload := bytes.NewBuffer(nil)
		if _, err = io.CopyN(payload, conn, int64(size)); err != nil {
			return
		}
		data = payload.Bytes()
	}
	return
}
func genSecret(password string) []byte {
h := sha1.New()
h.Write([]byte("gox.tunnel"))
h.Write([]byte(password))
return h.Sum(nil)
}
|
package server
import (
"log"
"net"
"orm/ormpb"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
// Server wraps a grpc.Server so callers can start and stop it.
type Server struct {
	// s is the underlying gRPC server instance.
	s *grpc.Server
}
// UserBoilerOrm implements ormpb.UserServiceServer by embedding the
// generated server type.
type UserBoilerOrm struct {
	ormpb.UserServiceServer
}
// New builds a gRPC server with the user service registered and, optionally,
// server reflection enabled.
//
// Fix: the reflection condition was inverted — the original registered
// reflection (and logged "gRPC reflection enabled") only when useReflection
// was false, contradicting both the parameter name and the log message.
func New(useReflection bool) *Server {
	s := grpc.NewServer()
	ormpb.RegisterUserServiceServer(s, &UserBoilerOrm{})
	//ormpb.RegisterBoilerServer(s, &BoilerOrm{})
	if useReflection {
		reflection.Register(s)
		log.Println("gRPC reflection enabled")
	}
	return &Server{
		s: s,
	}
}
// GetServer returns an empty Server value.
//
// NOTE(review): the returned Server has a nil inner grpc.Server, so calling
// Serve or Stop on it will panic; confirm whether callers are expected to
// use New instead.
func GetServer() *Server {
	return &Server{}
}
// Serve starts serving gRPC traffic on l in a background goroutine and
// returns the receiver for chaining. Serve errors are only logged.
func (s *Server) Serve(l net.Listener) *Server {
	log.Println("works as a gRPC server")
	go func() {
		err := s.s.Serve(l)
		if err != nil {
			log.Println(err)
		}
	}()
	return s
}
// Stop gracefully shuts the gRPC server down, waiting for in-flight RPCs to
// finish. It always returns nil.
func (s *Server) Stop() error {
	s.s.GracefulStop()
	return nil
}
|
package web
import (
"fmt"
"github.com/dgrijalva/jwt-go"
fiber "github.com/gofiber/fiber/v2"
jwtware "github.com/gofiber/jwt/v2"
"github.com/google/uuid"
"github.com/iamtraining/forum/entity"
)
// SessionData bundles the per-request state handed to templates.
type SessionData struct {
	// Form carries page-specific form data for template rendering.
	Form interface{}
	// User is the user extracted from the JWT cookie (zero when not logged in).
	User entity.User
	// LoggedIn reports whether a valid token was found for this request.
	LoggedIn bool
}
// Extract reads the "forum-Token" JWT cookie, validates it and loads the
// matching user from the store.
//
// Fixes: the store lookup error was silently discarded (a failed lookup
// returned an empty user with a nil error), a missing or non-string "id"
// claim caused a panic, and a leftover debug print was removed.
//
// NOTE(review): the signing key is hardcoded and duplicated in Protect;
// it should come from configuration so it can be rotated without a rebuild.
func (h *Handler) Extract(c *fiber.Ctx) (entity.User, error) {
	t := c.Cookies("forum-Token")
	claims := jwt.MapClaims{}
	_, err := jwt.ParseWithClaims(t, claims, func(token *jwt.Token) (interface{}, error) {
		return []byte("SECRET_KEY"), nil
	})
	if err != nil {
		return entity.User{}, err
	}
	// Expired or otherwise invalid claims: clear the stale cookie.
	if err = claims.Valid(); err != nil {
		DeleteToken(c)
		return entity.User{}, err
	}
	rawID, ok := claims["id"].(string)
	if !ok {
		DeleteToken(c)
		return entity.User{}, fmt.Errorf("missing id claim")
	}
	id, err := uuid.Parse(rawID)
	if err != nil {
		DeleteToken(c)
		return entity.User{}, err
	}
	if id == uuid.Nil {
		DeleteToken(c)
		return entity.User{}, fmt.Errorf("null uuid")
	}
	user, err := h.store.User(id)
	if err != nil {
		return entity.User{}, err
	}
	return user, nil
}
/*
func (h *Handler) Restricted(c *fiber.Ctx) error {
local := c.Locals("user").(*jwt.Token)
claims := local.Claims.(jwt.MapClaims)
id := claims["id"].(uuid.UUID)
user, _ := h.store.User(id)
fmt.Println("extract", user)
}
*/
// Protect returns JWT middleware that rejects requests lacking a token
// signed with the shared key.
//
// NOTE(review): the signing key is hardcoded and duplicated in Extract;
// it should be sourced from configuration.
func Protect() fiber.Handler {
	return jwtware.New(jwtware.Config{
		//ErrorHandler: err,
		SigningKey: []byte("SECRET_KEY"),
	})
}
// err maps JWT middleware failures to 401 responses, distinguishing a
// missing/malformed token from an invalid or expired one.
func err(c *fiber.Ctx, err error) error {
	msg := "Invalid or expired JWT"
	if err.Error() == "Missing or malformed JWT" {
		msg = "Missing or malformed JWT"
	}
	return c.
		Status(fiber.StatusUnauthorized).
		JSON(fiber.Map{"message": msg})
}
// GetSessionData builds the template session state for the current request:
// the user decoded from the JWT cookie (when present) and an empty form map.
//
// Fix: removed a leftover debug print of the username and collapsed the
// if/else into a direct boolean assignment.
func (h *Handler) GetSessionData(c *fiber.Ctx) SessionData {
	var data SessionData
	var err error
	data.User, err = h.Extract(c)
	data.LoggedIn = err == nil
	if data.Form == nil {
		data.Form = map[string]string{}
	}
	return data
}
// Logout clears the cookies for the current request, ending the session.
//
// Fix: dropped the redundant bare return at the end of the void function.
// NOTE(review): ClearCookie with no arguments expires every cookie on the
// request — confirm that only the auth cookie should be removed.
func Logout(c *fiber.Ctx) {
	c.ClearCookie()
}
|
package main
import (
"fmt"
"github.com/Wan-Mi/RPCDemos/thriftDemo/hello"
"net"
"os"
"git.apache.org/thrift.git/lib/go/thrift"
)
// main runs a demo thrift client: it dials the local HelloService on
// 127.0.0.1:8988 over a buffered transport with the compact protocol,
// invokes SayHello once and prints the response.
func main() {
	transportFactory := thrift.NewTBufferedTransportFactory(8192)
	protocolFactory := thrift.NewTCompactProtocolFactory()

	sock, err := thrift.NewTSocket(net.JoinHostPort("127.0.0.1", "8988"))
	if err != nil {
		fmt.Fprintln(os.Stderr, "error resolving address:", err)
		os.Exit(1)
	}

	buffered := transportFactory.GetTransport(sock)
	client := hello.NewHelloServiceClientFactory(buffered, protocolFactory)

	if err := sock.Open(); err != nil {
		fmt.Fprintln(os.Stderr, "Error opening socket to 127.0.0.1:8988", " ", err)
		os.Exit(1)
	}
	defer sock.Close()

	reply, err := client.SayHello("jack", 2)
	if err != nil {
		fmt.Println("Hello failed:", err)
		return
	}
	fmt.Println("response: ", reply)
	fmt.Println("well done")
}
|
package factoryreset
import (
"errors"
"os"
"github.com/rancher-sandbox/rancher-desktop/src/go/privileged-service/pkg/manage"
"github.com/sirupsen/logrus"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc"
)
const svcName = "RancherDesktopPrivilegedService"
// stopPrivilegedService will stop the Rancher Desktop privileged service if it
// is running.
//
// Errors that mean "there was nothing to stop" — service not active, access
// denied, service not installed, or the control request timing out — are
// ignored so factory reset can proceed; anything else is returned.
func stopPrivilegedService() error {
	err := manage.ControlService(svcName, svc.Stop, svc.Stopped)
	if err == nil {
		logrus.Tracef("successfully stopped %s", svcName)
		return nil
	}
	// Consolidated from four copy-pasted errors.Is blocks.
	ignorable := []error{
		windows.ERROR_SERVICE_NOT_ACTIVE,
		windows.ERROR_ACCESS_DENIED,
		windows.ERROR_SERVICE_DOES_NOT_EXIST,
		os.ErrDeadlineExceeded,
	}
	for _, target := range ignorable {
		if errors.Is(err, target) {
			logrus.Tracef("ignoring failure to stop %s: %s", svcName, err)
			return nil
		}
	}
	return err
}
|
// Copyright © 2021 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package best
import (
"context"
"testing"
"time"
eth2client "github.com/attestantio/go-eth2-client"
"github.com/attestantio/go-eth2-client/spec/altair"
"github.com/attestantio/vouch/mock"
"github.com/prysmaticlabs/go-bitfield"
"github.com/rs/zerolog"
"github.com/stretchr/testify/require"
)
// populatedBitvector creates a Bitvector128 with its first `set` bits enabled
// (the original comment said "bitlist"; the return type is a bitvector).
func populatedBitvector(set uint64) bitfield.Bitvector128 {
	res := bitfield.NewBitvector128()
	for bit := uint64(0); bit < set; bit++ {
		res.SetBitAt(bit, true)
	}
	return res
}
// TestScore verifies scoreSyncCommitteeContribution: the score equals the
// number of set aggregation bits, with nil and empty contributions scoring 0.
func TestScore(t *testing.T) {
	ctx := context.Background()
	// Build a scorer with a single mocked contribution provider.
	s, err := New(ctx,
		WithLogLevel(zerolog.Disabled),
		WithTimeout(2*time.Second),
		WithSyncCommitteeContributionProviders(map[string]eth2client.SyncCommitteeContributionProvider{
			"good": mock.NewSyncCommitteeContributionProvider(),
		}),
	)
	require.NoError(t, err)
	tests := []struct {
		name         string
		contribution *altair.SyncCommitteeContribution
		score        float64
	}{
		{
			// Nil contribution must not panic and scores zero.
			name:  "Nil",
			score: 0,
		},
		{
			// No aggregation bits set.
			name: "Empty",
			contribution: &altair.SyncCommitteeContribution{
				Slot:              1,
				SubcommitteeIndex: 2,
				AggregationBits:   populatedBitvector(0),
			},
			score: 0,
		},
		{
			// All 128 bits set: score equals the full bit count.
			name: "Full",
			contribution: &altair.SyncCommitteeContribution{
				Slot:              1,
				SubcommitteeIndex: 2,
				AggregationBits:   populatedBitvector(128),
			},
			score: 128,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			score := s.scoreSyncCommitteeContribution(ctx, "test", test.contribution)
			require.Equal(t, test.score, score)
		})
	}
}
|
// Experiment: fetch top headlines from newsapi.org (note: the request below uses the "health" category, not business as originally stated).
package main
import (
"io/ioutil"
//"log"
"net/http"
"NewsAPISPE/consume/models"
"encoding/json"
"fmt"
"database/sql"
_ "github.com/go-sql-driver/mysql"
)
// main fetches the current top health headlines for Indonesia from
// newsapi.org, prints every article and stores new ones in the database.
//
// Fix: the json.Unmarshal error was silently ignored; it is now checked like
// every other failure path in this file.
//
// NOTE(review): the API key is embedded in the URL; move it to configuration
// and revoke this one — it is exposed in source control.
func main() {
	response, err := http.Get("https://newsapi.org/v2/top-headlines?apikey=6bc3cbc8dcf3473fb2527028734aedee&country=id&category=health")
	if err != nil {
		panic(err.Error())
	}
	defer response.Body.Close()

	responseData, err := ioutil.ReadAll(response.Body)
	if err != nil {
		panic(err.Error())
	}

	var responseObjek models.Response
	if err := json.Unmarshal(responseData, &responseObjek); err != nil {
		panic(err.Error())
	}

	fmt.Println(responseObjek.Totalresult)
	fmt.Println(len(responseObjek.Data))
	//fmt.Println(responseObjek.Data)
	for i := 0; i < len(responseObjek.Data); i++ {
		fmt.Println("Source Id ", i, " : ", responseObjek.Data[i].Source.Id)
		fmt.Println("Source Name ", i, " : ", responseObjek.Data[i].Source.Name)
		fmt.Println("Author ", i, " : ", responseObjek.Data[i].Author)
		fmt.Println("Title ", i, " : ", responseObjek.Data[i].Title)
		fmt.Println("Description ", i, " : ", responseObjek.Data[i].Description)
		fmt.Println("Url ", i, " : ", responseObjek.Data[i].Url)
		fmt.Println("UrlToImage ", i, " : ", responseObjek.Data[i].UrlToImage)
		fmt.Println("PublishedAt ", i, " : ", responseObjek.Data[i].PublishedAt)
		fmt.Println("Content ", i, " : ", responseObjek.Data[i].Content)
		fmt.Println("----------------------------------------")
	}
	Insert(responseObjek)
}
// dbConn opens a connection pool to the local "spenews" MySQL database,
// panicking when the driver rejects the DSN.
//
// NOTE(review): credentials are hardcoded (root, empty password); they
// should come from the environment.
func dbConn() (db *sql.DB) {
	const (
		dbDriver = "mysql"
		dbUser   = "root"
		dbPass   = ""
		dbName   = "spenews"
	)
	conn, err := sql.Open(dbDriver, dbUser+":"+dbPass+"@/"+dbName)
	if err != nil {
		panic(err.Error())
	}
	return conn
}
// Insert stores each article from responseObjek into the "health" table,
// skipping titles that are already present.
//
// Fixes: register db.Close via defer at the top instead of last; close each
// prepared statement (the original leaked one per inserted row); and check
// the Exec error instead of discarding it. (Comments translated to English.)
func Insert(responseObjek models.Response) {
	db := dbConn()
	defer db.Close()
	for i := 0; i < len(responseObjek.Data); i++ {
		sourceid := responseObjek.Data[i].Source.Id
		sourcename := responseObjek.Data[i].Source.Name
		author := responseObjek.Data[i].Author
		title := responseObjek.Data[i].Title
		description := responseObjek.Data[i].Description
		url := responseObjek.Data[i].Url
		UrlToImage := responseObjek.Data[i].UrlToImage
		publishedAt := responseObjek.Data[i].PublishedAt
		content := responseObjek.Data[i].Content
		// Check whether an article with this title is already stored.
		var exists bool
		row := db.QueryRow("SELECT EXISTS(SELECT title FROM health WHERE title = ?)", title)
		if err := row.Scan(&exists); err != nil {
			panic(err.Error())
		} else if !exists {
			// Not stored yet: insert it.
			insForm, err := db.Prepare("INSERT INTO health(sourceid, sourcename, author, title, description, url, UrlToImage, publishedAt, content) VALUES(?,?,?,?,?,?,?,?,?)")
			if err != nil {
				panic(err.Error())
			}
			_, execErr := insForm.Exec(sourceid, sourcename, author, title, description, url, UrlToImage, publishedAt, content)
			insForm.Close()
			if execErr != nil {
				panic(execErr.Error())
			}
		}
	}
}
|
package tls
import (
"crypto/x509"
"crypto/x509/pkix"
"net"
"github.com/pkg/errors"
"github.com/openshift/installer/pkg/asset"
"github.com/openshift/installer/pkg/asset/installconfig"
)
// KubeAPIServerToKubeletSignerCertKey is a key/cert pair that signs the kube-apiserver to kubelet client certs.
type KubeAPIServerToKubeletSignerCertKey struct {
	SelfSignedCertKey
}

var _ asset.WritableAsset = (*KubeAPIServerToKubeletSignerCertKey)(nil)

// Dependencies returns the dependency of the root-ca, which is empty.
func (c *KubeAPIServerToKubeletSignerCertKey) Dependencies() []asset.Asset {
	return []asset.Asset{}
}

// Generate creates the self-signed CA used to sign kube-apiserver→kubelet
// client certificates (one-year validity).
func (c *KubeAPIServerToKubeletSignerCertKey) Generate(parents asset.Parents) error {
	const baseName = "kube-apiserver-to-kubelet-signer"
	return c.SelfSignedCertKey.Generate(&CertCfg{
		Subject:   pkix.Name{CommonName: baseName, OrganizationalUnit: []string{"openshift"}},
		KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		Validity:  ValidityOneYear,
		IsCA:      true,
	}, baseName)
}

// Name returns the human-friendly name of the asset.
func (c *KubeAPIServerToKubeletSignerCertKey) Name() string {
	return "Certificate (kube-apiserver-to-kubelet-signer)"
}
// KubeAPIServerToKubeletCABundle is the asset that generates the kube-apiserver-to-kubelet-ca-bundle,
// which contains all the individual client CAs.
type KubeAPIServerToKubeletCABundle struct {
	CertBundle
}

var _ asset.Asset = (*KubeAPIServerToKubeletCABundle)(nil)

// Dependencies returns the dependency of the cert bundle.
func (a *KubeAPIServerToKubeletCABundle) Dependencies() []asset.Asset {
	return []asset.Asset{
		&KubeAPIServerToKubeletSignerCertKey{},
	}
}

// Generate resolves each dependency and folds its certificate into the bundle.
func (a *KubeAPIServerToKubeletCABundle) Generate(deps asset.Parents) error {
	dependencies := a.Dependencies()
	certs := make([]CertInterface, 0, len(dependencies))
	for _, dep := range dependencies {
		deps.Get(dep)
		certs = append(certs, dep.(CertInterface))
	}
	return a.CertBundle.Generate("kube-apiserver-to-kubelet-ca-bundle", certs...)
}

// Name returns the human-friendly name of the asset.
func (a *KubeAPIServerToKubeletCABundle) Name() string {
	return "Certificate (kube-apiserver-to-kubelet-ca-bundle)"
}
// KubeAPIServerToKubeletClientCertKey is the asset that generates the kube-apiserver to kubelet client key/cert pair.
type KubeAPIServerToKubeletClientCertKey struct {
	SignedCertKey
}

var _ asset.Asset = (*KubeAPIServerToKubeletClientCertKey)(nil)

// Dependencies returns the signer that issues this cert/key pair.
// (Fixed the "the the" typo from the original comment.)
func (a *KubeAPIServerToKubeletClientCertKey) Dependencies() []asset.Asset {
	return []asset.Asset{
		&KubeAPIServerToKubeletSignerCertKey{},
	}
}

// Generate issues the client certificate the kube-apiserver presents to
// kubelets (one-year validity, client-auth only, parent not appended).
func (a *KubeAPIServerToKubeletClientCertKey) Generate(dependencies asset.Parents) error {
	ca := &KubeAPIServerToKubeletSignerCertKey{}
	dependencies.Get(ca)
	return a.SignedCertKey.Generate(&CertCfg{
		Subject:      pkix.Name{CommonName: "system:kube-apiserver", Organization: []string{"kube-master"}},
		KeyUsages:    x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		Validity:     ValidityOneYear,
	}, ca, "kube-apiserver-to-kubelet-client", DoNotAppendParent)
}

// Name returns the human-friendly name of the asset.
func (a *KubeAPIServerToKubeletClientCertKey) Name() string {
	return "Certificate (kube-apiserver-to-kubelet-client)"
}
// KubeAPIServerLocalhostSignerCertKey is a key/cert pair that signs the kube-apiserver server cert for SNI localhost.
type KubeAPIServerLocalhostSignerCertKey struct {
	SelfSignedCertKey
}

var _ asset.WritableAsset = (*KubeAPIServerLocalhostSignerCertKey)(nil)

// Dependencies returns the dependency of the root-ca, which is empty.
func (c *KubeAPIServerLocalhostSignerCertKey) Dependencies() []asset.Asset {
	return []asset.Asset{}
}

// Generate creates the self-signed ten-year CA used to sign the localhost
// serving certificate.
func (c *KubeAPIServerLocalhostSignerCertKey) Generate(parents asset.Parents) error {
	const baseName = "kube-apiserver-localhost-signer"
	return c.SelfSignedCertKey.Generate(&CertCfg{
		Subject:   pkix.Name{CommonName: baseName, OrganizationalUnit: []string{"openshift"}},
		KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		Validity:  ValidityTenYears,
		IsCA:      true,
	}, baseName)
}

// Load reads the asset files from disk.
func (c *KubeAPIServerLocalhostSignerCertKey) Load(f asset.FileFetcher) (bool, error) {
	return c.loadCertKey(f, "kube-apiserver-localhost-signer")
}

// Name returns the human-friendly name of the asset.
func (c *KubeAPIServerLocalhostSignerCertKey) Name() string {
	return "Certificate (kube-apiserver-localhost-signer)"
}
// KubeAPIServerLocalhostCABundle is the asset that generates the kube-apiserver-localhost-ca-bundle,
// which contains all the individual client CAs.
type KubeAPIServerLocalhostCABundle struct {
	CertBundle
}

var _ asset.Asset = (*KubeAPIServerLocalhostCABundle)(nil)

// Dependencies returns the dependency of the cert bundle.
func (a *KubeAPIServerLocalhostCABundle) Dependencies() []asset.Asset {
	return []asset.Asset{
		&KubeAPIServerLocalhostSignerCertKey{},
	}
}

// Generate resolves each dependency and folds its certificate into the bundle.
func (a *KubeAPIServerLocalhostCABundle) Generate(deps asset.Parents) error {
	dependencies := a.Dependencies()
	certs := make([]CertInterface, 0, len(dependencies))
	for _, dep := range dependencies {
		deps.Get(dep)
		certs = append(certs, dep.(CertInterface))
	}
	return a.CertBundle.Generate("kube-apiserver-localhost-ca-bundle", certs...)
}

// Name returns the human-friendly name of the asset.
func (a *KubeAPIServerLocalhostCABundle) Name() string {
	return "Certificate (kube-apiserver-localhost-ca-bundle)"
}
// KubeAPIServerLocalhostServerCertKey is the asset that generates the kube-apiserver serving key/cert pair for SNI localhost.
type KubeAPIServerLocalhostServerCertKey struct {
	SignedCertKey
}

var _ asset.Asset = (*KubeAPIServerLocalhostServerCertKey)(nil)

// Dependencies returns the signer that issues this cert/key pair.
func (a *KubeAPIServerLocalhostServerCertKey) Dependencies() []asset.Asset {
	return []asset.Asset{
		&KubeAPIServerLocalhostSignerCertKey{},
	}
}

// Generate issues the short-lived (one-day) serving certificate for the
// localhost SNI endpoint, valid for "localhost", 127.0.0.1 and ::1.
func (a *KubeAPIServerLocalhostServerCertKey) Generate(dependencies asset.Parents) error {
	ca := &KubeAPIServerLocalhostSignerCertKey{}
	dependencies.Get(ca)
	return a.SignedCertKey.Generate(&CertCfg{
		Subject:      pkix.Name{CommonName: "system:kube-apiserver", Organization: []string{"kube-master"}},
		KeyUsages:    x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		Validity:     ValidityOneDay,
		DNSNames:     []string{"localhost"},
		IPAddresses:  []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::1")},
	}, ca, "kube-apiserver-localhost-server", AppendParent)
}

// Name returns the human-friendly name of the asset.
func (a *KubeAPIServerLocalhostServerCertKey) Name() string {
	return "Certificate (kube-apiserver-localhost-server)"
}
// KubeAPIServerServiceNetworkSignerCertKey is a key/cert pair that signs the kube-apiserver server cert for SNI service network.
type KubeAPIServerServiceNetworkSignerCertKey struct {
	SelfSignedCertKey
}

var _ asset.WritableAsset = (*KubeAPIServerServiceNetworkSignerCertKey)(nil)

// Dependencies returns the dependency of the root-ca, which is empty.
func (c *KubeAPIServerServiceNetworkSignerCertKey) Dependencies() []asset.Asset {
	return []asset.Asset{}
}

// Generate creates the self-signed ten-year CA used to sign the
// service-network serving certificate.
func (c *KubeAPIServerServiceNetworkSignerCertKey) Generate(parents asset.Parents) error {
	const baseName = "kube-apiserver-service-network-signer"
	return c.SelfSignedCertKey.Generate(&CertCfg{
		Subject:   pkix.Name{CommonName: baseName, OrganizationalUnit: []string{"openshift"}},
		KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		Validity:  ValidityTenYears,
		IsCA:      true,
	}, baseName)
}

// Load reads the asset files from disk.
func (c *KubeAPIServerServiceNetworkSignerCertKey) Load(f asset.FileFetcher) (bool, error) {
	return c.loadCertKey(f, "kube-apiserver-service-network-signer")
}

// Name returns the human-friendly name of the asset.
func (c *KubeAPIServerServiceNetworkSignerCertKey) Name() string {
	return "Certificate (kube-apiserver-service-network-signer)"
}
// KubeAPIServerServiceNetworkCABundle is the asset that generates the kube-apiserver-service-network-ca-bundle,
// which contains all the individual client CAs.
type KubeAPIServerServiceNetworkCABundle struct {
	CertBundle
}

var _ asset.Asset = (*KubeAPIServerServiceNetworkCABundle)(nil)

// Dependencies returns the dependency of the cert bundle.
func (a *KubeAPIServerServiceNetworkCABundle) Dependencies() []asset.Asset {
	return []asset.Asset{
		&KubeAPIServerServiceNetworkSignerCertKey{},
	}
}

// Generate resolves each dependency and folds its certificate into the bundle.
func (a *KubeAPIServerServiceNetworkCABundle) Generate(deps asset.Parents) error {
	dependencies := a.Dependencies()
	certs := make([]CertInterface, 0, len(dependencies))
	for _, dep := range dependencies {
		deps.Get(dep)
		certs = append(certs, dep.(CertInterface))
	}
	return a.CertBundle.Generate("kube-apiserver-service-network-ca-bundle", certs...)
}

// Name returns the human-friendly name of the asset.
func (a *KubeAPIServerServiceNetworkCABundle) Name() string {
	return "Certificate (kube-apiserver-service-network-ca-bundle)"
}
// KubeAPIServerServiceNetworkServerCertKey is the asset that generates the kube-apiserver serving key/cert pair for SNI service network.
type KubeAPIServerServiceNetworkServerCertKey struct {
	SignedCertKey
}

var _ asset.Asset = (*KubeAPIServerServiceNetworkServerCertKey)(nil)

// Dependencies returns the signer and the install config this cert needs.
func (a *KubeAPIServerServiceNetworkServerCertKey) Dependencies() []asset.Asset {
	return []asset.Asset{
		&KubeAPIServerServiceNetworkSignerCertKey{},
		&installconfig.InstallConfig{},
	}
}

// Generate issues the short-lived (one-day) serving certificate for the
// in-cluster kubernetes/openshift service names and the service VIP.
func (a *KubeAPIServerServiceNetworkServerCertKey) Generate(dependencies asset.Parents) error {
	ca := &KubeAPIServerServiceNetworkSignerCertKey{}
	installConfig := &installconfig.InstallConfig{}
	dependencies.Get(ca, installConfig)

	// The kubernetes service VIP is the first host address in the service network.
	serviceAddress, err := cidrhost(installConfig.Config.Networking.ServiceNetwork[0].IPNet, 1)
	if err != nil {
		return errors.Wrap(err, "failed to get service address for kube-apiserver from InstallConfig")
	}
	return a.SignedCertKey.Generate(&CertCfg{
		Subject:      pkix.Name{CommonName: "system:kube-apiserver", Organization: []string{"kube-master"}},
		KeyUsages:    x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		Validity:     ValidityOneDay,
		DNSNames: []string{
			"kubernetes", "kubernetes.default",
			"kubernetes.default.svc",
			"kubernetes.default.svc.cluster.local",
			"openshift", "openshift.default",
			"openshift.default.svc",
			"openshift.default.svc.cluster.local",
		},
		IPAddresses: []net.IP{net.ParseIP(serviceAddress)},
	}, ca, "kube-apiserver-service-network-server", AppendParent)
}

// Name returns the human-friendly name of the asset.
func (a *KubeAPIServerServiceNetworkServerCertKey) Name() string {
	return "Certificate (kube-apiserver-service-network-server)"
}
// KubeAPIServerLBSignerCertKey is a key/cert pair that signs the kube-apiserver server cert for SNI load balancer.
type KubeAPIServerLBSignerCertKey struct {
	SelfSignedCertKey
}

var _ asset.WritableAsset = (*KubeAPIServerLBSignerCertKey)(nil)

// Dependencies returns the dependency of the root-ca, which is empty.
func (c *KubeAPIServerLBSignerCertKey) Dependencies() []asset.Asset {
	return []asset.Asset{}
}

// Generate creates the self-signed ten-year CA used to sign the
// load-balancer serving certificates.
func (c *KubeAPIServerLBSignerCertKey) Generate(parents asset.Parents) error {
	const baseName = "kube-apiserver-lb-signer"
	return c.SelfSignedCertKey.Generate(&CertCfg{
		Subject:   pkix.Name{CommonName: baseName, OrganizationalUnit: []string{"openshift"}},
		KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		Validity:  ValidityTenYears,
		IsCA:      true,
	}, baseName)
}

// Load reads the asset files from disk.
func (c *KubeAPIServerLBSignerCertKey) Load(f asset.FileFetcher) (bool, error) {
	return c.loadCertKey(f, "kube-apiserver-lb-signer")
}

// Name returns the human-friendly name of the asset.
func (c *KubeAPIServerLBSignerCertKey) Name() string {
	return "Certificate (kube-apiserver-lb-signer)"
}
// KubeAPIServerLBCABundle is the asset that generates the kube-apiserver-lb-ca-bundle,
// which contains all the individual client CAs.
type KubeAPIServerLBCABundle struct {
	CertBundle
}

var _ asset.Asset = (*KubeAPIServerLBCABundle)(nil)

// Dependencies returns the dependency of the cert bundle.
func (a *KubeAPIServerLBCABundle) Dependencies() []asset.Asset {
	return []asset.Asset{
		&KubeAPIServerLBSignerCertKey{},
	}
}

// Generate resolves each dependency and folds its certificate into the bundle.
func (a *KubeAPIServerLBCABundle) Generate(deps asset.Parents) error {
	dependencies := a.Dependencies()
	certs := make([]CertInterface, 0, len(dependencies))
	for _, dep := range dependencies {
		deps.Get(dep)
		certs = append(certs, dep.(CertInterface))
	}
	return a.CertBundle.Generate("kube-apiserver-lb-ca-bundle", certs...)
}

// Name returns the human-friendly name of the asset.
func (a *KubeAPIServerLBCABundle) Name() string {
	return "Certificate (kube-apiserver-lb-ca-bundle)"
}
// KubeAPIServerExternalLBServerCertKey is the asset that generates the kube-apiserver serving key/cert pair for SNI external load balancer.
type KubeAPIServerExternalLBServerCertKey struct {
	SignedCertKey
}

var _ asset.Asset = (*KubeAPIServerExternalLBServerCertKey)(nil)

// Dependencies returns the signer and the install config this cert needs.
// (Fixed the "the the" typo from the original comment.)
func (a *KubeAPIServerExternalLBServerCertKey) Dependencies() []asset.Asset {
	return []asset.Asset{
		&KubeAPIServerLBSignerCertKey{},
		&installconfig.InstallConfig{},
	}
}

// Generate issues the short-lived (one-day) serving certificate for the
// external API load-balancer hostname.
func (a *KubeAPIServerExternalLBServerCertKey) Generate(dependencies asset.Parents) error {
	ca := &KubeAPIServerLBSignerCertKey{}
	installConfig := &installconfig.InstallConfig{}
	dependencies.Get(ca, installConfig)
	return a.SignedCertKey.Generate(&CertCfg{
		Subject:      pkix.Name{CommonName: "system:kube-apiserver", Organization: []string{"kube-master"}},
		KeyUsages:    x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		Validity:     ValidityOneDay,
		DNSNames: []string{
			apiAddress(installConfig.Config),
		},
	}, ca, "kube-apiserver-lb-server", AppendParent)
}

// Name returns the human-friendly name of the asset.
func (a *KubeAPIServerExternalLBServerCertKey) Name() string {
	return "Certificate (kube-apiserver-external-lb-server)"
}
// KubeAPIServerInternalLBServerCertKey is the asset that generates the kube-apiserver serving key/cert pair for SNI internal load balancer.
type KubeAPIServerInternalLBServerCertKey struct {
	SignedCertKey
}

var _ asset.Asset = (*KubeAPIServerInternalLBServerCertKey)(nil)

// Dependencies returns the signer and the install config this cert needs.
// (Fixed the "the the" typo from the original comment.)
func (a *KubeAPIServerInternalLBServerCertKey) Dependencies() []asset.Asset {
	return []asset.Asset{
		&KubeAPIServerLBSignerCertKey{},
		&installconfig.InstallConfig{},
	}
}

// Generate issues the short-lived (one-day) serving certificate for the
// internal API load-balancer hostname.
func (a *KubeAPIServerInternalLBServerCertKey) Generate(dependencies asset.Parents) error {
	ca := &KubeAPIServerLBSignerCertKey{}
	installConfig := &installconfig.InstallConfig{}
	dependencies.Get(ca, installConfig)
	return a.SignedCertKey.Generate(&CertCfg{
		Subject:      pkix.Name{CommonName: "system:kube-apiserver", Organization: []string{"kube-master"}},
		KeyUsages:    x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		Validity:     ValidityOneDay,
		DNSNames: []string{
			internalAPIAddress(installConfig.Config),
		},
	}, ca, "kube-apiserver-internal-lb-server", AppendParent)
}

// Name returns the human-friendly name of the asset.
func (a *KubeAPIServerInternalLBServerCertKey) Name() string {
	return "Certificate (kube-apiserver-internal-lb-server)"
}
// KubeAPIServerCompleteCABundle is the asset that generates the kube-apiserver-complete-server-ca-bundle,
// which contains all the certs that are valid to confirm the kube-apiserver identity.
type KubeAPIServerCompleteCABundle struct {
	CertBundle
}

var _ asset.Asset = (*KubeAPIServerCompleteCABundle)(nil)

// Dependencies returns every per-endpoint CA bundle that feeds this one.
func (a *KubeAPIServerCompleteCABundle) Dependencies() []asset.Asset {
	return []asset.Asset{
		&KubeAPIServerLocalhostCABundle{},
		&KubeAPIServerServiceNetworkCABundle{},
		&KubeAPIServerLBCABundle{},
	}
}

// Generate resolves each dependency and folds its certificate into the bundle.
func (a *KubeAPIServerCompleteCABundle) Generate(deps asset.Parents) error {
	dependencies := a.Dependencies()
	certs := make([]CertInterface, 0, len(dependencies))
	for _, dep := range dependencies {
		deps.Get(dep)
		certs = append(certs, dep.(CertInterface))
	}
	return a.CertBundle.Generate("kube-apiserver-complete-server-ca-bundle", certs...)
}

// Name returns the human-friendly name of the asset.
func (a *KubeAPIServerCompleteCABundle) Name() string {
	return "Certificate (kube-apiserver-complete-server-ca-bundle)"
}
// KubeAPIServerCompleteClientCABundle is the asset that generates the kube-apiserver-complete-client-ca-bundle,
// which contains all the certs that are valid for the kube-apiserver to trust for clients.
type KubeAPIServerCompleteClientCABundle struct {
	CertBundle
}

var _ asset.Asset = (*KubeAPIServerCompleteClientCABundle)(nil)

// Dependencies returns every client CA bundle that feeds this one.
func (a *KubeAPIServerCompleteClientCABundle) Dependencies() []asset.Asset {
	return []asset.Asset{
		&AdminKubeConfigCABundle{},         // admin.kubeconfig
		&KubeletClientCABundle{},           // signed kubelet certs
		&KubeControlPlaneCABundle{},        // controller-manager, scheduler
		&KubeAPIServerToKubeletCABundle{},  // kube-apiserver to kubelet (kubelet piggy-backs on KAS client-ca)
		&KubeletBootstrapCABundle{},        // used to create the kubelet kubeconfig files that are used to create CSRs
	}
}

// Generate resolves each dependency and folds its certificate into the bundle.
func (a *KubeAPIServerCompleteClientCABundle) Generate(deps asset.Parents) error {
	dependencies := a.Dependencies()
	certs := make([]CertInterface, 0, len(dependencies))
	for _, dep := range dependencies {
		deps.Get(dep)
		certs = append(certs, dep.(CertInterface))
	}
	return a.CertBundle.Generate("kube-apiserver-complete-client-ca-bundle", certs...)
}

// Name returns the human-friendly name of the asset.
func (a *KubeAPIServerCompleteClientCABundle) Name() string {
	return "Certificate (kube-apiserver-complete-client-ca-bundle)"
}
|
package main
import (
"fmt"
"log"
"os"
"strconv"
"strings"
"github.com/joho/godotenv"
"github.com/syndtr/goleveldb/leveldb"
tgbotapi "gopkg.in/telegram-bot-api.v1"
)
// Bot is the Telegram bot together with its subscription state and storage.
type Bot struct {
	// API is the underlying Telegram bot client.
	API *tgbotapi.BotAPI
	// Subscriptions is the in-memory list of active subreddit watches.
	Subscriptions []*Subscription
	// DB persists subscriptions across restarts.
	DB *leveldb.DB
}
// NewBot creates a new Telegram bot: it loads the environment, opens the
// subscription database, registers the webhook and restores persisted
// subscriptions.
//
// Fix: the SetWebhook error was silently discarded; a failed webhook
// registration now aborts construction.
func NewBot() (*Bot, error) {
	// Load env
	err := godotenv.Load()
	if err != nil {
		log.Printf(".env file is not found; %v", err)
		return nil, err
	}
	// Load DB
	db, err := leveldb.OpenFile("./.subscriptions.db", nil)
	if err != nil {
		log.Printf("cannot create DB file in ./subscriptions.db; %v", err)
		return nil, err
	}
	bot, err := tgbotapi.NewBotAPI(os.Getenv("TELEGRAM_BOT_API"))
	if err != nil {
		log.Printf("cannot create Bot API; token: %v", err)
		return nil, err
	}
	appURL := fmt.Sprintf("%s/webhook/%s", os.Getenv("APP_URL"), bot.Token)
	if _, err := bot.SetWebhook(tgbotapi.NewWebhook(appURL)); err != nil {
		log.Printf("cannot set webhook to %s; %v", appURL, err)
		return nil, err
	}
	tgbot := Bot{
		API:           bot,
		Subscriptions: make([]*Subscription, 0),
		DB:            db,
	}
	err = tgbot.restoreSubscriptions()
	if err != nil {
		log.Printf("cannot restore subscriptions; %v", err)
		return nil, err
	}
	return &tgbot, nil
}
// restoreSubscriptions replays persisted subscriptions from the DB. Each key
// is a subreddit name; its value is a ", "-separated list of chat IDs.
//
// Fixes: the strconv.Atoi and Subscribe errors were silently ignored (a
// corrupt entry subscribed chat 0); corrupt or failing entries are now
// logged and skipped so the rest of the restore still proceeds.
func (b *Bot) restoreSubscriptions() error {
	iter := b.DB.NewIterator(nil, nil)
	for iter.Next() {
		sr := string(iter.Key())
		chats := strings.Split(string(iter.Value()), ", ")
		for _, chat := range chats {
			chatID, err := strconv.Atoi(chat)
			if err != nil {
				log.Printf("skipping corrupt chat id %q for %s; %v", chat, sr, err)
				continue
			}
			if err := b.Subscribe(chatID, sr); err != nil {
				log.Printf("cannot restore subscription of %d to %s; %v", chatID, sr, err)
			}
		}
	}
	iter.Release()
	return iter.Error()
}
// Listen serves telegram API updates delivered to the webhook endpoint,
// dispatching each one to parseCommand; it blocks until the update channel
// closes and closes the DB on exit.
//
// Fix: the error from ListenForWebhook was silently discarded; it is now
// logged and aborts listening.
func (b *Bot) Listen() {
	defer b.DB.Close()
	updates, err := b.API.ListenForWebhook("/webhook/" + b.API.Token)
	if err != nil {
		log.Printf("cannot listen for webhook; %v", err)
		return
	}
	for update := range updates {
		b.parseCommand(update)
	}
}
// parseCommand routes an incoming webhook update to the matching command
// handler (/subscribe, /unsubscribe, /start).
//
// NOTE(review): the case labels keep the leading "/" — confirm that this
// library version's Command() returns the slash, otherwise no case matches.
// NOTE(review): update.Message is used unconditionally; confirm the webhook
// can never deliver an update without a message.
func (b *Bot) parseCommand(update tgbotapi.Update) {
	msg := update.Message
	cmd := msg.Command()
	chat := update.Message.Chat.ID
	if cmd != "" {
		switch cmd {
		// Subscribe: report either the failure or a confirmation.
		case "/subscribe":
			err := b.Subscribe(chat, msg.CommandArguments())
			if err != nil {
				b.SendMessage(chat, err.Error())
			} else {
				b.SendMessage(chat, fmt.Sprintf("You successfully subscribed to: %s", msg.CommandArguments()))
			}
		// Unsubscribe: always confirmed (Unsubscribe cannot fail).
		case "/unsubscribe":
			b.Unsubscribe(chat, msg.CommandArguments())
			b.SendMessage(chat, fmt.Sprintf("You successfully unsubscribed from: %s", msg.CommandArguments()))
		// Start: send the help text.
		case "/start":
			b.SendMessage(chat, `
Hi there! List of available commands:
/subscribe <subreddit-name> ex. /subscribe PS4Deals
/unsubscribe <subreddit-name>
Bot is checking every minute for new updates in subreddits you subscribed to and
send them to you.
`)
		}
	}
}
// Subscribe registers chat for updates from subreddit sr. When the subreddit
// is already being monitored the chat is added to the existing subscription;
// otherwise a new subscription is created and its monitor goroutine started.
func (b *Bot) Subscribe(chat int, sr string) error {
	// Reuse an existing subscription for this subreddit if one is present.
	var existing *Subscription
	for _, sub := range b.Subscriptions {
		if sub.Name == sr {
			existing = sub
			break
		}
	}
	if existing != nil {
		existing.Subscribe(b, chat)
		return nil
	}
	// First subscriber for this subreddit: create it and start monitoring.
	sub, err := NewSubscription(sr, b.DB)
	if err != nil {
		return err
	}
	sub.Subscribe(b, chat)
	go sub.Monitor(b)
	b.Subscriptions = append(b.Subscriptions, sub)
	return nil
}
// Unsubscribe removes chat from the subscription for subreddit sr, if such a
// subscription exists. If duplicates were ever present, the last match wins
// (same behavior as before).
func (b *Bot) Unsubscribe(chat int, sr string) {
	found := -1
	for idx, sub := range b.Subscriptions {
		if sub.Name == sr {
			found = idx
		}
	}
	if found == -1 {
		return
	}
	b.Subscriptions[found].Unsubscribe(chat)
}
// SendMessage sends a plain-text message to the given chat.
//
// NOTE(review): the result of Send is discarded, so delivery failures are
// silent — confirm Send's return signature for this tgbotapi version and
// log the error if one is returned.
func (b *Bot) SendMessage(chat int, msg string) {
	b.API.Send(tgbotapi.NewMessage(chat, msg))
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ucadata
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestHangulJamoHasOnlyOneWeight verifies that every rune in the Hangul Jamo
// block carries at most one collation weight (the upper 48 bits are zero).
func TestHangulJamoHasOnlyOneWeight(t *testing.T) {
	// The Hangul Jamo block spans U+1100..U+11FF inclusive; the previous
	// bound `i < 0x11FF` skipped the last code point U+11FF.
	for i := 0x1100; i <= 0x11FF; i++ {
		require.Equal(t, uint64(0), DUCET0900Table.MapTable4[rune(i)]&0xFFFFFFFFFFFF0000)
	}
}
// TestFirstIsNotZero verifies that the first weight of every long-rune entry
// is non-zero; downstream logic relies on this invariant.
func TestFirstIsNotZero(t *testing.T) {
	for _, weights := range DUCET0900Table.LongRuneMap {
		// The previous require.NotEqual(t, weights[0], 0) always passed:
		// the untyped constant 0 becomes int, which testify never considers
		// equal to an unsigned integer element regardless of its value.
		// NotZero compares against the zero value of the element's own type.
		require.NotZero(t, weights[0])
	}
}
|
package saucecloud
import (
"fmt"
"strings"
"github.com/saucelabs/saucectl/internal/job"
"github.com/saucelabs/saucectl/internal/testcafe"
)
// TestcafeRunner represents the SauceLabs cloud implementation of a testcafe
// project runner. It embeds CloudRunner for the shared upload/job/result
// plumbing and carries the parsed project configuration.
type TestcafeRunner struct {
	CloudRunner
	Project testcafe.Project
}
// RunProject runs the configured testcafe suites on Sauce Labs cloud.
// It returns 0 on success and 1 (with or without an error) on failure.
func (r *TestcafeRunner) RunProject() (int, error) {
	const failureExit = 1
	if err := r.validateTunnel(r.Project.Sauce.Tunnel.ID); err != nil {
		return failureExit, err
	}
	// A dry run only reports what would be executed, then succeeds.
	if r.Project.DryRun {
		err := r.dryRun(r.Project, r.Project.RootDir, r.Project.Sauce.Sauceignore, r.getSuiteNames())
		if err != nil {
			return failureExit, err
		}
		return 0, nil
	}
	fileID, err := r.archiveAndUpload(r.Project, r.Project.RootDir, r.Project.Sauce.Sauceignore)
	if err != nil {
		return failureExit, err
	}
	if r.runSuites(fileID) {
		return 0, nil
	}
	return failureExit, nil
}
// getSuiteNames returns the configured suite names joined by ", ".
func (r *TestcafeRunner) getSuiteNames() string {
	names := make([]string, 0, len(r.Project.Suites))
	for _, suite := range r.Project.Suites {
		names = append(names, suite.Name)
	}
	return strings.Join(names, ", ")
}
// newStartOptions builds the job.StartOptions common to every testcafe suite.
// PlatformName defaults to the suite's own platform; simulator-specific
// fields (platform name/version, device name) are overridden by the caller.
func (r *TestcafeRunner) newStartOptions(s testcafe.Suite, fileID string) job.StartOptions {
	return job.StartOptions{
		ConfigFilePath:   r.Project.ConfigFilePath,
		DisplayName:      s.Name,
		App:              fmt.Sprintf("storage:%s", fileID),
		Suite:            s.Name,
		Framework:        "testcafe",
		FrameworkVersion: r.Project.Testcafe.Version,
		BrowserName:      s.BrowserName,
		BrowserVersion:   s.BrowserVersion,
		PlatformName:     s.PlatformName,
		Name:             s.Name,
		Build:            r.Project.Sauce.Metadata.Build,
		Tags:             r.Project.Sauce.Metadata.Tags,
		Tunnel: job.TunnelOptions{
			ID:     r.Project.Sauce.Tunnel.ID,
			Parent: r.Project.Sauce.Tunnel.Parent,
		},
		ScreenResolution: s.ScreenResolution,
		RunnerVersion:    r.Project.RunnerVersion,
		Experiments:      r.Project.Sauce.Experiments,
	}
}

// runSuites submits one job per suite — or one per simulator/platform-version
// combination for simulator-based suites — and waits for all results.
// It returns true when every job passed.
//
// The two nearly-identical 20-field StartOptions literals were collapsed
// into newStartOptions; the simulator branch overrides only the fields
// that differ.
func (r *TestcafeRunner) runSuites(fileID string) bool {
	sigChan := r.registerSkipSuitesOnSignal()
	defer unregisterSignalCapture(sigChan)
	jobOpts, results, err := r.createWorkerPool(r.Project.Sauce.Concurrency)
	if err != nil {
		return false
	}
	defer close(results)
	// Submit suites to work on; jobsCount tells collectResults how many
	// results to wait for.
	jobsCount := r.calcTestcafeJobsCount(r.Project.Suites)
	go func() {
		for _, s := range r.Project.Suites {
			if len(s.Simulators) > 0 {
				// One job per simulator per platform version.
				for _, d := range s.Simulators {
					for _, pv := range d.PlatformVersions {
						opts := r.newStartOptions(s, fileID)
						opts.PlatformName = d.PlatformName
						opts.PlatformVersion = pv
						opts.DeviceName = d.Name
						jobOpts <- opts
					}
				}
			} else {
				jobOpts <- r.newStartOptions(s, fileID)
			}
		}
		close(jobOpts)
	}()
	return r.collectResults(r.Project.Artifacts.Download, results, jobsCount)
}
// calcTestcafeJobsCount returns the total number of jobs that will run:
// one per plain suite, and one per platform version for each simulator of
// simulator-based suites.
func (r *TestcafeRunner) calcTestcafeJobsCount(suites []testcafe.Suite) int {
	total := 0
	for _, suite := range suites {
		if len(suite.Simulators) == 0 {
			total++
			continue
		}
		for _, sim := range suite.Simulators {
			total += len(sim.PlatformVersions)
		}
	}
	return total
}
|
package dbft
// Table and column name constants for the DBFT proof-of-stake storage layer.
const (
	// Table names.
	TABLE_POS_VOTE = "pos_vote"
	TABLE_POS_ASSET = "pos_asset"
	TABLE_EPOCH_INFO = "epoch_info"
	// Columns of TABLE_POS_VOTE.
	POS_VOTE_VOTE_ID = "vote_id"
	POS_VOTE_TXID = "txid"
	POS_VOTE_ACCOUNT_ID = "account_id"
	POS_VOTE_PEERID = "peerid"
	POS_VOTING_POWER = "voting_power"
	POS_VOTE_EPOCH = "vote_epoch"
	// Columns of TABLE_POS_ASSET.
	POS_ASSET_ACCOUNT_ID = "account_id"
	POS_ASSET_BALANCE = "balance"
	POS_ASSET_FROZEN_AMOUNT = "frozen_amount"
	POS_ASSET_AVAILABLE_AMOUNT = "available_amount"
	// Columns of TABLE_EPOCH_INFO.
	POS_EPOCH_NO = "epoch_no"
	POS_DEPEND_EPOCH_NO = "depend_epoch_no"
	POS_BEGIN_NUM = "begin_num"
	POS_END_NUM = "end_num"
	POS_TARGET_BLK_NUM = "target_blk_num"
	POS_WITNESSLIST = "witness_list"
	INTIME = "intime"
)
|
package planet
import (
"upper.io/db"
)
// GetFeedService returns a Service backed by the "feed" collection,
// keyed by "id".
func GetFeedService(database db.Database) Service {
	// Parameter renamed from `db` to avoid shadowing the imported
	// upper.io/db package.
	return &BasicService{
		Db:     database,
		name:   "feed",
		idName: "id",
	}
}
// GetFeedItemService returns a Service backed by the "item" collection,
// keyed by "id".
func GetFeedItemService(database db.Database) Service {
	// Parameter renamed from `db` to avoid shadowing the imported
	// upper.io/db package.
	return &BasicService{
		Db:     database,
		name:   "item",
		idName: "id",
	}
}
// BasicService is a generic CRUD service over a single database collection,
// identified by name and keyed by the idName field.
type BasicService struct {
	Db db.Database // underlying database handle
	name string // collection name
	idName string // key field used by KeyCond
}
// KeyCond builds the query condition that matches the given primary key.
func (s *BasicService) KeyCond(k KeyPtr) db.Cond {
	cond := db.Cond{}
	cond[s.idName] = k
	return cond
}
// Collection returns the underlying database collection for this service.
func (s *BasicService) Collection() (db.Collection, error) {
	return s.Db.Collection(s.name)
}
// Create inserts a new entity into the collection.
func (s *BasicService) Create(p EntityPtr) error {
	coll, err := s.Collection()
	if err != nil {
		return err
	}
	_, appendErr := coll.Append(p)
	return appendErr
}
// List loads every entity matching condition c into l.
func (s *BasicService) List(c CondPtr, l ListPtr) error {
	coll, err := s.Collection()
	if err != nil {
		return err
	}
	res := coll.Find(c)
	defer res.Close()
	return res.All(l)
}
// Retrieve loads the single entity with key k into p.
func (s *BasicService) Retrieve(k KeyPtr, p EntityPtr) error {
	coll, err := s.Collection()
	if err != nil {
		return err
	}
	res := coll.Find(s.KeyCond(k))
	defer res.Close()
	return res.One(p)
}
// Update overwrites the entity with key k using the values in p.
func (s *BasicService) Update(k KeyPtr, p EntityPtr) error {
	coll, err := s.Collection()
	if err != nil {
		return err
	}
	res := coll.Find(s.KeyCond(k))
	defer res.Close()
	return res.Update(p)
}
// Delete removes the entity with key k from the collection.
func (s *BasicService) Delete(k KeyPtr) error {
	coll, err := s.Collection()
	if err != nil {
		return err
	}
	res := coll.Find(s.KeyCond(k))
	defer res.Close()
	return res.Remove()
}
|
package main
import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"github.com/awalterschulze/gographviz"
address "github.com/hashicorp/go-terraform-address"
tfjson "github.com/hashicorp/terraform-json"
)
// newPlan decodes a terraform plan JSON document from planInput.
// Unknown fields cause a decode error, which lets callers distinguish
// plan files from state files.
func newPlan(planInput io.Reader) (*tfjson.Plan, error) {
	decoder := json.NewDecoder(planInput)
	decoder.DisallowUnknownFields()
	plan := &tfjson.Plan{}
	if err := decoder.Decode(plan); err != nil {
		return nil, err
	}
	return plan, nil
}
// newState decodes a terraform state JSON document from planInput.
// Unknown fields cause a decode error, which lets callers distinguish
// state files from other JSON documents.
func newState(planInput io.Reader) (*tfjson.State, error) {
	decoder := json.NewDecoder(planInput)
	decoder.DisallowUnknownFields()
	state := &tfjson.State{}
	if err := decoder.Decode(state); err != nil {
		return nil, err
	}
	return state, nil
}
// usage prints a short usage message to stderr.
func usage() {
	// Fprintln adds the trailing newline the original Fprint lacked, so the
	// shell prompt is not glued to the message.
	fmt.Fprintln(os.Stderr, "usage: tf2json <plan.json|state.json>")
}
// openPlanOrState opens fName and tries to decode it first as a terraform
// plan, then as a state file. It returns (*tfjson.Plan, nil),
// (*tfjson.State, nil), or (nil, nil) when the file is neither.
func openPlanOrState(fName string) (interface{}, error) {
	f, err := os.Open(fName)
	if err != nil {
		// Fixed typo: "could not file file".
		return nil, fmt.Errorf("could not open file: %w", err)
	}
	// Previously the file handle leaked on every path out of this function.
	defer f.Close()
	// NOTE(review): newPlan uses DisallowUnknownFields, so a state file may
	// fail here with an error instead of falling through to the state
	// branch — confirm tfjson.Plan tolerates state-file top-level keys.
	plan, err := newPlan(f)
	if err != nil {
		return nil, fmt.Errorf("error making plan: %w", err)
	}
	if plan.PlannedValues != nil && plan.PriorState != nil && plan.Config != nil {
		return plan, nil
	}
	// Not a plan: rewind and retry as a state file.
	if _, err := f.Seek(0, 0); err != nil {
		return nil, err
	}
	state, err := newState(f)
	if err != nil {
		return nil, fmt.Errorf("error making state: %w", err)
	}
	if state.Values != nil {
		return state, nil
	}
	return nil, nil
}
// main validates the command line and delegates to realMain, exiting
// non-zero on any failure.
func main() {
	if len(os.Args) != 2 {
		usage()
		os.Exit(1)
	}
	if err := realMain(os.Args[1]); err != nil {
		fmt.Fprint(os.Stderr, err.Error())
		os.Exit(1)
	}
}
// realMain loads the input file, renders it as a graphviz digraph, and
// prints the result to stdout.
func realMain(inFile string) error {
	i, err := openPlanOrState(inFile)
	if err != nil {
		return err
	}
	graph := gographviz.NewEscape()
	graph.SetDir(true)
	graph.SetName("G")
	graph.SetStrict(true)
	graph.AddAttr("G", "rankdir", "LR")
	graph.AddAttr("G", "newrank", "true")
	// Fixed typo "compoun": the valid graphviz attribute is "compound"
	// (lets edges clip at cluster boundaries); the misspelled name was
	// being rejected and its error silently ignored.
	graph.AddAttr("G", "compound", "true")
	gv := Graph{graph}
	switch ps := i.(type) {
	case *tfjson.Plan:
		if err := gv.Plan(ps); err != nil {
			return err
		}
	case *tfjson.State:
		if err := gv.State(ps); err != nil {
			return err
		}
	default:
		// openPlanOrState returned (nil, nil): neither a plan nor a state.
		return errors.New("couldn't detect file type")
	}
	output := graph.String()
	fmt.Println(output)
	return nil
}
// State renders a terraform state: it creates a "state" subgraph and walks
// the root module of the state's values. Callers must ensure state.Values
// is non-nil (openPlanOrState checks this before returning a state).
func (g *Graph) State(state *tfjson.State) error {
	g.gv.AddSubGraph("G", "state", nil)
	g.Walk("state", state.Values.RootModule)
	return nil
}
// Plan renders a terraform plan: planned values go into a "planned"
// subgraph and the prior state into a "prior" subgraph.
//
// NOTE(review): plan.PriorState.Values is dereferenced without a nil check
// (only PriorState itself was verified by openPlanOrState) — confirm that
// prior_state always carries values.
func (g *Graph) Plan(plan *tfjson.Plan) error {
	g.gv.AddSubGraph("G", "planned", nil)
	g.gv.AddSubGraph("G", "prior", nil)
	g.Walk("planned", plan.PlannedValues.RootModule)
	g.Walk("prior", plan.PriorState.Values.RootModule)
	return nil
}
// Graph wraps a gographviz graph with terraform-specific rendering
// helpers (Plan, State, Walk).
type Graph struct {
	gv gographviz.Interface
}
// Walk adds module m and its resources and child modules to subgraph
// graphName, returning the node name used for m so the caller can link
// to it. Resource nodes are green for data sources and blue for managed
// resources; a panic on an unparseable resource address preserves the
// original fail-fast behavior.
func (g *Graph) Walk(graphName string, m *tfjson.StateModule) string {
	moduleNode := graphName
	if m.Address != "" {
		moduleNode = fmt.Sprintf("%s: %s", graphName, m.Address)
	}
	// add the module node itself
	g.gv.AddNode(graphName, moduleNode, nil)
	for _, res := range m.Resources {
		addr, err := address.NewAddress(res.Address)
		if err != nil {
			panic(err)
		}
		spec := addr.ResourceSpec.String()
		attrs := map[string]string{
			"label": spec,
			"shape": "box",
		}
		if addr.Mode == address.DataResourceMode {
			attrs["color"] = "green"
		} else {
			attrs["color"] = "blue"
		}
		resNode := fmt.Sprintf("%s.%s", moduleNode, spec)
		g.gv.AddNode(graphName, resNode, attrs)
		g.gv.AddEdge(moduleNode, resNode, true, nil)
	}
	for _, child := range m.ChildModules {
		childNode := g.Walk(graphName, child)
		g.gv.AddEdge(moduleNode, childNode, true, nil)
	}
	return moduleNode
}
|
package auth
import (
"context"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider"
"github.com/gofor-little/xerror"
)
// ForgotPassword will initiate a forgot password request.
//
// - Use auth.ChangePassword and auth.ChangePasswordConfirm to update a user's password that doesn't require resetting.
//
// - Use auth.UpdateExpiredPassword if the user has a requirement for their password to be changed.
func ForgotPassword(ctx context.Context, emailAddress string) (*cognitoidentityprovider.ForgotPasswordOutput, error) {
	// Build the request for the configured Cognito app client.
	input := &cognitoidentityprovider.ForgotPasswordInput{
		ClientId: aws.String(CognitoClientID),
		Username: aws.String(emailAddress),
	}
	output, err := CognitoClient.ForgotPassword(ctx, input)
	if err != nil {
		return nil, xerror.Wrap("failed to send forgot password request", err)
	}
	return output, nil
}
// ForgotPasswordConfirm will confirm a forgot password request.
func ForgotPasswordConfirm(ctx context.Context, confirmationCode string, emailAddress string, newPassword string) error {
if _, err := CognitoClient.ConfirmForgotPassword(ctx, &cognitoidentityprovider.ConfirmForgotPasswordInput{
ClientId: aws.String(CognitoClientID),
ConfirmationCode: aws.String(confirmationCode),
Password: aws.String(newPassword),
Username: aws.String(emailAddress),
}); err != nil {
return xerror.Wrap("failed to send forgot password confirmation request", err)
}
return nil
}
|
package main

import "github.com/slawek87/GOstorageClient/example"

// main runs the bundled GOstorageClient example program.
func main() {
	example.Example()
}
|
package hquery
import (
"errors"
"strings"
"github.com/kirillrdy/nadeshiko/html"
"github.com/sparkymat/webdsl/css"
)
// Selection pairs a root HTML node with the CSS selector to match it against.
type Selection struct {
	rootNode html.Node
	selector css.Selector
}
// Select creates a Selection that matches selector against node.
func Select(node html.Node, selector css.Selector) Selection {
	return Selection{rootNode: node, selector: selector}
}
// First returns the root node when it matches the selector, or an error
// otherwise.
//
// NOTE(review): this only tests rootNode itself — it never descends into
// child nodes, so despite the name it does not search the subtree for the
// first match; confirm intent.
func (sel Selection) First() (html.Node, error) {
	if sel.matchesNode(sel.rootNode) {
		return sel.rootNode, nil
	}
	return html.Node{}, errors.New("unable to find")
}
// matchesNode reports whether node matches this selection's selector.
// SelectorChain and SelectorChild are not implemented and never match.
func (sel Selection) matchesNode(node html.Node) bool {
	// Bind the concrete selector value in the type switch instead of
	// repeating a type assertion inside every case.
	switch s := sel.selector.(type) {
	case css.Class:
		return nodeMatchesClass(node, s)
	case css.Id:
		return nodeMatchesId(node, s)
	case css.SelectorList:
		return nodeMatchesSelectorList(node, s)
	case css.SelectorChain, css.SelectorChild:
		// TODO: chained and child selectors are not yet supported.
		return false
	}
	return false
}
// nodeMatchesSelectorList reports whether node matches any selector in the
// list. It short-circuits on the first match, just like the original
// ||-accumulation did.
func nodeMatchesSelectorList(node html.Node, list css.SelectorList) bool {
	for _, selector := range list {
		if Select(node, selector).matchesNode(node) {
			return true
		}
	}
	return false
}
// nodeMatchesClass reports whether node carries the given CSS class in its
// class attribute. If multiple class attributes exist, the last one wins
// (unchanged from the original).
func nodeMatchesClass(node html.Node, class css.Class) bool {
	var classes []string
	for _, attr := range node.Attributes {
		if attr.Name == "class" {
			// strings.Fields splits on any run of whitespace, which is how
			// HTML class attributes are delimited. The previous
			// strings.Split(v, " ") broke on tabs/newlines and produced
			// empty entries for consecutive spaces.
			classes = strings.Fields(attr.Value)
		}
	}
	for _, candidate := range classes {
		if candidate == string(class) {
			return true
		}
	}
	return false
}
// nodeMatchesId reports whether node's id attribute equals id. If multiple
// id attributes are present, the last one wins (unchanged from the original).
func nodeMatchesId(node html.Node, id css.Id) bool {
	found := ""
	for _, attr := range node.Attributes {
		if attr.Name == "id" {
			found = attr.Value
		}
	}
	return found == string(id)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.