text
stringlengths 11
4.05M
|
|---|
package main
import (
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/gui"
)
// CustomTableModel backs the Qt table/chart demo with a 15x4 grid of
// random float values plus a color->area mapping used for highlighting.
type CustomTableModel struct {
	core.QAbstractTableModel
	// The therecipe/qt binding invokes init as the constructor (struct tag).
	_ func() `constructor:"init"`
	m_data        [][]float64            // row-major cell values
	m_mapping     map[string]*core.QRect // color name -> rectangle of cells drawn in that color
	m_columnCount int
	m_rowCount    int
}
// init seeds the model with m_rowCount x m_columnCount random values and
// wires up the QAbstractTableModel callbacks (counts, header/data access,
// editing, flags) through the therecipe/qt Connect* hooks.
func (c *CustomTableModel) init() {
	c.m_data = make([][]float64, 0)
	c.m_mapping = make(map[string]*core.QRect)
	c.m_columnCount = 4
	c.m_rowCount = 15
	// Even columns get an increasing value (i*50 plus jitter in [0,20)),
	// odd columns get a random value in [0,100).
	for i := 0; i < c.m_rowCount; i++ {
		dataVec := make([]float64, c.m_columnCount)
		for k := 0; k < len(dataVec); k++ {
			if k%2 == 0 {
				dataVec[k] = float64(i*50) + core.QRandomGenerator_Global().Bounded(20)
			} else {
				dataVec[k] = core.QRandomGenerator_Global().Bounded(100)
			}
		}
		c.m_data = append(c.m_data, dataVec)
	}
	c.ConnectRowCount(func(*core.QModelIndex) int { return len(c.m_data) })
	c.ConnectColumnCount(func(*core.QModelIndex) int { return c.m_columnCount })
	// Horizontal headers alternate "x"/"y"; vertical headers are 1-based rows.
	c.ConnectHeaderData(func(section int, orientation core.Qt__Orientation, role int) *core.QVariant {
		if role != int(core.Qt__DisplayRole) {
			return core.NewQVariant()
		}
		if orientation == core.Qt__Horizontal {
			if section%2 == 0 {
				return core.NewQVariant1("x")
			}
			return core.NewQVariant1("y")
		}
		return core.NewQVariant1(section + 1)
	})
	// Display/Edit return the cell value (truncated to int); Background
	// returns the mapping color whose rect contains (column, row), else white.
	c.ConnectData(func(index *core.QModelIndex, role int) *core.QVariant {
		if role == int(core.Qt__DisplayRole) || role == int(core.Qt__EditRole) {
			return core.NewQVariant1(int(c.m_data[index.Row()][index.Column()]))
		} else if role == int(core.Qt__BackgroundRole) {
			for i, rect := range c.m_mapping {
				if rect.Contains3(index.Column(), index.Row()) {
					return core.NewQVariant1(gui.NewQColor6(i))
				}
			}
			return core.NewQVariant1(gui.NewQColor2(core.Qt__white))
		}
		return core.NewQVariant()
	})
	c.ConnectSetData(func(index *core.QModelIndex, value *core.QVariant, role int) bool {
		if index.IsValid() && role == int(core.Qt__EditRole) {
			c.m_data[index.Row()][index.Column()] = value.ToDouble(nil)
			c.DataChanged(index, index, nil)
			return true
		}
		return false
	})
	// All cells are editable in addition to the default flags.
	c.ConnectFlags(func(index *core.QModelIndex) core.Qt__ItemFlag {
		return c.FlagsDefault(index) | core.Qt__ItemIsEditable
	})
}
// clearMapping drops every color->area highlight mapping.
func (c *CustomTableModel) clearMapping() { c.m_mapping = make(map[string]*core.QRect) }
// addMapping highlights the cells inside area with the given color.
func (c *CustomTableModel) addMapping(color string, area *core.QRect) { c.m_mapping[color] = area }
|
package upstream_notify
import (
"context"
"errors"
"github.com/tal-tech/go-zero/core/logx"
"tpay_backend/model"
"tpay_backend/payapi/internal/logic"
"tpay_backend/payapi/internal/svc"
"tpay_backend/utils"
)
// SyncOrder applies upstream payment-gateway notifications to local
// pay/transfer orders and the owning merchant's balance.
type SyncOrder struct {
	logx.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}
// NewSyncOrder returns a SyncOrder whose logger is bound to ctx.
func NewSyncOrder(ctx context.Context, svcCtx *svc.ServiceContext) *SyncOrder {
	return &SyncOrder{
		Logger: logx.WithContext(ctx),
		ctx:    ctx,
		svcCtx: svcCtx,
	}
}
// SyncPayOrder applies an upstream pay-order result to the local order and
// the merchant's balance.
//
// orderStatus must be model.PayOrderStatusPaid or model.PayOrderStatusFail;
// failReason is only used for failed orders. For paid orders whose actual
// payment amount differs from the requested upstream amount, the merchant
// fee and credited amount are recomputed from the actual payment amount.
func (l *SyncOrder) SyncPayOrder(order *model.PayOrder, orderStatus int64, failReason string) error {
	// 1. Look up the merchant that owns the order.
	merchant, err := model.NewMerchantModel(l.svcCtx.DbEngine).FindOneByMerchantNo(order.MerchantNo)
	if err != nil {
		l.Errorf("查询订单商户信息失败, MerchantNo:%v, err:%v", order.MerchantNo, err)
		return errors.New("系统内部错误")
	}
	l.Infof("商户信息:%+v", merchant)
	var upErr error
	payLogic := logic.NewPayLogic(context.TODO(), l.svcCtx, merchant)
	switch orderStatus {
	case model.PayOrderStatusPaid:
		// Reconcile: if the amount actually paid differs from the amount
		// requested upstream, recompute fee and credited amount from the
		// actual payment amount.
		if order.UpstreamAmount != order.PaymentAmount {
			l.Errorf("代收订单[%v]上游请求金额与实际支付金额不一致, reqAmount:%v, payAmount:%v", order.OrderNo, order.UpstreamAmount, order.PaymentAmount)
			// Merchant fee based on the actual payment amount.
			order.MerchantFee = utils.CalculatePayOrderFeeMerchant(order.PaymentAmount, order.MerchantSingleFee, order.MerchantRate)
			// Amount credited to the account = actual payment - merchant fee.
			order.IncreaseAmount = order.PaymentAmount - order.MerchantFee
		}
		upErr = payLogic.UpdateOrderPaid(order)
	case model.PayOrderStatusFail:
		upErr = payLogic.UpdateOrderFail(order, failReason)
	default:
		l.Errorf("未知的订单状态, order_status:%v", orderStatus)
		return errors.New("订单状态不对")
	}
	if upErr != nil {
		// Bug fix: previously logged err (always nil here) instead of upErr.
		l.Errorf("修改订单状态和商户余额失败, orderNo:%v, MerchantNo:%v, err:%v", order.OrderNo, order.MerchantNo, upErr)
		return errors.New("系统内部错误")
	}
	return nil
}
// SyncTransferOrder applies an upstream transfer-order result to the local
// order and the merchant's balance.
//
// orderStatus must be model.TransferOrderStatusPaid or
// model.TransferOrderStatusFail; failReason is only used for failed orders.
func (l *SyncOrder) SyncTransferOrder(order *model.TransferOrder, orderStatus int64, failReason string) error {
	// 1. Look up the merchant that owns the order.
	merchant, err := model.NewMerchantModel(l.svcCtx.DbEngine).FindOneByMerchantNo(order.MerchantNo)
	if err != nil {
		l.Errorf("查询订单商户信息失败, MerchantNo:%v, err:%v", order.MerchantNo, err)
		return errors.New("系统内部错误")
	}
	l.Infof("商户信息:%+v", merchant)
	var upErr error
	transferOrder := logic.NewTransferPlaceOrder(context.TODO(), l.svcCtx, merchant)
	switch orderStatus {
	case model.TransferOrderStatusPaid:
		upErr = transferOrder.UpdateOrderPaid(order)
	case model.TransferOrderStatusFail:
		upErr = transferOrder.UpdateOrderFail(order, failReason)
	default:
		l.Errorf("未知的订单状态, order_status:%v", orderStatus)
		return errors.New("订单状态不对")
	}
	if upErr != nil {
		// Bug fix: previously logged err (always nil here) instead of upErr.
		l.Errorf("修改订单状态和商户余额失败, orderNo:%v, MerchantNo:%v, err:%v", order.OrderNo, order.MerchantNo, upErr)
		return errors.New("系统内部错误")
	}
	return nil
}
|
package atlas
import "testing"
// TestParams bundles the inputs of one Generate test case.
type TestParams struct {
	Files  []string
	Params *GenerateParams
}

// TestWant is the expected outcome of one Generate test case.
type TestWant struct {
	NumFiles, NumAtlases int
}
// TestGenerate runs Generate against the button fixtures twice: once with
// default params (everything fits one atlas) and once with a max size small
// enough to force one atlas per file.
func TestGenerate(t *testing.T) {
	// Go naming: locals use mixedCaps, not ALL_CAPS.
	outputDir := "./output"
	buttons := []string{
		"./fixtures/button.png",
		"./fixtures/button_active.png",
		"./fixtures/button_hover.png",
	}
	cases := []struct {
		in   TestParams
		want TestWant
	}{
		{TestParams{
			Files:  buttons,
			Params: nil,
		}, TestWant{
			NumFiles:   len(buttons),
			NumAtlases: 1,
		}},
		{TestParams{
			Files: buttons,
			Params: &GenerateParams{
				Name:      "test-maxsize",
				MaxWidth:  124,
				MaxHeight: 50,
			},
		}, TestWant{
			NumFiles:   3,
			NumAtlases: 3,
		}},
	}
	for _, c := range cases {
		got, err := Generate(c.in.Files, outputDir, c.in.Params)
		if err != nil {
			t.Errorf("Generate threw an error: %s", err.Error())
		}
		// Bug fix: the message previously printed the slice itself where the
		// "got" count belongs.
		if len(got.Files) != c.want.NumFiles {
			t.Errorf("Generate did not use all files: want %v files, got %v", c.want.NumFiles, len(got.Files))
		}
		if len(got.Atlases) != c.want.NumAtlases {
			t.Errorf("Failed to generate enough atlases: want %v, got %v", c.want.NumAtlases, len(got.Atlases))
		}
	}
}
|
/*
* Copyright 2018- The Pixie Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package datastudio
import (
// embed is used to load chart/query templates.
_ "embed"
"fmt"
"os"
"path"
"path/filepath"
"strings"
"text/template"
"github.com/Masterminds/sprig/v3"
)
// GenerateViews generates all of the datastudio charts/queries from templates.
// It writes .sql files under <outPath>/queries and .json chart files under
// <outPath>/charts for the suite, app-overhead-suite, and experiment views,
// plus one standalone all-suites/workloads/parameters query.
func GenerateViews(outPath string, project string, dataset string, reportID string, expPageID string) error {
	queryOutPath := path.Join(outPath, "queries")
	if err := os.MkdirAll(queryOutPath, 0775); err != nil {
		return err
	}
	chartOutPath := path.Join(outPath, "charts")
	if err := os.MkdirAll(chartOutPath, 0775); err != nil {
		return err
	}
	// The view slices hold pointers, so these assignments fill in the
	// environment-specific fields on the shared package-level values
	// before each template is rendered.
	for _, vals := range suiteViews {
		vals.Project = project
		vals.Dataset = dataset
		vals.DSReportID = reportID
		vals.DSExperimentPageID = expPageID
		path := filepath.Join(queryOutPath, fmt.Sprintf("suite_%s.sql", vals.MetricName))
		err := executeTemplate(vals, suiteTemplate, path)
		if err != nil {
			return err
		}
		// The chart template is chosen by the metric's unit (bytes/percent).
		path = filepath.Join(chartOutPath, fmt.Sprintf("suite_%s.json", vals.MetricName))
		templ := suiteChartTemplates[vals.Unit]
		if err := executeTemplate(vals, templ, path); err != nil {
			return err
		}
	}
	for _, vals := range appOverheadSuiteViews {
		vals.Project = project
		vals.Dataset = dataset
		vals.DSReportID = reportID
		vals.DSExperimentPageID = expPageID
		path := filepath.Join(queryOutPath, fmt.Sprintf("suite_app_overhead_%s.sql", vals.MetricName))
		err := executeTemplate(vals, suiteAppOverheadTemplate, path)
		if err != nil {
			return err
		}
		path = filepath.Join(chartOutPath, fmt.Sprintf("suite_app_overhead_%s.json", vals.MetricName))
		templ := suiteChartTemplates[vals.Unit]
		if err := executeTemplate(vals, templ, path); err != nil {
			return err
		}
	}
	for _, vals := range experimentViews {
		vals.Project = project
		vals.Dataset = dataset
		// Experiment views can plot several metrics; the file name joins them.
		name := strings.Join(vals.MetricNames, "_")
		path := filepath.Join(queryOutPath, fmt.Sprintf("experiment_%s.sql", name))
		if err := executeTemplate(vals, experimentTemplate, path); err != nil {
			return err
		}
		path = filepath.Join(chartOutPath, fmt.Sprintf("experiment_%s.json", name))
		templ := expChartTemplates[vals.Unit]
		if err := executeTemplate(vals, templ, path); err != nil {
			return err
		}
	}
	// Standalone query listing all suites/workloads/parameters.
	path := filepath.Join(queryOutPath, "all_suites_workloads_parameters.sql")
	vals := &struct {
		Project string
		Dataset string
	}{
		Project: project,
		Dataset: dataset,
	}
	if err := executeTemplate(vals, allSuitesWorkloadsParametersTemplate, path); err != nil {
		return err
	}
	return nil
}
// Embedded SQL query templates.
//go:embed templates/queries/all_suites_workloads_parameters.sql
var allSuitesWorkloadsParametersTemplate string

//go:embed templates/queries/suite_view.sql
var suiteTemplate string

//go:embed templates/queries/suite_view_app_overhead.sql
var suiteAppOverheadTemplate string

//go:embed templates/queries/experiment_view.sql
var experimentTemplate string

// Embedded chart JSON templates, one per metric unit.
//go:embed templates/charts/suite/bytes.json
var suiteChartByteTemplate string

//go:embed templates/charts/suite/percent.json
var suiteChartPercentTemplate string

//go:embed templates/charts/experiment/percent.json
var expChartPercentTemplate string

//go:embed templates/charts/experiment/bytes.json
var expChartByteTemplate string

// cpuUsagePreprocessing is shared SQL spliced into views that chart CPU usage.
//go:embed templates/queries/cpu_usage_preprocessing.sql
var cpuUsagePreprocessing string

// suiteChartTemplates selects the suite chart template by metric unit.
var suiteChartTemplates = map[metricUnit]string{
	bytes:   suiteChartByteTemplate,
	percent: suiteChartPercentTemplate,
}

// expChartTemplates selects the experiment chart template by metric unit.
var expChartTemplates = map[metricUnit]string{
	bytes:   expChartByteTemplate,
	percent: expChartPercentTemplate,
}

// metricUnit labels what a metric measures; it picks the chart template.
type metricUnit string

const (
	bytes   metricUnit = "bytes"
	percent metricUnit = "percent"
)
// suiteViewTemplateVals is the data fed to the suite query/chart templates.
// Project/Dataset/DSReportID/DSExperimentPageID are filled in by
// GenerateViews before rendering.
type suiteViewTemplateVals struct {
	ChartTitle          string   // human-readable chart title
	MetricName          string   // metric identifier, used in output file names
	MetricsUsed         []string // raw metric columns the query reads
	MetricSelectExpr    string   // SQL expression computing the metric
	TimeAgg             string   // SQL aggregation across time (may be empty)
	Unit                metricUnit
	CustomPreprocessing string // optional SQL spliced in first (e.g. CPU usage)
	DSReportID          string
	DSExperimentPageID  string
	Project             string
	Dataset             string
}
// suiteViews lists the per-suite charts/queries to generate: one entry per
// metric, rendered with suiteTemplate and the unit's chart template.
var suiteViews = []*suiteViewTemplateVals{
	{
		ChartTitle:          "CPU Usage",
		MetricName:          "cpu_usage",
		MetricsUsed:         []string{"cpu_usage", "cpu_seconds_counter"},
		MetricSelectExpr:    "r.cpu_usage",
		TimeAgg:             "",
		Unit:                percent,
		CustomPreprocessing: cpuUsagePreprocessing,
	},
	{
		ChartTitle:       "Max Heap Usage (ignoring table store)",
		MetricName:       "max_memory_ex_table",
		MetricsUsed:      []string{"heap_size_bytes", "table_size"},
		MetricSelectExpr: "r.heap_size_bytes - r.table_size",
		TimeAgg:          "max(max_memory_ex_table)",
		Unit:             bytes,
	},
	{
		ChartTitle:       "Max Heap Usage",
		MetricName:       "max_heap_size",
		MetricsUsed:      []string{"heap_size_bytes"},
		MetricSelectExpr: "r.heap_size_bytes",
		TimeAgg:          "max(max_heap_size)",
		Unit:             bytes,
	},
	{
		ChartTitle:       "Max RSS Memory Usage",
		MetricName:       "max_rss",
		MetricsUsed:      []string{"rss"},
		MetricSelectExpr: "r.rss",
		TimeAgg:          "max(max_rss)",
		Unit:             bytes,
	},
	{
		ChartTitle:       "HTTP Data Loss",
		MetricName:       "http_data_loss",
		MetricsUsed:      []string{"http_data_loss"},
		MetricSelectExpr: "r.http_data_loss",
		// Takes the most recent http_data_loss sample rather than a max/avg.
		TimeAgg: "array_agg(http_data_loss ORDER BY ts DESC LIMIT 1)[OFFSET(0)]",
		Unit:    percent,
	},
}
// appOverheadSuiteViews lists the application-overhead charts (metrics
// expressed as % increase over baseline), rendered with
// suiteAppOverheadTemplate.
var appOverheadSuiteViews = []*suiteViewTemplateVals{
	{
		ChartTitle:          "Application CPU Overhead (% increase over baseline)",
		MetricName:          "cpu_usage",
		MetricsUsed:         []string{"cpu_usage", "cpu_seconds_counter"},
		MetricSelectExpr:    "r.cpu_usage",
		TimeAgg:             "avg(cpu_usage)",
		Unit:                percent,
		CustomPreprocessing: cpuUsagePreprocessing,
	},
	{
		ChartTitle:       "Application RSS Overhead (% increase over baseline)",
		MetricName:       "max_rss",
		MetricsUsed:      []string{"rss"},
		MetricSelectExpr: "r.rss",
		TimeAgg:          "max(max_rss)",
		Unit:             percent,
	},
}
// experimentViewTemplateVals is the data fed to the per-experiment
// query/chart templates; unlike suite views, one chart can plot several
// metrics (parallel MetricNames/MetricExprs slices).
type experimentViewTemplateVals struct {
	ChartTitle          string
	MetricNames         []string // one name per plotted series
	MetricExprs         []string // SQL expression per series
	MetricsUsed         []string // raw metric columns the query reads
	Unit                metricUnit
	CustomPreprocessing string
	Project             string
	Dataset             string
}
// experimentViews lists the per-experiment charts/queries to generate,
// rendered with experimentTemplate and the unit's chart template.
var experimentViews = []*experimentViewTemplateVals{
	{
		ChartTitle:          "CPU Usage",
		MetricNames:         []string{"cpu_usage"},
		MetricExprs:         []string{"r.cpu_usage"},
		MetricsUsed:         []string{"cpu_seconds_counter", "cpu_usage"},
		Unit:                percent,
		CustomPreprocessing: cpuUsagePreprocessing,
	},
	// TODO(james): combine the memory stats into one chart.
	{
		ChartTitle:  "Heap (ignoring table store)",
		MetricNames: []string{"heap_ex_table_store"},
		MetricsUsed: []string{"heap_size_bytes", "table_size"},
		MetricExprs: []string{"r.heap_size_bytes - r.table_size"},
		Unit:        bytes,
	},
	{
		ChartTitle:  "RSS",
		MetricNames: []string{"rss"},
		MetricsUsed: []string{"rss"},
		MetricExprs: []string{"r.rss"},
		Unit:        bytes,
	},
	{
		ChartTitle:  "HTTP Data Loss",
		MetricNames: []string{"http_data_loss"},
		MetricsUsed: []string{"http_data_loss"},
		MetricExprs: []string{"r.http_data_loss"},
		Unit:        percent,
	},
}
// executeTemplate renders templ (with sprig's text/template helpers) using
// vals and writes the result to outputPath, creating or truncating the file.
func executeTemplate(vals interface{}, templ string, outputPath string) error {
	parsed, err := template.New("").Funcs(sprig.TxtFuncMap()).Parse(templ)
	if err != nil {
		return err
	}
	out, err := os.Create(outputPath)
	if err != nil {
		return err
	}
	defer out.Close()
	return parsed.Execute(out, vals)
}
|
package core
import (
"context"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
kubepod "k8s.io/kubernetes/pkg/api/v1/pod"
"sigs.k8s.io/controller-runtime/pkg/client"
"time"
)
// Label/annotation keys used to mark pods undergoing graceful drain.
const (
	GracefulDrainPrefix         = "pod-graceful-drain"
	WaitLabelKey                = GracefulDrainPrefix + "/wait"
	DeleteAtAnnotationKey       = GracefulDrainPrefix + "/deleteAt"
	OriginalLabelsAnnotationKey = GracefulDrainPrefix + "/originalLabels"
)
// IsPodReady reports whether the pod is ready in the kubelet's view AND
// every one of its readiness gates reports ConditionTrue.
func IsPodReady(pod *corev1.Pod) bool {
	if !kubepod.IsPodReady(pod) {
		return false
	}
	for _, gate := range pod.Spec.ReadinessGates {
		_, cond := kubepod.GetPodCondition(&pod.Status, gate.ConditionType)
		if cond == nil || cond.Status != corev1.ConditionTrue {
			return false
		}
	}
	return true
}
// PodDeletionDelayInfo is the parsed form of the graceful-drain markers on a
// pod: whether it is isolated, whether deletion should wait, and when (UTC)
// it should finally be deleted.
type PodDeletionDelayInfo struct {
	Isolated    bool
	Wait        bool
	DeleteAtUTC time.Time
}
// GetPodDeletionDelayInfo reads the graceful-drain wait label and deleteAt
// annotation from pod. A pod is Isolated when either marker is present;
// Wait is true when the wait label is non-empty, in which case the deleteAt
// annotation must exist and parse as RFC3339.
func GetPodDeletionDelayInfo(pod *corev1.Pod) (PodDeletionDelayInfo, error) {
	result := PodDeletionDelayInfo{}
	waitLabelValue, hasWaitLabel := pod.Labels[WaitLabelKey]
	// deleteAt is an annotation, not a label; name the flag accordingly.
	deleteAtAnnotationValue, hasDeleteAtAnnotation := pod.Annotations[DeleteAtAnnotationKey]
	result.Isolated = hasWaitLabel || hasDeleteAtAnnotation
	result.Wait = len(waitLabelValue) > 0
	if hasWaitLabel && !hasDeleteAtAnnotation {
		// Bug fix: message previously read "does not exits".
		return result, errors.New("deleteAt annotation does not exist")
	}
	if !result.Wait {
		return result, nil
	}
	deleteAt, err := time.Parse(time.RFC3339, deleteAtAnnotationValue)
	if err != nil {
		// Wrap (not Wrapf): no format arguments are needed.
		return result, errors.Wrap(err, "deleteAt annotation is not RFC3339 format")
	}
	result.DeleteAtUTC = deleteAt
	return result, nil
}
// GetRemainingTime returns how long deletion should still be delayed at the
// given instant; it is zero for non-isolated/non-waiting pods and once the
// deadline has passed. now is converted to UTC because DeleteAtUTC is UTC.
func (i *PodDeletionDelayInfo) GetRemainingTime(now time.Time) time.Duration {
	current := now.UTC()
	if !i.Isolated || !i.Wait || current.After(i.DeleteAtUTC) {
		return 0
	}
	return i.DeleteAtUTC.Sub(current)
}
// IsPodInDrainingNode reports whether the pod's node is being drained:
// either marked unschedulable in its spec or carrying the
// node.kubernetes.io/unschedulable taint.
//
// Fix: the parameter was named "client", shadowing the imported
// controller-runtime "client" package inside the function body.
func IsPodInDrainingNode(ctx context.Context, k8sClient client.Client, pod *corev1.Pod) (bool, error) {
	nodeName := pod.Spec.NodeName
	var node corev1.Node
	if err := k8sClient.Get(ctx, types.NamespacedName{Name: nodeName}, &node); err != nil {
		return false, errors.Wrapf(err, "cannot get node %v", nodeName)
	}
	if node.Spec.Unschedulable {
		return true, nil
	}
	for _, taint := range node.Spec.Taints {
		if taint.Key == corev1.TaintNodeUnschedulable {
			return true, nil
		}
	}
	return false, nil
}
|
package main
// getKeySize estimates the Vigenère key length with a Kasiski-style test:
// it records the distance between every pair of identical l-grams in text,
// then tallies the GCD of every pair of distances and returns the most
// frequent one.
// NOTE(review): relies on sibling helpers Gcd and getIndexOfMaxElem defined
// elsewhere in this package.
func getKeySize(text string, lGramLength int) int {
	// Distances between each pair of matching l-grams.
	repeat := make([]int, 0, len(text))
	size := len(text) - lGramLength + 1
	for i := 0; i < size; i++ {
		first := text[i : i+lGramLength]
		for j := i + 1; j < size; j++ {
			second := text[j : j+lGramLength]
			if first == second {
				repeat = append(repeat, j-i)
			}
		}
	}
	// Vote for candidate key sizes: index = GCD of a pair of distances.
	nods := make([]int, len(text))
	for i := 0; i < len(repeat); i++ {
		for j := i + 1; j < len(repeat); j++ {
			nods[Gcd(repeat[i], repeat[j])]++
		}
	}
	return getIndexOfMaxElem(nods)
}
// decryptKey splits the ciphertext into keySize Caesar columns (grouping
// runes by byte index mod keySize, as the original did) and recovers each
// key letter independently via frequency analysis.
func decryptKey(text string, keySize int) string {
	columns := make([][]rune, keySize)
	for pos, ch := range text {
		col := pos % keySize
		columns[col] = append(columns[col], ch)
	}
	key := make([]rune, 0, keySize)
	for _, column := range columns {
		key = append(key, getKeyLetter(column))
	}
	return string(key)
}
// getKeyLetter recovers the key letter for one Caesar column by trying each
// candidate and keeping the one whose shifted frequency profile is closest
// (smallest norm) to the English reference frequencies FreqEn.
// NOTE(review): relies on sibling helpers getFrequency and getNorm and the
// rune constant a; freq appears to be keyed by candidate rune — confirm.
func getKeyLetter(text []rune) rune {
	freq := getFrequency(text)
	var decrKeyLetter rune
	// Best (smallest) distance seen so far; 1. is an upper bound start.
	bestDiff := 1.
	for i := range freq {
		currDif := getNorm(freq, FreqEn, int(i-a))
		if currDif < bestDiff {
			bestDiff = currDif
			decrKeyLetter = i
		}
	}
	return decrKeyLetter
}
|
package driveapicollector
import (
"fmt"
"github.com/scjalliance/drivestream/resource"
drive "google.golang.org/api/drive/v3"
)
// MarshalPermission marshals the given permission as a resource.
// It copies the scalar fields across unchanged and parses the RFC3339
// expiration time via the sibling parseRFC3339 helper; a malformed
// expiration time is the only error case.
func MarshalPermission(perm *drive.Permission) (resource.Permission, error) {
	expiration, err := parseRFC3339(perm.ExpirationTime)
	if err != nil {
		return resource.Permission{}, fmt.Errorf("invalid expiration time: %v", err)
	}
	return resource.Permission{
		ID:           perm.Id,
		Type:         perm.Type,
		EmailAddress: perm.EmailAddress,
		Domain:       perm.Domain,
		Role:         perm.Role,
		DisplayName:  perm.DisplayName,
		Expiration:   expiration,
	}, nil
}
|
package graph
import (
"os"
"testing"
"github.com/goava/di/internal/graph/testgraph"
)
// TestGraph_CheckCycles verifies that CheckCycles agrees with each test
// graph's expected IsDAG flag.
func TestGraph_CheckCycles(t *testing.T) {
	for _, graph := range testgraph.GraphSlice {
		// Run each case in a function literal so the deferred Close fires per
		// iteration; previously all defers piled up until the test returned.
		func() {
			f, err := os.Open("testdata/graph.json")
			if err != nil {
				t.Error(err)
				return // don't continue with a nil file
			}
			defer f.Close()
			g, err := NewGraphFromJSON(f, graph.Name)
			if err != nil {
				t.Error(err)
				return
			}
			isDAG := CheckCycles(g) == nil
			if isDAG != graph.IsDAG {
				t.Errorf("%s | IsDag are supposed to be %v", graph.Name, graph.IsDAG)
			}
		}()
	}
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"strings"
"github.com/sendgrid/rest"
)
// main exercises the SendGrid v3 /api_keys endpoint end to end: list keys
// (GET), create one (POST), then fetch, rename (PATCH), replace (PUT), and
// delete the created key.
func main() {
	// Build the URL
	const host = "api.sendgrid.com"
	endpoint := "/v3/api_keys"
	key := os.Getenv("SENDGRID_API_KEY")

	// GET: list the first 100 keys.
	params := url.Values{
		"limit":  {"100"},
		"offset": {"0"},
	}
	baseURL := &url.URL{
		Scheme:   "https",
		Host:     host,
		Path:     endpoint,
		RawQuery: params.Encode(),
	}
	req, err := http.NewRequest(http.MethodGet, baseURL.String(), nil)
	if err != nil {
		panic(err)
	}
	doRequest(req, key)

	// POST: create a new key.
	body := `{
"name": "My API Key",
"scopes": [
"mail.send",
"alerts.create",
"alerts.read"
]
}`
	req, err = http.NewRequest(http.MethodPost, baseURL.String(), strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	created := doRequest(req, key)

	// Get a particular return value from the POST response.
	// Bug fix: the original re-read an already-drained response body here, so
	// api_key_id always came back empty; we now decode the bytes doRequest
	// returned. Note that you can unmarshall into a struct if you know the
	// JSON structure in advance.
	var payload struct {
		APIKeyID string `json:"api_key_id"`
	}
	if err = json.Unmarshal(created, &payload); err != nil {
		fmt.Println(err)
	}
	apiKey := payload.APIKeyID

	// GET: fetch the key we just created.
	baseURL = &url.URL{
		Scheme: "https",
		Host:   host,
		Path:   path.Join(endpoint, apiKey),
	}
	req, err = http.NewRequest(http.MethodGet, baseURL.String(), nil)
	if err != nil {
		panic(err)
	}
	doRequest(req, key)

	// PATCH: rename the key.
	body = `{
"name": "A New Hope"
}`
	req, err = http.NewRequest(http.MethodPatch, baseURL.String(), strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	doRequest(req, key)

	// PUT: replace the key's name and scopes.
	body = `{
"name": "A New Hope",
"scopes": [
"user.profile.read",
"user.profile.update"
]
}`
	req, err = http.NewRequest(http.MethodPut, baseURL.String(), strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	doRequest(req, key)

	// DELETE: remove the key. A successful DELETE has no body to print.
	req, err = http.NewRequest(http.MethodDelete, baseURL.String(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+key)
	response, err := rest.API(req)
	if err != nil {
		fmt.Println(err)
	} else {
		fmt.Println(response.Status)
		fmt.Println(response.Header)
	}
}

// doRequest sets the bearer token on req, sends it, prints the response
// status, body, and headers, and returns the raw body bytes (nil when the
// request itself failed). The response body is always closed exactly once —
// the original inlined this five times and double-closed one body.
func doRequest(req *http.Request, key string) []byte {
	req.Header.Set("Authorization", "Bearer "+key)
	response, err := rest.API(req)
	if err != nil {
		fmt.Println(err)
		return nil
	}
	defer func() {
		if cerr := response.Body.Close(); cerr != nil {
			fmt.Println("encountered an error closing the response body:", cerr)
		}
	}()
	b, err := ioutil.ReadAll(response.Body)
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println(response.Status)
	fmt.Println(string(b))
	fmt.Println(response.Header)
	return b
}
|
package handler
import (
"log"
"net/http"
"strconv"
"github.com/gin-gonic/gin"
"github.com/quickfixgo/enum"
"github.com/rudeigerc/broker-gateway/service"
"github.com/rudeigerc/broker-gateway/tool"
)
// TradeHandler serves the trade-listing endpoint: it authenticates the
// caller, pages through the firm's trades filtered by futures_id and
// trader_name, and responds with TradeResponse JSON plus pagination info.
func TradeHandler(c *gin.Context) {
	futuresID := c.Query("futures_id")
	traderName := c.Query("trader_name")
	// page defaults to 0; any non-integer value is a client error.
	page, err := strconv.Atoi(c.DefaultQuery("page", "0"))
	if err != nil {
		ErrorHandler(c, http.StatusBadRequest, "Invalid page number.")
		return
	}
	// validate (sibling helper) resolves the caller's firm from the token.
	firm, err := validate(c)
	if err != nil {
		log.Print(err)
		ErrorHandler(c, http.StatusUnauthorized, "Invalid token.")
		return
	}
	total, trades := service.Trade{}.TradesWithCondition(firm.FirmID, futuresID, traderName, page)
	data := make([]TradeResponse, len(trades))
	for index, trade := range trades {
		response := TradeResponse{
			TradeID:   trade.TradeID,
			Quantity:  trade.Quantity,
			Price:     trade.Price,
			FuturesID: trade.FuturesID,
			// Both sides of the trade carry firm name, side, and trader name.
			Initiator: Trader{
				Firm:   service.Auth{}.FirmNameByID(trade.InitiatorID),
				Side:   tool.Convert(enum.Side(trade.InitiatorSide)),
				Trader: trade.InitiatorName,
			},
			Completion: Trader{
				Firm:   service.Auth{}.FirmNameByID(trade.CompletionID),
				Side:   tool.Convert(enum.Side(trade.CompletionSide)),
				Trader: trade.CompletionName,
			},
			CreatedAt: trade.CreatedAt.String(),
		}
		data[index] = response
	}
	c.JSON(http.StatusOK, gin.H{
		"data":  data,
		"count": len(data),
		"total": total,
		"page":  page,
	})
}
// OrderHandler serves the order-listing endpoint: it authenticates the
// caller, pages through the firm's orders filtered by futures_id,
// trader_name, and status, and responds with OrderResponse JSON plus
// pagination info.
func OrderHandler(c *gin.Context) {
	futuresID := c.Query("futures_id")
	traderName := c.Query("trader_name")
	status := c.Query("status")
	// page defaults to 0; any non-integer value is a client error.
	page, err := strconv.Atoi(c.DefaultQuery("page", "0"))
	if err != nil {
		ErrorHandler(c, http.StatusBadRequest, "Invalid page number.")
		return
	}
	firm, err := validate(c)
	if err != nil {
		log.Print(err)
		ErrorHandler(c, http.StatusUnauthorized, "Invalid token.")
		return
	}
	total, orders := service.Order{}.OrdersWithCondition(firm.FirmID, futuresID, traderName, status, page)
	data := make([]OrderResponse, len(orders))
	for index, order := range orders {
		// enum codes are converted to display strings via tool.Convert.
		response := OrderResponse{
			OrderID:      order.OrderID,
			OrderType:    tool.Convert(enum.OrdType(order.OrderType)),
			Side:         tool.Convert(enum.Side(order.Side)),
			FuturesID:    order.FuturesID,
			Firm:         service.Auth{}.FirmNameByID(order.FirmID),
			TraderName:   order.TraderName,
			Quantity:     order.Quantity.String(),
			OpenQuantity: order.OpenQuantity.String(),
			Price:        order.Price.String(),
			StopPrice:    order.StopPrice.String(),
			Status:       tool.Convert(enum.OrdStatus(order.Status)),
			CreatedAt:    order.CreatedAt.String(),
			UpdatedAt:    order.UpdatedAt.String(),
		}
		data[index] = response
	}
	c.JSON(http.StatusOK, gin.H{
		"data":  data,
		"count": len(data),
		"total": total,
		"page":  page,
	})
}
|
package main
import (
sf "github.com/zyedidia/sfml/v2.3/sfml"
)
var colision bool = false
// Intersects reports whether the global bounds of the two sprites overlap.
// The intersection rectangle returned by SFML is deliberately discarded.
func Intersects(s1, s2 *sf.Sprite) bool {
	isColliding, _ := s1.GetGlobalBounds().Intersects(s2.GetGlobalBounds())
	return isColliding
}
// SpawnExplosion creates an explosion at pos and appends it to the
// package-level explosions slice.
// NOTE(review): NewExplosion and explosions are defined elsewhere in this
// package.
func SpawnExplosion(pos sf.Vector2f) {
	explosion := NewExplosion(pos)
	explosions = append(explosions, explosion)
}
for window.IsOpen() {
for event := window.PollEvent(); event != nil; event = window.PollEvent() {
switch ev := event.(type) {
case sf.EventKeyReleased:
if ev.Code == sf.KeyEscape {
window.Close()
}
case sf.EventClosed:
window.Close()
}
}
|
package models
//1. 目標達成できない時は、とても悔しい 10
//2. 環境のせいで、達成できないことが多い 01
//3. 難題が出てきた時、とっさに「できない」と思う 01
//4. 達成したら、すぐに次の目標を作りたい 10
//5. 目標達成するために、とにかく誰よりも行動する 10
//6. 「できない」ことは断るべきだ 01
//7. 諦めたくなったら、諦めればいい 01
//8. 環境のせいで、達成できないことが多い 01
//9. 報告しづらいことは隠せば問題ない 01
//10.自分なりの結果を出せば良い 01
//59~68
// Commit stores one respondent's answers to the ten goal-commitment survey
// questions listed above, plus an aggregate Rate score.
// NOTE(review): the trailing //y or //n appears to mark which answer scores
// (matching the 10/01 codes in the question list) — confirm with the scorer.
type Commit struct {
	One   string //y
	Two   string //n
	Three string //n
	Four  string //y
	Five  string //y
	Six   string //n
	Seven string //n
	Eight string //n
	Nine  string //n
	Ten   string //n
	Rate  int
}
|
package plugins
// Error is a numeric plugin error code. Codes are grouped by range:
// 0-99 transmission, 100-199 server, 200-299 client.
type Error int

// Error returns the registered message for e from the package-level errors
// map, falling back to a generic message based on the code's range.
func (e Error) Error() string {
	if msg, ok := errors[e]; ok {
		return msg
	}
	switch {
	case e < 100:
		return "Unknown transmission error"
	case e < 200:
		return "Unknown server error"
	case e < 300:
		return "Unknown client error"
	default:
		return "Unknown error"
	}
}
// errors maps known Error codes to human-readable messages; codes not
// listed here fall back to the range-based messages in Error.Error.
var errors = map[Error]string{
	Success:           "No error. Everything is fine.",
	NoSupportedFormat: "No supported formats in common",
	// Bug fix: message previously read "This is a unblocking recieve".
	Unblocking:      "This is an unblocking receive",
	NotImplemented:  "Operation is not implemented",
	NotDirectory:    "Path supplied is not a directory",
	DuplicatePlugin: "Plugin is a duplicate. Already handled.",
}
// Transmission errors (codes 0-99).
const (
	Success Error = iota
	NoSupportedFormat
)

// Server errors (codes 100-199).
const (
	Unblocking Error = 100 + iota
	NotImplemented
	NotDirectory
	DuplicatePlugin
)
|
package models
import (
"encoding/json"
"io/ioutil"
"testing"
)
// BenchmarkCreateRedditor measures unmarshalling the redditor.json fixture
// into a Redditor.
func BenchmarkCreateRedditor(b *testing.B) {
	// Load the fixture once, outside the timed loop; previously the read
	// error was silently ignored and an empty payload would be benchmarked.
	data, err := ioutil.ReadFile("./tests/redditor.json")
	if err != nil {
		b.Fatalf("reading fixture: %v", err)
	}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		sub := Redditor{}
		// Unmarshal straight from the bytes: the original round-tripped
		// through a string, adding a []byte conversion to every iteration.
		if err := json.Unmarshal(data, &sub); err != nil {
			b.Fatalf("unmarshal: %v", err)
		}
	}
}
|
package main
import "fmt"
// main checks whether two counters meet while one climbs and the other
// falls, bailing out with "true" if they step past each other. Both start
// at 5, so the loop body never runs here: it prints 0 then "false".
func main() {
	low, high := 5, 5
	steps := 0
	for low != high {
		steps++
		// The counters' sum is invariant, so exceeding it means they
		// crossed without ever being equal.
		if steps > high+low {
			fmt.Println("true")
			return
		}
		low++
		high--
	}
	fmt.Println(steps)
	fmt.Println("false")
}
|
package main
import (
"errors"
"flag"
"log"
"net"
"os"
"runtime"
"strings"
"sync"
"syscall"
"xip/xip"
)
// main starts the sslip.io DNS server: it parses flags, builds the Xip
// resolver, and binds UDP listeners. It first tries INADDR_ANY; if that
// port is already taken it falls back to binding each local IP separately.
func main() {
	var wg sync.WaitGroup
	var blocklistURL = flag.String("blocklistURL", "https://raw.githubusercontent.com/cunnie/sslip.io/main/etc/blocklist.txt", `URL containing a list of "forbidden" names/CIDRs`)
	var nameservers = flag.String("nameservers", "ns-aws.sslip.io.,ns-azure.sslip.io.,ns-gce.sslip.io.", "comma-separated list of nameservers")
	var addresses = flag.String("addresses",
		"sslip.io=78.46.204.247,"+
			"sslip.io=2a01:4f8:c17:b8f::2,"+
			"ns.sslip.io=52.0.56.137,"+
			"ns.sslip.io=52.187.42.158,"+
			"ns.sslip.io=104.155.144.4,"+
			"ns.sslip.io=2600:1f18:aaf:6900::a,"+
			"ns-aws.sslip.io=52.0.56.137,"+
			"ns-aws.sslip.io=2600:1f18:aaf:6900::a,"+
			"ns-azure.sslip.io=52.187.42.158,"+
			"ns-gce.sslip.io=104.155.144.4", "comma-separated list of hosts and corresponding IPv4 and/or IPv6 address(es). If unsure, add to the list rather than replace")
	var bindPort = flag.Int("port", 53, "port the DNS server should bind to")
	var quiet = flag.Bool("quiet", false, "suppresses logging of each DNS response")
	flag.Parse()
	log.Printf("%s version %s starting", os.Args[0], xip.VersionSemantic)
	log.Printf("blocklist URL: %s, name servers: %s, bind port: %d, quiet: %t",
		*blocklistURL, *nameservers, *bindPort, *quiet)
	x, logmessages := xip.NewXip(*blocklistURL, strings.Split(*nameservers, ","), strings.Split(*addresses, ","))
	for _, logmessage := range logmessages {
		log.Println(logmessage)
	}
	conn, err := net.ListenUDP("udp", &net.UDPAddr{Port: *bindPort})
	// common err hierarchy: net.OpError → os.SyscallError → syscall.Errno
	switch {
	case err == nil:
		// Bound to all interfaces: a single reader goroutine suffices.
		log.Printf("Successfully bound to all IPs, port %d.\n", *bindPort)
		wg.Add(1)
		go readFrom(conn, &wg, x, *quiet)
	case isErrorPermissionsError(err):
		// Port 53 usually needs elevated privileges.
		log.Printf("Try invoking me with `sudo` because I don't have permission to bind to port %d.\n", *bindPort)
		log.Fatal(err.Error())
	case isErrorAddressAlreadyInUse(err):
		// Fallback: bind each local address individually, skipping the ones
		// already in use; one reader goroutine per successful bind.
		log.Printf("I couldn't bind to \"0.0.0.0:%d\" (INADDR_ANY, all interfaces), so I'll try to bind to each address individually.\n", *bindPort)
		ipCIDRs := listLocalIPCIDRs()
		var boundIPsPorts, unboundIPs []string
		for _, ipCIDR := range ipCIDRs {
			ip, _, err := net.ParseCIDR(ipCIDR)
			if err != nil {
				log.Printf(`I couldn't parse the local interface "%s".`, ipCIDR)
				continue
			}
			conn, err = net.ListenUDP("udp", &net.UDPAddr{
				IP:   ip,
				Port: *bindPort,
				Zone: "",
			})
			if err != nil {
				unboundIPs = append(unboundIPs, ip.String())
			} else {
				wg.Add(1)
				boundIPsPorts = append(boundIPsPorts, conn.LocalAddr().String())
				go readFrom(conn, &wg, x, *quiet)
			}
		}
		if len(boundIPsPorts) == 0 {
			log.Fatalf("I couldn't bind to any IPs on port %d, so I'm exiting", *bindPort)
		}
		log.Printf(`I bound to the following IPs: "%s"`, strings.Join(boundIPsPorts, `", "`))
		if len(unboundIPs) > 0 {
			log.Printf(`I couldn't bind to the following IPs: "%s"`, strings.Join(unboundIPs, `", "`))
		}
	default:
		log.Fatal(err.Error())
	}
	log.Printf("Ready to answer queries")
	// Block forever: the reader goroutines never call Done in normal operation.
	wg.Wait()
}
// readFrom loops reading DNS queries from conn and answers each one in its
// own goroutine. It marks wg done when the loop exits (it normally never
// does; read errors are logged and the loop continues).
func readFrom(conn *net.UDPConn, wg *sync.WaitGroup, x *xip.Xip, quiet bool) {
	defer wg.Done()
	for {
		// 512 bytes: the classic maximum DNS-over-UDP message size.
		query := make([]byte, 512)
		_, addr, err := conn.ReadFromUDP(query)
		if err != nil {
			log.Println(err.Error())
			continue
		}
		go func() {
			response, logMessage, err := x.QueryResponse(query, addr.IP)
			if err != nil {
				log.Println(err.Error())
				return
			}
			// Bug fix: the WriteToUDP error was assigned but never checked.
			if _, err = conn.WriteToUDP(response, addr); err != nil {
				log.Println(err.Error())
				return
			}
			if !quiet {
				log.Printf("%v.%d %s", addr.IP, addr.Port, logMessage)
			}
		}()
	}
}
// listLocalIPCIDRs returns the CIDR string of every address on every local
// network interface. It panics if the interfaces or their addresses cannot
// be listed (startup-time failure, matching the original behavior).
func listLocalIPCIDRs() []string {
	ifaces, err := net.Interfaces()
	if err != nil {
		panic(err)
	}
	var cidrStrings []string
	for _, iface := range ifaces {
		addrs, err := iface.Addrs()
		if err != nil {
			panic(err)
		}
		for _, addr := range addrs {
			cidrStrings = append(cidrStrings, addr.String())
		}
	}
	return cidrStrings
}
// Thanks https://stackoverflow.com/a/52152912/2510873
func isErrorAddressAlreadyInUse(err error) bool {
var eOsSyscall *os.SyscallError
if !errors.As(err, &eOsSyscall) {
return false
}
var errErrno syscall.Errno // doesn't need a "*" (ptr) because it's already a ptr (uintptr)
if !errors.As(eOsSyscall, &errErrno) {
return false
}
if errErrno == syscall.EADDRINUSE {
return true
}
const WSAEADDRINUSE = 10048
if runtime.GOOS == "windows" && errErrno == WSAEADDRINUSE {
return true
}
return false
}
func isErrorPermissionsError(err error) bool {
var eOsSyscall *os.SyscallError
if errors.As(err, &eOsSyscall) {
if os.IsPermission(eOsSyscall) {
return true
}
}
return false
}
|
/*
Every email consists of a local name and a domain name, separated by the @ sign.
For example, in alice@leetcode.com, alice is the local name, and leetcode.com is the domain name.
Besides lowercase letters, these emails may contain '.'s or '+'s.
If you add periods ('.') between some characters in the local name part of an email address, mail sent there will be forwarded to the same address without dots in the local name. For example, "alice.z@leetcode.com" and "alicez@leetcode.com" forward to the same email address. (Note that this rule does not apply for domain names.)
If you add a plus ('+') in the local name, everything after the first plus sign will be ignored. This allows certain emails to be filtered, for example m.y+name@email.com will be forwarded to my@email.com. (Again, this rule does not apply for domain names.)
It is possible to use both of these rules at the same time.
Given a list of emails, we send one email to each address in the list. How many different addresses actually receive mails?
Note:
1 <= emails[i].length <= 100
1 <= emails.length <= 100
Each emails[i] contains exactly one '@' character.
All local and domain names are non-empty.
Local names do not start with a '+' character.
*/
package main
import (
"fmt"
"strings"
"unicode"
)
// main runs uniq over the two sample inputs from the problem statement
// and prints how many distinct addresses actually receive mail.
func main() {
	samples := [][]string{
		{
			"alice@leetcode.com",
			"alice.z@leetcode.com",
			"alicez@leetcode.com",
			"m.y+name@email.com",
			"my@email.com",
		},
		{
			"test.email+alex@leetcode.com",
			"test.e.mail+bob.cathy@leetcode.com",
			"testemail+david@lee.tcode.com",
		},
	}
	for _, sample := range samples {
		fmt.Println(uniq(sample))
	}
}
// uniq returns the number of distinct canonical email addresses in s.
// Entries that cannot be canonicalized (canon returns "") are skipped.
func uniq(s []string) int {
	seen := make(map[string]bool)
	for _, addr := range s {
		if c := canon(addr); c != "" {
			seen[c] = true
		}
	}
	// Each valid address was recorded at most once, so the map size is
	// exactly the count of distinct receivers.
	return len(seen)
}
// canon reduces an email address to its canonical delivery form: in the
// local part (before '@') dots are dropped and everything from the
// first '+' onward is ignored; the local part is lowercased rune by
// rune and the remainder (including '@' and domain) is lowercased as a
// whole. canon returns "" for inputs that do not contain exactly one
// '@' or whose canonical local part is empty.
func canon(s string) string {
	at := strings.IndexRune(s, '@')
	if at < 0 {
		return ""
	}
	var local strings.Builder
	ignoring := false
	for _, r := range s[:at] {
		switch {
		case r == '+':
			ignoring = true
		case r == '.':
			// Dots in the local part never count.
		case !ignoring:
			local.WriteRune(unicode.ToLower(r))
		}
	}
	domain := strings.ToLower(s[at:])
	if local.Len() == 0 || domain == "" {
		return ""
	}
	addr := local.String() + domain
	if strings.Count(addr, "@") != 1 {
		return ""
	}
	return addr
}
|
package ircserver
import (
"sort"
"strconv"
"strings"
"time"
"gopkg.in/sorcix/irc.v2"
)
// init registers the WHOIS handler in the global command table.
// WHOIS requires at least one parameter (the target nickname).
func init() {
	Commands["WHOIS"] = &ircCommand{
		Func:      (*IRCServer).cmdWhois,
		MinParams: 1,
	}
}
// cmdWhois answers a WHOIS query from session s about the nickname in
// msg.Params[0], emitting the standard numeric replies (311 user info,
// 319 channels, 312 server, 313 operator, 301 away, 317 idle,
// 307 registered nick) and terminating with 318 RPL_ENDOFWHOIS.
// An unknown nick yields ERR_NOSUCHNICK and nothing else.
func (i *IRCServer) cmdWhois(s *Session, reply *Replyctx, msg *irc.Message) {
	// Nicknames are keyed by their canonical lowercase form.
	session, ok := i.nicks[NickToLower(msg.Params[0])]
	if !ok {
		i.sendUser(s, reply, &irc.Message{
			Prefix:  i.ServerPrefix,
			Command: irc.ERR_NOSUCHNICK,
			Params:  []string{s.Nick, msg.Params[0], "No such nick/channel"},
		})
		return
	}
	// 311 RPL_WHOISUSER: nick, user, host, "*", realname.
	i.sendUser(s, reply, &irc.Message{
		Prefix:  i.ServerPrefix,
		Command: irc.RPL_WHOISUSER,
		Params:  []string{s.Nick, session.Nick, session.ircPrefix.User, session.ircPrefix.Host, "*", session.Realname},
	})
	// Collect the channels the target is on. Secret channels (+s) are
	// revealed only to IRC operators and to members of that channel.
	var channels []string
	for channel := range session.Channels {
		var prefix string
		c := i.channels[channel]
		if c.modes['s'] && !s.Operator && !s.Channels[channel] {
			continue
		}
		// Channel operators are shown with an "@" prefix.
		if c.nicks[NickToLower(session.Nick)][chanop] {
			prefix = "@"
		}
		channels = append(channels, prefix+c.name)
	}
	// Map iteration order is random; sort for deterministic output.
	sort.Strings(channels)
	if len(channels) > 0 {
		// TODO(secure): this needs to be split into multiple messages if the line exceeds 510 bytes.
		i.sendUser(s, reply, &irc.Message{
			Prefix:  i.ServerPrefix,
			Command: irc.RPL_WHOISCHANNELS,
			Params:  []string{s.Nick, session.Nick, strings.Join(channels, " ")},
		})
	}
	// 312 RPL_WHOISSERVER: which server the target is connected to.
	i.sendUser(s, reply, &irc.Message{
		Prefix:  i.ServerPrefix,
		Command: irc.RPL_WHOISSERVER,
		Params:  []string{s.Nick, session.Nick, i.ServerPrefix.Name, "RobustIRC"},
	})
	if session.Operator {
		i.sendUser(s, reply, &irc.Message{
			Prefix:  i.ServerPrefix,
			Command: irc.RPL_WHOISOPERATOR,
			Params:  []string{s.Nick, session.Nick, "is an IRC operator"},
		})
	}
	if session.AwayMsg != "" {
		i.sendUser(s, reply, &irc.Message{
			Prefix:  i.ServerPrefix,
			Command: irc.RPL_AWAY,
			Params:  []string{s.Nick, session.Nick, session.AwayMsg},
		})
	}
	// Idle time is measured against the *requesting* session's
	// LastActivity rather than wall-clock time.
	// NOTE(review): presumably this keeps replies deterministic across
	// replicas — confirm against the surrounding server design.
	idle := strconv.FormatInt(int64(s.LastActivity.Sub(session.LastNonPing).Seconds()), 10)
	signon := strconv.FormatInt(time.Unix(0, session.Created).Unix(), 10)
	i.sendUser(s, reply, &irc.Message{
		Prefix:  i.ServerPrefix,
		Command: irc.RPL_WHOISIDLE,
		Params:  []string{s.Nick, session.Nick, idle, signon, "seconds idle, signon time"},
	})
	// User mode 'r' marks a nick registered with services.
	if session.modes['r'] {
		i.sendUser(s, reply, &irc.Message{
			Prefix:  i.ServerPrefix,
			Command: "307", // RPL_WHOISREGNICK (not in the RFC)
			Params:  []string{s.Nick, session.Nick, "user has identified to services"},
		})
	}
	i.sendUser(s, reply, &irc.Message{
		Prefix:  i.ServerPrefix,
		Command: irc.RPL_ENDOFWHOIS,
		Params:  []string{s.Nick, session.Nick, "End of /WHOIS list"},
	})
}
|
package main
import (
//"math"
"fmt"
//"io/ioutil"
)
// Line is the canonical map key for a line through the fixed origin
// point: x and y hold the gcd-reduced direction components and z is a
// constant derived from them and the origin in main.
// NOTE(review): since every line here passes through the same origin,
// z is determined by (x, y) and looks redundant as part of the key —
// harmless, but confirm.
type Line struct {
	x int
	y int
	z int
}
// main reads a count n and an origin point (x0, y0) followed by n
// points from stdin, and prints the number of distinct lines through
// the origin and at least one of the points.
func main() {
	var n int
	var x0, y0, x, y int
	// Each distinct line contributes exactly one canonical Line key.
	m := map[Line]bool{}
	fmt.Scan(&n, &x0, &y0)
	for i := 0; i < n; i++ {
		fmt.Scan(&x, &y)
		xd := x0 - x
		yd := y0 - y
		// Reduce the direction vector by its gcd so that collinear
		// points map to the same key. With Go's truncated-division
		// remainder, gcd's sign for negative inputs makes opposite
		// directions reduce to the same pair.
		// NOTE(review): if a point coincides with (x0, y0) then v is 0
		// and the divisions below panic — presumably the input
		// guarantees distinct points; confirm.
		v := gcd(xd, yd)
		xd /= v
		yd /= v
		// z is computed solely from the reduced direction and the
		// origin, so it is identical for equal directions.
		z := xd*y0 + yd*x0
		m[Line{xd, yd, z}] = true
	}
	fmt.Print(len(m))
}
// gcd computes the greatest common divisor of a and b with Euclid's
// algorithm. Inputs may be negative; the result then carries a sign
// (gcd(0, 0) is 0), which the caller relies on to normalize direction
// vectors consistently.
func gcd(a int, b int) int {
	if b == 0 {
		return a
	}
	return gcd(b, a%b)
}
|
package api
import (
"io"
"net/http"
)
func serveV1SwaggerJSON(w http.ResponseWriter, r *http.Request) {
w.Header().Set("content-type", "application/json")
io.WriteString(w, `{
"components": {
"responses": {
"AssetsDetailedResponse": {
"content": {
"application/json": {
"schema": {
"items": {
"$ref": "#/components/schemas/AssetDetail"
},
"type": "array"
}
}
},
"description": "object containing detailed asset information"
},
"GeneralErrorResponse": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Error"
}
}
},
"description": "error message"
},
"HealthResponse": {
"content": {
"application/json": {
"schema": {
"properties": {
"catching_up": {
"type": "boolean"
},
"database": {
"type": "boolean"
},
"scannerHeight": {
"format": "int64",
"type": "integer"
}
},
"type": "object"
}
}
},
"description": "Returns an health status of Midgard"
},
"NetworkResponse": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/NetworkInfo"
}
}
},
"description": "Returns an object containing Network data"
},
"NodeKeyResponse": {
"content": {
"application/json": {
"schema": {
"items": {
"$ref": "#/components/schemas/NodeKey"
},
"type": "array"
}
}
},
"description": "Returns an object containing Network data"
},
"PoolsDetailedResponse": {
"content": {
"application/json": {
"schema": {
"items": {
"$ref": "#/components/schemas/PoolDetail"
},
"type": "array"
}
}
},
"description": "object containing pool data for that asset"
},
"PoolsResponse": {
"content": {
"application/json": {
"schema": {
"items": {
"$ref": "#/components/schemas/asset"
},
"type": "array"
}
}
},
"description": "array of assets"
},
"StakersAddressDataResponse": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/StakersAddressData"
}
}
},
"description": "array of all the pools the staker is staking in"
},
"StakersAssetDataResponse": {
"content": {
"application/json": {
"schema": {
"items": {
"$ref": "#/components/schemas/StakersAssetData"
},
"type": "array"
}
}
},
"description": "object containing staking data for a specific staker and asset"
},
"StakersResponse": {
"content": {
"application/json": {
"schema": {
"items": {
"$ref": "#/components/schemas/Stakers"
},
"type": "array"
}
}
},
"description": "array of all the stakers"
},
"StatsResponse": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/StatsData"
}
}
},
"description": "object containing global BEPSwap data"
},
"ThorchainConstantsResponse": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ThorchainConstants"
}
}
},
"description": "Get Return an object for the proxied constants endpoint."
},
"ThorchainEndpointsResponse": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ThorchainEndpoints"
}
}
},
"description": "Get Return an object for the proxied pools_addresses endpoint."
},
"ThorchainLastblockResponse": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ThorchainLastblock"
}
}
},
"description": "Get Return an object for the proxied lastblock endpoint."
},
"TotalVolChangesResponse": {
"content": {
"application/json": {
"schema": {
"items": {
"$ref": "#/components/schemas/TotalVolChanges"
},
"type": "array"
}
}
},
"description": "Get Return an array of total volume changes."
},
"TxsResponse": {
"content": {
"application/json": {
"schema": {
"properties": {
"count": {
"format": "int64",
"type": "integer"
},
"txs": {
"items": {
"$ref": "#/components/schemas/TxDetails"
},
"type": "array"
}
},
"type": "object"
}
}
},
"description": "Returns an array of transactions"
}
},
"schemas": {
"AssetDetail": {
"properties": {
"asset": {
"$ref": "#/components/schemas/asset"
},
"dateCreated": {
"format": "int64",
"type": "integer"
},
"priceRune": {
"type": "string"
}
},
"type": "object"
},
"BlockRewards": {
"properties": {
"blockReward": {
"type": "string"
},
"bondReward": {
"type": "string"
},
"stakeReward": {
"type": "string"
}
},
"type": "object"
},
"BondMetrics": {
"properties": {
"averageActiveBond": {
"description": "Average bond of active nodes",
"type": "string"
},
"averageStandbyBond": {
"description": "Average bond of standby nodes",
"type": "string"
},
"maximumActiveBond": {
"description": "Maxinum bond of active nodes",
"type": "string"
},
"maximumStandbyBond": {
"description": "Maximum bond of standby nodes",
"type": "string"
},
"medianActiveBond": {
"description": "Median bond of active nodes",
"type": "string"
},
"medianStandbyBond": {
"description": "Median bond of standby nodes",
"type": "string"
},
"minimumActiveBond": {
"description": "Minumum bond of active nodes",
"type": "string"
},
"minimumStandbyBond": {
"description": "Minumum bond of standby nodes",
"type": "string"
},
"totalActiveBond": {
"description": "Total bond of active nodes",
"type": "string"
},
"totalStandbyBond": {
"description": "Total bond of standby nodes",
"type": "string"
}
},
"type": "object"
},
"Error": {
"properties": {
"error": {
"type": "string"
}
},
"required": [
"error"
],
"type": "object"
},
"NetworkInfo": {
"properties": {
"activeBonds": {
"description": "Array of Active Bonds",
"items": {
"type": "string"
},
"type": "array"
},
"activeNodeCount": {
"description": "Number of Active Nodes",
"type": "integer"
},
"blockRewards": {
"$ref": "#/components/schemas/BlockRewards"
},
"bondMetrics": {
"$ref": "#/components/schemas/BondMetrics"
},
"bondingROI": {
"type": "string"
},
"nextChurnHeight": {
"type": "string"
},
"poolActivationCountdown": {
"description": "The remaining time of pool activation (in blocks)",
"format": "int64",
"type": "integer"
},
"poolShareFactor": {
"type": "string"
},
"stakingROI": {
"type": "string"
},
"standbyBonds": {
"description": "Array of Standby Bonds",
"items": {
"type": "string"
},
"type": "array"
},
"standbyNodeCount": {
"description": "Number of Standby Nodes",
"type": "integer"
},
"totalReserve": {
"description": "Total left in Reserve",
"type": "string"
},
"totalStaked": {
"description": "Total Rune Staked in Pools",
"type": "string"
}
},
"type": "object"
},
"NodeKey": {
"properties": {
"ed25519": {
"description": "ed25519 public key",
"type": "string"
},
"secp256k1": {
"description": "secp256k1 public key",
"type": "string"
}
},
"type": "object"
},
"PoolDetail": {
"properties": {
"asset": {
"$ref": "#/components/schemas/asset"
},
"assetDepth": {
"description": "Total current Asset balance",
"type": "string"
},
"assetROI": {
"description": "Asset return on investment",
"type": "string"
},
"assetStakedTotal": {
"description": "Total Asset staked",
"type": "string"
},
"buyAssetCount": {
"description": "Number of RUNE-\u003eASSET transactions",
"type": "string"
},
"buyFeeAverage": {
"description": "Average sell Asset fee size for RUNE-\u003eASSET (in ASSET)",
"type": "string"
},
"buyFeesTotal": {
"description": "Total fees (in Asset)",
"type": "string"
},
"buySlipAverage": {
"description": "Average trade slip for RUNE-\u003eASSET in %",
"type": "string"
},
"buyTxAverage": {
"description": "Average Asset buy transaction size for (RUNE-\u003eASSET) (in ASSET)",
"type": "string"
},
"buyVolume": {
"description": "Total Asset buy volume (RUNE-\u003eASSET) (in Asset)",
"type": "string"
},
"poolDepth": {
"description": "Total depth of both sides (in RUNE)",
"type": "string"
},
"poolFeeAverage": {
"description": "Average pool fee",
"type": "string"
},
"poolFeesTotal": {
"description": "Total fees",
"type": "string"
},
"poolROI": {
"description": "Pool ROI (average of RUNE and Asset ROI)",
"type": "string"
},
"poolROI12": {
"description": "Pool ROI over 12 months",
"type": "string"
},
"poolSlipAverage": {
"description": "Average pool slip",
"type": "string"
},
"poolStakedTotal": {
"description": "Rune value staked Total",
"type": "string"
},
"poolTxAverage": {
"description": "Average pool transaction",
"type": "string"
},
"poolUnits": {
"description": "Total pool units outstanding",
"type": "string"
},
"poolVolume": {
"description": "Two-way volume of all-time (in RUNE)",
"type": "string"
},
"poolVolume24hr": {
"description": "Two-way volume in 24hrs (in RUNE)",
"type": "string"
},
"price": {
"description": "Price of Asset (in RUNE).",
"type": "string"
},
"runeDepth": {
"description": "Total current Rune balance",
"type": "string"
},
"runeROI": {
"description": "RUNE return on investment",
"type": "string"
},
"runeStakedTotal": {
"description": "Total RUNE staked",
"type": "string"
},
"sellAssetCount": {
"description": "Number of ASSET-\u003eRUNE transactions",
"type": "string"
},
"sellFeeAverage": {
"description": "Average buy Asset fee size for ASSET-\u003eRUNE (in RUNE)",
"type": "string"
},
"sellFeesTotal": {
"description": "Total fees (in RUNE)",
"type": "string"
},
"sellSlipAverage": {
"description": "Average trade slip for ASSET-\u003eRUNE in %",
"type": "string"
},
"sellTxAverage": {
"description": "Average Asset sell transaction size (ASSET\u003eRUNE) (in RUNE)",
"type": "string"
},
"sellVolume": {
"description": "Total Asset sell volume (ASSET\u003eRUNE) (in RUNE).",
"type": "string"
},
"stakeTxCount": {
"description": "Number of stake transactions",
"type": "string"
},
"stakersCount": {
"description": "Number of unique stakers",
"type": "string"
},
"stakingTxCount": {
"description": "Number of stake \u0026 withdraw transactions",
"type": "string"
},
"status": {
"enum": [
"bootstrapped",
"enabled",
"disabled"
],
"type": "string"
},
"swappersCount": {
"description": "Number of unique swappers interacting with pool",
"type": "string"
},
"swappingTxCount": {
"description": "Number of swapping transactions in the pool (buys and sells)",
"type": "string"
},
"withdrawTxCount": {
"description": "Number of withdraw transactions",
"type": "string"
}
},
"type": "object"
},
"Stakers": {
"description": "Staker address",
"example": "tbnb1fj2lqj8dvr5pumfchc7ntlfqd2v6zdxqwjewf5",
"type": "string"
},
"StakersAddressData": {
"properties": {
"poolsArray": {
"items": {
"$ref": "#/components/schemas/asset"
},
"type": "array"
},
"totalEarned": {
"description": "Total value of earnings (in RUNE) across all pools.",
"type": "string"
},
"totalROI": {
"description": "Average of all pool ROIs.",
"type": "string"
},
"totalStaked": {
"description": "Total staked (in RUNE) across all pools.",
"type": "string"
}
},
"type": "object"
},
"StakersAssetData": {
"properties": {
"asset": {
"$ref": "#/components/schemas/asset"
},
"dateFirstStaked": {
"format": "int64",
"type": "integer"
},
"heightLastStaked": {
"format": "int64",
"type": "integer"
},
"stakeUnits": {
"description": "Represents ownership of a pool.",
"type": "string"
}
},
"type": "object"
},
"StatsData": {
"properties": {
"dailyActiveUsers": {
"description": "Daily active users (unique addresses interacting)",
"type": "string"
},
"dailyTx": {
"description": "Daily transactions",
"type": "string"
},
"monthlyActiveUsers": {
"description": "Monthly active users",
"type": "string"
},
"monthlyTx": {
"description": "Monthly transactions",
"type": "string"
},
"poolCount": {
"description": "Number of active pools",
"type": "string"
},
"totalAssetBuys": {
"description": "Total buying transactions",
"type": "string"
},
"totalAssetSells": {
"description": "Total selling transactions",
"type": "string"
},
"totalDepth": {
"description": "Total RUNE balances",
"type": "string"
},
"totalEarned": {
"description": "Total earned (in RUNE Value).",
"type": "string"
},
"totalStakeTx": {
"description": "Total staking transactions",
"type": "string"
},
"totalStaked": {
"description": "Total staked (in RUNE Value).",
"type": "string"
},
"totalTx": {
"description": "Total transactions",
"type": "string"
},
"totalUsers": {
"description": "Total unique swappers \u0026 stakers",
"type": "string"
},
"totalVolume": {
"description": "Total (in RUNE Value) of all assets swapped since start.",
"type": "string"
},
"totalVolume24hr": {
"description": "Total (in RUNE Value) of all assets swapped in 24hrs",
"type": "string"
},
"totalWithdrawTx": {
"description": "Total withdrawing transactions",
"type": "string"
}
},
"type": "object"
},
"ThorchainBooleanConstants": {
"properties": {
"StrictBondStakeRatio": {
"type": "boolean"
}
},
"type": "object"
},
"ThorchainConstants": {
"properties": {
"bool_values": {
"$ref": "#/components/schemas/ThorchainBooleanConstants"
},
"int_64_values": {
"$ref": "#/components/schemas/ThorchainInt64Constants"
},
"string_values": {
"$ref": "#/components/schemas/ThorchainStringConstants"
}
},
"type": "object"
},
"ThorchainEndpoint": {
"properties": {
"address": {
"type": "string"
},
"chain": {
"type": "string"
},
"pub_key": {
"type": "string"
}
},
"type": "object"
},
"ThorchainEndpoints": {
"properties": {
"current": {
"items": {
"$ref": "#/components/schemas/ThorchainEndpoint"
},
"type": "array"
}
},
"type": "object"
},
"ThorchainInt64Constants": {
"properties": {
"BadValidatorRate": {
"format": "int64",
"type": "integer"
},
"BlocksPerYear": {
"format": "int64",
"type": "integer"
},
"DesireValidatorSet": {
"format": "int64",
"type": "integer"
},
"DoubleSignMaxAge": {
"format": "int64",
"type": "integer"
},
"EmissionCurve": {
"format": "int64",
"type": "integer"
},
"FailKeySignSlashPoints": {
"format": "int64",
"type": "integer"
},
"FailKeygenSlashPoints": {
"format": "int64",
"type": "integer"
},
"FundMigrationInterval": {
"format": "int64",
"type": "integer"
},
"JailTimeKeygen": {
"format": "int64",
"type": "integer"
},
"JailTimeKeysign": {
"format": "int64",
"type": "integer"
},
"LackOfObservationPenalty": {
"format": "int64",
"type": "integer"
},
"MinimumBondInRune": {
"format": "int64",
"type": "integer"
},
"MinimumNodesForBFT": {
"format": "int64",
"type": "integer"
},
"MinimumNodesForYggdrasil": {
"format": "int64",
"type": "integer"
},
"NewPoolCycle": {
"format": "int64",
"type": "integer"
},
"ObserveSlashPoints": {
"format": "int64",
"type": "integer"
},
"OldValidatorRate": {
"format": "int64",
"type": "integer"
},
"RotatePerBlockHeight": {
"format": "int64",
"type": "integer"
},
"RotateRetryBlocks": {
"format": "int64",
"type": "integer"
},
"SigningTransactionPeriod": {
"format": "int64",
"type": "integer"
},
"StakeLockUpBlocks": {
"format": "int64",
"type": "integer"
},
"TransactionFee": {
"format": "int64",
"type": "integer"
},
"ValidatorRotateInNumBeforeFull": {
"format": "int64",
"type": "integer"
},
"ValidatorRotateNumAfterFull": {
"format": "int64",
"type": "integer"
},
"ValidatorRotateOutNumBeforeFull": {
"format": "int64",
"type": "integer"
},
"WhiteListGasAsset": {
"format": "int64",
"type": "integer"
},
"YggFundLimit": {
"format": "int64",
"type": "integer"
}
},
"type": "object"
},
"ThorchainLastblock": {
"properties": {
"chain": {
"type": "string"
},
"lastobservedin": {
"format": "int64",
"type": "integer"
},
"lastsignedout": {
"format": "int64",
"type": "integer"
},
"thorchain": {
"format": "int64",
"type": "integer"
}
},
"type": "object"
},
"ThorchainStringConstants": {
"properties": {
"DefaultPoolStatus": {
"type": "string"
}
},
"type": "object"
},
"TotalVolChanges": {
"properties": {
"buyVolume": {
"type": "string"
},
"sellVolume": {
"type": "string"
},
"time": {
"format": "int64",
"type": "integer"
},
"totalVolume": {
"type": "string"
}
},
"type": "object"
},
"TxDetails": {
"properties": {
"date": {
"format": "int64",
"type": "integer"
},
"events": {
"$ref": "#/components/schemas/event"
},
"gas": {
"$ref": "#/components/schemas/gas"
},
"height": {
"type": "string"
},
"in": {
"$ref": "#/components/schemas/tx"
},
"options": {
"$ref": "#/components/schemas/option"
},
"out": {
"items": {
"$ref": "#/components/schemas/tx"
},
"type": "array"
},
"pool": {
"$ref": "#/components/schemas/asset"
},
"status": {
"enum": [
"success",
"refund"
],
"type": "string"
},
"type": {
"enum": [
"swap",
"stake",
"unstake",
"rewards",
"add",
"pool",
"gas",
"refund",
"doubleSwap"
],
"type": "string"
}
}
},
"asset": {
"type": "string"
},
"coin": {
"properties": {
"amount": {
"type": "string"
},
"asset": {
"$ref": "#/components/schemas/asset"
}
},
"type": "object"
},
"coins": {
"items": {
"$ref": "#/components/schemas/coin"
},
"type": "array"
},
"event": {
"properties": {
"fee": {
"type": "string"
},
"slip": {
"type": "string"
},
"stakeUnits": {
"type": "string"
}
},
"type": "object"
},
"gas": {
"properties": {
"amount": {
"type": "string"
},
"asset": {
"$ref": "#/components/schemas/asset"
}
},
"type": "object"
},
"option": {
"properties": {
"asymmetry": {
"type": "string"
},
"priceTarget": {
"type": "string"
},
"withdrawBasisPoints": {
"type": "string"
}
},
"type": "object"
},
"tx": {
"properties": {
"address": {
"type": "string"
},
"coins": {
"$ref": "#/components/schemas/coins"
},
"memo": {
"type": "string"
},
"txID": {
"type": "string"
}
},
"type": "object"
}
}
},
"info": {
"contact": {
"email": "devs@thorchain.org"
},
"description": "The Midgard Public API queries THORChain and any chains linked via the Bifröst and prepares information about the network to be readily available for public users. The API parses transaction event data from THORChain and stores them in a time-series database to make time-dependent queries easy. Midgard does not hold critical information. To interact with BEPSwap and Asgardex, users should query THORChain directly.",
"title": "Midgard Public API",
"version": "1.0.0-oas3"
},
"openapi": "3.0.0",
"paths": {
"/v1/assets": {
"get": {
"description": "Detailed information about a specific asset. Returns enough information to display a unique asset in various user interfaces, including latest price.",
"operationId": "GetAssetInfo",
"parameters": [
{
"description": "One or more comma separated unique asset (CHAIN.SYMBOL)",
"example": [
"BNB.TOMOB-1E1",
"BNB.TCAN-014"
],
"in": "query",
"name": "asset",
"required": true,
"schema": {
"type": "string"
}
}
],
"responses": {
"200": {
"$ref": "#/components/responses/AssetsDetailedResponse"
},
"400": {
"$ref": "#/components/responses/GeneralErrorResponse"
}
},
"summary": "Get Asset Information"
}
},
"/v1/doc": {
"get": {
"description": "Swagger/openapi 3.0 specification generated documents.",
"operationId": "GetDocs",
"responses": {
"200": {
"description": "swagger/openapi 3.0 spec generated docs"
}
},
"summary": "Get Documents",
"tags": [
"Documentation"
]
}
},
"/v1/health": {
"get": {
"description": "Returns an object containing the health response of the API.",
"operationId": "GetHealth",
"responses": {
"200": {
"$ref": "#/components/responses/HealthResponse"
}
},
"summary": "Get Health"
}
},
"/v1/history/total_volume": {
"get": {
"description": "Returns total volume changes of all pools in specified interval",
"operationId": "GetTotalVolChanges",
"parameters": [
{
"description": "Interval of calculations",
"in": "query",
"name": "interval",
"required": true,
"schema": {
"enum": [
"5min",
"hour",
"day",
"week",
"month",
"year"
],
"type": "string"
}
},
{
"description": "Start time of the query as unix timestamp",
"in": "query",
"name": "from",
"required": true,
"schema": {
"format": "int64",
"type": "integer"
}
},
{
"description": "End time of the query as unix timestamp",
"in": "query",
"name": "to",
"required": true,
"schema": {
"format": "int64",
"type": "integer"
}
}
],
"responses": {
"200": {
"$ref": "#/components/responses/TotalVolChangesResponse"
}
},
"summary": "Get Total Volume Changes"
}
},
"/v1/network": {
"get": {
"description": "Returns an object containing Network data",
"operationId": "GetNetworkData",
"responses": {
"200": {
"$ref": "#/components/responses/NetworkResponse"
}
},
"summary": "Get Network Data"
}
},
"/v1/nodes": {
"get": {
"description": "Returns an object containing Node public keys",
"operationId": "GetNodes",
"responses": {
"200": {
"$ref": "#/components/responses/NodeKeyResponse"
}
},
"summary": "Get Node public keys"
}
},
"/v1/pools": {
"get": {
"description": "Returns an array containing all the assets supported on BEPSwap pools",
"operationId": "GetPools",
"responses": {
"200": {
"$ref": "#/components/responses/PoolsResponse"
},
"400": {
"$ref": "#/components/responses/GeneralErrorResponse"
}
},
"summary": "Get Asset Pools"
}
},
"/v1/pools/detail": {
"get": {
"description": "Returns an object containing all the pool details for that asset.",
"operationId": "GetPoolsDetails",
"parameters": [
{
"description": "Specifies the returning view",
"in": "query",
"name": "view",
"schema": {
"default": "full",
"enum": [
"balances",
"simple",
"full"
],
"type": "string"
}
},
{
"description": "One or more comma separated unique asset (CHAIN.SYMBOL)",
"example": [
"BNB.TOMOB-1E1",
"BNB.TCAN-014"
],
"in": "query",
"name": "asset",
"required": true,
"schema": {
"type": "string"
}
}
],
"responses": {
"200": {
"$ref": "#/components/responses/PoolsDetailedResponse"
}
},
"summary": "Get Pools Details"
}
},
"/v1/stakers": {
"get": {
"description": "Returns an array containing the addresses for all stakers.",
"operationId": "GetStakersData",
"responses": {
"200": {
"$ref": "#/components/responses/StakersResponse"
}
},
"summary": "Get Stakers"
}
},
"/v1/stakers/{address}": {
"get": {
"description": "Returns an array containing all the pools the staker is staking in.",
"operationId": "GetStakersAddressData",
"parameters": [
{
"description": "Unique staker address",
"example": "bnb1jxfh2g85q3v0tdq56fnevx6xcxtcnhtsmcu64m",
"in": "path",
"name": "address",
"required": true,
"schema": {
"type": "string"
}
}
],
"responses": {
"200": {
"$ref": "#/components/responses/StakersAddressDataResponse"
}
},
"summary": "Get Staker Data"
}
},
"/v1/stakers/{address}/pools": {
"get": {
"description": "Returns an object containing staking data for the specified staker and pool.",
"operationId": "GetStakersAddressAndAssetData",
"parameters": [
{
"description": "Unique staker address",
"example": "bnb1jxfh2g85q3v0tdq56fnevx6xcxtcnhtsmcu64m",
"in": "path",
"name": "address",
"required": true,
"schema": {
"type": "string"
}
},
{
"description": "One or more comma separated unique asset (CHAIN.SYMBOL)",
"example": [
"BNB.TOMOB-1E1",
"BNB.TCAN-014"
],
"in": "query",
"name": "asset",
"required": true,
"schema": {
"type": "string"
}
}
],
"responses": {
"200": {
"$ref": "#/components/responses/StakersAssetDataResponse"
}
},
"summary": "Get Staker Pool Data"
}
},
"/v1/stats": {
"get": {
"description": "Returns an object containing global stats for all pools and all transactions.",
"operationId": "GetStats",
"responses": {
"200": {
"$ref": "#/components/responses/StatsResponse"
}
},
"summary": "Get Global Stats"
}
},
"/v1/swagger.json": {
"get": {
"description": "Returns human and machine readable swagger/openapi specification.",
"operationId": "GetSwagger",
"responses": {
"200": {
"description": "human and machine readable swagger/openapi specification"
}
},
"summary": "Get Swagger",
"tags": [
"Specification"
]
}
},
"/v1/thorchain/constants": {
"get": {
"description": "Returns a proxied endpoint for the constants endpoint from a local thornode",
"operationId": "GetThorchainProxiedConstants",
"responses": {
"200": {
"$ref": "#/components/responses/ThorchainConstantsResponse"
}
},
"summary": "Get the Proxied THORChain Constants"
}
},
"/v1/thorchain/lastblock": {
"get": {
"description": "Returns a proxied endpoint for the lastblock endpoint from a local thornode",
"operationId": "GetThorchainProxiedLastblock",
"responses": {
"200": {
"$ref": "#/components/responses/ThorchainLastblockResponse"
}
},
"summary": "Get the Proxied THORChain Lastblock"
}
},
"/v1/thorchain/pool_addresses": {
"get": {
"description": "Returns a proxied endpoint for the pool_addresses endpoint from a local thornode",
"operationId": "GetThorchainProxiedEndpoints",
"responses": {
"200": {
"$ref": "#/components/responses/ThorchainEndpointsResponse"
}
},
"summary": "Get the Proxied Pool Addresses"
}
},
"/v1/txs": {
"get": {
"description": "Return an array containing the event details",
"operationId": "GetTxDetails",
"parameters": [
{
"description": "Address of sender or recipient of any in/out tx in event",
"example": "tbnb1fj2lqj8dvr5pumfchc7ntlfqd2v6zdxqwjewf5",
"in": "query",
"name": "address",
"required": false,
"schema": {
"type": "string"
}
},
{
"description": "ID of any in/out tx in event",
"example": "2F624637DE179665BA3322B864DB9F30001FD37B4E0D22A0B6ECE6A5B078DAB4",
"in": "query",
"name": "txid",
"required": false,
"schema": {
"type": "string"
}
},
{
"description": "Any asset used in event (CHAIN.SYMBOL)",
"example": "BNB.TOMOB-1E1",
"in": "query",
"name": "asset",
"required": false,
"schema": {
"type": "string"
}
},
{
"description": "One or more comma separated unique types of event",
"example": [
"swap",
"stake",
"unstake",
"add",
"refund",
"doubleSwap"
],
"in": "query",
"name": "type",
"required": false,
"schema": {
"type": "string"
}
},
{
"description": "pagination offset",
"in": "query",
"name": "offset",
"required": true,
"schema": {
"format": "int64",
"minimum": 0,
"type": "integer"
}
},
{
"description": "pagination limit",
"in": "query",
"name": "limit",
"required": true,
"schema": {
"format": "int64",
"maximum": 50,
"minimum": 0,
"type": "integer"
}
}
],
"responses": {
"200": {
"$ref": "#/components/responses/TxsResponse"
}
},
"summary": "Get details of a tx by address, asset or tx-id"
}
}
},
"servers": [
{
"url": "http://127.0.0.1:8080"
},
{
"url": "https://127.0.0.1:8080"
}
]
}`)
}
|
package index
import (
"github.com/juju/errgo"
)
// field_int_neq_t is an index field for integer "not equal" lookups.
// Its values live in a sorted set keyed by DataKey() (maintained via
// ZADD/ZREM in Add/Del), with the record id as member and its value as
// score. Filter is not implemented yet.
type field_int_neq_t struct {
	field_with_int_value_t
	field_t
}
// Add records value for id in the field's sorted set (ZADD with the
// value as score and the id as member). Negative ids denote unsaved
// records and are rejected with ErrorUnsaved.
func (self *field_int_neq_t) Add(id Id, value interface{}) error {
	if id < 0 {
		return ErrorUnsaved
	}
	conn := self.idx.Conn()
	defer conn.Close()
	if _, err := conn.Do("ZADD", self.DataKey(), value, id); err != nil {
		return errgo.Mask(err)
	}
	return nil
}
// Del removes id from the field's sorted set (ZREM). Negative ids
// denote unsaved records and are rejected with ErrorUnsaved.
func (self *field_int_neq_t) Del(id Id) error {
	if id < 0 {
		return ErrorUnsaved
	}
	conn := self.idx.Conn()
	defer conn.Close()
	if _, err := conn.Do("ZREM", self.DataKey(), id); err != nil {
		return errgo.Mask(err)
	}
	return nil
}
// Filter would narrow sourceKey into targetKey according to op and
// value, but "not equal" filtering has not been implemented yet; it
// always returns a "not implemented" error.
func (self *field_int_neq_t) Filter(op string, value interface{}, sourceKey string, targetKey string) error {
	// value is expected to be an array of 2 items
	// val := value.([]interface{})
	return errgo.New("not implemented")
}
|
// findDuplicate returns the duplicated value in nums, which must hold
// n+1 integers in the range [1, n] with exactly one value repeated
// (possibly multiple times). It uses Floyd's tortoise-and-hare cycle
// detection on the sequence i -> nums[i], running in O(n) time and
// O(1) extra space without modifying nums.
//
// Fixes: the original returned nums[slow] instead of slow (e.g. for
// [1,3,4,2,2] it returned 4 rather than 2) and left debug
// fmt.Println calls in the hot loop.
func findDuplicate(nums []int) int {
	// Phase 1: advance slow one step and fast two steps per iteration
	// until they meet somewhere inside the cycle.
	slow, fast := 0, 0
	for {
		slow = nums[slow]
		fast = nums[nums[fast]]
		if slow == fast {
			break
		}
	}
	// Phase 2: restart fast from index 0; moving both one step at a
	// time makes them meet exactly at the cycle entrance, whose index
	// is the duplicated value.
	fast = 0
	for slow != fast {
		slow = nums[slow]
		fast = nums[fast]
	}
	return slow
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package benchmarks
import (
"bytes"
"github.com/elastic/go-elasticsearch/v8/benchmarks/runner"
)
var (
	// Config holds named configuration values for a benchmark run.
	// NOTE(review): populated elsewhere — presumably by the runner.
	Config map[string]string
	// Actions is the global registry of benchmark actions, filled via
	// Register.
	Actions []Action
	// DataSources maps a data source name to its in-memory payload.
	DataSources = make(map[string]*bytes.Buffer)
	// DefaultRepetitions is the default repetition count — presumably
	// used when an Action leaves NumRepetitions unset; confirm at the
	// call sites.
	DefaultRepetitions = 1000
)
// Action represents a benchmarked action.
//
type Action struct {
	// Name identifies the action; Category and Environment classify it
	// (semantics inferred from the names — confirm against the runner).
	Name        string
	Category    string
	Environment string
	// NumWarmups, NumRepetitions and NumOperations control how the
	// action is executed — presumably warmup runs, measured
	// repetitions, and operations per repetition; confirm.
	NumWarmups     int
	NumRepetitions int
	NumOperations  int
	// SetupFunc prepares the action; RunnerFunc performs the
	// benchmarked work.
	SetupFunc  runner.RunnerFunc
	RunnerFunc runner.RunnerFunc
}
// Register appends a to the global Actions registry. It never fails;
// the error return exists only to satisfy callers expecting one.
//
func Register(a Action) error {
	Actions = append(Actions, a)
	return nil
}
|
package ecode
import (
"fmt"
"testing"
)
// TestEcode prints the human-readable message of TokenInvidErr.
// NOTE(review): this makes no assertions — it only fails if Message()
// panics. Consider asserting the expected message text instead.
func TestEcode(t *testing.T) {
	fmt.Println(TokenInvidErr.Message())
}
|
package kubelet
import (
"fmt"
"io"
log "github.com/sirupsen/logrus"
"github.com/argoproj/argo/errors"
)
// KubeletExecutor performs container operations (log streaming, status
// queries, waiting, killing) by talking to the kubelet API through
// cli.
type KubeletExecutor struct {
	cli *kubeletClient
}
// NewKubeletExecutor constructs a KubeletExecutor backed by a freshly
// created kubelet client; client creation errors are wrapped as
// internal errors.
func NewKubeletExecutor() (*KubeletExecutor, error) {
	log.Infof("Creating a kubelet executor")
	client, err := newKubeletClient()
	if err != nil {
		return nil, errors.InternalWrapError(err)
	}
	return &KubeletExecutor{cli: client}, nil
}
// GetFileContents is not supported by the kubelet executor and always
// returns a CodeNotImplemented error.
func (k *KubeletExecutor) GetFileContents(containerID string, sourcePath string) (string, error) {
	return "", errors.Errorf(errors.CodeNotImplemented, "GetFileContents() is not implemented in the kubelet executor.")
}
// CopyFile is not supported by the kubelet executor and always returns a
// CodeNotImplemented error.
func (k *KubeletExecutor) CopyFile(containerID string, sourcePath string, destPath string, compressionLevel int) error {
	return errors.Errorf(errors.CodeNotImplemented, "CopyFile() is not implemented in the kubelet executor.")
}
// GetOutputStream returns the container's log stream. Combined output is
// the only supported mode; requesting non-combined output only logs a
// warning and still returns the combined stream.
func (k *KubeletExecutor) GetOutputStream(containerID string, combinedOutput bool) (io.ReadCloser, error) {
	if !combinedOutput {
		log.Warn("non combined output unsupported")
	}
	return k.cli.GetLogStream(containerID)
}
// GetExitCode returns the container's exit code as a string, or an empty
// string (with nil error) when the container has not terminated yet.
func (k *KubeletExecutor) GetExitCode(containerID string) (string, error) {
	log.Infof("Getting exit code of %s", containerID)
	_, status, err := k.cli.GetContainerStatus(containerID)
	if err != nil {
		return "", errors.InternalWrapError(err, "Could not get container status")
	}
	// Only a terminated container carries an exit code.
	if status != nil && status.State.Terminated != nil {
		return fmt.Sprint(status.State.Terminated.ExitCode), nil
	}
	return "", nil
}
// WaitInit is a no-op for the kubelet executor; no initialization wait is needed.
func (k *KubeletExecutor) WaitInit() error {
	return nil
}
// Wait blocks until the container terminates (0 means no timeout).
func (k *KubeletExecutor) Wait(containerID string) error {
	return k.cli.WaitForTermination(containerID, 0)
}
// Kill terminates each container in containerIDs, sending SIGTERM first and
// escalating to SIGKILL after a grace period. It stops at the first failure
// and returns that error.
func (k *KubeletExecutor) Kill(containerIDs []string) error {
	for _, id := range containerIDs {
		if err := k.cli.KillGracefully(id); err != nil {
			return err
		}
	}
	return nil
}
|
package postgres
import (
"fmt"
"go-movie-app/api"
"log"
"os"
"github.com/jinzhu/gorm"
"github.com/joho/godotenv"
)
// movieRepository is a gorm/Postgres-backed implementation of api.MovieRepository.
type movieRepository struct {
	db        *gorm.DB
	tableName string // NOTE(review): stored but never used by the visible methods — confirm it is needed
}
// newPostgresClient loads connection settings from the .env file, opens a
// GORM connection to Postgres, and auto-migrates the movie schema.
//
// A connection failure is now returned to the caller instead of killing the
// whole process: the original called log.Fatal(err) and then fell through to
// `return db, nil` with an unusable handle.
func newPostgresClient() (*gorm.DB, error) {
	if err := godotenv.Load(".env"); err != nil {
		return nil, err
	}
	host := os.Getenv("HOST")
	port := os.Getenv("PORT")
	user := os.Getenv("USER")
	password := os.Getenv("PASSWORD")
	dbname := os.Getenv("DBNAME")
	dsn := fmt.Sprintf("host=%s port=%s user=%s dbname=%s sslmode=disable password=%s", host, port, user, dbname, password)
	db, err := gorm.Open("postgres", dsn)
	if err != nil {
		// Log for visibility but propagate the error; a helper must not
		// terminate the process (and this keeps the package-level log import in use).
		log.Printf("postgres connection failed: %v", err)
		return nil, err
	}
	db.LogMode(false)
	db.AutoMigrate(api.Movie{})
	return db, nil
}
// NewMovieRepository builds a Postgres-backed movie repository for the given
// table name, connecting (and migrating) via newPostgresClient.
func NewMovieRepository(tableName string) (api.MovieRepository, error) {
	db, err := newPostgresClient()
	if err != nil {
		return nil, err
	}
	return movieRepository{db: db, tableName: tableName}, nil
}
// GetMovie looks a movie up by primary key. The query error — including
// gorm's record-not-found — is now propagated; the original ignored it and
// returned a zero-valued movie on failure.
func (m movieRepository) GetMovie(id string) (*api.Movie, error) {
	var movie api.Movie
	if err := m.db.First(&movie, id).Error; err != nil {
		return nil, err
	}
	return &movie, nil
}
// GetMovies returns all stored movies. The query error is now propagated;
// the original silently returned an empty slice on failure.
func (m movieRepository) GetMovies() ([]*api.Movie, error) {
	movies := []*api.Movie{}
	if err := m.db.Find(&movies).Error; err != nil {
		return nil, err
	}
	return movies, nil
}
// PostMovie inserts a new movie row.
//
// movie is already a *api.Movie; the original passed &movie, handing gorm a
// **api.Movie, which it cannot map to a table.
func (m movieRepository) PostMovie(movie *api.Movie) error {
	if err := m.db.Create(movie).Error; err != nil {
		return err
	}
	return nil
}
// PutMovie saves (upserts) the given movie and returns the saved record.
// The original returned (nil, nil) on success, so callers could never see
// the updated movie; return the saved value instead.
func (m movieRepository) PutMovie(movie *api.Movie) (*api.Movie, error) {
	if err := m.db.Save(movie).Error; err != nil {
		return nil, err
	}
	return movie, nil
}
// DeleteMovie removes the movie with the given id.
//
// gorm's Delete needs a model value to derive the target table; the original
// passed the bare id string, giving gorm nothing to delete from. Scope the
// delete with a model and an explicit id condition instead.
func (m movieRepository) DeleteMovie(id string) error {
	if err := m.db.Where("id = ?", id).Delete(&api.Movie{}).Error; err != nil {
		return err
	}
	return nil
}
|
package main
import (
"log"
"time"
"github.com/shanghuiyang/rpi-devices/dev"
"github.com/shanghuiyang/rpi-devices/util"
)
// main drives a 128x32 OLED display, refreshing it once per second with the
// current wall-clock time (HH:MM:SS) until Display fails or the user quits.
func main() {
	oled, err := dev.NewOLED(128, 32)
	if err != nil {
		log.Printf("failed to create an oled, error: %v", err)
		return
	}
	// Close the display cleanly when a quit signal arrives.
	util.WaitQuit(oled.Close)
	for {
		t := time.Now().Format("15:04:05")
		// 19, 0, 25: presumably font size and x/y placement — TODO confirm
		// against the dev.OLED.Display signature.
		if err := oled.Display(t, 19, 0, 25); err != nil {
			log.Printf("failed to display time, error: %v", err)
			break
		}
		time.Sleep(1 * time.Second)
	}
}
|
/*
Package vcs provides controllers to communicate with the package repo registry.
*/
package vcs
|
package main
import (
"fmt"
"test"
)
// main builds the twelve road directions of the traffic-light demo, starts
// the lamp controller, then blocks on stdin so worker goroutines keep running.
func main() {
	directions := []string{"S2N", "S2W", "E2W", "E2S", "N2S", "N2E", "W2E", "W2N", "S2E", "E2N", "N2W", "W2S"}
	// Create the 12 roads.
	for i := 0; i < len(directions); i++ {
		new(test.Road).Init(directions[i])
	}
	// Start the lamp controller.
	new(test.LampController).Init()
	// Block the main goroutine so it does not exit immediately.
	var input string
	fmt.Scanln(&input)
}
|
package design
import . "goa.design/goa/v3/dsl"
// The "loan" service exposes HTTP endpoints for listing and fetching loans
// under the /loans path prefix.
var _ = Service("loan", func() {
	Description("The loan service makes it possible to view, add or remove loans")
	HTTP(func() {
		// Common URL prefix for every method of this service.
		Path("/loans")
	})
	// listLoans: GET /loans/ returning every loan in the "tiny" projection.
	Method("listLoans", func() {
		Description("List all stored loans")
		Result(CollectionOf(Loan), func() {
			View("tiny")
		})
		HTTP(func() {
			GET("/")
			Response(StatusOK)
		})
	})
	// getLoan: GET /loans/{id}; the optional "view" query parameter selects
	// the rendered projection ("default" or "tiny").
	Method("getLoan", func() {
		Description("get loan by ID")
		Payload(func() {
			Field(1, "id", Int, "ID of loan to show")
			Field(2, "view", String, "View to render", func() {
				Enum("default", "tiny")
				Default("default")
			})
			Required("id")
		})
		Result(Loan)
		Error("not_found", NotFound, "Loan not found")
		HTTP(func() {
			GET("/{id}")
			// "view" travels as a query-string parameter.
			Param("view")
			Response(StatusOK)
			Response("not_found", StatusNotFound)
		})
	})
})
// Loan is the result type describing a single loan, with a full "default"
// view and a reduced "tiny" view used by list endpoints.
var Loan = ResultType("application/vnd.cellar.loan", func() {
	Description("A Loan describes a loan in loan service.")
	TypeName("Loan")
	Attributes(func() {
		Attribute("id", Int, "ID is the unique id of the loan", func() {
			Example(1)
		})
		// Amount is validated to the inclusive range [50, 500].
		Attribute("amount", Float64, "Loan amount", func() {
			Example(50)
			Minimum(50)
			Maximum(500)
		})
		Attribute("lender_id", Int, "id of lender", func() {
			Example(1)
		})
		Attribute("borrower_id", Int, "id of borrower", func() {
			Example(1)
		})
		Attribute("description", String, "description")
		Attribute("payback_date", String, "Date on which the loan is supposed to be paid back")
	})
	// "default" renders every attribute.
	View("default", func() {
		Attribute("id")
		Attribute("amount")
		Attribute("lender_id")
		Attribute("borrower_id")
		Attribute("description")
		Attribute("payback_date")
	})
	// "tiny" omits description and payback_date.
	View("tiny", func() {
		Attribute("id")
		Attribute("amount")
		Attribute("lender_id")
		Attribute("borrower_id")
	})
	Required("id", "amount", "lender_id", "borrower_id")
})
|
/*
Copyright 2017 by GoWeb author: gdccmcm14@live.com.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License
*/
package util
import (
"database/sql"
"fmt"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
_ "github.com/go-sql-driver/mysql"
"github.com/hunterhug/GoWeb/models/admin"
"os"
//"time"
)
// Createtb seeds the database with the initial admin data set.
func Createtb() {
	beego.Trace("data init start")
	admin.InitData()
	beego.Trace("data init end")
}
// Syncdb performs a full database (re)build: create the database (dropping
// it first when force is true), register the ORM connection, sync the table
// schema, and seed the initial data. The app must be restarted afterwards.
func Syncdb(force bool) {
	beego.Trace("db, sync db start")
	Createdb(force)
	Connect()
	Createconfig()
	Createtb()
	beego.Trace("sync db end, please reopen app again")
}
// Updaterbac rebuilds the RBAC data: truncates the group and node tables,
// reconnects the ORM, then re-inserts groups and nodes.
func Updaterbac() {
	TRUNCATETable([]string{beego.AppConfig.String("rbac_group_table"), beego.AppConfig.String("rbac_node_table")})
	Connect()
	admin.InsertGroup()
	admin.InsertNodes()
}
// Createconfig runs beego ORM's schema sync against the "default" database.
// NOTE(review): force is hard-coded true, so existing tables are dropped and
// recreated on every run — confirm this is intended outside development.
func Createconfig() {
	name := "default" // database alias name
	force := true     // drop table force
	verbose := true   // print log
	err := orm.RunSyncdb(name, force, verbose)
	if err != nil {
		beego.Error("database config set to force error:" + err.Error())
	}
}
// Createdb creates the application database (dropping it first when force is
// true), using connection settings from the beego app config. Only the mysql
// driver is supported; a SQL failure panics after logging.
//
// Fixes over the original: the original checked `err != nil || err1 != nil`
// but always panicked with err.Error(), so a CREATE-only failure (err == nil,
// err1 != nil) caused a nil-pointer panic; each Exec is now checked with its
// own error. The redundant `break` inside the switch case is also dropped.
func Createdb(force bool) {
	beego.Trace("create database start")
	dbType := beego.AppConfig.String("db_type")
	dbHost := beego.AppConfig.String("db_host")
	dbPort := beego.AppConfig.String("db_port")
	dbUser := beego.AppConfig.String("db_user")
	dbPass := beego.AppConfig.String("db_pass")
	dbName := beego.AppConfig.String("db_name")
	var dns, createSQL, dropSQL string
	switch dbType {
	case "mysql":
		// No database in the DSN: we are about to create it.
		dns = fmt.Sprintf("%s:%s@tcp(%s:%s)/?charset=utf8", dbUser, dbPass, dbHost, dbPort)
		createSQL = fmt.Sprintf("CREATE DATABASE if not exists `%s` CHARSET utf8 COLLATE utf8_general_ci", dbName)
		dropSQL = fmt.Sprintf("DROP DATABASE IF EXISTS `%s`", dbName)
		if force {
			fmt.Println(dropSQL)
		}
		fmt.Println(createSQL)
	default:
		beego.Critical("db driver not support:", dbType)
		return
	}
	db, err := sql.Open(dbType, dns)
	if err != nil {
		panic(err.Error())
	}
	defer db.Close()
	if force {
		if _, err := db.Exec(dropSQL); err != nil {
			beego.Error("db exec error:", err)
			panic(err.Error())
		}
	}
	if _, err := db.Exec(createSQL); err != nil {
		beego.Error("db exec error:", err)
		panic(err.Error())
	}
	beego.Trace("database ", dbName, " created")
	beego.Trace("create database end")
}
// TRUNCATETable truncates every table named in table, using connection
// settings from the beego app config. Only mysql is supported; any failure
// panics after logging.
//
// Fix over the original: `defer db.Close()` ran before the sql.Open error
// check, so on a failed open the deferred Close dereferenced a nil *sql.DB.
// The error is now checked first.
func TRUNCATETable(table []string) {
	beego.Trace("delete tables start")
	dbType := beego.AppConfig.String("db_type")
	dbHost := beego.AppConfig.String("db_host")
	dbPort := beego.AppConfig.String("db_port")
	dbUser := beego.AppConfig.String("db_user")
	dbPass := beego.AppConfig.String("db_pass")
	dbName := beego.AppConfig.String("db_name")
	var dns string
	switch dbType {
	case "mysql":
		dns = fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?charset=utf8", dbUser, dbPass, dbHost, dbPort, dbName)
	default:
		beego.Critical("db driver not support:", dbType)
		return
	}
	db, err := sql.Open(dbType, dns)
	if err != nil {
		panic(err.Error())
	}
	defer db.Close()
	for _, name := range table {
		beego.Trace("table deleting:" + name)
		if _, err := db.Exec(fmt.Sprintf("TRUNCATE TABLE `%s`", name)); err != nil {
			beego.Error("table delete error:" + err.Error())
			panic(err.Error())
		}
		beego.Trace("table delete success:" + name)
	}
	beego.Trace("delete table end")
}
// Connect registers the mysql driver and the "default" beego ORM database
// using settings from the app config, defaulting the port to 3306. When the
// "dblog" config option is "open", SQL debug logging is mirrored to log/db.log.
func Connect() {
	beego.Trace("database start to connect")
	dbType := beego.AppConfig.String("db_type")
	dbHost := beego.AppConfig.String("db_host")
	dbPort := beego.AppConfig.String("db_port")
	if dbPort == "" {
		dbPort = "3306"
	}
	dbUser := beego.AppConfig.String("db_user")
	dbPass := beego.AppConfig.String("db_pass")
	dbName := beego.AppConfig.String("db_name")
	var dns string
	switch dbType {
	case "mysql":
		orm.RegisterDriver("mysql", orm.DRMySQL)
		//orm.DefaultTimeLoc = time.UTC
		dns = fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?charset=utf8", dbUser, dbPass, dbHost, dbPort, dbName)
	default:
		beego.Critical("db driver not support:", dbType)
		return
	}
	if err := orm.RegisterDataBase("default", dbType, dns); err != nil {
		beego.Error("register data:" + err.Error())
		panic(err.Error())
	}
	if beego.AppConfig.String("dblog") == "open" {
		beego.Trace("develop mode,debug database: db.log")
		orm.Debug = true
		w, e := os.OpenFile("log/db.log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
		if e != nil {
			beego.Error(e.Error())
		}
		orm.DebugLog = orm.NewLog(w)
	}
}
|
package main
import "fmt"
// main is a demo of Go map operations: creation, insertion, iteration
// (order is random), comma-ok lookups, literals, update, and delete.
func main() {
	mapa := make(map[string]int)
	fmt.Println(mapa)
	mapa["Juan"] = 32
	mapa["Yessica"] = 41
	mapa["Darío"] = 32
	fmt.Println(mapa)
	// Map iteration order is intentionally randomized by the runtime.
	for i, v := range mapa {
		fmt.Println(i, v)
	}
	// Comma-ok form: ok reports whether the key exists.
	valueJ, ok := mapa["Juan"]
	fmt.Println(valueJ, ok)
	valueM, ok := mapa["Maria"]
	fmt.Println(valueM, ok)
	// ciudades := make(map[string]string, 5)
	ciudades := make(map[string]string)
	ciudades["Mexico"] = "CDMX"
	ciudades["Brasil"] = "Rio de Janeiro"
	ciudades["Italia"] = "Roma"
	fmt.Println(ciudades)
	fmt.Println()
	// Map literal initialization.
	campeonato := map[string]int{
		"Barcelona":    39,
		"Real Madrid":  38,
		"Chivas":       37,
		"Boca Juniors": 30,
	}
	campeonato["River Plate"] = 25
	campeonato["Chivas"] = 25 // overwrite an existing key
	delete(campeonato, "Real Madrid")
	for key, value := range campeonato {
		fmt.Printf("El equipo %s, tiene un puntaje de: %d \n", key, value)
	}
	fmt.Println()
	// Lookups of a missing and an existing key.
	mineiroValue, mineiroExists := campeonato["Mineiro"]
	fmt.Printf("El puntaje capturado es %d, y el equipo existe %t \n", mineiroValue, mineiroExists)
	chivasValue, chivasExists := campeonato["Chivas"]
	fmt.Printf("El puntaje capturado es %d, y el equipo existe %t \n", chivasValue, chivasExists)
}
|
package server
import (
"net"
"sync"
"sync/atomic"
"errors"
"dnsgo/layer"
"log"
)
var (
ErrClosed = errors.New("closed dns server")
)
// DNSServer is a shutdown-able UDP DNS server.
type DNSServer interface {
	// Serve starts listening and blocks until the server fails or is shut down.
	Serve() error
	// Addr returns the UDP address the server was configured with.
	Addr() *net.UDPAddr
	// Shutdown stops the server; safe to call multiple times.
	Shutdown()
}
// server is the UDP-backed implementation of DNSServer.
type server struct {
	addr      *net.UDPAddr // address to bind
	conn      *net.UDPConn // set by Serve; closed on shutdown
	closeOnce sync.Once    // guards one-time shutdown
	closed    int32        // atomic flag: nonzero once shutdown has run
	mutex     sync.Mutex   // protects conn during shutdown
}
// NewServer resolves addr as a UDP address and returns an unstarted DNS
// server bound to it; call Serve to begin listening.
func NewServer(addr string) (DNSServer, error) {
	udpAddr, err := net.ResolveUDPAddr("udp", addr)
	if err != nil {
		return nil, err
	}
	// closeOnce/mutex start at their usable zero values.
	return &server{addr: udpAddr}, nil
}
// Shutdown stops the server exactly once; subsequent calls are no-ops.
func (s *server) Shutdown() {
	log.Println("dns server shutdown")
	s.closeOnce.Do(s.shutdown)
}
// shutdown marks the server closed and closes the UDP connection if one is
// open, which unblocks the read loop in listen.
func (s *server) shutdown() {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	atomic.CompareAndSwapInt32(&s.closed, 0, 1)
	if s.conn != nil {
		s.conn.Close()
	}
}
// Serve opens the UDP socket on the configured address and blocks in the
// read loop until the connection fails or Shutdown closes it. It returns
// ErrClosed if Shutdown has already completed.
func (s *server) Serve() error {
	if atomic.LoadInt32(&s.closed) != 0 {
		return ErrClosed
	}
	conn, err := net.ListenUDP("udp", s.addr)
	if err != nil {
		return err
	}
	// NOTE(review): s.conn is assigned without s.mutex while shutdown reads
	// it under the lock; a Serve/Shutdown race could leak the socket —
	// confirm callers serialize these.
	s.conn = conn
	return s.listen()
}
// listen reads UDP packets (up to 512 bytes, the classic DNS message limit)
// in a loop, decodes each query, and answers it in its own goroutine.
// It returns the first read error — including the error produced when
// Shutdown closes the connection. Decode failures are logged and skipped.
//
// The trailing `return nil` after the infinite loop in the original was
// unreachable and has been removed.
func (s *server) listen() error {
	qc := layer.NewPacker()
	buf := make([]byte, 512)
	for {
		n, addr, err := s.conn.ReadFromUDP(buf)
		if err != nil {
			log.Println(err)
			return err
		}
		q, err := qc.Decode(buf[:n])
		if err != nil {
			log.Printf("decode error %v", err)
			continue
		}
		go s.handleQuery(addr, q, qc)
	}
}
// handleQuery marks the query as a response and echoes it back to the
// client. Encode failures are silently dropped.
// NOTE(review): the WriteToUDP error is ignored — consider logging it.
func (s *server) handleQuery(addr *net.UDPAddr, query *layer.Query, qc layer.Packer) {
	log.Printf("recv dns query %s", addr.String())
	// Flip the QR bit so the packet reads as a response.
	query.Header.Opt = layer.NewOption(layer.QROpt)
	rs, err := qc.Encode(query)
	if err != nil {
		return
	}
	s.conn.WriteToUDP(rs, addr)
}
// Addr returns the UDP address the server was configured with.
func (s *server) Addr() *net.UDPAddr {
	return s.addr
}
|
package errors
import (
"fmt"
)
// PageError is a simple message-carrying error used to panic out of request
// handling (see Bomb and Dangerous).
//
// Deprecated: marked deprecated by the original author; no replacement is
// named in this file.
type PageError struct {
	Message string
}
// Error implements the error interface, returning the stored message.
func (p PageError) Error() string {
	return p.Message
}
// String implements fmt.Stringer, returning the stored message.
func (p PageError) String() string {
	return p.Message
}
// Bomb panics with a PageError whose message is the Sprintf-formatted input.
func Bomb(format string, a ...interface{}) {
	panic(PageError{Message: fmt.Sprintf(format, a...)})
}
// Dangerous panics with a PageError when v carries an error condition:
// a non-empty string, or a non-nil error (panicking with err.Error()).
// nil, empty strings, and values of any other type are ignored.
func Dangerous(v interface{}) {
	if v == nil {
		return
	}
	if s, ok := v.(string); ok {
		if s != "" {
			panic(PageError{Message: s})
		}
		return
	}
	if err, ok := v.(error); ok {
		panic(PageError{Message: err.Error()})
	}
}
|
package main
import (
"errors"
"net/url"
"strconv"
)
func GetLimitQueryParam(val url.Values) (int, error) {
if val.Get("limit") == "" || len(val.Get("limit")) < 1 {
return 0, errors.New("no 'limit' query param")
}
limit, err := strconv.Atoi(val.Get("limit"))
if err != nil {
return 0, err
}
return limit, nil
}
|
package user
import (
"time"
"github.com/dwaynelavon/es-loyalty-program/internal/app/eventsource"
"github.com/pkg/errors"
)
var (
	// errInvalidAggregateType is returned when an aggregate is not a *User.
	errInvalidAggregateType = errors.New("aggregate is not of type user.User")
	// Event type identifiers for the user aggregate's event stream.
	UserDeletedEventType           = "UserDeleted"
	UserCreatedEventType           = "UserCreated"
	UserReferralCreatedEventType   = "UserReferralCreated"
	UserReferralCompletedEventType = "UserReferralCompleted"
	PointsEarnedEventType          = "PointsEarned"
)
// ReferralStatus represents the state of a referral.
type ReferralStatus string

// Referral lifecycle states.
const (
	ReferralStatusCreated   ReferralStatus = "Created"
	ReferralStatusSent      ReferralStatus = "Sent"
	ReferralStatusCompleted ReferralStatus = "Completed"
)

// GetReferralStatus converts a raw status string into a ReferralStatus,
// returning an error for a nil pointer or an unrecognized value.
func GetReferralStatus(status *string) (ReferralStatus, error) {
	errInvalidStatus := errors.New("invalid referral status")
	if status == nil {
		return "", errInvalidStatus
	}
	for _, known := range []ReferralStatus{ReferralStatusCreated, ReferralStatusSent, ReferralStatusCompleted} {
		if *status == string(known) {
			return known, nil
		}
	}
	return "", errInvalidStatus
}
// Referral is the struct that represents a new user's referral status.
type Referral struct {
	ID                string         `json:"id" firestore:"id"`
	ReferralCode      string         `json:"referralCode" firestore:"referralCode"`
	ReferredUserEmail string         `json:"referredUserEmail" firestore:"referredUserEmail"`
	Status            ReferralStatus `json:"status" firestore:"status"`
	CreatedAt         time.Time      `json:"createdAt" firestore:"createdAt"`
	UpdatedAt         time.Time      `json:"updatedAt" firestore:"updatedAt"`
}
// User encapsulates account information about an application user.
// It is the event-sourced aggregate root: Version tracks the last applied
// event and DeletedAt is non-nil once the user has been deleted.
type User struct {
	ID       string `json:"id"`
	Email    string `json:"email"`
	Version  int    `json:"version"`
	Username string `json:"username"`
	Points   uint32 `json:"points"`
	// TODO: should this be a pointer?
	ReferralCode *string    `json:"referralCode"`
	Referrals    []Referral `json:"referrals"`
	CreatedAt    time.Time  `json:"createdAt"`
	UpdatedAt    time.Time  `json:"updatedAt"`
	DeletedAt    *time.Time `json:"deletedAt"`
}
// NewUser creates a new instance of the User aggregate with creation and
// update timestamps set to now; all other fields start at their zero values.
func NewUser(id string) eventsource.Aggregate {
	// TODO: can handle creating of referral code here
	return &User{
		ID:        id,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
}
// EventVersion returns the current event version of the aggregate.
func (u *User) EventVersion() int {
	return u.Version
}
// Apply replays event history onto the user aggregate, stopping at the
// first event that has no applier or whose applier fails.
func (u *User) Apply(history eventsource.History) error {
	for _, event := range history {
		applier, err := GetApplier(event)
		if err != nil {
			return err
		}
		if err := applier.Apply(u); err != nil {
			return err
		}
	}
	return nil
}
// AssertUserAggregate narrows an eventsource.Aggregate to *User, returning
// errInvalidAggregateType when the aggregate is some other concrete type.
// (Rewritten with the idiomatic comma-ok assertion instead of predeclared
// variables assigned inside the if condition.)
func AssertUserAggregate(agg eventsource.Aggregate) (*User, error) {
	u, ok := agg.(*User)
	if !ok {
		return nil, errInvalidAggregateType
	}
	return u, nil
}
|
// Package transmitter provides functionality for transmitting
// arbitrary webhook messages on Discord.
//
// Existing webhooks are used for messages sent, and if necessary,
// new webhooks are created to ensure messages in multiple popular channels
// don't cause messages to be registered as new users.
package transmitter
import (
"strings"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/bwmarrin/discordgo"
)
// A Transmitter represents a message manager instance for a single guild.
type Transmitter struct {
	session *discordgo.Session
	guild   string // guild (server) ID the transmitter manages
	prefix  string // name prefix identifying webhooks owned by this transmitter
	limit   int    // max number of webhooks
	webhooks webhookHeap // pool of managed webhooks
}
// New returns a new Transmitter given a Discord session, guild ID, webhook
// name prefix, and maximum webhook count. Any pre-existing webhooks whose
// names carry the prefix are deleted so the transmitter starts fresh.
func New(session *discordgo.Session, guild string, prefix string, limit int) (*Transmitter, error) {
	// Get all existing webhooks
	hooks, err := session.GuildWebhooks(guild)
	// Check to make sure we have permissions
	if err != nil {
		// The original asserted err.(*discordgo.RESTError) unconditionally,
		// which panics when the error is of another type (e.g. a network
		// failure). Use a comma-ok assertion instead.
		if restErr, ok := err.(*discordgo.RESTError); ok && restErr.Message != nil && restErr.Message.Code == discordgo.ErrCodeMissingPermissions {
			return nil, errors.Wrap(err, "the 'Manage Webhooks' permission is required")
		}
		return nil, errors.Wrap(err, "could not get webhooks")
	}
	// Delete existing webhooks with the same prefix
	for _, wh := range hooks {
		if strings.HasPrefix(wh.Name, prefix) {
			if err := session.WebhookDelete(wh.ID); err != nil {
				return nil, errors.Wrapf(err, "could not remove hook %s", wh.ID)
			}
		}
	}
	t := &Transmitter{
		session:  session,
		guild:    guild,
		prefix:   prefix,
		limit:    limit,
		webhooks: newWebhookHeap(),
	}
	// Track webhook changes made outside the transmitter.
	session.AddHandler(t.onWebhookUpdate)
	return t, nil
}
// Close deletes every webhook the transmitter manages, collecting all
// deletion failures into a single multierror result (nil when all succeed).
func (t *Transmitter) Close() error {
	var result error
	// Delete all the webhooks
	for _, wh := range t.webhooks.list {
		err := t.session.WebhookDelete(wh.ID)
		if err != nil {
			result = multierror.Append(result, errors.Wrapf(err, "could not remove hook %s", wh.ID)).ErrorOrNil()
		}
	}
	return result
}
// Message transmits a message to the given channel with the given username, avatarURL, and content.
//
// Note that this function will wait until Discord responds with an answer.
//
// This will use an existing webhook if it exists.
// If an existing webhook doesn't exist then it will try to repurpose a webhook.
// If there is space to create a new webhook then it will do that.
//
// NOTE(review): on execute failure where the webhook turns out to have been
// deleted externally, Message retries itself recursively; repeated failures
// of that shape would recurse again — confirm this cannot loop unbounded.
func (t *Transmitter) Message(channel string, username string, avatarURL string, content string) (err error) {
	// Attempt to free up a webhook for the channel (or check for existing)
	free, err := t.freeWebhook(channel)
	if err != nil {
		return err
	}
	// Create a webhook if there is no free webhook
	if !free {
		err = t.createWebhook(channel)
		if err != nil {
			return err // this error is already wrapped by us
		}
	}
	params := discordgo.WebhookParams{
		Username:  username,
		AvatarURL: avatarURL,
		Content:   content,
	}
	err = t.executeWebhook(channel, &params)
	if err != nil {
		// The execute failed; find out whether our webhook still exists.
		exists, checkErr := t.checkAndDeleteWebhook(channel)
		// If there was error performing the check, compose the list
		if checkErr != nil {
			err = multierror.Append(err, checkErr).ErrorOrNil()
		}
		// If the webhook exists OR there was an error performing the check
		// return the error to the caller
		if exists || checkErr != nil {
			return errors.Wrap(err, "could not execute existing webhook")
		}
		// Otherwise just try and send the message again
		return t.Message(channel, username, avatarURL, content)
	}
	return nil
}
// HasWebhook reports whether the transmitter owns a webhook with the given ID.
func (t *Transmitter) HasWebhook(id string) bool {
	for i := range t.webhooks.list {
		if t.webhooks.list[i].ID == id {
			return true
		}
	}
	return false
}
|
package main
import "fmt"
// main prints the midpoint of 5 and 11 computed with a shift-based average.
func main() {
	i, j := 5, 11
	// (i + j) >> 1 is the integer mean; prints 8.
	k := (i + j) >> 1
	fmt.Println(k)
}
/*
findDuplicate locates the repeated value in nums, which holds n+1 integers
drawn from [1, n], by binary-searching the value range: if more than mid
elements are <= mid, the duplicate lies in [lo, mid].
O(n log n) time, O(1) extra space; nums is not modified.
*/
func findDuplicate(nums []int) int {
	ans := -1
	lo, hi := 1, len(nums)-1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		count := 0
		for _, v := range nums {
			if v <= mid {
				count++
			}
		}
		if count > mid {
			// Too many small values: the duplicate is at or below mid.
			ans = mid
			hi = mid - 1
		} else {
			lo = mid + 1
		}
	}
	return ans
}
/*
findDuplicate2 finds the repeated value with Floyd's cycle detection
(tortoise and hare) over the implicit linked list i -> nums[i].
O(n) time, O(1) space; nums is not modified.
*/
func findDuplicate2(nums []int) int {
	// Phase 1: advance slow by one step and fast by two until they meet.
	slow := nums[0]
	fast := nums[nums[0]]
	for slow != fast {
		slow = nums[slow]
		fast = nums[nums[fast]]
	}
	// Phase 2: restart slow from the head; the meeting point is the duplicate.
	slow = 0
	for slow != fast {
		slow = nums[slow]
		fast = nums[fast]
	}
	return slow
}
|
/*
时间:2017/11/6
功能:golang 各种排序算法
*/
package main
import "fmt"
// main demonstrates bubbleSort on a small fixed slice.
func main() {
	a := []int{1, 5, 8, 9, 74, 6, 4, 7, 5}
	fmt.Println(bubbleSort(a))
}
/*
#Bubble sort:
Repeatedly compares adjacent elements and swaps any out-of-order pair until
no pass needs a swap; large values "bubble" toward the end of the slice.
In-place, O(n^2); returns the same (mutated) slice.
*/
func bubbleSort(arr []int) []int {
	n := len(arr)
	for pass := 0; pass < n; pass++ {
		// After each pass the last `pass` elements are already in place.
		for j := 1; j < n-pass; j++ {
			if arr[j-1] > arr[j] {
				arr[j-1], arr[j] = arr[j], arr[j-1]
			}
		}
	}
	return arr
}
/*
#Selection sort:
On each pass, find the minimum of the unsorted tail and swap it to the
front of that tail. In-place, O(n^2); returns the same (mutated) slice.
*/
func selectionSort(arr []int) []int {
	for i := 0; i < len(arr)-1; i++ {
		minIdx := i
		for j := i + 1; j < len(arr); j++ {
			if arr[j] < arr[minIdx] {
				minIdx = j
			}
		}
		arr[i], arr[minIdx] = arr[minIdx], arr[i]
	}
	return arr
}
/*
#Insertion sort:
Treat the prefix as sorted; take each subsequent element and shift larger
elements right until its slot is found. In-place, O(n^2).
*/
func insertionSort(arr []int) []int {
	for i := 1; i < len(arr); i++ {
		cur := arr[i]
		j := i
		for j > 0 && arr[j-1] > cur {
			arr[j] = arr[j-1]
			j--
		}
		arr[j] = cur
	}
	return arr
}
/*
#Shell sort:
Gapped insertion sort using the Knuth gap sequence (1, 4, 13, 40, ...).

Bug fix: the original computed the initial gap with `for gap < gap/3`, a
condition that can never be true, so gap stayed 1 and the function silently
degraded to plain insertion sort. The bound must be the slice length.
*/
func shellSort(arr []int) []int {
	length := len(arr)
	gap := 1
	// Grow gap to the largest Knuth step below length/3.
	for gap < length/3 {
		gap = gap*3 + 1
	}
	for gap > 0 {
		// Gapped insertion sort for this gap size.
		for i := gap; i < length; i++ {
			temp := arr[i]
			j := i - gap
			for j >= 0 && arr[j] > temp {
				arr[j+gap] = arr[j]
				j -= gap
			}
			arr[j+gap] = temp
		}
		gap = gap / 3
	}
	return arr
}
/*
#Merge sort:
Classic divide and conquer: split in half, sort each half recursively, then
merge the two sorted halves. Needs O(n) auxiliary space; stable.
*/
func mergeSort(arr []int) []int {
	if len(arr) < 2 {
		return arr
	}
	mid := len(arr) / 2
	return merge(mergeSort(arr[:mid]), mergeSort(arr[mid:]))
}

// merge combines two sorted slices into a single sorted slice, preferring
// the left element on ties (which keeps the sort stable).
func merge(left []int, right []int) []int {
	var result []int
	i, j := 0, 0
	for i < len(left) && j < len(right) {
		if left[i] <= right[j] {
			result = append(result, left[i])
			i++
		} else {
			result = append(result, right[j])
			j++
		}
	}
	// At most one of these appends anything.
	result = append(result, left[i:]...)
	result = append(result, right[j:]...)
	return result
}
/*
#Quick sort:
Divide and conquer with a Lomuto-style partition: the leftmost element is
the pivot; smaller elements are moved before it, then both sides are sorted
recursively. In-place; returns the same (mutated) slice.
*/
func quickSort(arr []int) []int {
	return _quickSort(arr, 0, len(arr)-1)
}

// _quickSort sorts arr[left..right] in place and returns arr.
func _quickSort(arr []int, left, right int) []int {
	if left < right {
		p := partition(arr, left, right)
		_quickSort(arr, left, p-1)
		_quickSort(arr, p+1, right)
	}
	return arr
}

// partition rearranges arr[left..right] around the pivot arr[left] and
// returns the pivot's final index.
func partition(arr []int, left, right int) int {
	pivot := left
	index := pivot + 1
	for i := index; i <= right; i++ {
		if arr[i] < arr[pivot] {
			arr[i], arr[index] = arr[index], arr[i]
			index++
		}
	}
	// Drop the pivot into its final slot.
	arr[pivot], arr[index-1] = arr[index-1], arr[pivot]
	return index - 1
}
/*func swap(arr []int, i, j int) {
arr[i], arr[j] = arr[j], arr[i]
}*/
/*
#Heap sort:
Build a max-heap, then repeatedly swap the root (current maximum) with the
last unsorted element and restore the heap over the shrunken range.
In-place, O(n log n).
*/
func heapSort(arr []int) []int {
	n := len(arr)
	buildMaxHeap(arr, n)
	for end := n - 1; end >= 0; end-- {
		swap(arr, 0, end)
		n--
		heapify(arr, 0, n)
	}
	return arr
}

// buildMaxHeap sifts down every internal node so arr[0..arrLen) satisfies
// the max-heap property.
func buildMaxHeap(arr []int, arrLen int) {
	for i := arrLen / 2; i >= 0; i-- {
		heapify(arr, i, arrLen)
	}
}

// heapify sifts arr[i] down within arr[0..arrLen) to restore the max-heap
// property rooted at i.
func heapify(arr []int, i, arrLen int) {
	largest := i
	if l := 2*i + 1; l < arrLen && arr[l] > arr[largest] {
		largest = l
	}
	if r := 2*i + 2; r < arrLen && arr[r] > arr[largest] {
		largest = r
	}
	if largest != i {
		swap(arr, i, largest)
		heapify(arr, largest, arrLen)
	}
}

// swap exchanges arr[i] and arr[j].
func swap(arr []int, i, j int) {
	arr[i], arr[j] = arr[j], arr[i]
}
/*
#Counting sort:
Non-comparison sort for integers with a known range [0, maxValue]: count
each value's occurrences, then rewrite arr in ascending order. O(n + k)
time. Every element of arr must lie within [0, maxValue].
*/
func countingSort(arr []int, maxValue int) []int {
	counts := make([]int, maxValue+1) // zero-initialized histogram
	for _, v := range arr {
		counts[v]++
	}
	pos := 0
	for value, c := range counts {
		for ; c > 0; c-- {
			arr[pos] = value
			pos++
		}
	}
	return arr
}
/*
#桶排序
桶排序是计数排序的升级版。它利用了函数的映射关系,高效与否的关键就在于这个映射函数的确定。
*/
/*
#基数排序
基数排序是一种非比较型整数排序算法,其原理是将整数按位数切割成不同的数字,然后按每个位数分别比较。
*/
|
package pow
import (
"testing"
"github.com/stretchr/testify/require"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/x/auth"
"github.com/cosmos/cosmos-sdk/x/bank"
"github.com/cosmos/cosmos-sdk/x/mock"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/ed25519"
)
var (
	// priv1/addr1: a throwaway ed25519 key pair and its account address,
	// used as the miner account throughout these tests.
	priv1 = ed25519.GenPrivKey()
	addr1 = sdk.AccAddress(priv1.PubKey().Address())
)
// getMockApp initializes a sealed mock application with the pow module
// wired in: codec registered, the "pow" KV store mounted, a bank keeper,
// a pow keeper with difficulty config {"pow", 1}, and a pow init chainer.
func getMockApp(t *testing.T) *mock.App {
	mapp := mock.NewApp()
	RegisterCodec(mapp.Cdc)
	keyPOW := sdk.NewKVStoreKey("pow")
	bankKeeper := bank.NewBaseKeeper(mapp.AccountKeeper)
	config := Config{"pow", 1}
	keeper := NewKeeper(keyPOW, config, bankKeeper, DefaultCodespace)
	mapp.Router().AddRoute("pow", keeper.Handler)
	mapp.SetInitChainer(getInitChainer(mapp, keeper))
	require.NoError(t, mapp.CompleteSetup(keyPOW))
	mapp.Seal()
	return mapp
}
// getInitChainer wraps the mock app's init chainer so that pow genesis
// state (difficulty 1, count 0) is installed after the base initialization.
func getInitChainer(mapp *mock.App, keeper Keeper) sdk.InitChainer {
	return func(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain {
		mapp.InitChainer(ctx, req)
		genesis := Genesis{
			Difficulty: 1,
			Count:      0,
		}
		InitGenesis(ctx, keeper, genesis)
		return abci.ResponseInitChain{}
	}
}
// TestMsgMine exercises the mine message end to end: two successful mines
// each credit one "pow" coin, and re-submitting an already-used mine
// message is rejected without changing the balance.
func TestMsgMine(t *testing.T) {
	mapp := getMockApp(t)
	// Construct genesis state
	acc1 := &auth.BaseAccount{
		Address: addr1,
		Coins:   nil,
	}
	accs := []auth.Account{acc1}
	// Initialize the chain (nil)
	mock.SetGenesis(mapp, accs)
	// A checkTx context (true)
	ctxCheck := mapp.BaseApp.NewContext(true, abci.Header{})
	res1 := mapp.AccountKeeper.GetAccount(ctxCheck, addr1)
	require.Equal(t, acc1, res1)
	// Mine and check for reward
	mineMsg1 := GenerateMsgMine(addr1, 1, 2)
	mock.SignCheckDeliver(t, mapp.BaseApp, []sdk.Msg{mineMsg1}, []uint64{0}, []uint64{0}, true, true, priv1)
	mock.CheckBalance(t, mapp, addr1, sdk.Coins{sdk.NewCoin("pow", sdk.NewInt(1))})
	// Mine again and check for reward
	mineMsg2 := GenerateMsgMine(addr1, 2, 3)
	mock.SignCheckDeliver(t, mapp.BaseApp, []sdk.Msg{mineMsg2}, []uint64{0}, []uint64{1}, true, true, priv1)
	mock.CheckBalance(t, mapp, addr1, sdk.Coins{sdk.NewCoin("pow", sdk.NewInt(2))})
	// Mine again - should be invalid (same message replayed); balance unchanged.
	mock.SignCheckDeliver(t, mapp.BaseApp, []sdk.Msg{mineMsg2}, []uint64{0}, []uint64{1}, false, false, priv1)
	mock.CheckBalance(t, mapp, addr1, sdk.Coins{sdk.NewCoin("pow", sdk.NewInt(2))})
}
|
package problem0160
import "testing"
// TestSolve checks getIntersectionNode on two single-node lists that share
// no nodes (equal values but distinct allocations), logging whether the
// result is nil; it performs no assertion.
func TestSolve(t *testing.T) {
	listA := &ListNode{Val: 4}
	listB := &ListNode{Val: 4}
	t.Log(getIntersectionNode(listA, listB) == nil)
}
|
package model
import (
"Seaman/utils"
"time"
)
// TplPermResourceT is the xorm model for the permission-resource table,
// tracking a resource code with status, versioning, timestamps, tenant,
// and application metadata (column semantics are in the xorm comment tags).
type TplPermResourceT struct {
	Id             int64     `xorm:"pk autoincr BIGINT(20)"`
	Code           string    `xorm:"not null comment('资源编号') VARCHAR(16)"`
	Status         int       `xorm:"not null default 1 comment('状态(0:无效,1:有效)') INT(11)"`
	Desp           string    `xorm:"comment('描述') VARCHAR(500)"`
	Revision       int64     `xorm:"not null comment('版本号') BIGINT(20)"`
	CreateDate     time.Time `xorm:"comment('创建时间') DATETIME"`
	LastUpdateDate time.Time `xorm:"comment('最后修改时间') DATETIME"`
	TenantId       string    `xorm:"comment('多租户ID') VARCHAR(32)"`
	AppName        string    `xorm:"not null comment('应用(模块)名') VARCHAR(32)"`
	AppScope       string    `xorm:"comment('系统群名') VARCHAR(32)"`
	AppCode        string    `xorm:"not null comment('应用(模块)编码') VARCHAR(32)"`
}
/**
 * tplPermResourceTToRespDesc converts a database row into the JSON field map
 * used by request responses, formatting the timestamps as strings.
 * Note: AppCode is not included in the response map.
 */
func (tplPermResourceT *TplPermResourceT) tplPermResourceTToRespDesc() interface{} {
	respInfo := map[string]interface{}{
		"id":               tplPermResourceT.Id,
		"code":             tplPermResourceT.Code,
		"status":           tplPermResourceT.Status,
		"desp":             tplPermResourceT.Desp,
		"revision":         tplPermResourceT.Revision,
		"tenant_id":        tplPermResourceT.TenantId,
		"app_name":         tplPermResourceT.AppName,
		"app_scope":        tplPermResourceT.AppScope,
		"create_date":      utils.FormatDatetime(tplPermResourceT.CreateDate),
		"last_update_date": utils.FormatDatetime(tplPermResourceT.LastUpdateDate),
	}
	return respInfo
}
|
package yaice
import (
"context"
"github.com/yaice-rx/yaice/config"
"github.com/yaice-rx/yaice/network"
"github.com/yaice-rx/yaice/network/kcpNetwork"
"github.com/yaice-rx/yaice/network/tcp"
"github.com/yaice-rx/yaice/router"
"google.golang.org/protobuf/proto"
"reflect"
)
// shutdown signals service termination (service run state).
// NOTE(review): not referenced in the visible code — confirm it is used elsewhere.
var shutdown = make(chan bool, 1)
// IService is the top-level network service API: register message handlers,
// listen for inbound connections, dial outbound ones, and close.
type IService interface {
	AddRouter(message proto.Message, handler func(conn network.IConn, content []byte))
	Listen(packet network.IPacket, network string, startPort int, endPort int, isAllowConnFunc func(conn interface{}) bool) int
	Dial(packet network.IPacket, network string, address string, options network.IOptions, reConnCallBackFunc func(conn network.IConn, err error)) network.IConn
	Close()
}
// service is the default IService implementation, combining the router and
// configuration managers.
type service struct {
	cancel      context.CancelFunc // NOTE(review): never assigned in the visible code
	routerMgr   router.IRouter
	configMgr   config.IConfig
	ServiceType int
}
/**
 * NewService constructs a service wired to the global router manager and
 * configuration instance.
 */
func NewService() IService {
	return &service{
		routerMgr: router.RouterMgr,
		configMgr: config.ConfInstance(),
	}
}
/**
 * AddRouter registers a network handler for the given protobuf message type.
 * @param message  the message struct used as the routing key
 * @param handler  callback invoked with the connection and raw payload
 */
func (s *service) AddRouter(message proto.Message, handler func(conn network.IConn, content []byte)) {
	s.routerMgr.AddRouter(message, handler)
}
// RegisterMQProto registers an MQ handler keyed by the type name of the
// first field of mqProto (resolved via reflection).
func (s *service) RegisterMQProto(mqProto interface{}, handler func(content []byte)) {
	val := reflect.Indirect(reflect.ValueOf(mqProto))
	s.routerMgr.RegisterMQ(val.Field(0).Type().Name(), handler)
}
/**
 * Dial opens an outbound connection.
 * @param packet    wire protocol codec; nil selects the default TCP packet codec
 * @param network_  transport: "kcpNetwork" or "tcp"/"tcp4"/"tcp6"
 * @param address   remote address to connect to
 * @param options   connection options (e.g. retry limits)
 * @param callFunc  reconnection/error callback
 * @return the connection, or nil for an unsupported transport
 */
func (s *service) Dial(packet network.IPacket, network_ string, address string, options network.IOptions, callFunc func(conn network.IConn, err error)) network.IConn {
	if packet == nil {
		packet = tcp.NewPacket()
	}
	switch network_ {
	case "kcpNetwork":
		return kcpNetwork.NewClient(packet, address, options, callFunc).Connect()
	case "tcp", "tcp4", "tcp6":
		return tcp.NewClient(packet, address, options, callFunc).Connect()
	}
	return nil
}
/**
 * Listen starts a server on the first available port in [startPort, endPort].
 * @param packet           wire protocol codec; nil selects the default TCP packet codec
 * @param network_         transport: "kcpNetwork" or "tcp"/"tcp4"/"tcp6"
 * @param startPort        first port of the candidate range
 * @param endPort          last port of the candidate range
 * @param isAllowConnFunc  connection-limit gate: excess connections are
 *                         reported to upper layers rather than managed here
 * @return the bound port, or 0 for an unsupported transport
 */
func (s *service) Listen(packet network.IPacket, network_ string, startPort int, endPort int, isAllowConnFunc func(conn interface{}) bool) int {
	if packet == nil {
		packet = tcp.NewPacket()
	}
	switch network_ {
	case "kcpNetwork":
		serverMgr := kcpNetwork.NewServer()
		return serverMgr.Listen(packet, startPort, endPort, isAllowConnFunc)
	case "tcp", "tcp4", "tcp6":
		serverMgr := tcp.NewServer()
		return serverMgr.Listen(packet, startPort, endPort, isAllowConnFunc)
	}
	return 0
}
/**
 * Close shuts the cluster service down. Currently a no-op.
 */
func (s *service) Close() {
}
|
package models
import uuid "github.com/satori/go.uuid"
// Like records that a user liked a post; the (post, user) pair forms the
// composite primary key, so a user can like a post at most once.
type Like struct {
	PostID uuid.UUID `json:"post_id" gorm:"primaryKey"`
	UserID uuid.UUID `json:"user_id" gorm:"primaryKey"`
}
|
package slaveMapHandler
import (
"github.com/stretchr/testify/assert"
"master/master"
"net/http"
"net/http/httptest"
"testing"
"github.com/gorilla/mux"
"time"
)
// TestInitiateEmptySlaveMapHandler verifies that GET /slavemap against an
// empty slave map responds 200 with a "null" JSON body.
func TestInitiateEmptySlaveMapHandler(t *testing.T) {
	router := mux.NewRouter()
	recorder := httptest.NewRecorder()
	InitiateSlaveMapHandler(router, make(map[string]master.Slave))
	req, _ := http.NewRequest(http.MethodGet, "/slavemap", nil)
	router.ServeHTTP(recorder, req)
	assert.Equal(t, 200, recorder.Code)
	assert.Equal(t, "null", recorder.Body.String())
}
// TestInitiateNonEmptySlaveMapHandler verifies that GET /slavemap returns
// a JSON array of the registered slave names.
func TestInitiateNonEmptySlaveMapHandler(t *testing.T) {
	router := mux.NewRouter()
	recorder := httptest.NewRecorder()
	slaves := map[string]master.Slave{
		"slave1": {
			URL:                    "http://10.0.0.122:8080",
			Heartbeat:              time.Now(),
			PreviouslyDisplayedURL: "http://www.google.com",
			DisplayedURL:           "http://www.prezi.com",
		},
	}
	InitiateSlaveMapHandler(router, slaves)
	req, _ := http.NewRequest(http.MethodGet, "/slavemap", nil)
	router.ServeHTTP(recorder, req)
	assert.Equal(t, 200, recorder.Code)
	assert.Equal(t, `["slave1"]`, recorder.Body.String())
}
|
// Package meta reads and interprets repo metadata (acyl.yml)
package meta
import (
"context"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/dollarshaveclub/acyl/pkg/eventlogger"
nitroerrors "github.com/dollarshaveclub/acyl/pkg/nitro/errors"
"github.com/dollarshaveclub/acyl/pkg/ghclient"
"github.com/dollarshaveclub/acyl/pkg/match"
"github.com/dollarshaveclub/acyl/pkg/models"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
billy "gopkg.in/src-d/go-billy.v4"
yaml "gopkg.in/yaml.v2"
)
// Getter describes an object that fetches and parses metadata (acyl.yml) from a set of repositories
type Getter interface {
	// Get fetches acyl.yml for the revision, resolves all dependencies and
	// returns the fully-populated config.
	Get(ctx context.Context, rd models.RepoRevisionData) (*models.RepoConfig, error)
	// FetchCharts writes the charts for rc and its dependencies under
	// basePath and returns their local locations keyed by dependency name.
	FetchCharts(ctx context.Context, rc *models.RepoConfig, basePath string) (ChartLocations, error)
	// GetAcylYAML fetches and deserializes acyl.yml for repo@ref into rc.
	GetAcylYAML(ctx context.Context, rc *models.RepoConfig, repo, ref string) (err error)
}
// Compile-time check that DataGetter satisfies Getter.
var _ Getter = &DataGetter{}

// DataGetter is an object that fetches and parses metadata (acyl.yml) from a set of repositories
type DataGetter struct {
	// RepoRefOverrides is a map for reponame to ref override (primarily for local development)
	RepoRefOverrides map[string]string
	// RC is the repo client used for all remote repository reads.
	RC ghclient.RepoClient
	// FS is the filesystem FetchCharts writes chart contents to.
	FS billy.Filesystem
}
// log writes a formatted message to the event logger carried in ctx.
func log(ctx context.Context, msg string, args ...interface{}) {
	eventlogger.GetLogger(ctx).Printf(msg, args...)
}
// GetAcylYAML fetches acyl.yaml from repo at ref and deserializes it into rc, returning an error if the version is < 2.
func (g DataGetter) GetAcylYAML(ctx context.Context, rc *models.RepoConfig, repo, ref string) (err error) {
	// Named return lets the deferred logger report any failure path.
	defer func() {
		if err != nil {
			log(ctx, "error in acyl.yml for %v@%v: %v", repo, ref, err)
		}
	}()
	log(ctx, "fetching acyl.yml for %v@%v", repo, ref)
	b, err := g.RC.GetFileContents(ctx, repo, "acyl.yml", ref)
	if err != nil {
		return fmt.Errorf("error getting acyl.yml: %w", err)
	}
	// Unmarshal directly into rc: the previous code passed &rc (a
	// **RepoConfig), which let a "null" document overwrite the local
	// pointer with nil and panic on rc.Version below.
	if err := yaml.Unmarshal(b, rc); err != nil {
		return nitroerrors.User(fmt.Errorf("error unmarshaling acyl.yml: %w", err))
	}
	if rc.Version < 2 {
		return nitroerrors.User(ErrUnsupportedVersion)
	}
	rc.Application.SetValueDefaults()
	rc.Application.Repo = repo
	rc.Application.Ref = ref
	return nil
}
// getChartName downloads <path>/Chart.yaml from repo at ref and returns
// the chart's "name" field; repo, ref and path must all be non-empty.
func (g DataGetter) getChartName(ctx context.Context, repo, ref, path string) (_ string, err error) {
	defer func() {
		if err != nil {
			log(ctx, "error getting chart name for %v@%v:%v: %v", repo, ref, path, err)
		}
	}()
	if repo == "" || ref == "" || path == "" {
		return "", nitroerrors.User(fmt.Errorf("one of repo (%v), ref (%v) or path (%v) is empty", repo, ref, path))
	}
	chartFile := path + "/Chart.yaml"
	log(ctx, "getting file contents: %v@%v: %v", repo, ref, chartFile)
	b, err := g.RC.GetFileContents(ctx, repo, chartFile, ref)
	if err != nil {
		return "", fmt.Errorf("error getting Chart.yaml: %w", err)
	}
	var chart struct {
		Name string `yaml:"name"`
	}
	if err := yaml.Unmarshal(b, &chart); err != nil {
		return "", nitroerrors.User(fmt.Errorf("error unmarshaling Chart.yaml: %w", err))
	}
	if chart.Name == "" {
		return "", nitroerrors.User(errors.New("chart name field is empty"))
	}
	return chart.Name, nil
}
// getDependencyChartName determines a dependency's chart name by reading
// Chart.yaml from either ChartPath (inside the dependency's own repo/ref)
// or a parsed ChartRepoPath.
func (g DataGetter) getDependencyChartName(ctx context.Context, d *models.RepoConfigDependency) (cname string, err error) {
	defer func() {
		if err != nil {
			log(ctx, "error getting dependency chart name: %v", err)
			return
		}
		log(ctx, "chart name for dependency: %v", cname)
	}()
	if d == nil {
		return "", nitroerrors.User(errors.New("dependency is nil"))
	}
	// Resolve which repo/ref/path holds the Chart.yaml.
	var crepo, cref, cpath string
	switch {
	case d.AppMetadata.ChartPath != "":
		crepo, cref, cpath = d.AppMetadata.Repo, d.AppMetadata.Ref, d.AppMetadata.ChartPath
	case d.AppMetadata.ChartRepoPath != "":
		rp := &repoPath{}
		if err := rp.parseFromString(d.AppMetadata.ChartRepoPath); err != nil {
			return "", fmt.Errorf("error parsing ChartRepoPath for repo dependency: %v: %w", d.Repo, err)
		}
		crepo, cref, cpath = rp.repo, rp.ref, rp.path
	default:
		return "", nitroerrors.User(fmt.Errorf("repo dependency lacks ChartPath/ChartRepoPath: %v", d.Repo))
	}
	return g.getChartName(ctx, crepo, cref, cpath)
}
// getRefForRepoDependency resolves the git SHA and branch to use for a
// repo dependency via branch matching against the triggering revision,
// applying any configured per-repo SHA override.
//
// NOTE(review): the defaultBranch and branchMatch parameters are never
// read — the fallback branch is derived from rd.BaseBranch/d.DefaultBranch
// and matching from !d.DisableBranchMatch below. Confirm whether callers
// expect the ancestor-derived values they pass in to take effect.
func (g DataGetter) getRefForRepoDependency(ctx context.Context, d *models.RepoConfigDependency, rd models.RepoRevisionData, defaultBranch string, branchMatch bool) (sha, branch string, err error) {
	defer func() {
		if err != nil {
			log(ctx, "error getting ref for repo dependency: %v", err)
			return
		}
		log(ctx, "calculated ref for repo dependency: %v: %v (%v)", d.Repo, branch, sha)
	}()
	if d == nil || d.Repo == "" {
		return "", "", nitroerrors.User(errors.New("empty Repo field"))
	}
	log(ctx, "fetching branches for %v", d.Repo)
	branches, err := g.RC.GetBranches(ctx, d.Repo)
	if err != nil {
		return "", "", fmt.Errorf("error getting repo branches: %w", err)
	}
	bi := make([]match.BranchInfo, len(branches))
	for i := range branches {
		bi[i] = match.BranchInfo{Name: branches[i].Name, SHA: branches[i].SHA}
	}
	// Fallback branch: the dependency's own DefaultBranch wins over the
	// triggering revision's base branch.
	defb := rd.BaseBranch
	if d.DefaultBranch != "" {
		defb = d.DefaultBranch
	}
	ri := match.RepoInfo{
		SourceBranch:  rd.SourceBranch,
		BaseBranch:    rd.BaseBranch,
		BranchMatch:   !d.DisableBranchMatch,
		DefaultBranch: defb,
	}
	sha, branch, err = match.GetRefForRepo(ri, bi)
	if err != nil {
		return "", "", fmt.Errorf("error getting ref for repo: %w", err)
	}
	// get override if present, but only override the SHA
	if override, ok := g.RepoRefOverrides[d.Repo]; ok {
		sha = override
	}
	return sha, branch, nil
}
// ErrUnsupportedVersion is returned when acyl.yml declares a version < 2.
var ErrUnsupportedVersion = errors.New("acyl.yml is previous or unsupported version")

// DefaultFallbackBranch is used when a dependency's ancestor does not
// declare its own default branch.
const DefaultFallbackBranch = "master"
// Get fetches and parses acyl.yml from owner/repo and any dependent repositories, calculates refs and returns the parsed data.
func (g DataGetter) Get(ctx context.Context, rd models.RepoRevisionData) (*models.RepoConfig, error) {
	// renameRequires rewrites any Requires entry equal to old to new so
	// requirement edges stay valid after transitive deps are renamed.
	var renameRequires = func(ds []models.RepoConfigDependency, old, new string) {
		for i, d := range ds {
			for j, r := range d.Requires {
				if r == old {
					ds[i].Requires[j] = new
				}
			}
		}
	}
	// repomap tracks visited repos to reject duplicate/circular repo deps.
	repomap := map[string]struct{}{}
	// transitiveDeps accumulates dependencies discovered while recursing;
	// they are appended to the owning list after each top-level pass.
	transitiveDeps := []models.RepoConfigDependency{}
	// d is current dependency
	// parent is the parent dependency (if applicable)
	// ancestor is the "root" dependency (for recursive transitive dependency trees)
	/*
		triggering repo
		|
		|---> repo dependency (ancestor)
			|
			|----> transitive dependency (parent)
				|
				|----> child transitive dependency (d)
	*/
	var processDep func(d, parent, ancestor *models.RepoConfigDependency) (err error)
	processDep = func(d, parent, ancestor *models.RepoConfigDependency) (err error) {
		// Guard against nil inputs (programmer error).
		switch {
		case d == nil:
			return errors.New("processDep: d is nil")
		case parent == nil:
			return errors.New("processDep: parent is nil")
		case ancestor == nil:
			return errors.New("processDep: ancestor is nil")
		}
		defer func() {
			if err != nil {
				log(ctx, "error processing dependency: %v: %v", d.Name, err)
				return
			}
			log(ctx, "processing completed for dependency: %v", d.Name)
		}()
		log(ctx, "processing dependency: %v (parent: %v, ancestor: %v)", d.Name, parent.Name, ancestor.Name)
		// Abort promptly if the caller cancelled.
		select {
		case <-ctx.Done():
			return nitroerrors.Cancelled(errors.New("context was cancelled"))
		default:
			break
		}
		switch {
		case d.Repo != "" && (d.ChartPath != "" || d.ChartRepoPath != ""):
			return nitroerrors.User(fmt.Errorf("dependency error: %v: only one of Repo, ChartPath, or ChartRepoPath may be used", d.Name))
		case d.ChartPath != "" && d.ChartRepoPath != "":
			return nitroerrors.User(fmt.Errorf("dependency error: %v: either ChartPath or ChartRepoPath may be used, not both", d.Name))
		case d.Repo != "":
			// Repo dependency: resolve a ref, fetch its acyl.yml and
			// recurse into its own direct dependencies.
			if _, ok := repomap[d.Repo]; ok {
				return nitroerrors.User(fmt.Errorf("duplicate repository dependency: %v (check for circular dependency declarations)", d.Repo))
			}
			repomap[d.Repo] = struct{}{}
			// Branch-match behavior and fallback branch are inherited from
			// the root (ancestor) dependency.
			bm := !ancestor.DisableBranchMatch
			defb := DefaultFallbackBranch
			if ancestor.DefaultBranch != "" {
				defb = ancestor.DefaultBranch
			}
			dref, dbranch, err := g.getRefForRepoDependency(ctx, d, rd, defb, bm)
			if err != nil {
				return fmt.Errorf("error getting ref for repo dependency: %v: %w", d.Repo, err)
			}
			drc := models.RepoConfig{}
			if err := g.GetAcylYAML(ctx, &drc, d.Repo, dref); err != nil {
				return fmt.Errorf("error processing %v acyl.yml: %w", d.Repo, err)
			}
			drc.Application.Branch = dbranch
			d.AppMetadata = drc.Application
			// Default the dependency name to its chart name.
			if d.Name == "" {
				name, err := g.getDependencyChartName(ctx, d)
				if err != nil {
					return fmt.Errorf("error getting chart name for repo dependency: %v: %w", d.Repo, err)
				}
				d.Name = name
			}
			// Process this repo's own dependencies, prefixing their names
			// with the repo name so they stay globally unique.
			renames := map[string]string{}
			deps := []models.RepoConfigDependency{}
			for i := range drc.Dependencies.Direct {
				dd := &drc.Dependencies.Direct[i]
				if err := processDep(dd, d, ancestor); err != nil {
					return fmt.Errorf("error processing direct dependency of %v: %v: %w", d.Name, dd.Name, err)
				}
				old, new := dd.Name, models.GetName(d.Repo)+"-"+dd.Name
				dd.Name = new
				renames[old] = new
				d.Requires = append(d.Requires, dd.Name)
				dd.Parent = d.Name
				deps = append(deps, *dd)
			}
			// Fix up Requires references to the renamed dependencies.
			for old, new := range renames {
				renameRequires(deps, old, new)
			}
			transitiveDeps = append(transitiveDeps, deps...)
		case d.ChartPath != "" || d.ChartRepoPath != "":
			// Chart-only dependency (no repo): branch matching is not
			// applicable and is rejected explicitly.
			if d.DisableBranchMatch || d.DefaultBranch != "" {
				return nitroerrors.User(fmt.Errorf("branch matching and default branch not available if ChartPath or ChartRepoPath is used: %v", d.Name))
			}
			var drepo, dref, dbranch string
			if d.ChartPath == "" {
				rp := &repoPath{}
				if err := rp.parseFromString(d.ChartRepoPath); err != nil {
					return fmt.Errorf("dependency error: %v: malformed ChartRepoPath: %w", d.Name, err)
				}
				drepo = rp.repo
				dref = rp.ref
			} else {
				// ChartPath is relative to the parent's repo at its ref.
				drepo = parent.AppMetadata.Repo
				dref = parent.AppMetadata.Ref
				dbranch = parent.AppMetadata.Branch
			}
			d.AppMetadata = models.RepoConfigAppMetadata{
				Repo:              drepo,
				Ref:               dref,
				Branch:            dbranch,
				ChartPath:         d.ChartPath,
				ChartRepoPath:     d.ChartRepoPath,
				ChartVarsPath:     d.ChartVarsPath,
				ChartVarsRepoPath: d.ChartVarsRepoPath,
			}
			if d.Name == "" {
				name, err := g.getDependencyChartName(ctx, d)
				if err != nil {
					return fmt.Errorf("error getting chart name for dependency: %v: %w", d.AppMetadata.Repo, err)
				}
				d.Name = name
			}
		default:
			return fmt.Errorf("dependency error: %v: exactly one of Repo, ChartPath, or ChartRepoPath must be used", d.Name)
		}
		d.AppMetadata.SetValueDefaults()
		return nil
	}
	// Load the triggering repo's own acyl.yml first.
	rc := models.RepoConfig{}
	if err := g.GetAcylYAML(ctx, &rc, rd.Repo, rd.SourceSHA); err != nil {
		return nil, fmt.Errorf("error processing target repo acyl.yml: %w", err)
	}
	repomap[rd.Repo] = struct{}{}
	rc.Application.Branch = rd.SourceBranch
	parent := &models.RepoConfigDependency{AppMetadata: rc.Application}
	// Resolve direct dependencies; each top-level dep is its own ancestor.
	for i := range rc.Dependencies.Direct {
		d := &rc.Dependencies.Direct[i]
		if err := processDep(d, parent, d); err != nil {
			return nil, fmt.Errorf("error processing direct dependencies: %w", err)
		}
	}
	for _, td := range transitiveDeps {
		rc.Dependencies.Direct = append(rc.Dependencies.Direct, td)
	}
	// Reset the accumulator and repeat for environment dependencies.
	transitiveDeps = []models.RepoConfigDependency{}
	for i := range rc.Dependencies.Environment {
		d := &rc.Dependencies.Environment[i]
		if err := processDep(d, parent, d); err != nil {
			return nil, fmt.Errorf("error processing environment dependencies: %w", err)
		}
	}
	for _, td := range transitiveDeps {
		rc.Dependencies.Environment = append(rc.Dependencies.Environment, td)
	}
	if ok, err := rc.Dependencies.ValidateNames(); !ok {
		return nil, fmt.Errorf("error validating dependency names: %w", err)
	}
	return &rc, nil
}
// repoPath models a path within a repo at a ref
type repoPath struct {
	// repo is "owner/name"; path is a path inside the repo; ref is an
	// optional branch/tag/SHA.
	repo, path, ref string
}
// parseFromString parses a repo path of the form
// "owner/repo[@ref]:path/to/chart" into rp's fields. The "@ref" segment
// is optional; the repo segment must contain exactly one '/'.
func (rp *repoPath) parseFromString(crp string) error {
	// chart_repo_path: dollarshaveclub/helm-charts@master:path/to/chart
	psl := strings.Split(crp, ":")
	if len(psl) != 2 {
		// Message fixed: previously contained a stray apostrophe (':'').
		return nitroerrors.User(fmt.Errorf("malformed repo path: exactly one ':' required: %v", crp))
	}
	rp.path = psl[1]
	if strings.Contains(psl[0], "@") {
		rsl := strings.Split(psl[0], "@")
		if len(rsl) != 2 {
			return nitroerrors.User(fmt.Errorf("malformed repo path: no more than one '@' may be present: %v", psl[0]))
		}
		rp.ref = rsl[1]
		psl[0] = rsl[0]
	}
	if len(strings.Split(psl[0], "/")) != 2 {
		return nitroerrors.User(fmt.Errorf("malformed repo: exactly one '/' is required: %v", psl[0]))
	}
	rp.repo = psl[0]
	return nil
}
// chartLocation models the location for a chart and the associated vars file
type chartLocation struct {
	// chart locates the chart directory; vars locates the optional values
	// file (zero value when the dependency declares none).
	chart repoPath
	vars  repoPath
}
// getChartLocation resolves where a dependency's chart and (optionally)
// its vars file live, based on the dependency's application metadata.
func getChartLocation(d models.RepoConfigDependency) (chartLocation, error) {
	var loc chartLocation
	md := d.AppMetadata
	if md.ChartPath != "" {
		// Chart lives inside the dependency's own repo at its resolved ref.
		loc.chart = repoPath{repo: md.Repo, path: md.ChartPath, ref: md.Ref}
	} else {
		if md.ChartRepoPath == "" {
			return loc, nitroerrors.User(errors.New("one of ChartPath or ChartRepoPath must be defined"))
		}
		rp := &repoPath{}
		if err := rp.parseFromString(md.ChartRepoPath); err != nil {
			return loc, fmt.Errorf("error validating ChartRepoPath: %w", err)
		}
		loc.chart = *rp
	}
	switch {
	case md.ChartVarsPath != "":
		loc.vars = repoPath{repo: md.Repo, path: md.ChartVarsPath, ref: md.Ref}
	case md.ChartVarsRepoPath != "":
		rp := &repoPath{}
		if err := rp.parseFromString(md.ChartVarsRepoPath); err != nil {
			return loc, fmt.Errorf("error validating ChartVarsRepoPath: %w", err)
		}
		loc.vars = *rp
	}
	return loc, nil
}
// ChartLocation models the local filesystem path for the chart and the associated vars file
type ChartLocation struct {
	// ChartPath is the local chart directory; VarFilePath is the local
	// vars file path (empty when the dependency has no vars file).
	ChartPath, VarFilePath string
	// ValueOverrides maps chart value names to override values.
	ValueOverrides map[string]string
}

// ChartLocations is a map of dependency name to ChartLocation
type ChartLocations map[string]ChartLocation
// FetchCharts fetches the charts for the repo and all dependencies, writing them to the filesystem g.FS at basePath/[offset]/[name] and returns a map of dependency name to filesystem path
func (g DataGetter) FetchCharts(ctx context.Context, rc *models.RepoConfig, basePath string) (ChartLocations, error) {
	// writeFile writes contents to fp on g.FS and closes the file as soon
	// as the write finishes. (Previously Close was deferred inside the
	// chart-file loop, holding every file open until function return.)
	writeFile := func(fp string, contents []byte) error {
		f, err := g.FS.Create(fp)
		if err != nil {
			return fmt.Errorf("error creating file: %w", err)
		}
		defer f.Close()
		written := 0
		for written < len(contents) {
			i, err := f.Write(contents[written:])
			if err != nil {
				return fmt.Errorf("error writing to file: %w", err)
			}
			written += i
		}
		return nil
	}
	// fetchChartAndVars materializes one dependency's chart (and optional
	// vars file) under basePath/<i>/<name> and returns its local location.
	fetchChartAndVars := func(i int, d models.RepoConfigDependency) (_ *ChartLocation, err error) {
		defer func() {
			if err != nil {
				log(ctx, "error fetching chart and vars for dependency: %v: %v", d.Name, err)
				return
			}
			log(ctx, "success fetching chart and vars for dependency: %v", d.Name)
		}()
		log(ctx, "fetching chart and vars for dependency: %v", d.Name)
		out := &ChartLocation{ValueOverrides: make(map[string]string)}
		cloc, err := getChartLocation(d)
		if err != nil {
			return nil, fmt.Errorf("error getting chart location: %w", err)
		}
		cd := path.Join(basePath, strconv.Itoa(i), d.Name)
		log(ctx, "getting directory contents: %v@%v: %v", cloc.chart.repo, cloc.chart.ref, cloc.chart.path)
		dc, err := g.RC.GetDirectoryContents(ctx, cloc.chart.repo, cloc.chart.path, cloc.chart.ref)
		if err != nil {
			return nil, fmt.Errorf("error fetching chart contents: %w", err)
		}
		for n, c := range dc {
			n = strings.Replace(n, filepath.Clean(cloc.chart.path), "", -1) // remove chart path
			fp := path.Join(cd, n)
			if err = g.FS.MkdirAll(path.Dir(fp), os.ModePerm); err != nil {
				return nil, fmt.Errorf("error creating directory: %w", err)
			}
			if c.Symlink {
				if err := g.FS.Symlink(c.SymlinkTarget, fp); err != nil {
					return nil, fmt.Errorf("error creating symlink: %w", err)
				}
				continue
			}
			if err := writeFile(fp, c.Contents); err != nil {
				return nil, err
			}
		}
		out.ChartPath = cd
		if cloc.vars.repo != "" {
			log(ctx, "getting file contents: %v@%v: %v", cloc.vars.repo, cloc.vars.ref, cloc.vars.path)
			fc, err := g.RC.GetFileContents(ctx, cloc.vars.repo, cloc.vars.path, cloc.vars.ref)
			if err != nil {
				return nil, fmt.Errorf("error getting vars file: %w", err)
			}
			vcd := path.Join(cd, "vars.yml")
			vf, err := g.FS.Create(vcd)
			if err != nil {
				return nil, fmt.Errorf("error creating vars file: %w", err)
			}
			n, err := vf.Write(fc)
			cerr := vf.Close() // close promptly, not at function return
			if err != nil {
				return nil, fmt.Errorf("error writing to vars file: %w", err)
			}
			if cerr != nil {
				return nil, fmt.Errorf("error closing vars file: %w", cerr)
			}
			if n < len(fc) {
				return nil, io.ErrShortWrite
			}
			// Application-level value overrides; dependency-level overrides
			// are applied afterwards and take precedence.
			for _, v := range d.AppMetadata.ValueOverrides {
				vsl := strings.SplitN(v, "=", 2)
				if len(vsl) != 2 {
					return nil, fmt.Errorf("malformed value override: %v", v)
				}
				out.ValueOverrides[vsl[0]] = vsl[1]
			}
			out.VarFilePath = vcd
		}
		return out, nil
	}
	name := models.GetName(rc.Application.Repo)
	loc, err := fetchChartAndVars(0, models.RepoConfigDependency{Name: name, AppMetadata: rc.Application})
	if err != nil || loc == nil {
		return nil, fmt.Errorf("error getting primary repo chart: %w", err)
	}
	out := map[string]ChartLocation{name: *loc}
	ctx, cf := context.WithTimeout(ctx, 2*time.Minute)
	defer cf()
	eg, _ := errgroup.WithContext(ctx)
	couts := make([]ChartLocation, rc.Dependencies.Count())
	offsetmap := make(map[int]*models.RepoConfigDependency, rc.Dependencies.Count())
	for i, d := range rc.Dependencies.All() {
		d := d // per-iteration copies for the closure (pre-Go 1.22 semantics)
		i := i
		offsetmap[i] = &d
		eg.Go(func() error {
			// Use goroutine-local variables: the previous code assigned to
			// the enclosing loc/err, a data race across worker goroutines.
			dloc, err := fetchChartAndVars(i+1, d)
			if err != nil || dloc == nil {
				return fmt.Errorf("error getting dependency chart: %v: %w", d.Name, err)
			}
			couts[i] = *dloc
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return nil, fmt.Errorf("error fetching charts: %w", err)
	}
	for i := 0; i < rc.Dependencies.Count(); i++ {
		loc := couts[i]
		d := offsetmap[i]
		for _, v := range d.ValueOverrides { // Dependency value_overrides override anything in the application metadata
			vsl := strings.SplitN(v, "=", 2)
			if len(vsl) != 2 {
				return nil, nitroerrors.User(fmt.Errorf("malformed value override: %v", v))
			}
			loc.ValueOverrides[vsl[0]] = vsl[1]
		}
		out[d.Name] = loc
	}
	return out, nil
}
|
package initdb
import (
"github.com/jinzhu/gorm"
"fmt"
_ "github.com/jinzhu/gorm/dialects/mysql"
)
// MYSQLORM is the shared gorm handle to the "reader" MySQL database.
// NOTE(review): credentials are hard-coded; consider loading from config.
var MYSQLORM *gorm.DB

// init opens the MySQL connection at package load time and panics if the
// database is unreachable, since the package is unusable without it.
func init() {
	connect, err := gorm.Open("mysql", "root:123456@tcp(localhost:3307)/reader?parseTime=true")
	if err != nil {
		fmt.Print(err)
		// Messages fixed: this is a MySQL connection, not postgres.
		panic("connect mysql failed")
	}
	// The connection is deliberately kept open for the process lifetime.
	fmt.Print("Connect mysql database success!")
	MYSQLORM = connect
}
|
// twoSum returns the indices of the two entries in nums that sum to
// target (earlier index first), or nil if no such pair exists.
// One-pass hash map: O(n) time, O(n) extra space.
func twoSum(nums []int, target int) []int {
	seen := make(map[int]int, len(nums))
	for i, v := range nums {
		// If the complement was seen earlier, we have our pair.
		if j, ok := seen[target-v]; ok {
			return []int{j, i}
		}
		// Record this value's index for later complements.
		seen[v] = i
	}
	return nil
}
|
package loader
import (
"net/http"
"net/http/httptest"
"testing"
"time"
)
const (
testTextFile = "/index.html"
testImgFile = "/image.png"
)
// TestCompression exercises content-encoding negotiation: gzip for
// compressible content when the client accepts it, identity otherwise.
// Fixes: the original t.Errorf messages had expected/got swapped, the
// "uncompressed" message cited CompressedBytes while comparing RawBytes,
// and "uncompressable" compared testImgFile but reported testTextFile.
func TestCompression(t *testing.T) {
	t.Run("compressed", func(t *testing.T) {
		req := httptest.NewRequest(http.MethodGet, testTextFile, nil)
		req.Header.Set("Accept", "*/*")
		req.Header.Set("Accept-Encoding", "gzip, deflate, br")
		res := httptest.NewRecorder()
		testLoader.ServeHTTP(res, req)
		if res.Code != http.StatusOK {
			t.Errorf("Not OK: %d", res.Code)
		}
		if res.Header().Get("Content-Encoding") != "gzip" {
			t.Errorf("Wrong encoding: %s", res.Header().Get("Content-Encoding"))
		}
		if want := len(testLoader.content[testTextFile].CompressedBytes); res.Body.Len() != want {
			t.Errorf("Wrong content length, expected %d, got %d", want, res.Body.Len())
		}
	})
	t.Run("uncompressed", func(t *testing.T) {
		req := httptest.NewRequest(http.MethodGet, testTextFile, nil)
		req.Header.Set("Accept", "*/*")
		res := httptest.NewRecorder()
		testLoader.ServeHTTP(res, req)
		if res.Code != http.StatusOK {
			t.Errorf("Not OK: %d", res.Code)
		}
		if res.Header().Get("Content-Encoding") != "" {
			t.Errorf("Wrong encoding: %s", res.Header().Get("Content-Encoding"))
		}
		if want := len(testLoader.content[testTextFile].RawBytes); res.Body.Len() != want {
			t.Errorf("Wrong content length, expected %d, got %d", want, res.Body.Len())
		}
	})
	t.Run("uncompressable", func(t *testing.T) {
		req := httptest.NewRequest(http.MethodGet, testImgFile, nil)
		req.Header.Set("Accept", "*/*")
		req.Header.Set("Accept-Encoding", "gzip, deflate, br")
		res := httptest.NewRecorder()
		testLoader.ServeHTTP(res, req)
		if res.Code != http.StatusOK {
			t.Errorf("Not OK: %d", res.Code)
		}
		if res.Header().Get("Content-Encoding") != "" {
			t.Errorf("Wrong encoding: %s", res.Header().Get("Content-Encoding"))
		}
		if want := len(testLoader.content[testImgFile].RawBytes); res.Body.Len() != want {
			t.Errorf("Wrong content length, expected %d, got %d", want, res.Body.Len())
		}
	})
	t.Run("index", func(t *testing.T) {
		req := httptest.NewRequest(http.MethodGet, "/", nil)
		req.Header.Set("Accept", "*/*")
		res := httptest.NewRecorder()
		testLoader.ServeHTTP(res, req)
		if res.Code != http.StatusOK {
			t.Errorf("Not OK: %d", res.Code)
		}
	})
}
// TestConditional is an unimplemented placeholder for conditional-request
// coverage.
func TestConditional(t *testing.T) {
	// Test If-Modified-Since 200 & 304
	// Test Etag/If-None-Match 200 & 304
}
// TestNotFound is an unimplemented placeholder for 404 coverage.
func TestNotFound(t *testing.T) {
	// Test file with nonexistent path
	// Test folder with nonexistent path
}
// TestIndex is an unimplemented placeholder for index-resolution coverage.
func TestIndex(t *testing.T) {
	// Test / vs /index.html
	// Test when index does/doesn't exist
}
// TestRange is an unimplemented placeholder for Range-request coverage.
func TestRange(t *testing.T) {
	// Test range with and without compression
}
// TestContentType is an unimplemented placeholder for MIME-type coverage.
func TestContentType(t *testing.T) {
	// Test content types
}
// testLoader is the shared loader under test, populated in init below.
var testLoader = New()
func init() {
testLoader.Add(&Content{
Path: testImgFile,
Hash: "5typ8Zrehr5H07eDP0GNuQ",
Modified: time.Unix(1506288628, 0),
Raw: `
iVBORw0KGgoAAAANSUhEUgAAAMgAAAC3CAMAAABg8uG4AAABGlBMVEVHcEyoqKqytLVzc3ONj5HCwsTW
1trDysqPk5OWmJqZmcyZmZucnqGztLWQkZLb3N2Hh4eChYbd3d3d3d66u72EhYfCw8Xc3N3Ly81XV1dY
WFjh4eJPT0/h4eLo6Onr6+zt7e3k5OVra2vc3N3h4eLn5+ju7u/09PW8vb/Q0NLq6uvS09Tw8PCvsLKy
s7W/v8GkpafZ2dvy8vPIyMqsra/Fxse1t7m4ubvKy8zV1demqKrNzs/8/Pypqq3BwsSQkZPd3d+IiIvX
2NmYmpyen6DCxMX19faMjY+foaObnJ85NjeWl5n29ve6u70/PT5RT1D39/j5+fozMDFgX2CTlJdzcnSD
hIZnZmfDw8RZWFl6ent/f4BIRkiho6Xc+NdTAAAAHnRSTlMA6Y/kcFctESFABeS90Jqm3sR4v7Kq8t/Y
ynnj7s8DVns2AAAvBElEQVR4AezTwYqbQBzHcR1iotbZwCLJXnoIelBEo8yMowbdWTfSWYIsxAa2aX3/
1+gYpeyWHhJWb/0+wMCH//ykKfufOjPAQpqs+T2E4nnFhJMi7gw5xxhDVZomfVkSSsz7dbLlYEKIcch9
EcayJk3R4mv0ViMvssoK83b0u6uaomjdDZTw5RBeJD4GU0CWFnLciDGW1Jxn4yKUO+jHtK6DmSTJYcgP
LxdJjo3xh6JYzLOYqCQZ52DUXft0u3WcWoRVyQjDsH3uJFgka+M7LtksiW86yBzAh9Vq9WAq/1QAGNRF
sd0OEmroOhCS/Xd/kOSzUfexdL9lzO5yi/aWgwCSlmWZpinxdeljugJlWhBSVYOkFlFTlTS5fd77fyTG
F2Ossyhrtv9p2yzduS7BnMvXO1AUlV1pFSw+zsKkAUkQIn9LYlmWwzwIBokoazOgj8CYLxmjTfepKqdw
xEC4cvUl49SKegp5B9EArNEuSXBKeknxXkLjOA5EAyQXZdnnVz9fCwL64QkHbfkl8/ohx46QXCherA2n
ALRK3J0oqd92qJNUg8QZJB3k8fTaPPYnGUUytxhlUYPsza7mtzok9SmuU8vqJStomiYskOeKegn2SC/p
IO8lwen4mjfHJvfxPh8k+ucc9jm0T/vNpiQh5y1QwE3Da04UWVZPEZhU5IkGSUJ8D4lI5SXIQ4NEQOj5
SUj88/FX+5tUc+9u09q2+HCdR1OfnKbJPblN2gtCgGxAIMRDvCwJCWzZYIHesl7f/2vctYGtjWTZcXqW
2ozR/iN+mmuuufYmg2iag7hv/juO1oZrVxmG8Rp3d5b58Sd/lj96ZtPHJH1MImASr2NJ4BFPMlSr1RCI
TdKpUp1M5d4kiZLJPCe5+K84+ik0q4NAOrLE6cbHnxt3HyUf6hClrImnWx3PG6rKzc3NsGPvSbaTWFlN
qogkTiabDGT36z9Oj784Tt20li2G4XnG53lWVyFCXh+FfzoQPD53goRoYpugBXQXkMjCvrmmk61ibCdz
1F2zJLrPXfKPt0SOAynCLp8VRVFS47VZ+P7tL39KPsMwHCosyklNbNvGo0vZkwwmk3lLTSY9GRxvwFKP
QNRf/mEM+gyUOWUQBSrGVl7MEBzYv/zPnxKLRMwqI3HSZAUknitkJIijRIJzUdZxmkBvRaoST6LYSCdd
Iwdxd2//EchnBtU0TQUqr765u1N/fGypNDNuHoqQCNvE0cM0SnqYRHpK0pBNTDKbTFJF2UwSdwdjqyBR
zz78I4cwUF64mBYcPBLkFWanfAoKo2CSdZSka3UbzdEUVo0DTXBzDQ1zn4vVSbRWlCkM4dVcLkCA5OcN
/9sXBnmc8tOrkiB3r/hJ/mIoSoQPVE7S2RkLiGkTjFKNFsgo0+gOa+Kh0hW33WoMw0aR8ADSSieTgSJv
J8u1ASCY5Odj8XeEAf8KZo4hgiCvWxe/0BlEhsJDcUEShf3c8otojYZXupX2PkGKhLDshrOZ4ZWWLjWa
RDMl2N7JUHuQ3S/v//Xx3U8kCOJg+k6zT1HF7+uYd68QBOr3W1GkMwwsyjqKOVQIZAPDqxPFxCdA0lK9
fE2BGUyWrngyiea9EC+PmOTNoG29fvuWkBx9RtRohJGVLb9yf3875kUoSkTm4BGKk2ydnGQTBTC81lEP
QIgmO90kM3jfXFYMo6tH1uAC5H5g2G9enejAwTcZntIAAwkiis4NrFqv0vT9iMlAuDHUJYdIFtEiT5RV
5LHsMJ2uDuJk5+genlxEEtgdwzZwlCVR72dd46oVvNLzX3goluP5Sg01FipGl9HIelVRHE2LIjd2DPeq
8oBI+osFg0ikaOn7ztKzIqtMYnj6fnJhkhMnEzeMZzvLbN3Hwes8/5ZH5UBXjDIOnmP7jgQj65Um+4un
adCj70LJWp2CKuZwELV8pweiTJfNnERAJHZbAhL9uLmOQO4C19KHxu4u7N4Hb17TGizi4FiKQg7hWKfJ
VGrc8PUn3C8jjtPGrHv29v2Hj40xl5EwqFZVUAWtK2rU6Zds0rBsD0hONBchUYfmUM5ccnc/C+IfN9c7
1FjIIRQ/YpuOT1UqNM3p7VeMLKwoNR7TunuGBPzto69RmERO+/nmxepp3Cw3l2KS5jqURClADNnOt3kA
CWNorh/mucQxPOP30e84EtH8oaEc89WCQJ3BFwJHfpGrjJkiUriqx3AFCUqTEonX9synkliyGoZ3RiGJ
1QGQQpJu8KPm+p2TwA8Ox2OX01l5Qzgd/sQx5Ozsl/eI4sJQLjQuH+HMzGGYPQnESf/IJkd+H8q7tul4
DRdIkCJGY4hBQiC5eP+yP3yG1mo0hYpw0OhW7idP/x/+MwjVoDc/1zjAABTWx5uXDygEJB/BpiLoB5JY
btvuPyKXqNjuupKRZCBx982LgiCjUzkGtceoVHTrub33+7e/0236/Zjij82g29tUl8vqBWSRiDcvIskB
CUKxrmydSNKQ1UfWzFxyFeLBJZslSQbdF/z+zueAAxfhqEimYXz+888n3fXmfLlKUX0os53P15vNfDkF
jOrFLZPnPB7CrGSbJni7Dz45ODDKpo5JOorbYqVGniWWYRQgO2Fv9/ugN/j3C4PTRxxYEMljEAYqui/1
Gcq2DiPx14vuGkBWq1X6Df+v79+W1fkcpDi/kM/OZJZiaJqQMELnypSaoIejN6RDSZDhM5BOy5Ul1mwU
IC0rlDMS1QVFiCRdLMkpQZgSB5VGMWCQqjl68EuZQzUW09UUfvbq8lsGAW22Ah2m0+mnWtZFDEVD5SRQ
TsNmahSTN1ezcySJ7QpA0lBcRfLtzO443u9UePr7tq0YBCSI43+9IAjGgL5WNmm7jFGv+3ZYnsEXxgZh
oJp++/b3Nkm2wAG1rJ5rfK2SFyaBsDeFmkgOjXwHNxeWRHcFy2h3mr6d+Z3sKUOrJQ9tE/xOegtIuh9+
KAgFXz1nVxzBqAEIZ4fB+xLILJ1W80q3220KPZZRQJ2ParVjEv9RpPmS4SmdPQqThtweCkzfxoOrvHAB
Sr4Cx4sfSvKFRRy8n6+7Qq8bEIwMpKnfB+9KTl+vqkVljsfyQH0a1zEIJmF0jcGGRySM4LBlv9+0ZMtj
HMck8X61JyGb410ymRYgIMmvzwiCDrTTBfRVyxR71pTHGAWI0AgAhNT3vSJgbgxRgND1+iFJk6OgCEmt
47M5CXCYQ0TBQPwZngkgJyRRcpD1ZDJZ4N6a/ft0hsDXcJuBSYlK2qKqUw9zFCCMbgThQXx+WgHJqTof
U/UjTVgEgkkAhL7x/QxE0q8QBdUUPCirBYOLgBQkBGQJIFskCQKJByckeZ99U9cZNGR3vms0ktaeoyIi
PSivEwRls785M9ar50B4ADkg4b1rnirlItWwoaH0jqU0JKbC5i5BN/T64ylJcG/NJ9PtpFf01mxxQpLP
TdRYSRJFUbWTSAt3j9GH6S45kGN3QfCBvNY/M9qL5xVhAOSQxJcuL4Fi31yMo5u20OfrFAvdhUE8XZVu
9Od7qzdZh8kK91Zv8P7pcUgAi6jpctMbsOmMbrOYQ9KHaPXstMIAx8gb2KMMFUIEjHG66kiRIxLOFpjR
iNk3l58NL2z3vSRyxyQgBQkBaSdbdxPt1BxkMHvzZGNlTL7YsERpTnOpX9jDMdUAWuoM/uj+8f3792/f
lvNNr7dZpliOUyUhj5RJ8snlS9KIx5LkWzDJEiEH0VXBzHur8UxvuVHRW/Gse/H0nO7kQQjlzY1pWHDA
kQowPkBob7fQdkmaQlRM8ax9tuCI+4QkS3eJKUtCBhfpLXvndfQTIGohCdzZrZYFCEjy4cjqDN/fr+7i
PHHxuPKGQdBL0yLvYLGCehECz9/KExKU7pRe47HfDyUhvdWycpCbw97aGPnc6k4mUZqbJI673SO7f+b9
IgjFYuPFguyCeJVtHmibPeGKF9x+RJJdxkg1inpZEvvRuNFPSOKujDzc55NJAgM4T5Lu4PBC5f94J7MI
5sAgKAN7QFDk3fK1IFWKr6MqgzBNr+Fc8vsRzJ1wiQd14+qnQJTlSs5J0slE3ZvksLfe8ZlFsCAkQXQ5
CM7Jw78e5CtdL5PwrGA2PL9O8yQVub0kGMSzDPil46F5EkSNqjmIO53cYZCj3vrM+P6xIABSR501q5YK
JtXr6mJUK0go3/EeTcmnR1p+PDiWpNNz8t5qdGPVUKwbG/89goIED+D1pHjDUCUgR731RReylZfqm50b
nyzuTTMIBtVSTV8LgrKd4pqCfWM7vvgwRlpkSzDAEEmaTq+aJu1+Jokwu7LNG/jY+KB4JImcTqYqgITJ
ZOdikEH3Q3k9EdhMEIkVayOHxhx1qQGdVS3V8rUgVV83PcfntetrrUKJOEmgcG9x3m45FaxetCh6a+h6
eZQ8B6LA6I2m82UySbJVHoEc9tZbn8s4OActRRq/P4EI1j8COf90IXuAcFmjRbqWFTmXIEX4RqivomiJ
7lOSsJhbsmVKBQiQkHAnIIqaTrKa4jPJbAb3NKXF188zpM/RUBq1B/EApHpQ0x8hnH3lrq9v6/zjpajV
ieGJJJSjbLaJzDOtSEYmSeVibsk3OQiW5OaJSRTZWGYgXQQCHN3NfL4o3XD9xecgDgLhxYIDgbRKIMss
TF5i8G7HGjxyTYOSbxmtIClJwikLYTONohW4XYgcZPfELOaW0jFxlJwAwVtKN42SdXHe7a5nrhx035Aj
VXFUlxiYWFln4aPUkIBkUbJ8BuLCYoChpt3e3l5qWXXAH4QEKUJ58jxJLOisMIoGPG+l+auGZpGJHUUn
IJ3OY+PxFAgaW8U9XdBrqZDtg/2J97Ofc2QgfK1CQKQyyPw5CPvhsl7TRg9Ql6g0xOI8avUDEH65gmVt
mtl9EUU9PlgjkOHWL0CEnSBgkEdbbjfMxgkQ8vLqLh6GKEgGA/LiM785oQUKBlZJEK3fCGJs9vlpDBnu
GW4fsrqF2pPw8i1FJKlk5YHBdxnJOorCdcgAiTzdZ+LO25vkMQzau6FJVvnC7QcgRjvIQfA58X/x1YkN
XfxQEqTud8jUOj8//wR1fn7AQdUeRqjKJIABKArHaPVDENqHBr/KbrTnEXQZAtnNfSxJQ/awJGavYZqq
PTx2O+ktAGmHCAQisTDJe7+4kuMkuiJeMnuOGtW/gUA8vzj76lDXpOw/SIBfjnARkhzFtGkN6gCE9pJo
K6AJzIDjWwgkXuxBnBBLctOOYf42VLvxvEnA7UamSBdfnr4VhGZ2EWRzdIV+EJzsttyRbFgs+szD9YOG
Bg4EpJY7+fLaudiv69e3t6dIQJG+8lArJKnV9iRKEqUotHgnBSQA6Q0IiLmTTAAxzauehYJEHQ5PgdwP
ZvkLXuU+V+Si8DqlMfD0Td1BWxZ1yUkw/YS+T10+1D22okFdQt1CPUDVruv6GTl41GsnNEG8lFHjc0lq
JRC6nURzBMLPYAj3GW5+V9qAWztPN23P6Bl2Fu2B/RTE7QXqoAgSI1ekOLn/zvPamHEclsoShKbg2esV
kaJr9ZrJ5RiEowIcX89L4XL9MBqXSIgkio+SpACpYRB6EEVdRLLeJNG0yU1bfmkDtmLVvR8EVzaAAEl7
ODwG2Q1aw7a62OUgASgCJB/2XufpGoxfsr8Xq6um82gClTjq15ptlfy+SioaIBQoB5I8CoUih5JQ1SiR
4RunuoGWlNTzSW/BJt8yWmYeiUiSO/NwSZHDbgddZXe7+dHqHilSuP23Ig65pyAaAtE09GAFx0i79hp/
l/f69PJyPMYoB80ldCqI4xiE5mB0DSl/y/C9KNokPtTTY2JxtnLN4cFVShCbmdvDRbY1xmrWWrnb33H5
0NrxIj7iEkVsBhQhHPC8tPkpWRGQ9Px2DHWqudhhznEMIsLoSvSrOc/DEAbrcy+ADI2D+TsLdSMDcRch
UiRuI5Be7z/Z0AIQRNITMQgRBEDKjTWCujT/SNJSkIAgqEhz7UF8qwDBJBiEkuH54y6PXvRGS87nngfp
hHYJpOuaeP4OggzEQCCbzaf8dJinyNSkoQ5AQBFOOxBkVLv2P6bbJcb4OkIcqA6bC4E8mtRJSSAQuxEE
I4Aw/mpNFOmfALmzhxhE7so3MgYBkwDIrJ2BLAbZ0Co2xnWUzkPxSBEPg2BBbm+vuYvtNF93v17XR9dj
TIKK0S1FRre5vNRqiRDtp0Eo6CkfgTCelYOwRyB2ASJbGMTtXjVIIsYZSHCHQBbdwa8YBBRh0wi8Bxxl
RQQGaEhnIRLteszIZ/JX/3pcv7yGwiCAossejA6m/yjLV32N0U4pku1a3CrFL31OgugYpOF2cpD7WL9S
CEjYg0DsteISyJdCEZEZhsuoT0A0KC/zSIkDnleDjL9EO/sYIOCzJxnZVo3naZFn6IdRjRERxjEIPrYL
6gsg+IyITruqjkCU2PAspQSiLrq9hexikA/7ldGbhhDs4lwpp0iWI1AEBKHklWlxnf9ZgIgGJeZ2v9Rw
HbudHNsPQXCQQBEQJMnOAxA3bniHy5arxndDNchBBlmQsBQqaZlEq0F7ZRxYpGbSGpq/e5DxngM+BQTu
rpHe4ZHdc6sjlmNJiCIvgAiHIHfelRy3PbN1BOK23eJia5CD/CblhxGOcxN08Bl4JYswXi1fUR72ihA9
EEZGgr3e6lNkZuE6BKFPKOL7fftKcVVV1vENcOmvZ6t2GNxI1vH6Wxza0zRXBBLxfa6I2BalJe3NptEc
gdR4ESkisVoRiNgiGISgXO+9bvAVDHJaEeB4qgirt1ylI/TZvqleEUlwIgZtRbqxhs+AbCaD7qDXWwDI
u2Jo3YmPcxQjnAPNxeimBxHCoCQo5u+BRa5Hj/F8uY4fNeKQ0a1R00genlIEOI4U4QTLeHR4iu1nCDsh
9wgGGe5msHoNoU6BxEEQrzZdcP3iPwUIoISiskAgqHiTexhf9m2Tr2n7FaUkCNcFScN1kiTbcLRX5BL8
hUAOJKmfACGKSIrVr/DNfuGRfqiXQYa7WBYk99TJCjjUu810EC/XsGqtMxA+V2QghgOxK2QgUlOs10R4
tIqGFcGSoEe+T9JH1FNaF1CW9N4jMLRwsL9KEaZjsXW/6e/N3gw9AoIwJKnRCOzhMyCwfHbnc+T23h8I
BK9acTeM1hmIztRRVZBNSksjYMA/t/Mk5YsEuUIkD9gjLZbHHCcUqRwrwlm65vultySSo+4D0dqFLUdo
dCBIOtZpkMX0fjXfLEGRXgbC8Jkii36YJkmKOKhHCgdiWZGcZDxIEmmMnR4ASYwVMW2mAKkwPAVOrh8e
2g8VYSxJ89H0JYooipOB6EqoPrJeJ7ujG7qNQxBskXRwP5/2pt0BFIC8ZflMkZlxFUXBuo8sYtP5+M0x
DrdGIUnmJEa0FEhqhSJ+K59aGn/JerrnUDW+UISAEEVMDzjKIE5/ZzvA0TB2Lcm39+92A9s6CZKE95vV
IMUg7/p8RuKuqY1Kb4bwfQxEyZEiZEsBi7dKwY5sclWsjLcKyyMQXmxZuud15E4FRMGNdaQI09J8/hAE
CWK3IE78JvI6frXbbpUVMQjI/f182SUgfg5iJ+jid5mBCPDNWJFCEgyiwYOzRZIgFA/+O8TnEVahxYdb
Rms5mUHoR4XhaxnHU0WaNs0zhyC7juUaJuv/P2nn26O4jqVxabXS7PaL+avV7quAKzaXkEogFUISshDH
0E1jhuQOEIoq4Pt/jfWxE5xU6mpL9z7dNeqSRrf49ePH59ix04mvK7sA2W+jB8gk22wdAJmIofW+Ks5p
salBMJEkJC+NXsRt5QiQvNQkzbHFeJ4bj3ZLAOQ83zwWiJewT9DLxa+zbjsEaY6WIzh8ph9AgmUCj4aa
l3zkVuN6OatAJtvQj8KZAnk/FMUpyyXIf2mQ0Zkfz7fbCMLuj9QGXdsRaYnFec5UNVESITlXhsDy0Ql8
N3h5zL6WO0AtEJ0Rf9wjuAHCGAT9sycks3WgQJz7OJyFoTuVs+/P19c0LW7AkQmQP9EKhN045yc1/cIP
N378Mhh0LKGc82Fd4EFHGFp6ZwslCW5Mvs/jy+hzRwzku+yFShBTgajq3gXZ74KZBJlHIYR9/jihmRbn
txrkWw0yurzlJemDzNjo935Yvo86lvySc76Sf//aEUt8090NUiBkilETRGGMZGE39+M+1c1vC8Rvgrj3
JYA4ge0CyNatQNLbu8pIBtsopgQxPaSf6PZ89s9BhJ6p1W/PW0JXzo/wwSsWkufi284Wiq6FywC1sq6P
zorfkftiapC4C1JtYq+SpUAI2cyF2XcVzOd3sRl0zo9pegOOTC6siBAbWr6Pa5C+ES0jOBYTdC2xhSV2
1QkL3fM80SDSkHZ34rndiACH6n1DbNIvgIh07CbDJHQlyHosor59L/jbU5rmmQA5/bm+9hJ5mBDb1M/Y
e7KU2GYdEk2ScV7+Wgvd8oWYk/3WzlzthwTpTfvkA8hIciAJgr4CIhRRtnTdCiQUINnhkJUAshGST9/+
IhCIjwxEkSk8aR/HZHV11ySDJ87vFYf3dpsJJ+L39kZ8q12cUdxxhFSOROELrUGqQ5oAMt4EGmTYaX7X
w4m6sgAghTBkd4ah9XdIeyL9xsoSDYKS3nPHk+cs5+VStvWv76Nff5jOW9HgeGBUIGMbf+SoHPH2YY/i
CkQb4pTFvVlG9k0QqOsrCbLeAsimgIyc/wO2TD0AQdV/v21JLzDalqjRxU7lbQF1/v09LXMuhLUf9ciq
217fR58aQpcuE1HHLUPsZVbwfBzVIOPPQLZuDVICSJZmZ/V4BBFiqadWlLZB+kvyrC2RKBMPUPpZCbHY
8EqXmqNjyCAJmiC1IThwrWfD1OVQgkyfcs4Pt87RBw0iW/hFDSJWVcdNtsn+qh5O/8CExcBhULPXJvmO
qp7rl5pkWOzS94LzGyxr74KhKDeTfa/tR/OBrrVELQ4AYcswfu4LjA8g+8g680PeAQmbILtxxbErs3Qj
vuRyBCoi+gVTywB1QHz8/Kx7RxDmlRKYiyf+SHihnxx2DBEgQw0iK3p8cS/0l36705pdM5V1nx8ObvfA
QAgc6/N7Wb47ixokS9MNGAL1UBYS9COOJQikvUWSmI2ViSKBTiY/Xu+mItAc2g+dEAAZE1maEDWZFS1n
oW8+/4Ioamxq+afi5lRZHwqQTQNkqEHS9RAuiy+grEuQk+CQJP9eP3pDAyyHFqKjNogFIIqkHl6r1d58
+dGhaPlRcfQJhp7WvwzDmRsOL5FtosGPHlCQBwfEm/NdXIFc88Ph2Oi0dETuoTMVEPO5NARApCFpBoVd
ph0TkLQdjdoksfesSPTwqqQpaozaD6AwaBwtw2GQ2B5OkNF//gUqv7w7QqQehkyy9VGAzCuQIE8PPNeG
NEHq1aE+B6hA1OwLh5weIMIXIOk/SLz4uZJenLSlMSqNqO0PL5aHBr/CWrhPAxMhLH6R+higkh5Z8Y3z
DYCAIa/fISSfRmQqQNTiUINk2S5Tk5Zs5FEFQrEAGTUsYfagBmnoUwz46lNrfBEF8F//+9IjQiPRZkUB
VgVE72dpQ9TTqpLzJ2WIn7tJcTikftABWYSVIY+IpDIgO/XkTaVdSoYdAYm2xLR6DxKdec2ikqFIkIiD
jV7+JRiM3uN0kwYBjK4hALLiPFcg2WuSlIdDGQSdrE+GEqQZketZOLLJIOtV2hGpLfHMUZPETHqDTz3R
f1J/RvZ4yeD8Jem1T2n1/QDXhkg9DNEb8WPO+QxAonyaJPDiipqjlfWPI2uzOQsM1cSrtKOHJYTShDZI
zGQ0eJD0DIQpRQNNUFEM2GUcj34dkB4kXUlRaBB9KrNjiAeL0xQMSYskSWaHAx93QNxJONVRB5DNFjjg
97fHwTOCNAlhkUWNXg1i9Ak2YysKluNlECWJlQQJas+08ThB/3whI/lNJX2S3L8gfYFPG9ICeeL8VYAk
xQpa+PxwWHezvnCmrZGVbtOrwEiz898aRwGVVFGkBoarHXYcx5bvL5dBYjOKiDzsLj7S4KXH7EEDhEXP
PwwCNVAf2NAcvWRIJEd3ytLtCTzlFSCrIgGQ4+Fw/hgR13Umi+bIOq1OaSncOEFE9MkzsITWt/cQxUYP
wXyCETxk6Bvyx0I5AA2QMUr6L8/1rwSTQW1El6Nnh33ph56yOoawEEJiW8UJQPzr4fDaGVmus1KG3KUh
u90qTcssWz3Koa4kRV7K07/wReCHYIzjGCOjXp1ooZGtRxYeE0NjaI4KxHRfjN82RIHEOYRkUkRgiL8W
ae9WESfdNw1ZnwTIdZWKKvJNH2COMUEkPz0VkaFQ6kPyQX0fpo0yMNhIf8eGiPwmRw8tXlAr6YIBOFpH
/F85L61jKg3x9wdREmEt0hpZzs6p94HWq90a+pPtRCREVBGtf1iEkCdCZkUgSerz/qMh7msSjTIgDD1r
sT1CGuPjDcQFwYBRgWDcwxT1zJYhccb527wyxPdzYcnTk6NHlgsgi41bRX11+lmVdcGhIwITsLCEvIuv
+BgboIqE7FFPkzxQeoQsRV60zCEetTE0xyiMqaE5jGC6cBYO65kNQ2IHLgtNqp25MT/k5UJHZDZTS8Or
M5GG7NL7qgIBkm/NaxfQkM6m8DDsHTU8QXukF/GaheCh+dzUwBwbv8UxikLy4KDImcUY4WhqkYYhccKF
JZYyJDwW2b6as7QhcCv0Gt4FyGl1X612D0P+1r4IgzEpS+hMLztDe2IASAfFeF5izSBFwn4XQ3GM8BzT
msNwQxV0tkCeNiS2C86vypDJ+1wXEW2IPBJ0de/3dPtzVRmyAZJqZOkLJMjNb5uYkLUFFJUne1GVOyio
bw1a6rOhSTRGm2NkjEODAAiiCDhU0pcX0ryW9CQcSaKpfi6iQcCQqs3K3t3NZL2tlyJncOTbh8tiJkLu
W85fF3RtaE++e6NeB8V4TvoNCpoMrf6oV2N0OAy8sKnsGfzFuObAsfPiaUPsUq47U19Icww1R9X3Zulc
cOiE1J2v1j8wRvSdTl7zvDiPUT0LJ9ZIkrRXwIgkKttw42jvm4NnpBkAQ3PIOmjNKaaG7Tj2g4NSZ8S0
IcGNH9OxjLo2RHE0QCbb1crdPkDO79lZV8O6JoIlJSIk3h05LytLDOarkqJZQCNiJJdLsBwHCUMvfaQx
uhxSyeLiO06CUA0iwFzMakP87HWzbL1XT0VdJ0T1vbvJfLNYKZDNpjwcHmsqrW+xh9BqT4SSYriqHDFQ
iEaaRLPg/nMPTtUaCBFN0MWo11JmEEg3tCE0NFk9sIKLZSUPkLErmqrJ1mkZshAgq6k42D9ZVYY88eNB
R70R9wihOEP06d2+WYQYFcp3NpLqfWSBD9Ri0BiaA34R3ZaQBoe5Z0yfY3xwLN15GLHYsoansD1l3bfO
XVb11Q5AyrzMi3q13oq7Lyy5oojfinJNHiTMJ6MuSm0M6nUoNEZjQYjUL80hQEIzrpP+uFoVLmaJZ8lL
CsHOaXGs51O55zABkDTN8qfDm456+207CK3HuDDp9YnUJAYZmqqkkBFB5COMe5sqBDfWGJ31oGJpBYSa
poPiDxzh4rvH/PqVO7spcMzUBvx86yzkUn013cmR9cbzJzH7/ufnr0pA8RmtTURzqkm8PUXEIORHHO5j
9MGXO+eRdKfI172e7yKgMLSI9qNtiGkyp9e+bDyeBtTWc+8ydXXS7/fZfCJBTlNpCDzRPZStqq4FLzQs
MZ6jCV8QKUUy9E1EzOV3hAMftwfZE39H0oKC8+Eo44Up+9yuHTWHddnP3NnwEiUzs3X9O3L3NG68A2k4
W2mOtVNzbNO1NOT1KT8c3rUhnXd+rmcoOXL+RBokJA6Gw+82ZCXy0aghlr8Sd0FGozjPnZEAyYwHhMbQ
dvhOYMrXoXlWIuxv3JmOHNtsFfXxeh5WIJPJ0HksqE47WdXLQ55zvcbtFEXkZQjZ10nhoSaKVhAbDZA0
Z+LTH9HoxFORiuPNjDzNUWM8ONxLD3kMZGIaaw4B4ppx+6VUYTpTHIuf4b5eqQuQ9UlOWYe3c3E8d+fe
2hKM3BgJXe8IoTaJmktxiDWHUUxHxht/Y/h2RIZh8yLPdx0McAREHXvgUZDusWqOkEkObch++hO63qko
Jxe3es2ONGQia0jxmp35tWlIt08xkdDliEgTRSuKSAVmz0/laOTyV2Sk3DfwpOCva7OJoWcrAn7EhjAC
f8aRzJDfLur7/U70VOvF/rv72HGAq/iprCHZa5Y9FZ2ENCcuQSL1eoGf3hlfI4OGuIozSfMjQ0VBjeB2
zd+ergVPmxh6tpIwQTAycceP+oUVSBsiOULfiqIk2KstIACRHKvJCQyBfjEvm4Z0a0lFkqwRaqJoltAE
Chlq+1w88dAwjkMjy6nh8gIPraYbj3ggzKYD7+EH0xwKZBGwoMExrK+LtDjkuxF2FUiZf2qIvkZdk2BU
kXy0ZWwaWhbPULljAo+7Bs3zvFh9sKMug2GiCnqXA0CCuWNbj6QDR7t7FyCS4w4c8nzs7bVZ1Lv6OxYK
IhPVkh+H+pGNK5YlNrTOJSFuXgwNxrPJa84djVHZUfdXc8Joc5GuA6KarP3EYQ+OfZdDBmSSAghw5Lzb
ZX1oginGM2raqCE7ROLQSWwSIRr2NcekcDfFcfF2M4zilgUZ39UY2g7FYd6fzQ6H3Xortpv4ikP3vBoE
SuG54hAgRV5+nHo7lpgUDxn1YqxBZiOCkdjdpfDeHfzYJjaFBTx3BNydkLIgxOGv0yyNJAfSdoDi+8Cj
ZofD0hyzwKw56pJeB2Syu17P2UpwVIY83Y5/+/b/vsCTUivBmHkPEjauAkvgOhXReuLF+v0VE9AuN9Gc
82O5saQl2g4pescMMD7jAJAw9Nj3S4NDDyxYg0wn6xXUdGXImb91k97Ne0y9McXYe5CwADc+GNHCK0a8
XPw8REjIj6+bggdA0RBWMuki8j7lAJAgHHo4+i2O+3y9gqRD+14l/Xj+81fee8mo7WPhCatIzD384VMW
YdYxi4u3hFC4oX7lW9L8v2K9+hgvDPYphy9qBqZJcIHb69PzPpzuzueVyod6GrL9WXPUu4tnsd37Bf3F
o5YHJHVOQtqcw0iDZl6StCBWcSQkLwlZ8BLh0GGoiaGKB5tYMetwLMPQpyiOguACe71Ofsj5QYjPHw9D
VnNVCgWH3AMCji8MLKFvf/pvk3mUYlYXFd9CWi2apzfykyfEDAi53RC68Pzpxnk+1oPqcfpnOTfiBkcU
7MNhECMETVb9oqDyUCt1HkFfVRyb06Y25OuvkjblcMAeliimtqSNg17f0JSvEajkRXG8rtwIU6YxGv8a
wTyyIz+4jEWR2I+DJMYEebYIuuYYFg+QTT3xil1FwbFdnTbZpjYEZqyv6e+KhDGsNLTR55qlyLtlCOTn
d6TUwfAsfzleXoIoZhQhMoKDYcxWEy9wBHVjctSO1AVktwWQVZoJVZuL+sVzX4iJCSgim1RVARX3zxVW
AAxrCh0O2w/8hGGjN0KXhLEPhfDD+TLnXL5vtnPBs5UcAuSUrkRrkl7T01ne3dGbvV+NCaB4XmybWGhv
wxj7grDGoKblXyKGegSrU0zDObIlRofjw65idjjMgQN691W5hTusu500BDj0zPulmMSmVGx6VmKZ8Z5i
qa8wSArhRGKSHqKNf3bkHj0wNIfur2oO53zgNcd6fS3fr6fdbqM4Pj5F+Eo18SSJDEqc+JFlMwnT5cGa
oaYIfEZ6hHqmxhAg4zVLLO2HjkfFUe81lIe84pDPoWUlzKpd67/qgHw18MyrUExYZPsePGyvlFh2zDzN
1aLAydImI2R6psTQHLbtruNE+aFjPhbaaw71UslCcNRrEMWxUYY0g/7lwLMaBaawaAkfyKPKDExNj8W2
/FBMM0C+2dg2kJxx2xigxNku7YYdFx2PZsNbHF7nLY5TVhvSrIRf/4c7YtZgGSZUSXzD5KeUf+cmNm1a
MVQ7obiLEdcp328nl8fxZP1KnfZ1nfzwrjkEyOkMHJv0ChPW79Cf/hHHK4d5isWbWbQhVbLhf1BAHzLN
ZIgURRdDbcTN1usg0unQ8dALkMMh63Kk6fn3cQDJ/9jHY7lmiiV2LWnNB5l4XzFIMTdmFYXG0Bzgxmw1
fqRj3D7TKxuslSgjMGHVHdZGcKS7ze/nAJJLZp+eEiZZbNc2u2KWb2p5nuX68ecYj3CEd1alQ8ej4oA6
mB245lBB3502Z7iP+wdIFmv7XgTqoyVO7HVA6JjVDFLMDkM7BgqN0SodfhC6yVJxdO8XCpWHQo8rCPpu
BQkBjj9EktlZIY8KMebDuJHSQHjvPVQ5EbjeBzeaGMHyHo+BQx8zaV3BnRSHrMmxAYz2+Ybfl/jx0CpL
8XmAZSlIpLyHTF8zKI7Y/470skO7ISng3K4VQC1vc0wfHJNXzSFAUnl08Y9yAMlfkmTFA1spnMVSjc+N
k7gBESdBwAwmELQX2ozx3nGXXv2gs2uHroMVxxaS/gc59D8MnITlSnwmALHcvR03xOCL2pYq1L4fJYwg
TzEARY0RDMMwHI4Dy4sD5YYuHo9lrdr4ERy7k+LYCo4NGKLr4B/SvyXnceYkFsAk0yX8bdeqshAzs34N
cVw7oSnG7jjyWPJ/1Z1bb6M4FMdpJmlCLtWM1NkP0H1JHzBI3CJGjsXiETCIIJVLKITv/zX2HMd1kq72
rcl0fq1aP/qXc/6uTRG47dZcm8BR478eFhJQa8gYaERpWrE+BI+vH+KBTFi86RqYlr21bYp7WKXyLL8Q
WQaUkBaIS+1/7G2rXpCiwvH+EWYo4QT4/Jk4iniSZsenIMTfZtqHcb+INk3Z/nDsv59NAnP9Xy4cbIT/
cH8iJ438pHEej8ChFsaDZ+mQiIdihk0Yx9/n2gcyXYVuWJguhRjAm4HOOM3fhhRslITc3ro5f0YL1FAe
HnCuITwoaIiYc87FpZ8wxICMtQ9lPh81BitITm21l7/EK/C2mF1R9pZrH3ERL928SQgNj9Akpd65hrzs
MyT8bNnFmDOG8fhg5pNFWO/yddDaUJUTb2OvyDeE+3ZrZH1juQi0VPtzzZ7PakGCNK44D6kvPVTKU8Yt
5VGBBwO+T7UrMH1ke8vd0ty+ZGMLLAo1OBBRByfOWtymA2ZqrKWGF0RxZhlQjSA0z7tKnAYHK0mFR1yW
eADZd9hW12HyVwsfM3FaF7HfsWZdsRuwEFAJv+EggfiMemjhcMYGIjCMKlqjhvBADVx1ufBgZVnWYdM9
PanV6ipFaWEdNSnZulvbcHL3HVAGrIOAd+TYUl4as4rFLHVOSxVtfFWOIIDBW1uxIq72Fb5VZqxdlYm/
BXIaGMngU2qiGNC2W4Up8coMNBAfJk8uV1wWeahBxZZXbtqBrC67umGs+zLTrsx8BHOE2RICP/2BbBVr
FsaO+QYoeEWT+4gnJVBDZjwJXRxR9X9OuVrVh6Zm7MtYuwH3j76pCIbclHR8iOrSx24CC4S9RGhxruEg
1DGMdO2oSz7Cg9XlrowPcVUtHqbabdBXsGtCYPqEYxUwDJa1Xvs9g0q8BrmPsJdCaRgCBzEIlCJcKw30
YHWX9b/qjlXZw0y7FXNtsjLNXBIMfi7gfeTB0Ot23WuXRXG9ezkQQGkoC8h4lg9n5ahfi0O3D6ssQo1b
Air5G4RTmYWuOBRw/9Hedzr4/euw58oCHQxpAT1V+TIcLIzSqNnt9wyPHzfXwKroj2IbiwSpIVPN475j
xCMKAwEHHEoLIGlynqBFyMIAL/lkwOLWGor7kXoYTsqJJyEn0IESRJ2dhMcw4EEjDKs0xRvEI6Exnmq/
j+lk5fkiIjRDkwsHxMGbdSmiNI7ZiKIU4Bw1In631LXfC5bFEBNnrqqJNOIOEiRpFBlCAkELzEZyfF+Q
8FliMT4Bc310R4yKGgoHITG2kwi5Qy81IEmsQgkYLscz7RMxG62CxHKUBXJ0Cut93fddL3tK/uGQx6Zv
aPHZmOqjZSI+5jSxVCqssCz2XfO028fKAvsqWWApPi1zfbxc8CCgBCRkLhhLy9eya/oYYoHv2xgWyweU
+PzM9PFotFzdiVQgySC2uAs0GOtT7Y9hLr616Ww203V9MhlPdH021f5s5nN0ujr/AnmDnJ97wE0oAAAA
AElFTkSuQmCC
`,
})
testLoader.Add(&Content{
Path: testTextFile,
Hash: "BqZLX-Gc5JCfp_iX562hjA",
Modified: time.Unix(1506288652, 0),
Compressed: `
H4sIAAAAAAAA/3yTTY/TMBCG7/yKYbmA1DQtsFClbgQSSFy4ceE4jcfNaG1PZE/SVlX/O0qyZUF8XCJ5
Yr/vk0eOeW6l0XNH0GrwtXl8EtraKKun+vMJQ+cJPklAjqacpyaQIjQtpky669UVm//MIgbaDUzHTpJC
I1Ep6u7uyFbbnaWBGyqmxYIjK6MvcoOeduu72mQ9e6r3Ys+XPTYPhyR9tEUjXlL1wq3cyr3eBkwHjtVq
26G1HA/VauskauEwsD9X0lGEjDEvWvIDKTcIkXpafLktFx8To1+Me4pMid3V8nCZkKp3q1V3ulXcUwDs
VX423Y8v/wLm3HYvyVIqElruc7WmcMXKc3xYYDVwZiV7edz9ZvN2s3FbpZMWlhpJqCyxihLp+iGQZXwZ
8DQrqt6PPK8u/zLifmWfUG92ZvDfqZ6UjXxXU866TTlfgbGkNpaH2rTrP65Cu65NV39rOYOdRsAZKCvu
PeeWLKjAnqDPZMFJAva+zzp+3EBAc1gGjmCl6QNFzUv4Lj0EPI+HQFvOz27J8enEkbWVXqFLLAkakWQ5
TspAEmB+4HiYCjtKgXNmiUtTdiOsQWgTuV2r2lVleTwel4wRl5IO5dyUy8ee+qskAo5OUpjCl8ulKbGe
ksrJSTn7Kae/5kcAAAD///DU6xJLAwAA
`,
})
}
|
package sound
import (
"errors"
"fmt"
"github.com/rmcsoft/hasp/events"
"github.com/sirupsen/logrus"
)
// HotWordDetectedEventData is the HotWordDetectedEvent data.
type HotWordDetectedEventData struct {
	// AudioData is the audio associated with the detection.
	AudioData *AudioData
}
const (
	// HotWordDetectedEventName is the event name for keyword detection
	// without accompanying audio samples.
	HotWordDetectedEventName = "HotWordDetected"
	// HotWordWithDataDetectedEventName is the event name used when the
	// detection also carries captured audio samples.
	HotWordWithDataDetectedEventName = "HotWordWithDataDetected"
)
// NewHotWordDetectedEvent creates HotWordDetectedEvent.
//
// If audioData has no samples the event is named HotWordDetectedEventName,
// otherwise HotWordWithDataDetectedEventName. In both cases the single
// event argument is a SoundCapturedEventData wrapping audioData.
//
// NOTE(review): GetHotWordDetectedEventData type-asserts the argument to
// HotWordDetectedEventData, which this constructor never produces —
// confirm which payload type is intended.
func NewHotWordDetectedEvent(audioData *AudioData) *events.Event {
	typeName := HotWordWithDataDetectedEventName
	if len(audioData.samples) == 0 {
		logrus.Debug("HotWordDetected")
		typeName = HotWordDetectedEventName
	} else {
		logrus.Debug("HotWordWithDataDetected")
	}
	return &events.Event{
		Name: typeName,
		Args: []interface{}{
			SoundCapturedEventData{audioData},
		},
	}
}
// GetHotWordDetectedEventData gets HotWordDetectedEvent data.
//
// It fails when the event has a different name, when it carries no
// arguments, or when the single argument is not a HotWordDetectedEventData.
//
// NOTE(review): NewHotWordDetectedEvent currently wraps the payload in
// SoundCapturedEventData, so the type assertion below would fail for events
// built by that constructor — confirm which payload type is intended.
func GetHotWordDetectedEventData(event *events.Event) (HotWordDetectedEventData, error) {
	if event.Name != HotWordDetectedEventName {
		return HotWordDetectedEventData{},
			fmt.Errorf("The event must be named %s", HotWordDetectedEventName)
	}
	if len(event.Args) != 1 {
		// Fixed broken error message ("Event does not data").
		return HotWordDetectedEventData{},
			errors.New("event does not contain data")
	}
	data, ok := event.Args[0].(HotWordDetectedEventData)
	if !ok {
		return HotWordDetectedEventData{},
			errors.New("Event does not contain samples")
	}
	return data, nil
}
|
package instruction
// GitInstructionSet represents a set of git instructions; each method
// returns the command object for the corresponding git verb.
type GitInstructionSet interface {
	Init() *GitInit
	Add() *GitAdd
	Commit() *GitCommit
	Push() *GitPush
	Status() *GitStatus
}
// Default returns the stock GitInstructionSet implementation.
func Default() GitInstructionSet {
	return &defaultGitInstructionSet{}
}
|
// Copyright (c) 2016-2018, Jan Cajthaml <jan.cajthaml@gmail.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"bytes"
"io"
"os"
"path/filepath"
"reflect"
"sort"
"syscall"
"unsafe"
)
// defaultBufferSize is the scratch-buffer size used by ListDirectory for
// raw directory reads; it is sized to two OS pages at startup.
var defaultBufferSize int

func init() {
	defaultBufferSize = 2 * os.Getpagesize()
}
// nameFromDirent extracts the entry name from a raw syscall.Dirent without
// copying: it aliases the Dirent's Name field as a []byte via a manually
// built slice header, then trims at the first NUL terminator if present.
//
// NOTE(review): the returned slice aliases memory owned by the caller's
// dirent buffer, so it must be consumed (e.g. copied via string()) before
// that buffer is reused. reflect.SliceHeader is deprecated in modern Go;
// unsafe.Slice would be the current idiom — confirm the module's minimum
// Go version before changing this.
func nameFromDirent(de *syscall.Dirent) []byte {
	// ml bounds the name by the bytes remaining in the record after the
	// Name field's offset.
	ml := int(uint64(de.Reclen) - uint64(unsafe.Offsetof(syscall.Dirent{}.Name)))
	var name []byte
	sh := (*reflect.SliceHeader)(unsafe.Pointer(&name))
	sh.Cap = ml
	sh.Len = ml
	sh.Data = uintptr(unsafe.Pointer(&de.Name[0]))
	// Names are NUL-terminated; shrink the view to the terminator.
	if index := bytes.IndexByte(name, 0); index >= 0 {
		sh.Cap = index
		sh.Len = index
	}
	return name
}
// ListDirectory returns sorted slice of item names in given absolute path
// default sorting is ascending
//
// Entries are read with raw syscall.ReadDirent into a reusable scratch
// buffer; deleted entries (inode 0) and the "." / ".." pseudo-entries are
// skipped. Any open/read/close error yields nil, so callers cannot
// distinguish an empty directory from a failure.
func ListDirectory(absPath string, ascending bool) []string {
	v := make([]string, 0)
	dh, err := os.Open(absPath)
	if err != nil {
		return nil
	}
	fd := int(dh.Fd())
	scratchBuffer := make([]byte, defaultBufferSize)
	var de *syscall.Dirent
	for {
		n, err := syscall.ReadDirent(fd, scratchBuffer)
		if err != nil {
			_ = dh.Close()
			return nil
		}
		if n <= 0 {
			// Directory stream exhausted.
			break
		}
		buf := scratchBuffer[:n]
		for len(buf) > 0 {
			// Interpret the head of the buffer as one dirent record, then
			// advance by its record length.
			de = (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
			buf = buf[de.Reclen:]
			if de.Ino == 0 {
				// Skip deleted-but-not-yet-removed entries.
				continue
			}
			nameSlice := nameFromDirent(de)
			namlen := len(nameSlice)
			if (namlen == 0) || (namlen == 1 && nameSlice[0] == '.') || (namlen == 2 && nameSlice[0] == '.' && nameSlice[1] == '.') {
				// Skip empty names and "." / "..".
				continue
			}
			// string() copies nameSlice, which aliases scratchBuffer.
			v = append(v, string(nameSlice))
		}
	}
	if err = dh.Close(); err != nil {
		return nil
	}
	if ascending {
		sort.Slice(v, func(i, j int) bool {
			return v[i] < v[j]
		})
	} else {
		sort.Slice(v, func(i, j int) bool {
			return v[i] > v[j]
		})
	}
	return v
}
// Exists reports whether absPath refers to an existing filesystem entry.
//
// Note: any Stat failure other than "does not exist" (e.g. permission
// denied on a parent directory) also yields true, matching the original
// semantics.
func Exists(absPath string) bool {
	if _, statErr := os.Stat(absPath); os.IsNotExist(statErr) {
		return false
	}
	return true
}
// TouchFile creates the file at absPath (and any missing parent
// directories) if it does not already exist.
//
// It returns true only when a new file was created; it returns false when
// the file already exists (O_EXCL) or when directory/file creation fails.
func TouchFile(absPath string) bool {
	if os.MkdirAll(filepath.Dir(absPath), os.ModePerm) != nil {
		return false
	}
	file, openErr := os.OpenFile(absPath, os.O_RDONLY|os.O_CREATE|os.O_EXCL, os.ModePerm)
	if openErr != nil {
		return false
	}
	file.Close()
	return true
}
// ReadFileFully reads whole file given absolute path.
//
// It returns (true, contents) on success and (false, nil) on any error,
// matching the package's boolean-status convention.
//
// Fixed: the previous implementation issued a single f.Read into a
// pre-sized buffer; Read is not guaranteed to fill the buffer in one call,
// so large files could come back partially read. io.ReadFull loops until
// the buffer is filled or the stream ends.
func ReadFileFully(absPath string) (bool, []byte) {
	f, err := os.OpenFile(absPath, os.O_RDONLY, os.ModePerm)
	if err != nil {
		return false, nil
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return false, nil
	}
	buf := make([]byte, fi.Size())
	n, err := io.ReadFull(f, buf)
	// ErrUnexpectedEOF means the file shrank between Stat and read; keep
	// the bytes actually read, mirroring the old best-effort behavior.
	if err != nil && err != io.ErrUnexpectedEOF {
		return false, nil
	}
	return true, buf[:n]
}
// WriteFile writes data to a new file at absPath, creating any missing
// parent directories first.
//
// It returns false when the file already exists (O_EXCL) or when any
// directory creation, open, or write step fails; true otherwise.
func WriteFile(absPath string, data []byte) bool {
	if mkErr := os.MkdirAll(filepath.Dir(absPath), os.ModePerm); mkErr != nil {
		return false
	}
	file, openErr := os.OpenFile(absPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, os.ModePerm)
	if openErr != nil {
		return false
	}
	defer file.Close()
	_, writeErr := file.Write(data)
	return writeErr == nil
}
// UpdateFile truncates and rewrites the file at absPath with data.
//
// The file must already exist (no O_CREATE); it returns false when the
// open or write fails, true otherwise.
func UpdateFile(absPath string, data []byte) bool {
	file, openErr := os.OpenFile(absPath, os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if openErr != nil {
		return false
	}
	defer file.Close()
	_, writeErr := file.Write(data)
	return writeErr == nil
}
// AppendFile appends data to the file at absPath, creating the file and
// any missing parent directories if they do not exist.
//
// It returns false when directory creation, open, or write fails; true
// otherwise.
func AppendFile(absPath string, data []byte) bool {
	if os.MkdirAll(filepath.Dir(absPath), os.ModePerm) != nil {
		return false
	}
	file, openErr := os.OpenFile(absPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, os.ModePerm)
	if openErr != nil {
		return false
	}
	defer file.Close()
	_, writeErr := file.Write(data)
	return writeErr == nil
}
|
package middlewares
import (
"encoding/json"
"errors"
"fmt"
"net/mail"
"path"
"time"
"github.com/golang-jwt/jwt/v5"
"github.com/google/uuid"
"github.com/authelia/authelia/v4/internal/model"
"github.com/authelia/authelia/v4/internal/templates"
)
// IdentityVerificationStart the handler for initiating the identity validation process.
//
// It retrieves the identity via args.IdentityRetrieverFunc, stores a new
// identity-verification record, signs a JWT carrying the action claim, and
// mails the user a confirmation link containing that token.
//
// When delayFunc is non-nil it is deferred with the request start time and
// a success flag — presumably to equalize response timing and hinder user
// enumeration; confirm against TimingAttackDelayFunc's contract.
func IdentityVerificationStart(args IdentityVerificationStartArgs, delayFunc TimingAttackDelayFunc) RequestHandler {
	if args.IdentityRetrieverFunc == nil {
		panic(fmt.Errorf("Identity verification requires an identity retriever"))
	}
	return func(ctx *AutheliaCtx) {
		requestTime := time.Now()
		success := false
		if delayFunc != nil {
			// Deferred so the delay can account for total handler time and
			// whether the attempt succeeded.
			defer delayFunc(ctx, requestTime, &success)
		}
		identity, err := args.IdentityRetrieverFunc(ctx)
		if err != nil {
			// In that case we reply ok to avoid user enumeration.
			ctx.Logger.Error(err)
			ctx.ReplyOK()
			return
		}
		var jti uuid.UUID
		if jti, err = uuid.NewRandom(); err != nil {
			ctx.Error(err, messageOperationFailed)
			return
		}
		verification := model.NewIdentityVerification(jti, identity.Username, args.ActionClaim, ctx.RemoteIP())
		// Create the claim with the action to sign it.
		claims := verification.ToIdentityVerificationClaim()
		token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
		signedToken, err := token.SignedString([]byte(ctx.Configuration.JWTSecret))
		if err != nil {
			ctx.Error(err, messageOperationFailed)
			return
		}
		if err = ctx.Providers.StorageProvider.SaveIdentityVerification(ctx, verification); err != nil {
			ctx.Error(err, messageOperationFailed)
			return
		}
		// Build the confirmation link: root URL + target endpoint, with the
		// signed token as a query argument.
		linkURL := ctx.RootURL()
		query := linkURL.Query()
		query.Set(queryArgToken, signedToken)
		linkURL.Path = path.Join(linkURL.Path, args.TargetEndpoint)
		linkURL.RawQuery = query.Encode()
		data := templates.EmailIdentityVerificationValues{
			Title:       args.MailTitle,
			LinkURL:     linkURL.String(),
			LinkText:    args.MailButtonContent,
			DisplayName: identity.DisplayName,
			RemoteIP:    ctx.RemoteIP().String(),
		}
		ctx.Logger.Debugf("Sending an email to user %s (%s) to confirm identity for registering a device.",
			identity.Username, identity.Email)
		recipient := mail.Address{Name: identity.DisplayName, Address: identity.Email}
		if err = ctx.Providers.Notifier.Send(ctx, recipient, args.MailTitle, ctx.Providers.Templates.GetIdentityVerificationEmailTemplate(), data); err != nil {
			ctx.Error(err, messageOperationFailed)
			return
		}
		success = true
		ctx.ReplyOK()
	}
}
// IdentityVerificationFinish the middleware for finishing the identity validation process.
//
// It decodes the posted token, validates it as a JWT signed with the
// configured secret, checks that the matching verification record is still
// pending in storage, verifies the action (and optionally the user) the
// token was issued for, consumes the record, and finally hands the
// validated username to next.
//
//nolint:gocyclo
func IdentityVerificationFinish(args IdentityVerificationFinishArgs, next func(ctx *AutheliaCtx, username string)) RequestHandler {
	return func(ctx *AutheliaCtx) {
		var finishBody IdentityVerificationFinishBody
		b := ctx.PostBody()
		err := json.Unmarshal(b, &finishBody)
		if err != nil {
			ctx.Error(err, messageOperationFailed)
			return
		}
		if finishBody.Token == "" {
			ctx.Error(fmt.Errorf("No token provided"), messageOperationFailed)
			return
		}
		token, err := jwt.ParseWithClaims(finishBody.Token, &model.IdentityVerificationClaim{},
			func(token *jwt.Token) (any, error) {
				return []byte(ctx.Configuration.JWTSecret), nil
			},
			jwt.WithIssuedAt(),
			jwt.WithIssuer("Authelia"),
			jwt.WithStrictDecoding(),
			ctx.GetJWTWithTimeFuncOption(),
		)
		// Map each JWT failure mode to its dedicated user-facing message.
		switch {
		case err == nil:
			break
		case errors.Is(err, jwt.ErrTokenMalformed):
			ctx.Error(fmt.Errorf("Cannot parse token"), messageOperationFailed)
			return
		case errors.Is(err, jwt.ErrTokenExpired):
			ctx.Error(fmt.Errorf("Token expired"), messageIdentityVerificationTokenHasExpired)
			return
		case errors.Is(err, jwt.ErrTokenNotValidYet):
			ctx.Error(fmt.Errorf("Token is only valid in the future"), messageIdentityVerificationTokenNotValidYet)
			return
		case errors.Is(err, jwt.ErrTokenSignatureInvalid):
			// Fixed typo in user-facing error: "does't" -> "doesn't".
			ctx.Error(fmt.Errorf("Token signature doesn't match"), messageIdentityVerificationTokenSig)
			return
		default:
			ctx.Error(fmt.Errorf("Cannot handle this token: %w", err), messageOperationFailed)
			return
		}
		claims, ok := token.Claims.(*model.IdentityVerificationClaim)
		if !ok {
			ctx.Error(fmt.Errorf("Wrong type of claims (%T != *middlewares.IdentityVerificationClaim)", claims), messageOperationFailed)
			return
		}
		verification, err := claims.ToIdentityVerification()
		if err != nil {
			ctx.Error(fmt.Errorf("Token seems to be invalid: %w", err),
				messageOperationFailed)
			return
		}
		// Single-use enforcement: the record must still exist in storage.
		found, err := ctx.Providers.StorageProvider.FindIdentityVerification(ctx, verification.JTI.String())
		if err != nil {
			ctx.Error(err, messageOperationFailed)
			return
		}
		if !found {
			ctx.Error(fmt.Errorf("Token is not in DB, it might have already been used"),
				messageIdentityVerificationTokenAlreadyUsed)
			return
		}
		// Verify that the action claim in the token is the one expected for the given endpoint.
		if claims.Action != args.ActionClaim {
			ctx.Error(fmt.Errorf("This token has not been generated for this kind of action"), messageOperationFailed)
			return
		}
		if args.IsTokenUserValidFunc != nil && !args.IsTokenUserValidFunc(ctx, claims.Username) {
			ctx.Error(fmt.Errorf("This token has not been generated for this user"), messageOperationFailed)
			return
		}
		// Mark the verification as consumed so the token cannot be replayed.
		err = ctx.Providers.StorageProvider.ConsumeIdentityVerification(ctx, claims.ID, model.NewNullIP(ctx.RemoteIP()))
		if err != nil {
			ctx.Error(err, messageOperationFailed)
			return
		}
		next(ctx, claims.Username)
	}
}
|
package gotten_test
import (
"bytes"
"fmt"
"github.com/Hexilee/gotten"
"github.com/Hexilee/gotten/headers"
"github.com/stretchr/testify/assert"
"io"
"io/ioutil"
"net/http"
"strings"
"testing"
)
// TestFormRequest verifies that form-typed params are encoded into an
// application/x-www-form-urlencoded body, both for explicitly set values
// and for struct-tag defaults.
func TestFormRequest(t *testing.T) {
	creator, err := gotten.NewBuilder().
		SetBaseUrl("https://mock.io").
		SetClient(mockClient).
		Build()
	assert.Nil(t, err)

	var service AllTypesService
	assert.Nil(t, creator.Impl(&service))

	// The explicit request and the defaults-driven request must produce
	// identical form bodies, so both are checked by the same helper.
	checkForm := func(req *http.Request) {
		req.ParseForm()
		for field, want := range map[string]string{
			"json_before_form": TestJSON,
			"json_after_form":  TestJSON,
			"xml_after_form":   TestXML,
			"int":              TestString,
			"string":           TestString,
			"stringer":         TestString,
		} {
			assert.Equal(t, want, req.PostFormValue(field))
		}
		assert.Equal(t, headers.MIMEApplicationForm, req.Header.Get(headers.HeaderContentType))
		assert.Equal(t, "/form", req.URL.Path)
	}

	req, err := service.FormParamsRequest(&FormParams{
		JsonBeforeForm: TestSerializationObject,
		Int:            TestInt,
		String:         TestString,
		Stringer:       TestStringer,
		JsonAfterForm:  TestSerializationObject,
		XmlAfterForm:   TestSerializationObject,
	})
	assert.Nil(t, err)
	checkForm(req)

	req, err = service.FormParamsWithDefaultRequest(&FormParamsWithDefault{})
	assert.Nil(t, err)
	checkForm(req)
}
// TestMultipartRequest verifies that part-typed params (including an
// io.Reader payload) are encoded as a multipart/form-data body.
func TestMultipartRequest(t *testing.T) {
	creator, err := gotten.NewBuilder().
		SetBaseUrl("https://mock.io").
		SetClient(mockClient).
		Build()
	assert.Nil(t, err)

	var service AllTypesService
	assert.Nil(t, creator.Impl(&service))

	req, err := service.MultipartRequest(&MultipartParams{
		JsonBeforeForm: TestSerializationObject,
		Int:            TestInt,
		String:         TestString,
		Stringer:       TestStringer,
		Reader:         getTestReader(),
		JsonAfterForm:  TestSerializationObject,
		XmlAfterForm:   TestSerializationObject,
	})
	assert.Nil(t, err)

	req.ParseMultipartForm(2 << 32)
	for field, want := range map[string]string{
		"json_before_form": TestJSON,
		"json_after_form":  TestJSON,
		"xml_after_form":   TestXML,
		"int":              TestString,
		"string":           TestString,
		"stringer":         TestString,
		"reader":           TestString,
	} {
		assert.Equal(t, want, req.PostFormValue(field))
	}
	assert.True(t, strings.HasPrefix(req.Header.Get(headers.HeaderContentType), headers.MIMEMultipartForm))
	assert.Equal(t, "/multipart", req.URL.Path)
}
// TestXMLRequest exercises three xml-typed encodings: all fields set
// explicitly (multipart body), all fields from struct-tag defaults, and a
// single xml field sent as a raw application/xml body.
func TestXMLRequest(t *testing.T) {
	creator, err := gotten.NewBuilder().
		SetBaseUrl("https://mock.io").
		SetClient(mockClient).
		Build()
	assert.Nil(t, err)
	var service AllTypesService
	assert.Nil(t, creator.Impl(&service))
	// Phase 1: explicit values; every field serializes to TestXML except
	// the part-typed Int.
	req, err := service.XMLAllRequest(&XMLAllParams{
		Int:      TestInt,
		Xml:      TestSerializationObject,
		String:   TestXML,
		Stringer: bytes.NewBufferString(TestXML),
		Reader:   bytes.NewBufferString(TestXML),
	})
	assert.Nil(t, err)
	req.ParseMultipartForm(2 << 32)
	assert.Equal(t, TestString, req.PostFormValue("int"))
	assert.Equal(t, TestXML, req.PostFormValue("xml"))
	assert.Equal(t, TestXML, req.PostFormValue("string"))
	assert.Equal(t, TestXML, req.PostFormValue("stringer"))
	assert.Equal(t, TestXML, req.PostFormValue("reader"))
	assert.True(t, strings.HasPrefix(req.Header.Get(headers.HeaderContentType), headers.MIMEMultipartForm))
	assert.Equal(t, "/xml", req.URL.Path)
	// Phase 2: identical expectations, but all values come from the
	// `default:` struct tags.
	req, err = service.XMLAllWithDefaultRequest(&XMLAllWithDefaultParams{})
	assert.Nil(t, err)
	req.ParseMultipartForm(2 << 32)
	assert.Equal(t, TestString, req.PostFormValue("int"))
	assert.Equal(t, TestXML, req.PostFormValue("xml"))
	assert.Equal(t, TestXML, req.PostFormValue("string"))
	assert.Equal(t, TestXML, req.PostFormValue("stringer"))
	assert.Equal(t, TestXML, req.PostFormValue("reader"))
	assert.True(t, strings.HasPrefix(req.Header.Get(headers.HeaderContentType), headers.MIMEMultipartForm))
	assert.Equal(t, "/xml", req.URL.Path)
	// Phase 3: a single xml field becomes the whole request body with an
	// XML content type.
	req, err = service.XMLSingleRequest(&XMLSingleParams{TestSerializationObject})
	assert.Nil(t, err)
	body, err := ioutil.ReadAll(req.Body)
	assert.Nil(t, err)
	assert.Equal(t, TestXML, string(body))
	assert.Equal(t, headers.MIMEApplicationXMLCharsetUTF8, req.Header.Get(headers.HeaderContentType))
	assert.Equal(t, "/xml", req.URL.Path)
}
// TestJSONRequest verifies that a single json-typed field becomes the
// whole request body with a JSON content type.
func TestJSONRequest(t *testing.T) {
	creator, buildErr := gotten.NewBuilder().
		SetBaseUrl("https://mock.io").
		SetClient(mockClient).
		Build()
	assert.Nil(t, buildErr)

	var service AllTypesService
	assert.Nil(t, creator.Impl(&service))

	req, reqErr := service.JSONSingleParamsRequest(&JSONSingleParams{TestSerializationObject})
	assert.Nil(t, reqErr)

	payload, readErr := ioutil.ReadAll(req.Body)
	assert.Nil(t, readErr)
	assert.Equal(t, TestJSON, string(payload))
	assert.Equal(t, headers.MIMEApplicationJSONCharsetUTF8, req.Header.Get(headers.HeaderContentType))
	assert.Equal(t, "/json", req.URL.Path)
}
// TestHeadersAllRequest verifies that header-typed params are written into
// the request headers.
func TestHeadersAllRequest(t *testing.T) {
	creator, err := gotten.NewBuilder().
		SetBaseUrl("https://mock.io").
		SetClient(mockClient).
		Build()
	assert.Nil(t, err)

	var service AllTypesService
	assert.Nil(t, creator.Impl(&service))

	req, err := service.HeadersAllParamsRequest(&HeadersAllParams{TestString, TestString, TestString})
	assert.Nil(t, err)

	for _, header := range []string{"HOST", "LOCATION", headers.HeaderContentType} {
		assert.Equal(t, TestString, req.Header.Get(header))
	}
}
// TestCookieAllParams verifies that cookie-typed params (string, int, and
// fmt.Stringer) all end up as request cookies with the stringified value.
func TestCookieAllParams(t *testing.T) {
	creator, err := gotten.NewBuilder().
		SetBaseUrl("https://mock.io").
		SetClient(mockClient).
		Build()
	assert.Nil(t, err)

	var service AllTypesService
	assert.Nil(t, creator.Impl(&service))

	req, err := service.CookieAllParamsRequest(&CookieAllParams{
		Ga:         TestString,
		GaId:       TestInt,
		QscSession: TestStringer,
	})
	assert.Nil(t, err)

	for _, name := range []string{"ga", "ga_id", "qsc_session"} {
		cookie, cookieErr := req.Cookie(name)
		assert.Nil(t, cookieErr)
		assert.Equal(t, TestString, cookie.Value)
	}
}
type (
	// AllTypesService is the gotten service under test; each field's
	// struct tags declare the HTTP method and path used to build requests.
	AllTypesService struct {
		FormParamsRequest            func(*FormParams) (*http.Request, error)            `method:"POST" path:"/form"`
		FormParamsWithDefaultRequest func(withDefault *FormParamsWithDefault) (*http.Request, error) `method:"POST" path:"/form"`
		MultipartRequest             func(*MultipartParams) (*http.Request, error)       `method:"POST" path:"/multipart"`
		XMLAllRequest                func(*XMLAllParams) (*http.Request, error)          `method:"POST" path:"/xml"`
		XMLAllWithDefaultRequest     func(*XMLAllWithDefaultParams) (*http.Request, error) `method:"POST" path:"/xml"`
		XMLSingleRequest             func(*XMLSingleParams) (*http.Request, error)       `method:"POST" path:"/xml"`
		JSONSingleParamsRequest      func(*JSONSingleParams) (*http.Request, error)      `method:"POST" path:"/json"`
		HeadersAllParamsRequest      func(*HeadersAllParams) (*http.Request, error)      `path:"headers"`
		CookieAllParamsRequest       func(*CookieAllParams) (*http.Request, error)       `path:"cookie"`
	}

	// FormParams mixes json/xml serialized fields with plain form fields.
	FormParams struct {
		JsonBeforeForm *SerializationStruct `type:"json"`
		Int            int                  `type:"form"`
		String         string               `type:"form"`
		Stringer       fmt.Stringer         `type:"form"`
		JsonAfterForm  *SerializationStruct `type:"json"`
		XmlAfterForm   *SerializationStruct `type:"xml" `
	}

	// FormParamsWithDefault is FormParams with every value supplied via a
	// `default:` tag instead of being set by the caller.
	FormParamsWithDefault struct {
		JsonBeforeForm *SerializationStruct `type:"json" default:"{\"data\":\"1\"}"`
		Int            int                  `type:"form" default:"1"`
		String         string               `type:"form" default:"1"`
		Stringer       fmt.Stringer         `type:"form" default:"1"`
		JsonAfterForm  *SerializationStruct `type:"json" default:"{\"data\":\"1\"}"`
		XmlAfterForm   *SerializationStruct `type:"xml" default:"<SerializationStruct><Data>1</Data></SerializationStruct>"`
	}

	// MultipartParams uses part-typed fields, including an io.Reader.
	MultipartParams struct {
		JsonBeforeForm *SerializationStruct `type:"json"`
		Int            int                  `type:"part"`
		String         string               `type:"part"`
		Stringer       fmt.Stringer         `type:"part"`
		Reader         io.Reader            `type:"part"`
		JsonAfterForm  *SerializationStruct `type:"json"`
		XmlAfterForm   *SerializationStruct `type:"xml" `
	}

	// XMLAllParams exercises xml-typed fields across several Go kinds.
	XMLAllParams struct {
		Int      int                  `type:"part"`
		Xml      *SerializationStruct `type:"xml"`
		String   string               `type:"xml"`
		Stringer fmt.Stringer         `type:"xml"`
		Reader   io.Reader            `type:"xml"`
	}

	// XMLAllWithDefaultParams is XMLAllParams driven by `default:` tags.
	XMLAllWithDefaultParams struct {
		Int      int                  `type:"part" default:"1"`
		Xml      *SerializationStruct `type:"xml" default:"<SerializationStruct><Data>1</Data></SerializationStruct>"`
		String   string               `type:"xml" default:"<SerializationStruct><Data>1</Data></SerializationStruct>"`
		Stringer fmt.Stringer         `type:"xml" default:"<SerializationStruct><Data>1</Data></SerializationStruct>"`
		Reader   io.Reader            `type:"xml" default:"<SerializationStruct><Data>1</Data></SerializationStruct>"`
	}

	// XMLSingleParams has one xml field, sent as the whole request body.
	XMLSingleParams struct {
		Xml *SerializationStruct `type:"xml"`
	}

	// JSONSingleParams has one json field, sent as the whole request body.
	JSONSingleParams struct {
		Json *SerializationStruct `type:"json"`
	}

	// HeadersAllParams maps each field to a request header.
	HeadersAllParams struct {
		Host        string `type:"header"`
		Location    string `type:"header"`
		ContentType string `type:"header"`
	}

	// CookieAllParams maps each field to a request cookie.
	CookieAllParams struct {
		Ga         string       `type:"cookie"`
		GaId       int          `type:"cookie"`
		QscSession fmt.Stringer `type:"cookie"`
	}

	// SerializationStruct is the payload type serialized to JSON/XML.
	SerializationStruct struct {
		Data string `json:"data"`
	}
)
// Shared fixtures for the request-building tests. Note TestStringer is a
// single shared *bytes.Buffer, not recreated per test.
var (
	TestInt                 = 1
	TestString              = "1"
	TestStringer            = bytes.NewBufferString(TestString)
	TestSerializationObject = &SerializationStruct{TestString}
	TestJSON                = "{\"data\":\"1\"}"
	TestXML                 = "<SerializationStruct><Data>1</Data></SerializationStruct>"
)
// getTestReader returns a fresh reader over TestString, used as the
// io.Reader payload in the multipart test.
func getTestReader() io.Reader {
	return bytes.NewBufferString(TestString)
}
|
package main
import "fmt"
// Demonstrates embedded ("anonymous") struct usage.
// A provides the methods that get promoted when it is embedded in C.
type A struct {
	Name string
	age  int
}
// sayOK prints a confirmation message with the receiver's Name.
func (a *A) sayOK() {
	fmt.Println("A is OK.", a.Name)
}
// hello prints a greeting with the receiver's Name.
func (a *A) hello() {
	fmt.Println("hello.", a.Name)
}
// B is used as a named (non-embedded) field in C, so its fields are not
// promoted.
type B struct {
	Name  string
	score float64
}
// C combines an embedded struct, a named struct field, and its own Name
// field (which shadows the promoted A.Name).
type C struct {
	// Embedded (anonymous) struct: fields and methods are promoted.
	A
	// Named struct field: must be accessed as c.b.xxx.
	b B
	Name string
}
func main() {
	var c C
	// Standard form: qualify through the embedded type, c.A.xxx.
	c.A.Name = "tom"
	// b is a named field, so it must be qualified explicitly.
	c.b.Name = "jack"
	// C declares its own Name, which shadows the promoted A.Name;
	// the sayOK/hello methods still come from the embedded A.
	c.Name = "mark"
	// Shorthand: age is promoted from the embedded A.
	c.age = 10
	c.sayOK()
	c.hello()
}
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
package tailerhandler
import (
"github.com/facebookexperimental/GOAR/confighandler"
"github.com/facebookexperimental/GOAR/endpoints"
"github.com/streadway/amqp"
)
// Event represents a type of each tailer event: the raw byte payload of a
// single captured log line/message.
type Event []byte
// Tailer defines an interface for a tailer: Tail runs the tailing loop and
// publish forwards a single captured event.
type Tailer interface {
	Tail() error
	publish(event Event) error
}
// SyslogTailerQueue represents RabbitMQ tailored for our tailer: it embeds
// the generic RabbitMQEndpoint and records the declared output queue.
type SyslogTailerQueue struct {
	OutQueue amqp.Queue
	endpoints.RabbitMQEndpoint
}
// Connect establishes and configures access to external queues. It first
// connects the embedded RabbitMQEndpoint, then declares the durable output
// queue named by config.QueueLog. Returns error upon failure.
func (endpoint *SyslogTailerQueue) Connect(config confighandler.Config) error {
	if err := endpoint.RabbitMQEndpoint.Connect(config); err != nil {
		return err
	}
	queue, declareErr := endpoint.Channel.QueueDeclare(
		config.QueueLog,
		true,  // durable
		false, // delete when unused
		false, // exclusive
		false, // no-wait
		nil,   // arguments
	)
	endpoint.OutQueue = queue
	return declareErr
}
|
/*
* EVE Swagger Interface
*
* An OpenAPI for EVE Online
*
* OpenAPI spec version: 0.4.1.dev1
*
* Generated by: https://github.com/swagger-api/swagger-codegen.git
*/
package swagger
// GetUniverseSchematicsSchematicIdOk is the 200 ok object for the universe
// schematics ESI call (per the generated type name).
type GetUniverseSchematicsSchematicIdOk struct {
	// CycleTime is the time in seconds to process a run.
	CycleTime int32 `json:"cycle_time,omitempty"`
	// SchematicName is the schematic_name string.
	SchematicName string `json:"schematic_name,omitempty"`
}
|
package service
import (
"HumoAcademy/models"
"HumoAcademy/pkg/repository"
)
// MainPage provides read access to the landing-page content and lets
// visitors subscribe to news updates.
//
// Fixed: removed the non-gofmt space between method names and their
// parameter lists.
type MainPage interface {
	// GetAll returns the full content shown on the main page.
	GetAll() (models.MainPageContent, error)
	// AddUserForNews subscribes a user to news updates.
	AddUserForNews(news models.SubscribedUsers) error
}
// Courses manages course records, their publication status, and images.
//
// Fixed: removed the non-gofmt spaces between method names and their
// parameter lists.
type Courses interface {
	// CreateCourse stores a new course and returns its id.
	CreateCourse(courses models.Courses) (int, error)
	// EditCourse replaces the course with the given id.
	EditCourse(id int, course models.Courses) error
	// GetCourseById returns the course with the given id.
	GetCourseById(id int) (newCourse models.Courses, err error)
	// GetAllMiniCourses returns the compact listing of all courses.
	GetAllMiniCourses() ([]models.MiniCourses, error)
	// ChangeCourseStatus toggles a course's status flag.
	ChangeCourseStatus(id int, status bool) error
	// ChangeCourseImg updates the course's image reference.
	ChangeCourseImg(id int, img string) error
	// GetCourseImgSrc returns the course's image source.
	GetCourseImgSrc(id int) (string, error)
}
// News manages news articles, their publication status, and images.
//
// Fixed: removed the non-gofmt spaces between method names and their
// parameter lists.
type News interface {
	// CreateNews stores a new article and returns its id.
	CreateNews(news models.News) (int, error)
	// GetNewsByID returns the article with the given id.
	GetNewsByID(int) (models.News, error)
	// GetAllMiniNews returns the compact listing of all articles.
	GetAllMiniNews() ([]models.MiniNews, error)
	// EditNews replaces the article with the given id.
	EditNews(id int, news models.News) error
	// ChangeNewsStatus toggles an article's status flag.
	ChangeNewsStatus(id int, status bool) error
	// ChangeNewsImg updates the article's image reference.
	ChangeNewsImg(id int, img string) error
	// GetNewsImgSrc returns the article's image source.
	GetNewsImgSrc(id int) (string, error)
	// CheckNewsExpireDate presumably retires news past their expiry
	// date — confirm against the service implementation.
	CheckNewsExpireDate() error
}
// Admin handles administrator accounts and token-based authentication.
type Admin interface {
	// CreateAdmin stores a new admin and returns its id.
	CreateAdmin(admin models.Admin) (int, error)
	// GenerateToken authenticates by username/password and returns a token.
	GenerateToken(username, password string) (string, error)
	// ParseToken validates accessToken and returns two ints — presumably
	// the admin id plus a second claim; confirm against the implementation.
	ParseToken(accessToken string) (int, int, error)
}
// User manages site users and course registrations.
//
// Fixed: removed the non-gofmt spaces between method names and their
// parameter lists.
type User interface {
	// GetAllSubscribedUsers returns all subscribed users as strings
	// (presumably contact addresses — confirm against the implementation).
	GetAllSubscribedUsers() ([]string, error)
	// CreateUser stores a new user and returns its id.
	CreateUser(user models.Users) (int, error)
	// GetAllCourseUsers lists the users registered to the given course.
	GetAllCourseUsers(courseId int) (models.CourseUsersList, error)
	// DeleteUserByID removes the user with the given id.
	DeleteUserByID(id int) error
	// GetUserById returns the user with the given id.
	GetUserById(id int) (models.Users, error)
}
// Service aggregates all domain services behind embedded interfaces, so a
// single *Service satisfies each of them.
type Service struct {
	MainPage
	Courses
	News
	Admin
	User
}
// NewService wires every domain service to its matching repository and
// returns them bundled into one Service.
func NewService(repos *repository.Repository) *Service {
	service := new(Service)
	service.MainPage = NewMainPageService(repos.MainPage)
	service.Courses = NewCoursesService(repos.Courses)
	service.News = NewNewsService(repos.News)
	service.Admin = NewAdminService(repos.Admin)
	service.User = NewUserService(repos.User)
	return service
}
|
package openshift
import (
"context"
"errors"
"fmt"
"os"
"strings"
"sync"
semver "github.com/blang/semver/v4"
configv1 "github.com/openshift/api/config/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/predicate"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/projection"
)
// stripObject clears the server-assigned identity fields (resourceVersion and
// UID) so the object can be re-applied; nil objects are ignored.
func stripObject(obj client.Object) {
	if obj != nil {
		obj.SetResourceVersion("")
		obj.SetUID("")
	}
}
// watchName builds a predicate that admits only objects whose name matches
// *name; the pointer is dereferenced per event, so later changes take effect.
func watchName(name *string) predicate.Funcs {
	matches := func(object client.Object) bool {
		return object.GetName() == *name
	}
	return predicate.NewPredicateFuncs(matches)
}
// conditionsEqual reports whether two ClusterOperator status conditions agree
// on type, status, message, and reason. Timestamps are deliberately ignored.
func conditionsEqual(a, b *configv1.ClusterOperatorStatusCondition) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Type == b.Type &&
		a.Status == b.Status &&
		a.Message == b.Message &&
		a.Reason == b.Reason
}
// versionsMatch reports whether a and b contain the same OperandVersions with
// the same multiplicities, irrespective of order (multiset equality).
func versionsMatch(a []configv1.OperandVersion, b []configv1.OperandVersion) bool {
	if len(a) != len(b) {
		return false
	}
	// Tally a, then consume the tally with b; any miss means a mismatch.
	pending := make(map[configv1.OperandVersion]int, len(a))
	for _, v := range a {
		pending[v]++
	}
	for _, v := range b {
		n, ok := pending[v]
		if !ok {
			return false
		}
		if n == 1 {
			delete(pending, v)
		} else {
			pending[v] = n - 1
		}
	}
	return len(pending) == 0
}
// skews is a collection of per-CSV upgrade-compatibility findings.
type skews []skew

// String renders the collection for the ClusterOperator condition message.
// Skews carrying errors are listed first; the remaining skews follow in
// reverse discovery order (matching the original two-cursor fill).
func (s skews) String() string {
	var withErr, clean []string
	for _, sk := range s {
		if sk.err != nil {
			withErr = append(withErr, sk.String())
		} else {
			clean = append(clean, sk.String())
		}
	}
	// Reverse the error-free entries to reproduce the back-fill ordering.
	for i, j := 0, len(clean)-1; i < j; i, j = i+1, j-1 {
		clean[i], clean[j] = clean[j], clean[i]
	}
	msg := append(withErr, clean...)
	return "ClusterServiceVersions blocking cluster upgrade: " + strings.Join(msg, ",")
}
// skew describes one CSV's compatibility with the next OpenShift minor
// release: either a parsed max version or an error explaining why the
// property could not be evaluated.
type skew struct {
	namespace           string
	name                string
	maxOpenShiftVersion string
	err                 error
}

// String renders a one-line, human-readable description of the skew.
func (s skew) String() string {
	if s.err == nil {
		return fmt.Sprintf("%s/%s is incompatible with OpenShift minor versions greater than %s", s.namespace, s.name, s.maxOpenShiftVersion)
	}
	return fmt.Sprintf("%s/%s has invalid %s properties: %s", s.namespace, s.name, MaxOpenShiftVersionProperty, s.err)
}
// transientError marks an error as retryable: callers may re-attempt the
// operation rather than treating the failure as terminal.
type transientError struct {
	error
}

// transientErrors returns the result of stripping all wrapped errors not of
// type transientError from the given error (nil if none remain).
func transientErrors(err error) error {
	keepTransient := func(e error) bool {
		return !errors.As(e, new(transientError))
	}
	return utilerrors.FilterOut(err, keepTransient)
}
// incompatibleOperators lists the non-copied CSVs on the cluster whose
// olm.maxOpenShiftVersion property would block an upgrade to the next
// OpenShift Y-stream release. List failures are wrapped as transientError so
// callers can retry.
func incompatibleOperators(ctx context.Context, cli client.Client) (skews, error) {
	current, err := getCurrentRelease()
	if err != nil {
		return nil, err
	}
	if current == nil {
		// Note: This shouldn't happen
		return nil, fmt.Errorf("failed to determine current OpenShift Y-stream release")
	}
	next, err := nextY(*current)
	if err != nil {
		return nil, err
	}

	csvList := &operatorsv1alpha1.ClusterServiceVersionList{}
	if err := cli.List(ctx, csvList); err != nil {
		return nil, &transientError{fmt.Errorf("failed to list ClusterServiceVersions: %w", err)}
	}

	var incompatible skews
	for i := range csvList.Items {
		csv := csvList.Items[i]
		if csv.IsCopied() {
			continue
		}
		entry := skew{
			name:      csv.GetName(),
			namespace: csv.GetNamespace(),
		}
		// maxVer avoids shadowing the max builtin; nil means "no property".
		maxVer, err := maxOpenShiftVersion(&csv)
		if err != nil {
			entry.err = err
			incompatible = append(incompatible, entry)
			continue
		}
		if maxVer == nil || maxVer.GTE(next) {
			continue
		}
		entry.maxOpenShiftVersion = fmt.Sprintf("%d.%d", maxVer.Major, maxVer.Minor)
		incompatible = append(incompatible, entry)
	}
	return incompatible, nil
}
// openshiftRelease caches the cluster's current release version behind a
// mutex so the environment is consulted at most once per process.
type openshiftRelease struct {
	version *semver.Version
	mu sync.Mutex
}
// currentRelease is the process-wide singleton populated lazily by
// getCurrentRelease.
var (
	currentRelease = &openshiftRelease{}
)
const (
	releaseEnvVar = "RELEASE_VERSION" // OpenShift's env variable for defining the current release
)
// getCurrentRelease thread safely retrieves the current version of OCP at the time of this operator starting.
// This is defined by an environment variable that our release manifests define (and get dynamically updated)
// by OCP. For the purposes of this package, that environment variable is a constant under the name of releaseEnvVar.
//
// Note: currentRelease is designed to be a singleton that only gets updated the first time that this function
// is called. As a result, all calls to this will return the same value even if the releaseEnvVar gets
// changed during runtime somehow.
func getCurrentRelease() (*semver.Version, error) {
	currentRelease.mu.Lock()
	defer currentRelease.mu.Unlock()
	if currentRelease.version != nil {
		/*
			If the version is already set, we don't want to set it again as the currentRelease
			is designed to be a singleton. If a new version is set, we are making an assumption
			that this controller will be restarted and thus pull in the new version from the
			environment into memory.
			Note: sync.Once is not used here as it was difficult to reliably test without hitting
			race conditions.
		*/
		return currentRelease.version, nil
	}
	// Get the raw version from the releaseEnvVar environment variable
	raw, ok := os.LookupEnv(releaseEnvVar)
	if !ok || raw == "" {
		// No env var set, try again later
		return nil, fmt.Errorf("desired release version missing from %v env variable", releaseEnvVar)
	}
	// ParseTolerant accepts loose inputs such as "4.12" or "v4.12.0".
	release, err := semver.ParseTolerant(raw)
	if err != nil {
		return nil, fmt.Errorf("cluster version has invalid desired release version: %w", err)
	}
	currentRelease.version = &release
	return currentRelease.version, nil
}
// nextY returns the next OpenShift Y-stream (minor) release after v.
// Build metadata is discarded; a pre-release of X.Y is treated as X.Y.0
// itself (dropping the pre-release is equivalent to the increment).
func nextY(v semver.Version) (semver.Version, error) {
	v.Build = nil // Builds are irrelevant
	if len(v.Pre) == 0 {
		return v, v.IncrementMinor() // Sets Y=Y+1 and Z=0
	}
	// Dropping pre-releases is equivalent to incrementing Y
	v.Pre = nil
	v.Patch = 0
	return v, nil
}
const (
	// MaxOpenShiftVersionProperty is the CSV property naming the last
	// OpenShift <major>.<minor> the operator supports.
	MaxOpenShiftVersionProperty = "olm.maxOpenShiftVersion"
)
// maxOpenShiftVersion extracts the olm.maxOpenShiftVersion property from the
// CSV's properties annotation and returns it truncated to <major>.<minor>.
// It returns (nil, nil) when the annotation or property is absent, and an
// error when the property is duplicated, empty, unparsable, or carries more
// precision than <major>.<minor>.
func maxOpenShiftVersion(csv *operatorsv1alpha1.ClusterServiceVersion) (*semver.Version, error) {
	// Extract the property from the CSV's annotations if possible
	annotation, ok := csv.GetAnnotations()[projection.PropertiesAnnotationKey]
	if !ok {
		return nil, nil
	}
	properties, err := projection.PropertyListFromPropertiesAnnotation(annotation)
	if err != nil {
		return nil, err
	}
	// max holds the single allowed property value; a second occurrence is an
	// error. (Shadows the Go 1.21 builtin of the same name; harmless here.)
	var max *string
	for _, property := range properties {
		if property.Type != MaxOpenShiftVersionProperty {
			continue
		}
		if max != nil {
			return nil, fmt.Errorf(`defining more than one "%s" property is not allowed`, MaxOpenShiftVersionProperty)
		}
		max = &property.Value
	}
	if max == nil {
		return nil, nil
	}
	// Account for any additional quoting
	value := strings.Trim(*max, "\"")
	if value == "" {
		// Handle "" separately, so parse doesn't treat it as a zero
		return nil, fmt.Errorf(`value cannot be "" (an empty string)`)
	}
	version, err := semver.ParseTolerant(value)
	if err != nil {
		return nil, fmt.Errorf(`failed to parse "%s" as semver: %w`, value, err)
	}
	// Reject patch/pre-release/build components: only X.Y is meaningful.
	truncatedVersion := semver.Version{Major: version.Major, Minor: version.Minor}
	if !version.EQ(truncatedVersion) {
		return nil, fmt.Errorf("property %s must specify only <major>.<minor> version, got invalid value %s", MaxOpenShiftVersionProperty, version)
	}
	return &truncatedVersion, nil
}
// notCopiedSelector builds a label selector matching only CSVs that do NOT
// carry the "copied" label, i.e. the original (non-propagated) copies.
func notCopiedSelector() (labels.Selector, error) {
	req, err := labels.NewRequirement(operatorsv1alpha1.CopiedLabelKey, selection.DoesNotExist, nil)
	if err != nil {
		return nil, err
	}
	return labels.NewSelector().Add(*req), nil
}
// olmOperatorRelatedObjects returns ObjectReferences for every non-copied CSV
// in namespace, suitable for a ClusterOperator's relatedObjects list.
func olmOperatorRelatedObjects(ctx context.Context, cli client.Client, namespace string) ([]configv1.ObjectReference, error) {
	selector, err := notCopiedSelector()
	if err != nil {
		return nil, err
	}

	csvList := &operatorsv1alpha1.ClusterServiceVersionList{}
	listOpts := []client.ListOption{
		client.InNamespace(namespace),
		client.MatchingLabelsSelector{Selector: selector},
	}
	if err := cli.List(ctx, csvList, listOpts...); err != nil {
		return nil, err
	}

	var refs []configv1.ObjectReference
	for i := range csvList.Items {
		csv := &csvList.Items[i]
		if csv.IsCopied() {
			// Filter out copied CSVs that the label selector missed
			continue
		}
		// TODO: Generalize ObjectReference generation
		refs = append(refs, configv1.ObjectReference{
			Group:     operatorsv1alpha1.GroupName,
			Resource:  "clusterserviceversions",
			Namespace: csv.GetNamespace(),
			Name:      csv.GetName(),
		})
	}
	return refs, nil
}
|
package problem0110
import "testing"
// TestSolve feeds three level-order trees (0 marks a missing node) through
// isBalanced.
// NOTE(review): results are only t.Log'ed, never asserted, so this test can
// never fail — consider asserting the expected true/true/false outcomes.
func TestSolve(t *testing.T) {
	root1 := MakeTree([]int{10, 5, -3, 3, 2, 0, 11, 3, -2, 0, 1})
	t.Log(isBalanced(root1))
	root2 := MakeTree([]int{3, 9, 20, 0, 0, 15, 7})
	t.Log(isBalanced(root2))
	root3 := MakeTree([]int{1, 2, 2, 3, 3, 0, 0, 4, 4})
	t.Log(isBalanced(root3))
}
// MakeTree builds a binary tree from a level-order value slice, where 0 marks
// a missing node, and returns its root (nil for an empty slice).
//
// NOTE(review): 0 doubles as the "absent" sentinel, so a genuine node value
// of 0 cannot be represented — confirm against the problems using this helper.
func MakeTree(vals []int) *TreeNode {
	if len(vals) == 0 {
		// The original indexed nodes[0] unconditionally and panicked here.
		return nil
	}
	nodes := make([]*TreeNode, len(vals))
	for i, v := range vals {
		if v != 0 {
			nodes[i] = &TreeNode{Val: v}
		}
	}
	// Link children. Both child indices are bounds-checked: the original
	// accessed nodes[i*2+2] for every i < len(vals)/2, which panics whenever
	// len(vals) is even (e.g. len 4, i 1 -> index 4).
	for i := range nodes {
		if nodes[i] == nil {
			continue
		}
		if l := i*2 + 1; l < len(nodes) {
			nodes[i].Left = nodes[l]
		}
		if r := i*2 + 2; r < len(nodes) {
			nodes[i].Right = nodes[r]
		}
	}
	return nodes[0]
}
|
package message
import (
	// NOTE(review): this import was declared but unused, which does not
	// compile in Go. It is kept as a blank import rather than deleted in case
	// a side effect is relied on — confirm and remove if it is not.
	_ "github.com/golang/protobuf/proto"
)
// Message is a unit of work that knows how to process itself.
type Message interface {
	Handle()
}

// MessageManager is a producer/consumer hand-off point for Messages.
type MessageManager interface {
	Produce(msg Message)
	Consume() (msg Message)
}

// messageManager implements MessageManager on top of a channel: Produce
// blocks while the channel is full, Consume blocks while it is empty.
type messageManager struct {
	messageCh chan Message
}

// Produce enqueues msg for a consumer.
func (mm *messageManager) Produce(msg Message) {
	mm.messageCh <- msg
}

// Consume dequeues the next message, blocking until one is available.
func (mm *messageManager) Consume() (msg Message) {
	msg = <-mm.messageCh
	return msg
}
|
package caaa
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00500102 is the XML document wrapper for the caaa.005.001.02
// AcceptorCancellationRequest message.
type Document00500102 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:caaa.005.001.02 Document"`
	Message *AcceptorCancellationRequestV02 `xml:"AccptrCxlReq"`
}

// AddMessage allocates the wrapped message, stores it on the document, and
// returns it so the caller can populate it.
func (d *Document00500102) AddMessage() *AcceptorCancellationRequestV02 {
	d.Message = &AcceptorCancellationRequestV02{}
	return d.Message
}
// The AcceptorCancellationRequest message is sent by an acceptor (or its agent) to the acquirer (or its agent), to request the cancellation of a successfully completed transaction. Cancellation should only occur before the transaction has been cleared.
type AcceptorCancellationRequestV02 struct {
	// Cancellation request message management information.
	Header *iso20022.Header1 `xml:"Hdr"`
	// Information related to the cancellation request.
	CancellationRequest *iso20022.AcceptorCancellationRequest2 `xml:"CxlReq"`
	// Trailer of the message containing a MAC.
	SecurityTrailer *iso20022.ContentInformationType6 `xml:"SctyTrlr"`
}
// AddHeader allocates and returns the message-management header.
func (a *AcceptorCancellationRequestV02) AddHeader() *iso20022.Header1 {
	a.Header = &iso20022.Header1{}
	return a.Header
}

// AddCancellationRequest allocates and returns the cancellation body.
func (a *AcceptorCancellationRequestV02) AddCancellationRequest() *iso20022.AcceptorCancellationRequest2 {
	a.CancellationRequest = &iso20022.AcceptorCancellationRequest2{}
	return a.CancellationRequest
}

// AddSecurityTrailer allocates and returns the MAC trailer.
func (a *AcceptorCancellationRequestV02) AddSecurityTrailer() *iso20022.ContentInformationType6 {
	a.SecurityTrailer = &iso20022.ContentInformationType6{}
	return a.SecurityTrailer
}
|
package utils
import (
"crypto/md5"
"encoding/base64"
"github.com/golang/glog"
"github.com/mitchellh/mapstructure"
"io/ioutil"
"net"
"os"
//"os/exec"
//"strings"
)
// MergeMap decodes the `data` map into the json-tagged fields of resultTo.
// A nil map is a no-op; decoder-setup and decode failures are logged and the
// error (nil on success) is returned.
func MergeMap(data map[string]string, resultTo interface{}) error {
	if data == nil {
		return nil
	}
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		WeaklyTypedInput: true,
		Result:           resultTo,
		TagName:          "json",
	})
	if err != nil {
		glog.Warningf("error configuring decoder: %v", err)
		return err
	}
	if err = decoder.Decode(data); err != nil {
		glog.Warningf("error decoding config: %v", err)
	}
	return err
}
// BackendHash calc a base64 encoding of a partial hash of an endpoint
// to be used as a cookie value of the backend on sticky session conf
func BackendHash(endpoint string) string {
hash := md5.Sum([]byte(endpoint))
return base64.StdEncoding.EncodeToString(hash[:8])
}
// SendToSocket writes command to the named unix socket, logs any short
// response (minus its trailing CRLF), and returns an error only when dialing
// or writing fails; reading the response remains best-effort.
func SendToSocket(socket string, command string) error {
	c, err := net.Dial("unix", socket)
	if err != nil {
		glog.Warningf("error sending to unix socket: %v", err)
		return err
	}
	// The connection was previously leaked; always release it.
	defer c.Close()
	sent, err := c.Write([]byte(command))
	if err != nil || sent != len(command) {
		glog.Warningf("error sending to unix socket %s", socket)
		return err
	}
	readBuffer := make([]byte, 2048)
	rcvd, err := c.Read(readBuffer)
	if err != nil {
		// The read error used to be silently discarded; log it but keep the
		// nil return so callers see the same success semantics as before.
		glog.Warningf("error reading from unix socket %s: %v", socket, err)
		return nil
	}
	if rcvd > 2 {
		glog.Infof("telegraf stat socket response: \"%s\"", string(readBuffer[:rcvd-2]))
	}
	return nil
}
// checkValidity runs a configuration validity check on a file.
// Currently a stub that always succeeds; RewriteConfigFiles relies on it as
// the gate before renaming the new config into place.
func checkValidity(configFile string) error {
	//TODO
	return nil
}
// RewriteConfigFiles safely replaces a configuration file with new contents:
// write to a temp file, validate it, rename it over the target, then fix the
// target's permissions. reloadStrategy is currently unused but kept for
// interface compatibility.
func RewriteConfigFiles(data []byte, reloadStrategy, configFile string) error {
	tmpf := "/etc/telegraf/new_cfg.erb"
	// 0644 (octal). The original passed decimal 644, i.e. mode 01204
	// (--w----r-T), which is not a sane file mode.
	err := ioutil.WriteFile(tmpf, data, 0644)
	if err != nil {
		glog.Warningln("Error writing rendered template to file")
		return err
	}
	err = checkValidity(tmpf)
	if err != nil {
		return err
	}
	// Rename is atomic on the same filesystem, so readers never see a
	// half-written config.
	err = os.Rename(tmpf, configFile)
	if err != nil {
		glog.Warningln("Error updating config file")
		return err
	}
	err = os.Chmod(configFile, 0644)
	if err != nil {
		glog.Warningln("Error chmod config file")
		return err
	}
	return nil
}
|
package main
import (
"fmt"
)
// TreeNode is a binary tree node carrying an int payload.
type TreeNode struct {
	Data  int
	Left  *TreeNode
	Right *TreeNode
}

// Stack is a LIFO stack of tree nodes that reuses its backing slice.
type Stack struct {
	nodes []*TreeNode
	count int
}

// Push places n on top of the stack, recycling spare slice capacity.
func (st *Stack) Push(n *TreeNode) {
	st.nodes = append(st.nodes[:st.count], n)
	st.count++
}

// Pop removes and returns the top node, or nil when the stack is empty.
func (st *Stack) Pop() *TreeNode {
	if st.count < 1 {
		return nil
	}
	st.count--
	return st.nodes[st.count]
}
// inOrder prints the tree's values in left-root-right order, each followed by
// a tab, using an explicit stack instead of recursion.
func inOrder(root *TreeNode) {
	stack := new(Stack)
	node := root
	for node != nil || stack.count > 0 {
		// Descend to the leftmost unvisited node, stacking every ancestor.
		for node != nil {
			stack.Push(node)
			node = node.Left
		}
		node = stack.Pop()
		fmt.Print(node.Data, "\t")
		// Continue with the right subtree (may be nil, ending this branch).
		node = node.Right
	}
}
// main builds a small fixed tree and prints its in-order traversal.
func main() {
	root := Create(42)
	root.Left = Create(6)
	root.Right = Create(10)
	root.Left.Left = Create(12)
	root.Left.Right = Create(4)
	root.Left.Left.Left = Create(-3)
	root.Left.Left.Right = Create(20)
	root.Right.Left = Create(20)
	root.Right.Right = Create(-7)
	inOrder(root)
}
// Create allocates a leaf node carrying value; both children start nil.
func Create(value int) *TreeNode {
	return &TreeNode{Data: value}
}
|
package main
import (
"testing"
)
// TestParseCode checks that an Intcode instruction (1002) splits into its
// two-digit opcode (02) and per-parameter mode flags read from the remaining
// digits: position, immediate, position.
func TestParseCode(t *testing.T) {
	opcode, paramsMode := ParseCode(1002)
	if opcode != 2 {
		t.Errorf("expected 2, got %v", opcode)
	}
	if paramsMode[0] != 0 || paramsMode[1] != 1 || paramsMode[2] != 0 {
		t.Errorf("expected [0, 1, 0], got %v", paramsMode)
	}
}
// TestIntCodeComputer_Run drives the comparison and jump programs
// (presumably the Advent of Code 2019 day-5 examples — confirm) through the
// computer and checks each single output.
// NOTE(review): some failure messages hard-code "got 1"/"got 0" instead of
// printing r[0]; harmless but misleading when a case fails.
func TestIntCodeComputer_Run(t *testing.T) {
	com := new(IntCodeComputer)
	// Outputs 1 if input == 8, else 0 (position mode).
	com.Init("3,9,8,9,10,9,4,9,99,-1,8")
	r := com.Run(1)
	if r[0] != 0 {
		t.Errorf("expected 0, got 1")
	}
	r = com.Run(8)
	if r[0] != 1 {
		t.Errorf("expected 1, got 0")
	}
	// Outputs 1 if input < 8, else 0 (position mode).
	com.Init("3,9,7,9,10,9,4,9,99,-1,8")
	r = com.Run(1)
	if r[0] != 1 {
		t.Errorf("expected 1, got 0")
	}
	r = com.Run(9)
	if r[0] != 0 {
		t.Errorf("expected 0, got 1")
	}
	// Outputs 1 if input == 8, else 0 (immediate mode).
	com.Init("3,3,1108,-1,8,3,4,3,99")
	r = com.Run(1)
	if r[0] != 0 {
		t.Errorf("expected 0, got 1")
	}
	r = com.Run(8)
	if r[0] != 1 {
		t.Errorf("expected 1, got 0")
	}
	// Jump test: outputs 0 when the input is 0, 1 otherwise.
	com.Init("3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9")
	r = com.Run(0)
	if r[0] != 0 {
		t.Errorf("expected 0, got %v", r[0])
	}
	r = com.Run(100)
	if r[0] != 1 {
		t.Errorf("expected 1, got %v", r[0])
	}
	// Composite example: 999 if input < 8, 1000 if == 8, 1001 if > 8.
	com.Init("3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99")
	r = com.Run(1)
	if r[0] != 999 {
		t.Errorf("expected 999, got %v", r[0])
	}
	r = com.Run(8)
	if r[0] != 1000 {
		t.Errorf("expected 1000, got %v", r[0])
	}
	r = com.Run(9)
	if r[0] != 1001 {
		t.Errorf("expected 1001, got %v", r[0])
	}
}
|
package slice_utils
// Subtract returns the elements of originalList that do not appear in
// subtractList (compared with ==). Order and duplicates of originalList are
// preserved; the result is always non-nil.
func Subtract(originalList []interface{}, subtractList []interface{}) []interface{} {
	result := make([]interface{}, 0)
	for _, original := range originalList {
		exists := false
		for _, subtract := range subtractList {
			if subtract == original {
				exists = true
				// Membership is decided; no need to scan the rest (the
				// original kept looping to the end).
				break
			}
		}
		if !exists {
			result = append(result, original)
		}
	}
	return result
}
// SubtractStrings returns the entries of originalList that are absent from
// subtractList. Order and duplicates of originalList are preserved; the
// result is always non-nil.
func SubtractStrings(originalList []string, subtractList []string) []string {
	// Build a set of values to drop for O(1) membership checks.
	drop := make(map[string]struct{}, len(subtractList))
	for _, s := range subtractList {
		drop[s] = struct{}{}
	}
	result := make([]string, 0)
	for _, s := range originalList {
		if _, found := drop[s]; !found {
			result = append(result, s)
		}
	}
	return result
}
// ElementsInBoth returns firstList elements that also occur in secondList.
// One element is appended per matching pair, so duplicates in either input
// produce duplicates in the output (intentional: no break/dedup).
func ElementsInBoth(firstList []interface{}, secondList []interface{}) []interface{} {
	result := make([]interface{}, 0)
	for i := range firstList {
		for j := range secondList {
			if firstList[i] == secondList[j] {
				result = append(result, firstList[i])
			}
		}
	}
	return result
}
// Pop returns a new slice equal to array with the element at index key
// removed; array itself is not modified. An out-of-range key panics, exactly
// like direct slice indexing.
func Pop(array []interface{}, key int) []interface{} {
	head := array[:key]
	tail := array[key+1:]
	out := make([]interface{}, 0)
	out = append(out, head...)
	return append(out, tail...)
}
|
package 贪心
import "strconv"
// maximum69Number returns the largest number obtainable from num by changing
// at most one digit 6 into a 9. Flipping the most significant 6 maximizes the
// result; if there is no 6, num is returned unchanged.
func maximum69Number(num int) int {
	digits := []byte(strconv.Itoa(num))
	for i, d := range digits {
		if d == '6' {
			digits[i] = '9'
			break
		}
	}
	result, _ := strconv.Atoi(string(digits))
	return result
}

/*
Problem link: https://leetcode-cn.com/problems/maximum-69-number/
*/
|
package map2
// Append copies every key/value pair from m2 into m1, overwriting any keys
// already present in m1. m1 must be non-nil when m2 has entries (writing to
// a nil map panics).
func Append(m1, m2 map[string]interface{}) {
	for key, value := range m2 {
		m1[key] = value
	}
}
|
package metalgo
import (
v1 "github.com/metal-stack/masterdata-api/api/rest/v1"
"github.com/metal-stack/metal-go/api/client/project"
"github.com/metal-stack/metal-go/api/models"
)
// ProjectListResponse is the response of a ProjectList action
type ProjectListResponse struct {
	// Project holds all matching projects.
	Project []*models.V1ProjectResponse
}
// ProjectGetResponse is the response of a ProjectGet action
type ProjectGetResponse struct {
	// Project is the single project looked up, created, updated, or deleted.
	Project *models.V1ProjectResponse
}
// ProjectFindRequest is the find request struct
type ProjectFindRequest struct {
	ID string
	Name string
	Tenant string
}
// ProjectList fetches every project visible to this driver.
// On error the returned response is non-nil but empty.
func (d *Driver) ProjectList() (*ProjectListResponse, error) {
	params := project.NewListProjectsParams()
	resp, err := d.project.ListProjects(params, nil)
	if err != nil {
		return &ProjectListResponse{}, err
	}
	return &ProjectListResponse{Project: resp.Payload}, nil
}
// ProjectFind returns the projects matching the given find request; only the
// non-nil fields of pfr are used as filters. On error the returned response
// is non-nil but empty.
func (d *Driver) ProjectFind(pfr v1.ProjectFindRequest) (*ProjectListResponse, error) {
	body := &models.V1ProjectFindRequest{}
	if pfr.Id != nil {
		body.ID = *pfr.Id
	}
	if pfr.Name != nil {
		body.Name = *pfr.Name
	}
	if pfr.TenantId != nil {
		body.TenantID = *pfr.TenantId
	}
	params := project.NewFindProjectsParams()
	params.Body = body
	resp, err := d.project.FindProjects(params, nil)
	if err != nil {
		return &ProjectListResponse{}, err
	}
	return &ProjectListResponse{Project: resp.Payload}, nil
}
// ProjectGet looks up a single project by its ID.
// On error the returned response is non-nil but empty.
func (d *Driver) ProjectGet(projectID string) (*ProjectGetResponse, error) {
	params := project.NewFindProjectParams()
	params.ID = projectID
	resp, err := d.project.FindProject(params, nil)
	if err != nil {
		return &ProjectGetResponse{}, err
	}
	return &ProjectGetResponse{Project: resp.Payload}, nil
}
// ProjectCreate creates a new project from the given request.
// NOTE(review): pcr.Meta is dereferenced unconditionally, so a nil Meta
// panics — confirm all callers populate it.
func (d *Driver) ProjectCreate(pcr v1.ProjectCreateRequest) (*ProjectGetResponse, error) {
	meta := &models.V1Meta{
		ID:          pcr.Meta.Id,
		Kind:        pcr.Meta.Kind,
		Apiversion:  pcr.Meta.Apiversion,
		Annotations: pcr.Meta.Annotations,
		Labels:      pcr.Meta.Labels,
		Version:     pcr.Meta.Version,
	}
	params := project.NewCreateProjectParams()
	params.Body = &models.V1ProjectCreateRequest{
		Description: pcr.Description,
		Meta:        meta,
		Name:        pcr.Name,
		TenantID:    pcr.TenantId,
		Quotas:      ToV1QuotaSet(pcr.Quotas),
	}
	resp, err := d.project.CreateProject(params, nil)
	if err != nil {
		return &ProjectGetResponse{}, err
	}
	return &ProjectGetResponse{Project: resp.Payload}, nil
}
// ProjectUpdate updates an existing project from the given request.
// NOTE(review): pur.Meta is dereferenced unconditionally, so a nil Meta
// panics — confirm all callers populate it.
func (d *Driver) ProjectUpdate(pur v1.ProjectUpdateRequest) (*ProjectGetResponse, error) {
	meta := &models.V1Meta{
		ID:          pur.Meta.Id,
		Kind:        pur.Meta.Kind,
		Apiversion:  pur.Meta.Apiversion,
		Annotations: pur.Meta.Annotations,
		Labels:      pur.Meta.Labels,
		Version:     pur.Meta.Version,
	}
	params := project.NewUpdateProjectParams()
	params.Body = &models.V1ProjectUpdateRequest{
		Description: pur.Description,
		Meta:        meta,
		Name:        pur.Name,
		TenantID:    pur.TenantId,
		Quotas:      ToV1QuotaSet(pur.Quotas),
	}
	resp, err := d.project.UpdateProject(params, nil)
	if err != nil {
		return &ProjectGetResponse{}, err
	}
	return &ProjectGetResponse{Project: resp.Payload}, nil
}
// ProjectDelete deletes the project with the given ID and returns the
// deleted project's representation. On error the returned response is
// non-nil but empty.
func (d *Driver) ProjectDelete(projectID string) (*ProjectGetResponse, error) {
	params := project.NewDeleteProjectParams()
	params.ID = projectID
	resp, err := d.project.DeleteProject(params, nil)
	if err != nil {
		return &ProjectGetResponse{}, err
	}
	return &ProjectGetResponse{Project: resp.Payload}, nil
}
// Helpers

// ToV1QuotaSet converts a masterdata-api v1 QuotaSet to its swagger
// V1QuotaSet equivalent; a nil input yields nil.
func ToV1QuotaSet(q *v1.QuotaSet) *models.V1QuotaSet {
	if q == nil {
		return nil
	}
	out := &models.V1QuotaSet{}
	out.Cluster = ToV1Quota(q.Cluster)
	out.Machine = ToV1Quota(q.Machine)
	out.IP = ToV1Quota(q.Ip)
	out.Project = ToV1Quota(q.Project)
	return out
}
// ToV1Quota converts a masterdata-api v1 Quota to its swagger V1Quota
// equivalent; a nil quota (or a quota with no value set) yields nil.
func ToV1Quota(q *v1.Quota) *models.V1Quota {
	if q == nil || q.Quota == nil {
		return nil
	}
	return &models.V1Quota{Quota: *q.Quota}
}
|
package main
import (
"context"
"errors"
"sync"
"testing"
"time"
"github.com/brigadecore/brigade/sdk/v3"
coreTesting "github.com/brigadecore/brigade/sdk/v3/testing"
myk8s "github.com/brigadecore/brigade/v2/internal/kubernetes"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
)
// TestSyncJobPods runs the observer's pod informer against a fake clientset,
// then creates, updates, and deletes a job-labeled pod and expects
// syncJobPodFn to fire exactly twice (add + update).
// NOTE(review): the delete apparently does not route through syncJobPodFn —
// confirm against the deletion handler wiring in observer.syncJobPods.
func TestSyncJobPods(t *testing.T) {
	const testNamespace = "foo"
	const testPodName = "bar"
	var syncJobPodFnCallCount int
	// The informer invokes the callback on its own goroutine; guard the
	// counter with a mutex.
	mu := &sync.Mutex{}
	kubeClient := fake.NewSimpleClientset()
	observer := &observer{
		kubeClient: kubeClient,
		syncJobPodFn: func(_ interface{}) {
			mu.Lock()
			defer mu.Unlock()
			syncJobPodFnCallCount++
		},
	}
	ctx, cancel := context.WithTimeout(context.Background(), 6*time.Second)
	defer cancel()
	go observer.syncJobPods(ctx)
	// The informer needs a little time to get going. If we don't put a little
	// delay here, we'll be adding, updating, and deleting pods before the
	// informer gets cranking.
	<-time.After(time.Second)
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: testPodName,
			Labels: map[string]string{
				myk8s.LabelComponent: myk8s.LabelKeyJob,
			},
		},
	}
	_, err := kubeClient.CoreV1().Pods(testNamespace).Create(
		ctx,
		pod,
		metav1.CreateOptions{},
	)
	require.NoError(t, err)
	_, err = kubeClient.CoreV1().Pods(testNamespace).Update(
		ctx,
		pod,
		metav1.UpdateOptions{},
	)
	require.NoError(t, err)
	err = kubeClient.CoreV1().Pods(testNamespace).Delete(
		ctx,
		testPodName,
		metav1.DeleteOptions{},
	)
	require.NoError(t, err)
	// Wait out the timeout so the informer has delivered all events.
	<-ctx.Done()
	mu.Lock()
	defer mu.Unlock()
	require.Equal(t, 2, syncJobPodFnCallCount)
}
// TestSyncJobPod drives observer.syncJobPod with table-driven pod fixtures
// covering deletion, the pod phases, container exit codes, and a failing
// status update. The assertions live inside each case's mock callbacks
// (jobsClient, cleanupJobFn, errFn), so a case "passes" when the expected
// callback runs with the expected arguments.
func TestSyncJobPod(t *testing.T) {
	now := time.Now().UTC()
	testCases := []struct {
		name     string
		pod      *corev1.Pod
		observer *observer
	}{
		{
			// A deletion timestamp marks the job as aborted.
			name: "pod is deleted",
			pod: &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					DeletionTimestamp: &metav1.Time{
						Time: now,
					},
				},
			},
			observer: &observer{
				timedPodsSet: map[string]context.CancelFunc{},
				manageJobTimeoutFn: func(
					context.Context,
					*corev1.Pod,
					sdk.JobPhase,
				) {
				},
				jobsClient: &coreTesting.MockJobsClient{
					UpdateStatusFn: func(
						ctx context.Context,
						eventID string,
						jobName string,
						status sdk.JobStatus,
						_ *sdk.JobStatusUpdateOptions,
					) error {
						require.Equal(t, sdk.JobPhaseAborted, status.Phase)
						return nil
					},
				},
				cleanupJobFn: func(_, _ string) {},
			},
		},
		{
			name: "pod phase is pending",
			pod: &corev1.Pod{
				Status: corev1.PodStatus{
					Phase: corev1.PodPending,
				},
			},
			observer: &observer{
				timedPodsSet: map[string]context.CancelFunc{},
				manageJobTimeoutFn: func(
					context.Context,
					*corev1.Pod,
					sdk.JobPhase,
				) {
				},
				jobsClient: &coreTesting.MockJobsClient{
					UpdateStatusFn: func(
						ctx context.Context,
						eventID string,
						jobName string,
						status sdk.JobStatus,
						_ *sdk.JobStatusUpdateOptions,
					) error {
						require.Equal(t, sdk.JobPhaseRunning, status.Phase)
						return nil
					},
				},
				cleanupJobFn: func(_, _ string) {
					require.Fail(
						t,
						"cleanupJobFn should not have been called, but was",
					)
				},
			},
		},
		{
			// NOTE(review): this and the two "running" cases below use
			// Phase: PodPending in the fixture despite "running" in the case
			// name — confirm whether PodRunning was intended.
			name: "pod phase is running and container[0] is not finished",
			pod: &corev1.Pod{
				Status: corev1.PodStatus{
					Phase: corev1.PodPending,
				},
			},
			observer: &observer{
				timedPodsSet: map[string]context.CancelFunc{},
				manageJobTimeoutFn: func(
					context.Context,
					*corev1.Pod,
					sdk.JobPhase,
				) {
				},
				jobsClient: &coreTesting.MockJobsClient{
					UpdateStatusFn: func(
						ctx context.Context,
						eventID string,
						jobName string,
						status sdk.JobStatus,
						_ *sdk.JobStatusUpdateOptions,
					) error {
						require.Equal(t, sdk.JobPhaseRunning, status.Phase)
						require.Nil(t, status.Ended)
						return nil
					},
				},
				cleanupJobFn: func(_, _ string) {
					require.Fail(
						t,
						"cleanupJobFn should not have been called, but was",
					)
				},
			},
		},
		{
			// Exit code 0 on the primary container => job succeeded.
			name: "pod phase is running and container[0] succeeded",
			pod: &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "nombre",
					Namespace: "ns",
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "foo"}},
				},
				Status: corev1.PodStatus{
					Phase: corev1.PodPending,
					ContainerStatuses: []corev1.ContainerStatus{
						{
							Name: "foo",
							State: corev1.ContainerState{
								Terminated: &corev1.ContainerStateTerminated{
									ExitCode: 0,
									FinishedAt: metav1.Time{
										Time: now,
									},
								},
							},
						},
					},
				},
			},
			observer: &observer{
				manageJobTimeoutFn: func(
					context.Context,
					*corev1.Pod,
					sdk.JobPhase,
				) {
				},
				timedPodsSet: map[string]context.CancelFunc{
					"ns:nombre": func() {},
				},
				jobsClient: &coreTesting.MockJobsClient{
					UpdateStatusFn: func(
						ctx context.Context,
						eventID string,
						jobName string,
						status sdk.JobStatus,
						_ *sdk.JobStatusUpdateOptions,
					) error {
						require.Equal(t, sdk.JobPhaseSucceeded, status.Phase)
						require.NotNil(t, status.Ended)
						require.Equal(t, now, *status.Ended)
						return nil
					},
				},
				cleanupJobFn: func(_, _ string) {},
			},
		},
		{
			// A failing status update should be routed to errFn.
			name: "error updating job status",
			pod: &corev1.Pod{
				Status: corev1.PodStatus{
					Phase: corev1.PodPending,
				},
			},
			observer: &observer{
				timedPodsSet: map[string]context.CancelFunc{},
				manageJobTimeoutFn: func(
					context.Context,
					*corev1.Pod,
					sdk.JobPhase,
				) {
				},
				jobsClient: &coreTesting.MockJobsClient{
					UpdateStatusFn: func(
						ctx context.Context,
						eventID string,
						jobName string,
						status sdk.JobStatus,
						_ *sdk.JobStatusUpdateOptions,
					) error {
						return errors.New("something went wrong")
					},
				},
				errFn: func(i ...interface{}) {
					require.Len(t, i, 1)
					str, ok := i[0].(string)
					require.True(t, ok)
					require.Contains(t, str, "something went wrong")
					require.Contains(t, str, "error updating status for event")
				},
			},
		},
		{
			// Non-zero exit code on the primary container => job failed.
			name: "pod phase is running and container[0] failed",
			pod: &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "nombre",
					Namespace: "ns",
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "foo"}},
				},
				Status: corev1.PodStatus{
					Phase: corev1.PodPending,
					ContainerStatuses: []corev1.ContainerStatus{
						{
							Name: "foo",
							State: corev1.ContainerState{
								Terminated: &corev1.ContainerStateTerminated{
									ExitCode: 1,
									FinishedAt: metav1.Time{
										Time: now,
									},
								},
							},
						},
					},
				},
			},
			observer: &observer{
				timedPodsSet: map[string]context.CancelFunc{
					"ns:nombre": func() {},
				},
				manageJobTimeoutFn: func(
					context.Context,
					*corev1.Pod,
					sdk.JobPhase,
				) {
				},
				jobsClient: &coreTesting.MockJobsClient{
					UpdateStatusFn: func(
						ctx context.Context,
						eventID string,
						jobName string,
						status sdk.JobStatus,
						_ *sdk.JobStatusUpdateOptions,
					) error {
						require.Equal(t, sdk.JobPhaseFailed, status.Phase)
						require.NotNil(t, status.Ended)
						require.Equal(t, now, *status.Ended)
						return nil
					},
				},
				cleanupJobFn: func(_, _ string) {},
			},
		},
		{
			name: "pod phase is succeeded",
			pod: &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "nombre",
					Namespace: "ns",
				},
				Status: corev1.PodStatus{
					Phase: corev1.PodSucceeded,
				},
			},
			observer: &observer{
				timedPodsSet: map[string]context.CancelFunc{
					"ns:nombre": func() {},
				},
				manageJobTimeoutFn: func(
					context.Context,
					*corev1.Pod,
					sdk.JobPhase,
				) {
				},
				jobsClient: &coreTesting.MockJobsClient{
					UpdateStatusFn: func(
						ctx context.Context,
						eventID string,
						jobName string,
						status sdk.JobStatus,
						_ *sdk.JobStatusUpdateOptions,
					) error {
						require.Equal(t, sdk.JobPhaseSucceeded, status.Phase)
						return nil
					},
				},
				cleanupJobFn: func(_, _ string) {},
			},
		},
		{
			name: "pod phase is failed",
			pod: &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "nombre",
					Namespace: "ns",
				},
				Status: corev1.PodStatus{
					Phase: corev1.PodFailed,
				},
			},
			observer: &observer{
				timedPodsSet: map[string]context.CancelFunc{
					"ns:nombre": func() {},
				},
				manageJobTimeoutFn: func(
					context.Context,
					*corev1.Pod,
					sdk.JobPhase,
				) {
				},
				jobsClient: &coreTesting.MockJobsClient{
					UpdateStatusFn: func(
						ctx context.Context,
						eventID string,
						jobName string,
						status sdk.JobStatus,
						_ *sdk.JobStatusUpdateOptions,
					) error {
						require.Equal(t, sdk.JobPhaseFailed, status.Phase)
						return nil
					},
				},
				cleanupJobFn: func(_, _ string) {},
			},
		},
		{
			// Unknown phase: status is reported but the job is NOT cleaned up.
			name: "pod phase is unknown",
			pod: &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "nombre",
					Namespace: "ns",
				},
				Status: corev1.PodStatus{
					Phase: corev1.PodUnknown,
				},
			},
			observer: &observer{
				timedPodsSet: map[string]context.CancelFunc{
					"ns:nombre": func() {},
				},
				manageJobTimeoutFn: func(
					context.Context,
					*corev1.Pod,
					sdk.JobPhase,
				) {
				},
				jobsClient: &coreTesting.MockJobsClient{
					UpdateStatusFn: func(
						ctx context.Context,
						eventID string,
						jobName string,
						status sdk.JobStatus,
						_ *sdk.JobStatusUpdateOptions,
					) error {
						require.Equal(t, sdk.JobPhaseUnknown, status.Phase)
						return nil
					},
				},
				cleanupJobFn: func(_, _ string) {
					require.Fail(
						t,
						"cleanupJobFn should not have been called, but was",
					)
				},
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			testCase.observer.syncJobPod(testCase.pod)
		})
	}
}
// TestManageJobTimeout verifies the timedPodsSet bookkeeping: a timer is
// started only for a non-terminal phase that is not yet timed; terminal
// phases leave the set alone (the already-timed entry's cancel func is
// presumably invoked rather than removed — confirm in manageJobTimeout).
func TestManageJobTimeout(t *testing.T) {
	testPod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "nombre",
			Namespace: "ns",
			Annotations: map[string]string{
				myk8s.AnnotationTimeoutDuration: "1m",
			},
		},
	}
	testCases := []struct {
		name       string
		phase      sdk.JobPhase
		observer   *observer
		assertions func(*observer)
	}{
		{
			name: "job in terminal phase and not already timed",
			// Nothing should happen
			phase: sdk.JobPhaseSucceeded,
			observer: &observer{
				timedPodsSet: map[string]context.CancelFunc{},
			},
			assertions: func(o *observer) {
				require.Empty(t, o.timedPodsSet)
			},
		},
		{
			name: "job in terminal phase and already timed",
			// Should stop the clock
			phase: sdk.JobPhaseSucceeded,
			observer: &observer{
				timedPodsSet: map[string]context.CancelFunc{
					"ns:nombre": func() {},
				},
			},
			assertions: func(o *observer) {
				// The entry itself remains in the set; only its cancel func
				// is expected to have fired.
				require.Len(t, o.timedPodsSet, 1)
			},
		},
		{
			name: "job in non-terminal phase and not already timed",
			// Should start the clock
			phase: sdk.JobPhaseRunning,
			observer: &observer{
				timedPodsSet:  map[string]context.CancelFunc{},
				runJobTimerFn: func(context.Context, *corev1.Pod) {},
			},
			assertions: func(o *observer) {
				require.Contains(t, o.timedPodsSet, "ns:nombre")
			},
		},
		{
			name: "job in non-terminal phase and already timed",
			// Nothing should happen
			phase: sdk.JobPhaseRunning,
			observer: &observer{
				timedPodsSet: map[string]context.CancelFunc{
					"ns:nombre": func() {},
				},
			},
			assertions: func(o *observer) {
				require.Contains(t, o.timedPodsSet, "ns:nombre")
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			testCase.observer.manageJobTimeout(
				context.Background(),
				testPod,
				testCase.phase,
			)
			testCase.assertions(testCase.observer)
		})
	}
}
// TestRunJobTimer exercises runJobTimer end to end with a mock jobs client:
// cancellation before the timeout fires, a timeout call that errors, and a
// successful timeout. In every case the pod's entry should be removed from
// timedPodsSet by the time runJobTimer returns.
func TestRunJobTimer(t *testing.T) {
	testCases := []struct {
		name       string
		pod        *corev1.Pod
		observer   *observer
		assertions func(*observer)
	}{
		{
			name: "canceled before timeout",
			pod: &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "nombre",
					Namespace: "ns",
					Labels: map[string]string{
						myk8s.LabelEvent: "tunguska",
						myk8s.LabelJob:   "italian",
					},
					Annotations: map[string]string{
						// 1m annotation exceeds the 3s test context, so the
						// context is canceled first and Timeout must not fire.
						myk8s.AnnotationTimeoutDuration: "1m",
					},
				},
			},
			observer: &observer{
				config: observerConfig{
					maxJobLifetime: time.Minute,
				},
				timedPodsSet: map[string]context.CancelFunc{
					"ns:nombre": func() {},
				},
				jobsClient: &coreTesting.MockJobsClient{
					TimeoutFn: func(
						context.Context,
						string,
						string,
						*sdk.JobTimeoutOptions,
					) error {
						// Fixed typo: "timout" -> "timeout".
						require.Fail(
							t,
							"timeout should not have been called on jobs client, but was",
						)
						return nil
					},
				},
				errFn: func(i ...interface{}) {
					require.Fail(t, "errFn should not have been called, but was")
				},
			},
			assertions: func(observer *observer) {
				require.Empty(t, observer.timedPodsSet)
			},
		},
		{
			name: "error calling timeout",
			pod: &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "nombre",
					Namespace: "ns",
					Labels: map[string]string{
						myk8s.LabelEvent: "tunguska",
						myk8s.LabelJob:   "italian",
					},
					Annotations: map[string]string{
						// 1s annotation is shorter than the 3s test context, so
						// the timer fires and the mocked Timeout error must be
						// routed to errFn.
						myk8s.AnnotationTimeoutDuration: "1s",
					},
				},
			},
			observer: &observer{
				config: observerConfig{
					maxJobLifetime: time.Minute,
				},
				timedPodsSet: map[string]context.CancelFunc{
					"ns:nombre": func() {},
				},
				jobsClient: &coreTesting.MockJobsClient{
					TimeoutFn: func(
						context.Context,
						string,
						string,
						*sdk.JobTimeoutOptions,
					) error {
						return errors.New("something went wrong")
					},
				},
				errFn: func(i ...interface{}) {
					require.Len(t, i, 1)
					err, ok := i[0].(error)
					require.True(t, ok)
					require.Contains(t, err.Error(), "something went wrong")
				},
			},
			assertions: func(observer *observer) {
				require.Empty(t, observer.timedPodsSet)
			},
		},
		{
			name: "success",
			pod: &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "nombre",
					Namespace: "ns",
					Labels: map[string]string{
						myk8s.LabelEvent: "tunguska",
						myk8s.LabelJob:   "italian",
					},
					Annotations: map[string]string{
						myk8s.AnnotationTimeoutDuration: "1s",
					},
				},
			},
			observer: &observer{
				config: observerConfig{
					maxJobLifetime: time.Minute,
				},
				timedPodsSet: map[string]context.CancelFunc{
					"ns:nombre": func() {},
				},
				jobsClient: &coreTesting.MockJobsClient{
					TimeoutFn: func(
						context.Context,
						string,
						string,
						*sdk.JobTimeoutOptions,
					) error {
						return nil
					},
				},
				errFn: func(i ...interface{}) {
					require.Fail(t, "errFn should not have been called, but was")
				},
			},
			assertions: func(observer *observer) {
				require.Empty(t, observer.timedPodsSet)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			func() {
				// A context that's longer than the timeout of 1s
				ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
				defer cancel()
				testCase.observer.runJobTimer(ctx, testCase.pod)
			}()
		})
	}
}
// TestCleanupJob verifies cleanupJob's two outcomes: an error from the jobs
// client's Cleanup call is reported through errFn (with context about the
// event), and a successful cleanup reports nothing.
func TestCleanupJob(t *testing.T) {
	const testEventID = "123456789"
	const testJobName = "italian"
	testCases := []struct {
		name     string
		observer *observer
	}{
		{
			name: "error calling cleanup",
			observer: &observer{
				config: observerConfig{
					// Short delay keeps the test fast; cleanupJob presumably
					// sleeps this long before calling Cleanup.
					delayBeforeCleanup: time.Second,
				},
				jobsClient: &coreTesting.MockJobsClient{
					CleanupFn: func(
						context.Context,
						string,
						string,
						*sdk.JobCleanupOptions,
					) error {
						return errors.New("something went wrong")
					},
				},
				errFn: func(i ...interface{}) {
					// The observer is expected to wrap the client error in a
					// human-readable message mentioning the event.
					require.Len(t, i, 1)
					msg, ok := i[0].(string)
					require.True(t, ok)
					require.Contains(t, msg, "something went wrong")
					require.Contains(t, msg, "error cleaning up after event")
				},
			},
		},
		{
			name: "success",
			observer: &observer{
				config: observerConfig{
					delayBeforeCleanup: time.Second,
				},
				jobsClient: &coreTesting.MockJobsClient{
					CleanupFn: func(
						context.Context,
						string,
						string,
						*sdk.JobCleanupOptions,
					) error {
						return nil
					},
				},
				errFn: func(i ...interface{}) {
					require.Fail(
						t,
						"error logging function should not have been called",
					)
				},
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			testCase.observer.cleanupJob(testEventID, testJobName)
		})
	}
}
|
package main
import (
"fmt"
"sync"
"time"
)
// To wait for multiple goroutines to finish, we can use a wait group.
// This is the function we’ll run in every goroutine.
// worker simulates one unit of work: it announces itself, sleeps for a
// second to stand in for real work, announces completion, and signals the
// WaitGroup (via defer, so Done runs even if the body panics).
func worker(id int, wg *sync.WaitGroup) {
	defer wg.Done()
	fmt.Printf("Worker %d starting \n", id)
	time.Sleep(time.Second)
	fmt.Printf("Worker %d done \n", id)
}
// main launches five workers concurrently and blocks until all of them have
// called Done. Note wg.Add is called in the launching goroutine, before the
// `go` statement — the ordering required for Wait to be correct.
func main() {
	var wg sync.WaitGroup
	// Launch several goroutines and increment the WaitGroup counter for each.
	for i := 1; i <= 5; i++ {
		wg.Add(1)
		go worker(i, &wg)
	}
	// Block until the WaitGroup counter goes back to 0; all the workers notified they’re done.
	wg.Wait()
}
|
package main
import (
"fmt"
"sync"
"testing"
)
// counter is incremented by OnlyInce; guarded by sync.Once in TestOnce, it
// should end up at exactly 1.
var counter = 0

// OnlyInce bumps the package counter ("OnlyInce" looks like a typo of
// "OnlyOnce", kept for compatibility with its caller).
func OnlyInce() {
	counter++
}
// TestOnce demonstrates that sync.Once runs OnlyInce exactly once even when
// invoked from 100 concurrent goroutines.
func TestOnce(t *testing.T) {
	once := sync.Once{}
	group := sync.WaitGroup{}
	for i := 0; i < 100; i++ {
		// Add must run before the goroutine is launched; the original called
		// Add inside the goroutine, which races with group.Wait below and
		// can let Wait return before any goroutine has registered.
		group.Add(1)
		go func() {
			defer group.Done()
			once.Do(OnlyInce)
		}()
	}
	group.Wait()
	fmt.Println("Counter ", counter)
}
|
package main
import (
"fmt"
)
// FMT prints a value's dynamic type followed by its value.
const FMT = "%T Value = %v\n"

// a struct is a collection of fields
// a struct is always a type.
// Astruct is a minimal two-field struct used by the demo in main.
type Astruct struct {
	X int
	Y int
}
// main demonstrates the ways to create struct values: a composite literal
// with named fields (zero-valuing the rest) and new(), which yields a
// pointer to a zeroed struct.
func main() {
	// literal struct declaration with named variables
	// v receives type Astruct
	var v = Astruct{Y: 2}
	v.X = 7
	fmt.Printf(FMT, v, v)
	// literals declaration with all fields initialized
	//var b = Astruct{1,2}
	// Use the new() function to create the struct
	// x receives a pointer of type *Astruct
	x := new(Astruct) // equivalent to => 'var x *Abstract = new A(abstract)'
	fmt.Printf(FMT, x, x)
	x.Y = 12
	// &x is a **Astruct — this prints the address of the pointer variable
	// itself, not the struct.
	fmt.Println(&x)
}
|
// base64 project doc.go
/*
base64 document
*/
package main
|
// +build wireinject
// The build tag makes sure the stub is not built in the final build.
package di
import (
"github.com/google/wire"
"github.com/yk2220s/go-grpc-sample/api/application/server"
"github.com/yk2220s/go-grpc-sample/api/usecase"
)
// InitializeGRPCServer is a wire injector stub: the wire.Build call declares
// the provider set and the returns are placeholders — wire generates the
// real implementation, and the wireinject build tag keeps this stub out of
// normal builds.
func InitializeGRPCServer() (*server.GRPCServer, func(), error) {
	wire.Build(
		server.NewGRPCServer,
		usecase.NewGetPost,
	)
	return nil, nil, nil
}
|
package api
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
)
// CloudCIXClient holds credentials and endpoint information for talking to
// the CloudCIX API. ApiUrl is the bare domain; per-call URLs are built from
// it plus the target application and service.
type CloudCIXClient struct {
	Email, Password, ApiKey, Token, ApiUrl string
}
// GetToken authenticates against the CloudCIX membership service using the
// client's api key, email and password, and returns the session token from
// the JSON response (empty on decode failure, alongside the decode error).
func (cixClient CloudCIXClient) GetToken() (string, error) {
	credentials := map[string]string{
		"api_key":  cixClient.ApiKey,
		"email":    cixClient.Email,
		"password": cixClient.Password,
	}
	payload, err := json.Marshal(credentials)
	if err != nil {
		return "", err
	}
	loginURL := fmt.Sprintf("https://membership.%s/auth/login/", cixClient.ApiUrl)
	response, err := http.Post(loginURL, "application/json", bytes.NewBuffer(payload))
	if err != nil {
		return "", err
	}
	defer response.Body.Close()
	data, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return "", err
	}
	var resp map[string]string
	err = json.Unmarshal(data, &resp)
	return resp["token"], err
}
// ReadData issues an authenticated GET for the given application/service/
// object id and hands the raw response body to ApplicationSwitch for
// application-specific decoding.
func (cixClient CloudCIXClient) ReadData(application string, service string, object_id string, token string) (string, error) {
	endpoint := "https://" + application + "." + cixClient.ApiUrl + "/" + service + "/" + object_id
	request, err := http.NewRequest("GET", endpoint, nil)
	if err != nil {
		return "", err
	}
	request.Header.Set("X-Auth-Token", token)
	client := http.Client{}
	response, err := client.Do(request)
	if err != nil {
		return "", err
	}
	defer response.Body.Close()
	data, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return "", err
	}
	return ApplicationSwitch(application, service, object_id, data)
}
// WriteData sends data as a JSON body to the given application/service/
// object id using the supplied HTTP method (e.g. POST/PUT/PATCH) and returns
// the raw response body.
func (cixClient CloudCIXClient) WriteData(application string, service string, object_id string, token string, data map[string]string, method string) (string, error) {
	endpoint := "https://" + application + "." + cixClient.ApiUrl + "/" + service + "/" + object_id
	// The original discarded this error with `_`; a map[string]string can
	// still fail to marshal only in pathological cases, but silently sending
	// an empty body would be a confusing failure mode.
	postData, err := json.Marshal(data)
	if err != nil {
		return "", err
	}
	request, err := http.NewRequest(method, endpoint, bytes.NewBuffer(postData))
	if err != nil {
		return "", err
	}
	request.Header.Set("X-Auth-Token", token)
	client := http.Client{}
	response, err := client.Do(request)
	if err != nil {
		return "", err
	}
	defer response.Body.Close()
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}
// DeleteData issues an authenticated DELETE for the given application/
// service/object id and returns the raw response body.
func (cix_client CloudCIXClient) DeleteData(application string, service string, object_id string, token string) (string, error) {
	endpoint := "https://" + application + "." + cix_client.ApiUrl + "/" + service + "/" + object_id
	request, err := http.NewRequest("DELETE", endpoint, nil)
	if err != nil {
		return "", err
	}
	request.Header.Set("X-Auth-Token", token)
	client := http.Client{}
	response, err := client.Do(request)
	if err != nil {
		return "", err
	}
	defer response.Body.Close()
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}
|
package main
import (
"fmt"
merry_go_round "merry-go-round"
"sync"
)
// main stress-tests the merry-go-round pool: 64K goroutines each Get an
// item, Put it back, and tally which item they saw; the tallies must sum to
// the number of goroutines.
func main() {
	// The pool may invoke the factory from multiple goroutines concurrently,
	// so the sequence counter must be synchronized — the original's bare
	// `i++` was a data race.
	var (
		muNext sync.Mutex
		next   int
	)
	pool := merry_go_round.NewPool(func() interface{} {
		muNext.Lock()
		rs := next
		next++
		muNext.Unlock()
		return rs
	}, 64)
	result := map[int]int{}
	muResult := sync.Mutex{}
	wg := sync.WaitGroup{}
	shouldBe := 1024 * 64
	wg.Add(shouldBe)
	for i := 0; i < shouldBe; i++ {
		go func(result map[int]int) {
			rs := pool.Get().(int)
			pool.Put(rs)
			// Map writes are not goroutine-safe; guard the tally.
			muResult.Lock()
			result[rs]++
			muResult.Unlock()
			wg.Done()
		}(result)
	}
	wg.Wait()
	resultSum := 0
	for _, num := range result {
		resultSum += num
	}
	fmt.Println("should:", shouldBe)
	fmt.Println("result:", resultSum)
}
|
package dao
import (
"context"
"git.dustess.com/mk-base/mongo-driver/mongo"
"git.dustess.com/mk-training/mk-blog-svc/config"
)
// collName is the Mongo collection backing blog documents.
const collName = "blog"

// BlogDao is the data-access object for the blog collection; it pairs a
// Mongo DAO handle with the request context it operates under.
type BlogDao struct {
	dao *mongo.Dao
	ctx context.Context
}
// NewBlogDao 创建对象
func NewBlogDao(ctx context.Context) *BlogDao {
return &BlogDao{
dao: mongo.NewDao(mongo.MKBiz, config.Get().Mongo.MongoMK.MKDB.Name, collName),
ctx: ctx,
}
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package remote
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"sync/atomic"
jsoniter "github.com/json-iterator/go"
)
var opaque int32
const (
// 0, REQUEST_COMMAND
RPCType = 0
// 1, RPC
RPCOneWay = 1
//ResponseType for response
ResponseType = 1
_Flag = 0
_Version = 317
)
type LanguageCode byte
const (
_Java = LanguageCode(0)
_Go = LanguageCode(9)
_Unknown = LanguageCode(127)
)
// MarshalJSON always emits the JSON string "GO" regardless of the
// receiver's value — this client only ever identifies itself as Go on the
// wire.
func (lc LanguageCode) MarshalJSON() ([]byte, error) {
	return []byte(`"GO"`), nil
}
// UnmarshalJSON maps a language name onto its code, accepting both the bare
// token and the quoted JSON-string form; anything unrecognized becomes
// _Unknown rather than an error.
func (lc *LanguageCode) UnmarshalJSON(b []byte) error {
	switch string(b) {
	// The original matched only bare "JAVA", but b is the raw JSON token, so
	// a real JSON string arrives quoted — match both forms, as the GO case
	// already did.
	case "JAVA", `"JAVA"`:
		*lc = _Java
	case "GO", `"GO"`:
		*lc = _Go
	default:
		*lc = _Unknown
	}
	return nil
}
// String returns the wire name of the language code; any value other than
// Java or Go renders as "unknown".
func (lc LanguageCode) String() string {
	if lc == _Java {
		return "JAVA"
	}
	if lc == _Go {
		return "GO"
	}
	return "unknown"
}
// RemotingCommand is the unit of exchange with the broker. Code identifies
// the request/response kind, Opaque correlates a response with its request,
// Flag carries the response/oneway bits, ExtFields holds the custom header
// key/values, and Body (excluded from JSON) is the raw payload.
type RemotingCommand struct {
	Code      int16             `json:"code"`
	Language  LanguageCode      `json:"language"`
	Version   int16             `json:"version"`
	Opaque    int32             `json:"opaque"`
	Flag      int32             `json:"flag"`
	Remark    string            `json:"remark"`
	ExtFields map[string]string `json:"extFields"`
	Body      []byte            `json:"-"`
}
type CustomHeader interface {
Encode() map[string]string
}
// NewRemotingCommand assembles a command carrying the next opaque id, the
// Go language code, and — when a custom header is supplied — its encoded
// key/value fields.
func NewRemotingCommand(code int16, header CustomHeader, body []byte) *RemotingCommand {
	extFields := make(map[string]string)
	if header != nil {
		extFields = header.Encode()
	}
	return &RemotingCommand{
		Code:      code,
		Version:   _Version,
		Opaque:    atomic.AddInt32(&opaque, 1),
		Body:      body,
		Language:  _Go,
		ExtFields: extFields,
	}
}
// String renders the command's identifying fields for logs; Body is
// deliberately omitted.
func (command *RemotingCommand) String() string {
	return fmt.Sprintf(
		"Code: %d, opaque: %d, Remark: %s, ExtFields: %v",
		command.Code,
		command.Opaque,
		command.Remark,
		command.ExtFields,
	)
}
// isResponseType reports whether the response bit is set in Flag.
func (command *RemotingCommand) isResponseType() bool {
	return command.Flag&ResponseType != 0
}
// markResponseType sets the response bit in Flag.
func (command *RemotingCommand) markResponseType() {
	command.Flag |= ResponseType
}
var (
jsonSerializer = &jsonCodec{}
rocketMqSerializer = &rmqCodec{}
codecType byte
)
// encode RemotingCommand
//
// Frame format:
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// + item | frame_size | header_length | header_body | body +
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// + len | 4bytes | 4bytes | (21 + r_len + e_len) bytes | remain bytes +
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// WriteTo streams the encoded command to w: a 4-byte big-endian frame size,
// a 4-byte header-length field whose high byte carries the codec type (see
// markProtocolType), then the serialized header and the raw body.
func (command *RemotingCommand) WriteTo(w io.Writer) error {
	var (
		header []byte
		err    error
	)
	// Header serialization is selected by the package-level codecType.
	// NOTE(review): an unrecognized codecType leaves header nil and err nil,
	// silently emitting a frame with a zero-length header — confirm whether
	// codecType is constrained elsewhere.
	switch codecType {
	case JsonCodecs:
		header, err = jsonSerializer.encodeHeader(command)
	case RocketMQCodecs:
		header, err = rocketMqSerializer.encodeHeader(command)
	}
	if err != nil {
		return err
	}
	// frameSize counts the 4-byte header-length field plus header and body,
	// but not the frame-size field itself (mirrored by decode).
	frameSize := 4 + len(header) + len(command.Body)
	err = binary.Write(w, binary.BigEndian, int32(frameSize))
	if err != nil {
		return err
	}
	err = binary.Write(w, binary.BigEndian, markProtocolType(int32(len(header))))
	if err != nil {
		return err
	}
	_, err = w.Write(header)
	if err != nil {
		return err
	}
	_, err = w.Write(command.Body)
	return err
}
// encode serializes a command into a byte slice using the same frame layout
// as WriteTo (frame size, codec-tagged header length, header, body), but
// buffered in memory instead of streamed.
func encode(command *RemotingCommand) ([]byte, error) {
	var (
		header []byte
		err    error
	)
	switch codecType {
	case JsonCodecs:
		header, err = jsonSerializer.encodeHeader(command)
	case RocketMQCodecs:
		header, err = rocketMqSerializer.encodeHeader(command)
	}
	if err != nil {
		return nil, err
	}
	frameSize := 4 + len(header) + len(command.Body)
	// Allocate once at the known final size, then Reset so writes append
	// from the start while keeping the capacity.
	buf := bytes.NewBuffer(make([]byte, frameSize))
	buf.Reset()
	err = binary.Write(buf, binary.BigEndian, int32(frameSize))
	if err != nil {
		return nil, err
	}
	err = binary.Write(buf, binary.BigEndian, markProtocolType(int32(len(header))))
	if err != nil {
		return nil, err
	}
	err = binary.Write(buf, binary.BigEndian, header)
	if err != nil {
		return nil, err
	}
	err = binary.Write(buf, binary.BigEndian, command.Body)
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// decode parses a frame whose leading 4-byte frame-size field has already
// been consumed: data starts at the header-length field, whose high byte
// selects the codec and whose low 24 bits give the header length; the
// remainder after the header is the body.
func decode(data []byte) (*RemotingCommand, error) {
	buf := bytes.NewReader(data)
	length := int32(len(data))
	var oriHeaderLen int32
	err := binary.Read(buf, binary.BigEndian, &oriHeaderLen)
	if err != nil {
		return nil, err
	}
	// Low 24 bits: header length; high byte: codec type.
	headerLength := oriHeaderLen & 0xFFFFFF
	headerData := make([]byte, headerLength)
	if _, err = io.ReadFull(buf, headerData); err != nil {
		return nil, err
	}
	var command *RemotingCommand
	switch codeType := byte((oriHeaderLen >> 24) & 0xFF); codeType {
	case JsonCodecs:
		command, err = jsonSerializer.decodeHeader(headerData)
	case RocketMQCodecs:
		command, err = rocketMqSerializer.decodeHeader(headerData)
	default:
		err = fmt.Errorf("unknown codec type: %d", codeType)
	}
	if err != nil {
		return nil, err
	}
	// Body is whatever remains after the 4-byte length field and the header.
	bodyLength := length - 4 - headerLength
	if bodyLength > 0 {
		bodyData := make([]byte, bodyLength)
		if _, err = io.ReadFull(buf, bodyData); err != nil {
			return nil, err
		}
		command.Body = bodyData
	}
	return command, nil
}
// markProtocolType packs the active codec type and a 24-bit length into the
// 4-byte header-length field: codec in the high byte, length big-endian in
// the low three bytes.
func markProtocolType(source int32) []byte {
	return []byte{
		codecType,
		byte(source >> 16),
		byte(source >> 8),
		byte(source),
	}
}
const (
JsonCodecs = byte(0)
RocketMQCodecs = byte(1)
)
type serializer interface {
encodeHeader(command *RemotingCommand) ([]byte, error)
decodeHeader(data []byte) (*RemotingCommand, error)
}
// jsonCodec please refer to remoting/protocol/RemotingSerializable
type jsonCodec struct{}
// encodeHeader serializes the command header as JSON (Body is excluded by
// its `json:"-"` tag).
func (c *jsonCodec) encodeHeader(command *RemotingCommand) ([]byte, error) {
	data, err := jsoniter.Marshal(command)
	if err != nil {
		return nil, err
	}
	return data, nil
}
// decodeHeader parses a JSON header into a RemotingCommand, pre-seeding
// ExtFields and Body so they are non-nil even when absent from the JSON.
func (c *jsonCodec) decodeHeader(header []byte) (*RemotingCommand, error) {
	command := &RemotingCommand{
		ExtFields: make(map[string]string),
		Body:      []byte{},
	}
	if err := jsoniter.Unmarshal(header, command); err != nil {
		return nil, err
	}
	return command, nil
}
// rmqCodec implementation of RocketMQCodecs private protocol, please refer to remoting/protocol/RocketMQSerializable
// RocketMQCodecs Private Protocol Header format:
//
// v_flag: version flag
// r_len: length of remark body
// r_body: data of remark body
// e_len: length of extends fields body
// e_body: data of extends fields
//
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// + item | request_code | l_flag | v_flag | opaque | request_flag | r_len | r_body | e_len | e_body +
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// + len | 2bytes | 1byte | 2bytes | 4bytes | 4 bytes | 4 bytes | r_len bytes | 4 bytes | e_len bytes +
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
const (
// header + body length
headerFixedLength = 21
)
type rmqCodec struct{}
// encodeHeader
func (c *rmqCodec) encodeHeader(command *RemotingCommand) ([]byte, error) {
extBytes, err := c.encodeMaps(command.ExtFields)
if err != nil {
return nil, err
}
buf := bytes.NewBuffer(make([]byte, headerFixedLength+len(command.Remark)+len(extBytes)))
buf.Reset()
// request code, length is 2 bytes
err = binary.Write(buf, binary.BigEndian, int16(command.Code))
if err != nil {
return nil, err
}
// language flag, length is 1 byte
err = binary.Write(buf, binary.BigEndian, _Go)
if err != nil {
return nil, err
}
// version flag, length is 2 bytes
err = binary.Write(buf, binary.BigEndian, int16(command.Version))
if err != nil {
return nil, err
}
// opaque flag, opaque is request identifier, length is 4 bytes
err = binary.Write(buf, binary.BigEndian, command.Opaque)
if err != nil {
return nil, err
}
// request flag, length is 4 bytes
err = binary.Write(buf, binary.BigEndian, command.Flag)
if err != nil {
return nil, err
}
// remark length flag, length is 4 bytes
err = binary.Write(buf, binary.BigEndian, int32(len(command.Remark)))
if err != nil {
return nil, err
}
// write remark, len(command.Remark) bytes
if len(command.Remark) > 0 {
err = binary.Write(buf, binary.BigEndian, []byte(command.Remark))
if err != nil {
return nil, err
}
}
err = binary.Write(buf, binary.BigEndian, int32(len(extBytes)))
if err != nil {
return nil, err
}
if len(extBytes) > 0 {
err = binary.Write(buf, binary.BigEndian, extBytes)
if err != nil {
return nil, err
}
}
return buf.Bytes(), nil
}
// encodeMaps flattens ext-field key/value pairs into the RocketMQ binary
// form: for each pair, a 2-byte key length, the key bytes, a 4-byte value
// length, and the value bytes. An empty or nil map yields an empty slice.
// NOTE: map iteration order is random, so the byte order of pairs varies
// between calls — presumably the protocol does not care; confirm if byte
// stability is ever required.
func (c *rmqCodec) encodeMaps(maps map[string]string) ([]byte, error) {
	// len(nil map) is 0, so the original's extra `maps == nil` check was
	// redundant.
	if len(maps) == 0 {
		return []byte{}, nil
	}
	extFieldsBuf := bytes.NewBuffer([]byte{})
	var err error
	for key, value := range maps {
		err = binary.Write(extFieldsBuf, binary.BigEndian, int16(len(key)))
		if err != nil {
			return nil, err
		}
		err = binary.Write(extFieldsBuf, binary.BigEndian, []byte(key))
		if err != nil {
			return nil, err
		}
		err = binary.Write(extFieldsBuf, binary.BigEndian, int32(len(value)))
		if err != nil {
			return nil, err
		}
		err = binary.Write(extFieldsBuf, binary.BigEndian, []byte(value))
		if err != nil {
			return nil, err
		}
	}
	return extFieldsBuf.Bytes(), nil
}
// decodeHeader parses a header in the private RocketMQ binary format: the
// fixed prefix fields in order, then the length-prefixed remark and the
// length-prefixed ext-fields blob of (2-byte-keyed, 4-byte-valued) pairs.
func (c *rmqCodec) decodeHeader(data []byte) (*RemotingCommand, error) {
	var err error
	command := &RemotingCommand{}
	buf := bytes.NewBuffer(data)
	var code int16
	err = binary.Read(buf, binary.BigEndian, &code)
	if err != nil {
		return nil, err
	}
	command.Code = code
	var (
		languageCode byte
		remarkLen    int32
		extFieldsLen int32
	)
	err = binary.Read(buf, binary.BigEndian, &languageCode)
	if err != nil {
		return nil, err
	}
	command.Language = LanguageCode(languageCode)
	var version int16
	err = binary.Read(buf, binary.BigEndian, &version)
	if err != nil {
		return nil, err
	}
	command.Version = version
	// int opaque
	err = binary.Read(buf, binary.BigEndian, &command.Opaque)
	if err != nil {
		return nil, err
	}
	// int flag
	err = binary.Read(buf, binary.BigEndian, &command.Flag)
	if err != nil {
		return nil, err
	}
	// String remark
	err = binary.Read(buf, binary.BigEndian, &remarkLen)
	if err != nil {
		return nil, err
	}
	if remarkLen > 0 {
		var remarkData = make([]byte, remarkLen)
		if _, err = io.ReadFull(buf, remarkData); err != nil {
			return nil, err
		}
		command.Remark = string(remarkData)
	}
	err = binary.Read(buf, binary.BigEndian, &extFieldsLen)
	if err != nil {
		return nil, err
	}
	if extFieldsLen > 0 {
		extFieldsData := make([]byte, extFieldsLen)
		if _, err := io.ReadFull(buf, extFieldsData); err != nil {
			return nil, err
		}
		command.ExtFields = make(map[string]string)
		// Inner buffer shadows the outer one on purpose: pairs are consumed
		// until the ext-fields blob is exhausted.
		buf := bytes.NewBuffer(extFieldsData)
		var (
			kLen int16
			vLen int32
		)
		for buf.Len() > 0 {
			err = binary.Read(buf, binary.BigEndian, &kLen)
			if err != nil {
				return nil, err
			}
			key, err := getExtFieldsData(buf, int32(kLen))
			if err != nil {
				return nil, err
			}
			err = binary.Read(buf, binary.BigEndian, &vLen)
			if err != nil {
				return nil, err
			}
			value, err := getExtFieldsData(buf, vLen)
			if err != nil {
				return nil, err
			}
			command.ExtFields[key] = value
		}
	}
	return command, nil
}
func getExtFieldsData(buff io.Reader, length int32) (string, error) {
var data = make([]byte, length)
if _, err := io.ReadFull(buff, data); err != nil {
return "", err
}
return string(data), nil
}
|
package engine_util
import "github.com/coocood/badger"
// CFItem wraps a badger Item whose key is stored with a column-family
// prefix ("<cf>_"); prefixLen is the prefix length to strip when exposing
// the logical key.
type CFItem struct {
	item      *badger.Item
	prefixLen int
}
// String returns a string representation of Item
func (i *CFItem) String() string {
return i.item.String()
}
// Key returns the logical key with the column-family prefix stripped.
// Note the asymmetry with KeyCopy below, which copies the full raw key
// including the prefix.
func (i *CFItem) Key() []byte {
	return i.item.Key()[i.prefixLen:]
}
func (i *CFItem) KeyCopy(dst []byte) []byte {
return i.item.KeyCopy(dst)
}
func (i *CFItem) Version() uint64 {
return i.item.Version()
}
func (i *CFItem) IsEmpty() bool {
return i.item.IsEmpty()
}
func (i *CFItem) Value() ([]byte, error) {
return i.item.Value()
}
func (i *CFItem) ValueSize() int {
return i.item.ValueSize()
}
func (i *CFItem) ValueCopy(dst []byte) ([]byte, error) {
return i.item.ValueCopy(dst)
}
func (i *CFItem) IsDeleted() bool {
return i.item.IsDeleted()
}
func (i *CFItem) EstimatedSize() int64 {
return i.item.EstimatedSize()
}
func (i *CFItem) UserMeta() []byte {
return i.item.UserMeta()
}
type CFIterator struct {
iter *badger.Iterator
prefix string
}
// NewCFIterator creates an iterator restricted to one column family: all
// keys of that family are stored under the "<cf>_" prefix, which Valid,
// ValidForPrefix and Seek account for.
func NewCFIterator(cf string, txn *badger.Txn) *CFIterator {
	return &CFIterator{
		iter:   txn.NewIterator(badger.DefaultIteratorOptions),
		prefix: cf + "_",
	}
}
func (it *CFIterator) Item() *CFItem {
return &CFItem{
item: it.iter.Item(),
prefixLen: len(it.prefix),
}
}
func (it *CFIterator) Valid() bool { return it.iter.ValidForPrefix([]byte(it.prefix)) }
// ValidForPrefix reports whether the iterator is positioned at a key that,
// after the column-family prefix, starts with the given user prefix.
func (it *CFIterator) ValidForPrefix(prefix []byte) bool {
	// Keys are stored as "<cf>_<key>" (see Seek), so the CF prefix must come
	// first. The original appended it AFTER the caller's prefix, which could
	// never match a stored key — and appending to the caller's slice could
	// also mutate memory the caller owns when it had spare capacity.
	full := append([]byte(it.prefix), prefix...)
	return it.iter.ValidForPrefix(full)
}
func (it *CFIterator) Close() {
it.iter.Close()
}
func (it *CFIterator) Next() {
it.iter.Next()
}
func (it *CFIterator) Seek(key []byte) {
it.iter.Seek(append([]byte(it.prefix), key...))
}
func (it *CFIterator) Rewind() {
it.iter.Rewind()
}
|
package headway
import (
"fmt"
"github.com/pkg/errors"
"net/http"
)
// Client reports progress updates and messages to a headway server.
type Client struct {
	Host   string
	Secret string
	Client http.Client
}

// NewClient returns a Client that targets host and authenticates every
// request with secret.
func NewClient(host, secret string) *Client {
	c := Client{
		Host:   host,
		Secret: secret,
		Client: http.Client{},
	}
	return &c
}
// Send reports progress (current out of total) for the named task, with an
// optional comment, via a PUT whose parameters travel in the query string.
// A non-200 response is returned as an error.
func (c *Client) Send(current, total float64, name, comment string) error {
	req, err := http.NewRequest(http.MethodPut, c.Host, nil)
	if err != nil {
		return err
	}
	q := req.URL.Query()
	q.Add("name", name)
	q.Add("current", fmt.Sprintf("%f", current))
	q.Add("total", fmt.Sprintf("%f", total))
	q.Add("comment", comment)
	q.Add("Secret", c.Secret)
	req.URL.RawQuery = q.Encode()
	resp, err := c.Client.Do(req)
	if err != nil {
		return err
	}
	// The original never closed the body, leaking the connection and
	// preventing the transport from reusing it.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return errors.Errorf("invalid request: %d", resp.StatusCode)
	}
	return nil
}
// Message sends a free-form status message to the headway server via PUT.
// A non-200 response is returned as an error.
func (c *Client) Message(message string) error {
	req, err := http.NewRequest(http.MethodPut, c.Host, nil)
	if err != nil {
		return err
	}
	q := req.URL.Query()
	q.Add("message", message)
	q.Add("Secret", c.Secret)
	req.URL.RawQuery = q.Encode()
	resp, err := c.Client.Do(req)
	if err != nil {
		return err
	}
	// The original never closed the body, leaking the connection and
	// preventing the transport from reusing it.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return errors.Errorf("invalid request: %d", resp.StatusCode)
	}
	return nil
}
|
package lode
import (
"context"
"errors"
"fmt"
"github.com/brunoscheufler/lode/parser"
"github.com/brunoscheufler/lode/replication"
"github.com/jackc/pgx"
"github.com/sirupsen/logrus"
)
type Configuration struct {
// Postgres connection string to use
ConnectionString string
// Postgres replication slot name override
// Will default to "lode_main"
SlotName string
// Handle incoming WAL message
OnMessage func(message *parser.Wal2JsonMessage) error
// Pass existing logger instance
Logger *logrus.Logger
// Pass log level to use when logger should be created
LogLevel logrus.Level
}
// Result type returned by channel on lode exit
// Can contain streaming error
type ExitResult struct {
// Error returned from streaming goroutine
Error error
}
// Create connects to Postgres (one regular and one replication connection),
// sets up logical replication, and streams WAL changes to
// configuration.OnMessage in a goroutine. It returns a channel that yields
// a single ExitResult when streaming ends, and a CancelFunc to stop it.
func Create(configuration Configuration) (<-chan ExitResult, context.CancelFunc, error) {
	logger := configuration.Logger
	if logger == nil {
		logger = logrus.New()
		if configuration.LogLevel != 0 {
			logger.SetLevel(configuration.LogLevel)
		}
	}
	// Parse connection string to config
	parsedConnectConfig, err := pgx.ParseConnectionString(configuration.ConnectionString)
	if err != nil {
		return nil, nil, fmt.Errorf("could not parse connection string: %w", err)
	}
	// Create regular connection
	pgConn, err := pgx.Connect(parsedConnectConfig)
	if err != nil {
		return nil, nil, fmt.Errorf("could not establish regular Postgres connection: %w", err)
	}
	logger.Debugf("Established regular pg connection")
	// Create replication connection
	replConn, err := pgx.ReplicationConnect(parsedConnectConfig)
	if err != nil {
		// The original leaked pgConn on this and the following error path.
		_ = pgConn.Close()
		return nil, nil, fmt.Errorf("could not establish replication connection: %w", err)
	}
	logger.Debugf("Established replication pg connection")
	logger.Infof("Connected to Postgres instance, setting up replication")
	slotName, state, err := replication.Setup(logger, pgConn, replConn, configuration.SlotName)
	if err != nil {
		_ = replConn.Close()
		_ = pgConn.Close()
		return nil, nil, fmt.Errorf("could not setup Postgres replication: %w", err)
	}
	done := make(chan ExitResult)
	// Create root context
	rootCtx := context.Background()
	streamCtx, cancel := context.WithCancel(rootCtx)
	// Stream changes asynchronously until the context is cancelled or something bad happens
	go func() {
		streamErr := replication.StreamChanges(logger, streamCtx, replConn, slotName, state, configuration.OnMessage)
		if streamErr != nil && !errors.Is(streamErr, context.Canceled) {
			logger.Errorf("Could not stream changes: %s", streamErr.Error())
		}
		// Shut down both connections gracefully before exiting
		err = replConn.Close()
		if err != nil {
			logger.Errorf("Could not close replication connection: %s", err.Error())
		}
		err = pgConn.Close()
		if err != nil {
			logger.Errorf("Could not close regular pg connection: %s", err.Error())
		}
		logger.Trace("Done shutting down lode!")
		done <- ExitResult{
			Error: streamErr,
		}
	}()
	return done, cancel, nil
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package component
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/kubevela/pkg/util/slices"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"sigs.k8s.io/controller-runtime/pkg/client"
velaclient "github.com/kubevela/pkg/controller/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/pkg/features"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
)
const (
// RefObjectsAvailableScopeGlobal ref-objects component can refer to arbitrary objects in any cluster
RefObjectsAvailableScopeGlobal = "global"
// RefObjectsAvailableScopeCluster ref-objects component can only refer to objects inside the hub cluster
RefObjectsAvailableScopeCluster = "cluster"
// RefObjectsAvailableScopeNamespace ref-objects component can only refer to objects inside the application namespace
RefObjectsAvailableScopeNamespace = "namespace"
)
// RefObjectsAvailableScope indicates the available scope for objects to refer
var RefObjectsAvailableScope = RefObjectsAvailableScopeGlobal
// GetLabelSelectorFromRefObjectSelector extract labelSelector from `labelSelector` first. If empty, extract from `selector`
// GetLabelSelectorFromRefObjectSelector extract labelSelector from `labelSelector` first. If empty, extract from `selector`
// The deprecated field is only honored while the DeprecatedObjectLabelSelector
// feature gate is enabled; otherwise nil is returned.
func GetLabelSelectorFromRefObjectSelector(selector v1alpha1.ObjectReferrer) map[string]string {
	if selector.LabelSelector != nil {
		return selector.LabelSelector
	}
	if utilfeature.DefaultMutableFeatureGate.Enabled(features.DeprecatedObjectLabelSelector) {
		return selector.DeprecatedLabelSelector
	}
	return nil
}
// GetGroupVersionKindFromRefObjectSelector extract GroupVersionKind by Resource if provided, otherwise, extract from APIVersion and Kind directly
func GetGroupVersionKindFromRefObjectSelector(mapper meta.RESTMapper, selector v1alpha1.ObjectReferrer) (schema.GroupVersionKind, error) {
if selector.Resource != "" {
gvks, err := mapper.KindsFor(schema.GroupVersionResource{Group: selector.Group, Resource: selector.Resource})
if err != nil {
return schema.GroupVersionKind{}, err
}
if len(gvks) == 0 {
return schema.GroupVersionKind{}, errors.Errorf("no kind found for resource %s", selector.Resource)
}
return gvks[0], nil
}
if utilfeature.DefaultMutableFeatureGate.Enabled(features.LegacyObjectTypeIdentifier) {
if selector.APIVersion != "" && selector.Kind != "" {
gv, err := schema.ParseGroupVersion(selector.APIVersion)
if err != nil {
return schema.GroupVersionKind{}, errors.Wrapf(err, "invalid APIVersion")
}
return gv.WithKind(selector.Kind), nil
}
return schema.GroupVersionKind{}, errors.Errorf("neither resource or apiVersion/kind is set for referring objects")
}
return schema.GroupVersionKind{}, errors.Errorf("resource is not set and legacy object type identifier is disabled for referring objects")
}
// ValidateRefObjectSelector validate if exclusive fields are set for the selector
func ValidateRefObjectSelector(selector v1alpha1.ObjectReferrer) error {
labelSelector := GetLabelSelectorFromRefObjectSelector(selector)
if labelSelector != nil && selector.Name != "" {
return errors.Errorf("invalid object selector for ref-objects, name and labelSelector cannot be both set")
}
return nil
}
// ClearRefObjectForDispatch reset the objects for dispatch
func ClearRefObjectForDispatch(un *unstructured.Unstructured) {
un.SetResourceVersion("")
un.SetGeneration(0)
un.SetOwnerReferences(nil)
un.SetDeletionTimestamp(nil)
un.SetManagedFields(nil)
un.SetUID("")
unstructured.RemoveNestedField(un.Object, "metadata", "creationTimestamp")
unstructured.RemoveNestedField(un.Object, "status")
// TODO(somefive): make the following logic more generalizable
if un.GetKind() == "Service" && un.GetAPIVersion() == "v1" {
if clusterIP, exist, _ := unstructured.NestedString(un.Object, "spec", "clusterIP"); exist && clusterIP != corev1.ClusterIPNone {
unstructured.RemoveNestedField(un.Object, "spec", "clusterIP")
unstructured.RemoveNestedField(un.Object, "spec", "clusterIPs")
}
}
}
// SelectRefObjectsForDispatch select objects by selector from kubernetes
// The selector can either target a group of objects by labelSelector or a
// single object by name (defaulting to compName when empty). The application
// namespace appNs is used unless the selector overrides it, which is subject
// to the configured RefObjectsAvailableScope. Returned objects are cleaned by
// ClearRefObjectForDispatch before being returned.
func SelectRefObjectsForDispatch(ctx context.Context, cli client.Client, appNs string, compName string, selector v1alpha1.ObjectReferrer) (objs []*unstructured.Unstructured, err error) {
	if err = ValidateRefObjectSelector(selector); err != nil {
		return nil, err
	}
	labelSelector := GetLabelSelectorFromRefObjectSelector(selector)
	ns := appNs
	if selector.Namespace != "" {
		// Cross-namespace reference is rejected when the scope is limited to
		// the application's namespace.
		if RefObjectsAvailableScope == RefObjectsAvailableScopeNamespace {
			return nil, errors.Errorf("cannot refer to objects outside the application's namespace")
		}
		ns = selector.Namespace
	}
	if selector.Cluster != "" && selector.Cluster != multicluster.ClusterLocalName {
		// Cross-cluster reference requires global scope; subsequent client
		// calls are routed to the target cluster via the context.
		if RefObjectsAvailableScope != RefObjectsAvailableScopeGlobal {
			return nil, errors.Errorf("cannot refer to objects outside control plane")
		}
		ctx = multicluster.ContextWithClusterName(ctx, selector.Cluster)
	}
	gvk, err := GetGroupVersionKindFromRefObjectSelector(cli.RESTMapper(), selector)
	if err != nil {
		return nil, err
	}
	isNamespaced, err := IsGroupVersionKindNamespaceScoped(cli.RESTMapper(), gvk)
	if err != nil {
		return nil, err
	}
	if selector.Name == "" && labelSelector != nil {
		// List by labels. The namespace option is only applied for
		// namespace-scoped kinds.
		uns := &unstructured.UnstructuredList{}
		uns.SetGroupVersionKind(gvk)
		opts := []client.ListOption{client.MatchingLabels(labelSelector)}
		if isNamespaced {
			opts = append(opts, client.InNamespace(ns))
		}
		if err = cli.List(ctx, uns, opts...); err != nil {
			return nil, errors.Wrapf(err, "failed to load ref object %s with selector", gvk.Kind)
		}
		for _, _un := range uns.Items {
			objs = append(objs, _un.DeepCopy())
		}
	} else {
		// Get a single object; fall back to the component name when the
		// selector does not specify one.
		un := &unstructured.Unstructured{}
		un.SetGroupVersionKind(gvk)
		un.SetName(selector.Name)
		if isNamespaced {
			un.SetNamespace(ns)
		}
		if selector.Name == "" {
			un.SetName(compName)
		}
		if err := cli.Get(ctx, client.ObjectKeyFromObject(un), un); err != nil {
			return nil, errors.Wrapf(err, "failed to load ref object %s %s/%s", un.GetKind(), un.GetNamespace(), un.GetName())
		}
		objs = append(objs, un)
	}
	for _, obj := range objs {
		ClearRefObjectForDispatch(obj)
	}
	return objs, nil
}
// ReferredObjectsDelegatingClient delegate client get/list function by retrieving ref-objects from existing objects
// Get and List are served purely from the in-memory objs slice; all other
// client operations fall through to the wrapped client.
func ReferredObjectsDelegatingClient(cli client.Client, objs []*unstructured.Unstructured) client.Client {
	// Objects carrying a resource URL annotation are excluded from delegation.
	objs = slices.Filter(objs, func(obj *unstructured.Unstructured) bool {
		return obj.GetAnnotations() == nil || obj.GetAnnotations()[oam.AnnotationResourceURL] == ""
	})
	return velaclient.DelegatingHandlerClient{
		Client: cli,
		Getter: func(ctx context.Context, key client.ObjectKey, obj client.Object, _ ...client.GetOption) error {
			un, ok := obj.(*unstructured.Unstructured)
			if !ok {
				return errors.Errorf("ReferredObjectsDelegatingClient does not support non-unstructured type")
			}
			gvk := un.GroupVersionKind()
			// Linear scan over the cached objects; a match is deep-copied
			// into the caller's object.
			for _, _un := range objs {
				if gvk == _un.GroupVersionKind() && key == client.ObjectKeyFromObject(_un) {
					_un.DeepCopyInto(un)
					return nil
				}
			}
			return apierrors.NewNotFound(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, un.GetName())
		},
		Lister: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
			uns, ok := list.(*unstructured.UnstructuredList)
			if !ok {
				return errors.Errorf("ReferredObjectsDelegatingClient does not support non-unstructured type")
			}
			// The list GVK carries a "List" suffix; strip it to match items.
			gvk := uns.GroupVersionKind()
			gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
			listOpts := &client.ListOptions{}
			for _, opt := range opts {
				opt.ApplyToList(listOpts)
			}
			for _, _un := range objs {
				if gvk != _un.GroupVersionKind() {
					continue
				}
				if listOpts.Namespace != "" && listOpts.Namespace != _un.GetNamespace() {
					continue
				}
				if listOpts.LabelSelector != nil && !listOpts.LabelSelector.Matches(labels.Set(_un.GetLabels())) {
					continue
				}
				// NOTE(review): items alias the cached objects (no DeepCopy
				// here, unlike Getter) — mutations by callers would be
				// visible to later reads; confirm this is intended.
				uns.Items = append(uns.Items, *_un)
			}
			return nil
		},
	}
}
// AppendUnstructuredObjects add new objects into object list if not exists
// An existing entry with the same GroupVersionKind and namespace/name is
// replaced in place; otherwise the new object is appended.
func AppendUnstructuredObjects(objs []*unstructured.Unstructured, newObjs ...*unstructured.Unstructured) []*unstructured.Unstructured {
	for _, incoming := range newObjs {
		replaced := false
		for i, existing := range objs {
			if existing.GroupVersionKind() != incoming.GroupVersionKind() ||
				client.ObjectKeyFromObject(existing) != client.ObjectKeyFromObject(incoming) {
				continue
			}
			objs[i] = incoming
			replaced = true
			break
		}
		if !replaced {
			objs = append(objs, incoming)
		}
	}
	return objs
}
// ConvertUnstructuredsToReferredObjects convert unstructured objects into ReferredObjects
// Each object is JSON-marshalled into the RawExtension of a ReferredObject.
// A nil slice is returned for empty input (append on nil), preserving the
// nil-vs-empty JSON encoding of the result.
func ConvertUnstructuredsToReferredObjects(uns []*unstructured.Unstructured) (refObjs []common.ReferredObject, err error) {
	for _, obj := range uns {
		raw, marshalErr := json.Marshal(obj)
		if marshalErr != nil {
			return nil, marshalErr
		}
		refObjs = append(refObjs, common.ReferredObject{RawExtension: runtime.RawExtension{Raw: raw}})
	}
	return refObjs, nil
}
// IsGroupVersionKindNamespaceScoped check if the target GroupVersionKind is namespace scoped resource
// It resolves REST mappings for the GVK and inspects the scope of the first
// mapping returned.
func IsGroupVersionKindNamespaceScoped(mapper meta.RESTMapper, gvk schema.GroupVersionKind) (bool, error) {
	mappings, err := mapper.RESTMappings(gvk.GroupKind(), gvk.Version)
	if err != nil {
		return false, err
	}
	if len(mappings) == 0 {
		// fixed typo in error message: "fund" -> "find"
		return false, fmt.Errorf("unable to find the mappings for gvk %s", gvk)
	}
	return mappings[0].Scope.Name() == meta.RESTScopeNameNamespace, nil
}
|
package dht
import (
"fmt"
"testing"
"time"
dhtcfg "github.com/libp2p/go-libp2p-kad-dht/internal/config"
"github.com/libp2p/go-libp2p-kad-dht/providers"
"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
record "github.com/libp2p/go-libp2p-record"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
ds "github.com/ipfs/go-datastore"
ma "github.com/multiformats/go-multiaddr"
)
// ModeOpt describes what mode the dht should operate in
type ModeOpt = dhtcfg.ModeOpt
const (
	// ModeAuto utilizes EvtLocalReachabilityChanged events sent over the event bus to dynamically switch the DHT
	// between Client and Server modes based on network conditions
	ModeAuto ModeOpt = iota
	// ModeClient operates the DHT as a client only, it cannot respond to incoming queries
	ModeClient
	// ModeServer operates the DHT as a server, it can both send and respond to queries
	ModeServer
	// ModeAutoServer operates in the same way as ModeAuto, but acts as a server when reachability is unknown
	ModeAutoServer
)
// DefaultPrefix is the application specific prefix attached to all DHT protocols by default.
const DefaultPrefix protocol.ID = "/ipfs"
// Option is a DHT configuration option: a function applied to the internal
// config during construction.
type Option = dhtcfg.Option
// ProviderStore sets the provider storage manager.
func ProviderStore(ps providers.ProviderStore) Option {
	return func(c *dhtcfg.Config) error {
		c.ProviderStore = ps // stored on the config; consumed at DHT construction
		return nil
	}
}
// RoutingTableLatencyTolerance sets the maximum acceptable latency for peers
// in the routing table's cluster.
func RoutingTableLatencyTolerance(latency time.Duration) Option {
	return func(c *dhtcfg.Config) error {
		c.RoutingTable.LatencyTolerance = latency // nested routing-table sub-config
		return nil
	}
}
// RoutingTableRefreshQueryTimeout sets the timeout for routing table refresh
// queries.
func RoutingTableRefreshQueryTimeout(timeout time.Duration) Option {
	return func(c *dhtcfg.Config) error {
		c.RoutingTable.RefreshQueryTimeout = timeout // per-query timeout during refresh
		return nil
	}
}
// RoutingTableRefreshPeriod sets the period for refreshing buckets in the
// routing table. The DHT will refresh buckets every period by:
//
//  1. First searching for nearby peers to figure out how many buckets we should try to fill.
//  2. Then searching for a random key in each bucket that hasn't been queried in
//     the last refresh period.
func RoutingTableRefreshPeriod(period time.Duration) Option {
	return func(c *dhtcfg.Config) error {
		c.RoutingTable.RefreshInterval = period
		return nil
	}
}
// Datastore configures the DHT to use the specified datastore.
//
// Defaults to an in-memory (temporary) map.
func Datastore(ds ds.Batching) Option {
	return func(c *dhtcfg.Config) error {
		c.Datastore = ds // must support batching
		return nil
	}
}
// Mode configures which mode the DHT operates in (Client, Server, Auto).
//
// Defaults to ModeAuto.
func Mode(m ModeOpt) Option {
	return func(c *dhtcfg.Config) error {
		c.Mode = m // see ModeOpt constants above
		return nil
	}
}
// Validator configures the DHT to use the specified validator.
//
// Defaults to a namespaced validator that can validate both public key (under the "pk"
// namespace) and IPNS records (under the "ipns" namespace). Setting the validator
// implies that the user wants to control the validators and therefore the default
// public key and IPNS validators will not be added.
func Validator(v record.Validator) Option {
	return func(c *dhtcfg.Config) error {
		c.Validator = v
		// flag that the caller replaced the default validator set
		c.ValidatorChanged = true
		return nil
	}
}
// NamespacedValidator adds a validator namespaced under `ns`. This option fails
// if the DHT is not using a `record.NamespacedValidator` as its validator (it
// uses one by default but this can be overridden with the `Validator` option).
// Adding a namespaced validator without changing the `Validator` will result in
// adding a new validator in addition to the default public key and IPNS validators.
// The "pk" and "ipns" namespaces cannot be overridden here unless a new `Validator`
// has been set first.
//
// Example: Given a validator registered as `NamespacedValidator("ipns",
// myValidator)`, all records with keys starting with `/ipns/` will be validated
// with `myValidator`.
func NamespacedValidator(ns string, v record.Validator) Option {
	return func(c *dhtcfg.Config) error {
		// the configured validator must itself be a NamespacedValidator map
		nsval, ok := c.Validator.(record.NamespacedValidator)
		if !ok {
			return fmt.Errorf("can only add namespaced validators to a NamespacedValidator")
		}
		nsval[ns] = v
		return nil
	}
}
// ProtocolPrefix sets an application specific prefix to be attached to all DHT protocols. For example,
// /myapp/kad/1.0.0 instead of /ipfs/kad/1.0.0. Prefix should be of the form /myapp.
//
// Defaults to dht.DefaultPrefix
func ProtocolPrefix(prefix protocol.ID) Option {
	return func(c *dhtcfg.Config) error {
		c.ProtocolPrefix = prefix // replaces DefaultPrefix entirely
		return nil
	}
}
// ProtocolExtension adds an application specific protocol to the DHT protocol. For example,
// /ipfs/lan/kad/1.0.0 instead of /ipfs/kad/1.0.0. extension should be of the form /lan.
func ProtocolExtension(ext protocol.ID) Option {
	return func(c *dhtcfg.Config) error {
		c.ProtocolPrefix += ext // appended to the current prefix (order-sensitive with ProtocolPrefix)
		return nil
	}
}
// V1ProtocolOverride overrides the protocolID used for /kad/1.0.0 with another. This is an
// advanced feature, and should only be used to handle legacy networks that have not been
// using protocolIDs of the form /app/kad/1.0.0.
//
// This option will override and ignore the ProtocolPrefix and ProtocolExtension options
func V1ProtocolOverride(proto protocol.ID) Option {
	return func(c *dhtcfg.Config) error {
		c.V1ProtocolOverride = proto
		return nil
	}
}
// BucketSize configures the bucket size (k in the Kademlia paper) of the routing table.
//
// The default value is 20.
func BucketSize(bucketSize int) Option {
	return func(c *dhtcfg.Config) error {
		c.BucketSize = bucketSize // no validation here; applied as-is
		return nil
	}
}
// Concurrency configures the number of concurrent requests (alpha in the Kademlia paper) for a given query path.
//
// The default value is 10.
func Concurrency(alpha int) Option {
	return func(c *dhtcfg.Config) error {
		c.Concurrency = alpha
		return nil
	}
}
// Resiliency configures the number of peers closest to a target that must have responded in order for a given query
// path to complete.
//
// The default value is 3.
func Resiliency(beta int) Option {
	return func(c *dhtcfg.Config) error {
		c.Resiliency = beta
		return nil
	}
}
// LookupCheckConcurrency configures the maximal number of goroutines that can
// be used to perform a lookup check operation, before adding a new node to the
// routing table. (Comment previously started with the wrong name
// "LookupInterval"; fixed to match the function.)
func LookupCheckConcurrency(n int) Option {
	return func(c *dhtcfg.Config) error {
		c.LookupCheckConcurrency = n
		return nil
	}
}
// MaxRecordAge specifies the maximum time that any node will hold onto a record ("PutValue record")
// from the time it's received. This does not apply to any other forms of validity that
// the record may contain.
// For example, a record may contain an ipns entry with an EOL saying it's valid
// until the year 2020 (a great time in the future). For that record to stick around
// it must be rebroadcast more frequently than once every 'MaxRecordAge'.
func MaxRecordAge(maxAge time.Duration) Option {
	return func(c *dhtcfg.Config) error {
		c.MaxRecordAge = maxAge
		return nil
	}
}
// DisableAutoRefresh completely disables 'auto-refresh' on the DHT routing
// table. This means that we will neither refresh the routing table periodically
// nor when the routing table size goes below the minimum threshold.
func DisableAutoRefresh() Option {
	return func(c *dhtcfg.Config) error {
		c.RoutingTable.AutoRefresh = false
		return nil
	}
}
// DisableProviders disables storing and retrieving provider records.
//
// Defaults to enabled.
//
// WARNING: do not change this unless you're using a forked DHT (i.e., a private
// network and/or distinct DHT protocols with the `Protocols` option).
func DisableProviders() Option {
	return func(c *dhtcfg.Config) error {
		c.EnableProviders = false
		return nil
	}
}
// DisableValues disables storing and retrieving value records (including
// public keys).
//
// Defaults to enabled.
//
// WARNING: do not change this unless you're using a forked DHT (i.e., a private
// network and/or distinct DHT protocols with the `Protocols` option).
func DisableValues() Option {
	return func(c *dhtcfg.Config) error {
		c.EnableValues = false
		return nil
	}
}
// QueryFilter sets a function that approves which peers may be dialed in a query
func QueryFilter(filter QueryFilterFunc) Option {
	return func(c *dhtcfg.Config) error {
		c.QueryPeerFilter = filter // consulted before dialing peers during queries
		return nil
	}
}
// RoutingTableFilter sets a function that approves which peers may be added to the routing table. The host should
// already have at least one connection to the peer under consideration.
func RoutingTableFilter(filter RouteTableFilterFunc) Option {
	return func(c *dhtcfg.Config) error {
		c.RoutingTable.PeerFilter = filter
		return nil
	}
}
// BootstrapPeers configures the bootstrapping nodes that we will connect to to seed
// and refresh our Routing Table if it becomes empty.
func BootstrapPeers(bootstrappers ...peer.AddrInfo) Option {
	return func(c *dhtcfg.Config) error {
		// wrapped in a function to satisfy the BootstrapPeersFunc-style config field
		c.BootstrapPeers = func() []peer.AddrInfo {
			return bootstrappers
		}
		return nil
	}
}
// BootstrapPeersFunc configures the function that returns the bootstrapping nodes that we will
// connect to to seed and refresh our Routing Table if it becomes empty.
func BootstrapPeersFunc(getBootstrapPeers func() []peer.AddrInfo) Option {
	return func(c *dhtcfg.Config) error {
		c.BootstrapPeers = getBootstrapPeers // called lazily whenever bootstrap peers are needed
		return nil
	}
}
// RoutingTablePeerDiversityFilter configures the implementation of the `PeerIPGroupFilter` that will be used
// to construct the diversity filter for the Routing Table.
// Please see the docs for `peerdiversity.PeerIPGroupFilter` AND `peerdiversity.Filter` for more details.
func RoutingTablePeerDiversityFilter(pg peerdiversity.PeerIPGroupFilter) Option {
	return func(c *dhtcfg.Config) error {
		c.RoutingTable.DiversityFilter = pg
		return nil
	}
}
// disableFixLowPeersRoutine disables the "fixLowPeers" routine in the DHT.
// This is ONLY for tests.
// The *testing.T parameter is unused; it exists to make the helper awkward to
// call from non-test code.
func disableFixLowPeersRoutine(t *testing.T) Option {
	return func(c *dhtcfg.Config) error {
		c.DisableFixLowPeers = true
		return nil
	}
}
// forceAddressUpdateProcessing forces the DHT to handle changes to the hosts addresses.
// This occurs even when AutoRefresh has been disabled.
// This is ONLY for tests.
// The *testing.T parameter is unused; it exists to make the helper awkward to
// call from non-test code.
func forceAddressUpdateProcessing(t *testing.T) Option {
	return func(c *dhtcfg.Config) error {
		c.TestAddressUpdateProcessing = true
		return nil
	}
}
// EnableOptimisticProvide enables an optimization that skips the last hops of the provide process.
// This works by using the network size estimator (which uses the keyspace density of queries)
// to optimistically send ADD_PROVIDER requests when we most likely have found the last hop.
// It will also run some ADD_PROVIDER requests asynchronously in the background after returning,
// this allows to optimistically return earlier if some threshold number of RPCs have succeeded.
// The number of background/in-flight queries can be configured with the OptimisticProvideJobsPoolSize
// option.
//
// EXPERIMENTAL: This is an experimental option and might be removed in the future. Use at your own risk.
func EnableOptimisticProvide() Option {
	return func(c *dhtcfg.Config) error {
		c.EnableOptimisticProvide = true
		return nil
	}
}
// OptimisticProvideJobsPoolSize allows to configure the asynchronicity limit for in-flight ADD_PROVIDER RPCs.
// It makes sense to set it to a multiple of optProvReturnRatio * BucketSize. Check the description of
// EnableOptimisticProvide for more details.
//
// EXPERIMENTAL: This is an experimental option and might be removed in the future. Use at your own risk.
func OptimisticProvideJobsPoolSize(size int) Option {
	return func(c *dhtcfg.Config) error {
		c.OptimisticProvideJobsPoolSize = size
		return nil
	}
}
// AddressFilter allows to configure the address filtering function.
// This function is run before addresses are added to the peerstore.
// It is most useful to avoid adding localhost / local addresses.
func AddressFilter(f func([]ma.Multiaddr) []ma.Multiaddr) Option {
	return func(c *dhtcfg.Config) error {
		c.AddressFilter = f // receives candidate addrs, returns the filtered set
		return nil
	}
}
|
package main
import (
"flag"
"fmt"
"log"
"net/http"
)
var (
	// addrFlag is the TCP listen address ("host:port") for the HTTP server.
	addrFlag = flag.String("addr", ":5555", "server address:port")
)
// main wires the root route to helloWorld and serves HTTP on the address
// given by -addr, exiting fatally if the listener fails.
func main() {
	flag.Parse()
	http.HandleFunc("/", helloWorld)
	if err := http.ListenAndServe(*addrFlag, nil); err != nil {
		log.Fatal(err)
	}
}
func helloWorld(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "hello world 2.0\n")
}
|
package main
import (
"fmt"
)
const (
	// url is the location of the Deep Moor 2015 environmental data set.
	url = "https://lpo.dt.navy.mil/data/DM/Environmental_Data_Deep_Moor_2015.txt"
)
// main prints the data-set URL to stdout.
func main() {
	fmt.Printf("%s\n", url)
}
|
package router
import (
"context"
"net/http"
)
// handler is an http.Handler that decodes a request, invokes an endpoint,
// and encodes the response (or any error) back to the client.
type handler struct {
	endpoint  Endpoint           // business logic invoked with the decoded request
	decodeReq DecodeRequestFunc  // translates *http.Request into the endpoint's input
	encodeRes EncodeResponseFunc // writes the endpoint's output to the ResponseWriter
	encodeErr EncodeErrorFunc    // writes any error to the ResponseWriter
}
// NewHandler builds a handler wired with the given endpoint, request decoder,
// response encoder, and error encoder.
func NewHandler(e Endpoint, d DecodeRequestFunc, en EncodeResponseFunc, er EncodeErrorFunc) *handler {
	return &handler{
		endpoint:  e,
		decodeReq: d,
		encodeRes: en,
		encodeErr: er,
	}
}
// ServeHTTP decodes the request, runs the endpoint, and encodes the result.
// Any failure at any stage is routed through encodeErr and stops processing.
func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	req, err := h.decodeReq(ctx, r)
	if err != nil {
		h.encodeErr(ctx, err, w)
		return
	}
	res, err := h.endpoint(ctx, req)
	if err != nil {
		h.encodeErr(ctx, err, w)
		return
	}
	if err := h.encodeRes(ctx, w, res); err != nil {
		h.encodeErr(ctx, err, w)
	}
}
// DecodeRequestFunc extracts the endpoint's input value from an HTTP request.
type DecodeRequestFunc func(context.Context, *http.Request) (request interface{}, err error)
// EncodeResponseFunc writes the endpoint's output value to the ResponseWriter.
type EncodeResponseFunc func(context.Context, http.ResponseWriter, interface{}) error
// EncodeErrorFunc writes an error to the ResponseWriter.
type EncodeErrorFunc func(context.Context, error, http.ResponseWriter)
|
// fzgo is a simple prototype of integrating dvyukov/go-fuzz into 'go test'.
//
// See the README at https://github.com/thepudds/fzgo for more details.
//
// There are three main directories used:
//
// 1. cacheDir is the location for the instrumented binary, and would typically be something like:
// GOPATH/pkg/fuzz/linux_amd64/619f7d77e9cd5d7433f8/fmt.FuzzFmt
//
// 2. fuzzDir is the destination supplied by the user via the -fuzzdir argument, and contains the workDir.
//
// 3. workDir is passed to go-fuzz-build and go-fuzz as the -workdir argument:
// if -fuzzdir is not specified: workDir is GOPATH/pkg/fuzz/corpus/<import-path>/<func>
// if -fuzzdir is '/some/path': workDir is /some/path/<import-path>/<func>
// if -fuzzdir is 'testdata': workDir is <pkg-dir>/testdata/fuzz/<func>
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/thepudds/fzgo/fuzz"
)
// Values of the 'fzgo test -fuzz' command-line flags, populated by the
// FlagSet built from flagDefs below.
var (
	flagCompile  bool
	flagFuzzFunc string
	flagFuzzDir  string
	flagFuzzTime time.Duration
	flagParallel int
	flagRun      string
	flagTimeout  time.Duration
	flagVerbose  bool
	flagDebug    string
)
// flagDefs declares the flags understood by 'fzgo test -fuzz', binding each
// flag name to the package-level variable it populates.
var flagDefs = []fuzz.FlagDef{
	{Name: "fuzz", Ptr: &flagFuzzFunc, Description: "fuzz at most one function matching `regexp`"},
	{Name: "fuzzdir", Ptr: &flagFuzzDir, Description: "store fuzz artifacts in `dir` (default pkgpath/testdata/fuzz)"},
	{Name: "fuzztime", Ptr: &flagFuzzTime, Description: "fuzz for duration `d` (default unlimited)"},
	{Name: "parallel", Ptr: &flagParallel, Description: "start `n` fuzzing operations (default GOMAXPROCS)"},
	{Name: "run", Ptr: &flagRun, Description: "if supplied with -fuzz, -run=Corpus/123ABCD executes corpus file matching regexp 123ABCD as a unit test." +
		"Otherwise, run normal 'go test' with only those tests and examples matching the regexp."},
	{Name: "timeout", Ptr: &flagTimeout, Description: "fail an individual call to a fuzz function after duration `d` (default 10s, minimum 1s)"},
	{Name: "c", Ptr: &flagCompile, Description: "compile the instrumented code but do not run it"},
	{Name: "v", Ptr: &flagVerbose, Description: "verbose: print additional output"},
	{Name: "debug", Ptr: &flagDebug, Description: "comma separated list of debug options; currently only supports 'nomultifuzz'"},
}
// constants for status codes for os.Exit()
const (
	Success  = 0 // everything worked
	OtherErr = 1 // a non-argument error (build failure, test failure, etc.)
	ArgErr   = 2 // bad or missing command-line arguments
)
// main delegates to fzgoMain so the process exit code is set in one place.
func main() {
	os.Exit(fzgoMain())
}
// fzgoMain implements main(), returning a status code usable by os.Exit() and the testscripts package.
// Success is status code 0.
//
// It dispatches between: plain pass-through to 'go' for non-test commands,
// corpus verification when -fuzz is absent or combined with -run, and actual
// fuzzing (instrument, then loop over targets) otherwise.
func fzgoMain() int {
	// register our flags
	fs, err := fuzz.FlagSet("fzgo test -fuzz", flagDefs, usage)
	if err != nil {
		fmt.Println("fzgo:", err)
		return OtherErr
	}
	// print our fzgo-specific help for variations like 'fzgo', 'fzgo help', 'fzgo -h', 'fzgo --help', 'fzgo help foo'
	if len(os.Args) < 2 || os.Args[1] == "help" {
		fs.Usage()
		return ArgErr
	}
	if _, _, ok := fuzz.FindFlag(os.Args[1:2], []string{"h", "help"}); ok {
		fs.Usage()
		return ArgErr
	}
	if os.Args[1] != "test" {
		// pass through to 'go' command
		err = fuzz.ExecGo(os.Args[1:], nil)
		if err != nil {
			// ExecGo prints error if 'go' tool is not in path.
			// Other than that, we currently rely on the 'go' tool to print any errors itself.
			return OtherErr
		}
		return Success
	}
	// 'test' command is specified.
	// check to see if we have a -fuzz flag, and if so, parse the args we will interpret.
	pkgPattern, err := fuzz.ParseArgs(os.Args[2:], fs)
	if err == flag.ErrHelp {
		// if we get here, we already printed usage.
		return ArgErr
	} else if err != nil {
		fmt.Println("fzgo:", err)
		return ArgErr
	}
	if flagFuzzFunc == "" {
		// 'fzgo test' without '-fuzz'.
		// We have not been asked to generate new fuzz-based inputs, but will instead:
		// 1. deterministically validate our corpus.
		//    it might be a subset or a single file if have something like -run=Corpus/01FFABCD.
		//    we don't try any crashers given those are expected to fail (prior to a fix, of course).
		status := verifyCorpus(os.Args,
			verifyCorpusOptions{run: flagRun, tryCrashers: false, verbose: flagVerbose})
		if status != Success {
			return status
		}
		// 2. pass our arguments through to the normal 'go' command, which will run normal 'go test'.
		// (A redundant inner re-check of flagFuzzFunc == "" was removed here:
		// it is always true on this branch.)
		if err = fuzz.ExecGo(os.Args[1:], nil); err != nil {
			return OtherErr
		}
		return Success
	}
	if flagRun != "" {
		// 'fzgo test -fuzz=foo -run=bar'.
		// The -run means we have not been asked to generate new fuzz-based inputs,
		// but instead will run our corpus, and possibly any crashers if
		// -run matches (e.g., -run=TestCrashers or -run=TestCrashers/02ABCDEF).
		// Crashers will only be executed if the -run argument matches.
		// (The tautological 'flagFuzzFunc != ""' condition was dropped; it is
		// guaranteed by the early return above.)
		return verifyCorpus(os.Args,
			verifyCorpusOptions{run: flagRun, tryCrashers: true, verbose: flagVerbose})
	}
	// we now know we have been asked to do fuzzing.
	// gather the basic fuzzing settings from our flags.
	allowMultiFuzz := flagDebug != "nomultifuzz"
	parallel := flagParallel
	if parallel == 0 {
		parallel = runtime.GOMAXPROCS(0)
	}
	funcTimeout := flagTimeout
	if funcTimeout == 0 {
		funcTimeout = 10 * time.Second
	} else if funcTimeout < 1*time.Second {
		fmt.Printf("fzgo: fuzz function timeout value %s in -timeout flag is less than minimum of 1 second\n", funcTimeout)
		return ArgErr
	}
	// look for the functions we have been asked to fuzz.
	functions, err := fuzz.FindFunc(pkgPattern, flagFuzzFunc, nil, allowMultiFuzz)
	if err != nil {
		fmt.Println("fzgo:", err)
		return OtherErr
	} else if len(functions) == 0 {
		fmt.Printf("fzgo: failed to find fuzz function for pattern %v and func %v\n", pkgPattern, flagFuzzFunc)
		return OtherErr
	}
	if flagVerbose {
		var names []string
		for _, function := range functions {
			names = append(names, function.String())
		}
		fmt.Printf("fzgo: found functions %s\n", strings.Join(names, ", "))
	}
	// build our instrumented code, or find if is is already built in the fzgo cache
	var targets []fuzz.Target
	for _, function := range functions {
		target, err := fuzz.Instrument(function, flagVerbose)
		if err != nil {
			fmt.Println("fzgo:", err)
			return OtherErr
		}
		targets = append(targets, target)
	}
	if flagCompile {
		fmt.Println("fzgo: finished instrumenting binaries")
		return Success
	}
	// run forever if flagFuzzTime was not set (that is, has default value of 0).
	loopForever := flagFuzzTime == 0
	timeQuantum := 5 * time.Second
	for {
		for _, target := range targets {
			// pull our last bit of info out of our arguments.
			workDir := determineWorkDir(target.UserFunc, flagFuzzDir)
			// seed our workDir with any other corpus that might exist from other known locations.
			// see comment for copyCachedCorpus for discussion of current behavior vs. desired behavior.
			if err = copyCachedCorpus(target.UserFunc, workDir); err != nil {
				fmt.Println("fzgo:", err)
				return OtherErr
			}
			// determine how long we will execute this particular fuzz invocation.
			var fuzzDuration time.Duration
			if !loopForever {
				fuzzDuration = flagFuzzTime
			} else {
				if len(targets) > 1 {
					fuzzDuration = timeQuantum
				} else {
					fuzzDuration = 0 // unlimited
				}
			}
			// fuzz!
			err = fuzz.Start(target, workDir, fuzzDuration, parallel, funcTimeout, flagVerbose)
			if err != nil {
				fmt.Println("fzgo:", err)
				return OtherErr
			}
			fmt.Println() // blank separator line at end of one target's fuzz run.
		}
		// run forever if flagFuzzTime was not set,
		// but otherwise break after fuzzing each target once for flagFuzzTime above.
		if !loopForever {
			break
		}
		// back off: fuzz each target longer on every pass, capped at 10 minutes.
		timeQuantum *= 2
		if timeQuantum > 10*time.Minute {
			timeQuantum = 10 * time.Minute
		}
	}
	return Success
}
// verifyCorpusOptions controls how verifyCorpus runs the corpus as tests.
type verifyCorpusOptions struct {
	run         string // -run regexp forwarded to 'go test' (may be empty)
	tryCrashers bool   // also attempt to execute recorded crashers
	verbose     bool   // pass verbose output through to 'go test'
}
// verifyCorpus validates our corpus by executing any fuzz functions in our package pattern
// against any files in the corresponding corpus. This is an automatic form of regression test.
// args is os.Args. It returns a status code for os.Exit.
func verifyCorpus(args []string, opt verifyCorpusOptions) int {
	// we do this by first searching for any fuzz func ("." regexp) in our package pattern.
	// TODO: move this elsewhere? Taken from fuzz.ParseArgs, but we can't use fuzz.ParseArgs as is.
	// formerly, we used to also obtain nonPkgArgs here and pass them through, but now we effectively
	// whitelist what we want to pass through to 'go test' (now including -run and -v).
	testPkgPatterns, _, err := fuzz.FindPkgs(args[2:])
	if err != nil {
		fmt.Println("fzgo:", err)
		return OtherErr
	}
	var testPkgPattern string
	if len(testPkgPatterns) > 1 {
		// fixed: error message previously lacked a trailing newline.
		fmt.Printf("fzgo: more than one package pattern not allowed: %q\n", testPkgPatterns)
		return ArgErr
	} else if len(testPkgPatterns) == 0 {
		testPkgPattern = "."
	} else {
		testPkgPattern = testPkgPatterns[0]
	}
	functions, err := fuzz.FindFunc(testPkgPattern, flagFuzzFunc, nil, true)
	if err != nil {
		fmt.Println("fzgo:", err)
		return OtherErr
	}
	status := Success
	for _, function := range functions {
		// work through how many places we need to check based on
		// what the user specified in flagFuzzDir.
		var dirsToCheck []string
		// we always check the "testdata" dir if it exists.
		testdataWorkDir := determineWorkDir(function, "testdata")
		dirsToCheck = append(dirsToCheck, testdataWorkDir)
		// we also always check under GOPATH/pkg/fuzz/corpus/... if it exists.
		gopathPkgWorkDir := determineWorkDir(function, "")
		dirsToCheck = append(dirsToCheck, gopathPkgWorkDir)
		// see if we need to check elsewhere as well.
		if flagFuzzDir == "" {
			// nothing else to do; the user did not specify a dir.
		} else if flagFuzzDir == "testdata" {
			// nothing else to do; we already added testdata dir.
		} else {
			// the user supplied a destination
			userWorkDir := determineWorkDir(function, flagFuzzDir)
			dirsToCheck = append(dirsToCheck, userWorkDir)
		}
		// we have 2 or 3 places to check
		foundWorkDir := false
		for _, workDir := range dirsToCheck {
			if !fuzz.PathExists(filepath.Join(workDir, "corpus")) {
				// corpus dir in this workDir does not exist, so skip.
				continue
			}
			foundWorkDir = true
			err := fuzz.VerifyCorpus(function, workDir, opt.run, opt.verbose)
			if err == fuzz.ErrGoTestFailed {
				// 'go test' itself should have printed an informative error,
				// so here we just set a non-zero status code and continue.
				status = OtherErr
			} else if err != nil {
				fmt.Println("fzgo:", err)
				return OtherErr
			}
			if opt.tryCrashers {
				// This might not end up matching anything based on the -run=foo regexp,
				// but we try it anyway and let cmd/go skip executing the test if it doesn't match.
				err = fuzz.VerifyCrashers(function, workDir, opt.run, opt.verbose)
				if err == fuzz.ErrGoTestFailed {
					// Similar to above, 'go test' itself should have printed an informative error.
					status = OtherErr
				} else if err != nil {
					fmt.Println("fzgo:", err)
					return OtherErr
				}
			}
		}
		if !foundWorkDir {
			// TODO: consider emitting a warning? Or too noisy?
			// Would be too noisy for cmd/go, but consider for now?
			// fmt.Println("fzgo: did not find any corpus location for", function.FuzzName())
		}
	}
	return status
}
// determineWorkDir translates from the user's specified -fuzzdir to an actual
// location on disk, including the default location if the user does not specify a -fuzzdir.
func determineWorkDir(function fuzz.Func, requestedFuzzDir string) string {
	importPathDirs := filepath.FromSlash(function.PkgPath) // convert import path into filepath
	switch requestedFuzzDir {
	case "":
		// default to GOPATH/pkg/fuzz/corpus/import/path/<func>
		return filepath.Join(fuzz.Gopath(), "pkg", "fuzz", "corpus", importPathDirs, function.FuncName)
	case "testdata":
		// place under the package of interest in the testdata directory.
		return filepath.Join(function.PkgDir, "testdata", "fuzz", function.FuncName)
	default:
		// requestedFuzzDir was specified to be an actual directory.
		// still use the import path to handle fuzzing multiple functions across multiple packages.
		return filepath.Join(requestedFuzzDir, importPathDirs, function.FuncName)
	}
}
// copyCachedCorpus desired behavior (or at least proposed-by-me behavior):
// 1. if destination corpus location doesn't exist, seed it from GOPATH/pkg/fuzz/corpus/import/path/<fuzzfunc>
// 2. related: fuzz while reading from all known locations that exist (e.g., testdata if it exists, GOPATH/pkg/fuzz/corpus/...)
//
// However, 2. is not possible currently to do directly with dvyukov/go-fuzz for more than 1 corpus.
//
// Therefore, the current behavior of copyCachedCorpus approximates 1. and 2. like so:
// 1'. always copy all known corpus entries to the destination corpus location in all cases.
//
// Also, that current behavior could be reasonable for the proposed behavior in the sense that it is simple.
// Filenames that already exist in the destination are not updated.
// TODO: it is debatable if it should copy crashers and suppressions as well.
// For clarity, it only copies the corpus directory itself, and not crashers and suppressions.
// This avoids making someone think they have a new crasher after copying a crasher to a new location, for example,
// especially at this current prototype phase where the crasher reporting in
// go-fuzz does not know anything about multi-corpus locations.
func copyCachedCorpus(function fuzz.Func, dstWorkDir string) error {
	dstCorpusDir := filepath.Join(dstWorkDir, "corpus")
	// the two well-known source locations: the GOPATH cache and testdata.
	gopathPkgWorkDir := determineWorkDir(function, "")
	testdataWorkDir := determineWorkDir(function, "testdata")
	for _, srcWorkDir := range []string{gopathPkgWorkDir, testdataWorkDir} {
		srcCorpusDir := filepath.Join(srcWorkDir, "corpus")
		if srcCorpusDir == dstCorpusDir {
			// nothing to do
			continue
		}
		if fuzz.PathExists(srcCorpusDir) {
			// copyDir will create dstDir if needed, and won't overwrite files
			// in dstDir that already exist.
			if err := fuzz.CopyDir(dstCorpusDir, srcCorpusDir); err != nil {
				return fmt.Errorf("failed seeding destination corpus: %v", err)
			}
		}
	}
	return nil
}
// usage returns a closure suitable for flag.Usage that prints fzgo's help
// text, including every flag registered in flagDefs and looked up on fs.
func usage(fs *flag.FlagSet) func() {
	return func() {
		fmt.Printf("\nfzgo is a simple prototype of integrating dvyukov/go-fuzz into 'go test'.\n\n")
		// BUG FIX: the help text previously said 'fgzo test' (typo for 'fzgo test').
		fmt.Printf("fzgo supports typical go commands such as 'fzgo build', 'fzgo test', or 'fzgo env', and also supports\n")
		fmt.Printf("the '-fuzz' flag and several other related flags proposed in https://golang.org/issue/19109.\n\n")
		fmt.Printf("Instrumented binaries are automatically cached in GOPATH/pkg/fuzz.\n\n")
		fmt.Printf("Sample usage:\n\n")
		fmt.Printf("   fzgo test                           # test the current package\n")
		fmt.Printf("   fzgo test -fuzz .                   # fuzz the current package with a function starting with 'Fuzz'\n")
		fmt.Printf("   fzgo test -fuzz FuzzFoo             # fuzz the current package with a function matching 'FuzzFoo'\n")
		fmt.Printf("   fzgo test ./... -fuzz FuzzFoo       # fuzz a package in ./... with a function matching 'FuzzFoo'\n")
		fmt.Printf("   fzgo test sample/pkg -fuzz FuzzFoo  # fuzz 'sample/pkg' with a function matching 'FuzzFoo'\n\n")
		fmt.Printf("The following flags work with 'fzgo test -fuzz':\n\n")
		for _, d := range flagDefs {
			f := fs.Lookup(d.Name)
			argname, usage := flag.UnquoteUsage(f)
			fmt.Printf("  -%s %s\n    	%s\n", f.Name, argname, usage)
		}
		fmt.Println()
	}
}
|
package consumer
// Limiter is a callback invoked with a topic name. NOTE(review): its
// contract is not visible here — presumably it throttles or gates
// consumption of the given topic; confirm against the callers.
type Limiter func(topic string)
|
package main
import (
"flag"
"fmt"
"os"
"sort"
"strings"
)
// newSpellCheck tokenizes the Go sources at srcpaths, splits every token
// into whitespace-separated words, and builds a Spellcheck over that word
// set, honoring the extra ignore words listed in ignfile.
func newSpellCheck(srcpaths []string, ignfile string) (*Spellcheck, error) {
	toks, err := GoTokens(srcpaths)
	if err != nil {
		return nil, err
	}
	splitToks := make(map[string]struct{})
	// Key-only range form (the previous `for k, _ := range` is flagged by
	// gofmt/vet as redundant).
	for k := range toks {
		for _, field := range strings.Fields(k) {
			splitToks[field] = struct{}{}
		}
	}
	return NewSpellcheck(splitToks, ignfile)
}
// main parses flags, assembles the enabled check pipelines (spelling and/or
// godoc), runs them over the source paths given as positional arguments,
// and prints each reported issue with its position and suggestion.
func main() {
	ign := flag.String("ignore-file", "", "additional words to ignore")
	useSpell := flag.Bool("use-spell", true, "check spelling")
	useGoDoc := flag.Bool("use-godoc", true, "check godocs")
	flag.Parse()
	var cps []CheckPipe
	if *useSpell {
		// flag.String is documented to return a non-nil pointer, so the
		// previous `if ign != nil` guard was dead code.
		sp, serr := newSpellCheck(flag.Args(), *ign)
		if serr != nil {
			fmt.Fprintf(os.Stderr, "error: %v\n", serr)
			os.Exit(1)
		}
		// NOTE(review): os.Exit below skips this defer; acceptable here since
		// the process is terminating anyway — confirm Close has no flush duty.
		defer sp.Close()
		cps = append(cps, sp.Check())
	}
	if *useGoDoc {
		cps = append(cps, CheckGoDocs)
	}
	// Run every pipeline and collect all reported problems.
	ct, cerr := Check(flag.Args(), cps)
	if cerr != nil {
		fmt.Fprintf(os.Stderr, "error: %v\n", cerr)
		os.Exit(1)
	}
	// Deterministic output: sort by position before printing.
	sort.Sort(CheckedLexemes(ct))
	for _, c := range ct {
		fmt.Printf("%s.%d: %s (%s: %s -> %s?)\n",
			c.ctok.pos.Filename,
			c.ctok.pos.Line,
			c.ctok.lit,
			c.rule,
			c.words[0].word,
			c.words[0].suggest)
	}
}
|
package main
/**
37. 解数独
编写一个程序,通过已填充的空格来解决数独问题。
一个数独的解法需遵循如下规则:
- 数字 1-9 在每一行只能出现一次。
- 数字 1-9 在每一列只能出现一次。
- 数字 1-9 在每一个以粗实线分隔的 3x3 宫内只能出现一次。
空白格用 '.' 表示。

一个数独。

答案被标成红色。
Note:
- 给定的数独序列只包含数字 1-9 和字符 '.' 。
- 你可以假设给定的数独只有唯一解。
- 给定数独永远是 9x9 形式的。
*/
// SolveSudoku fills the '.' cells of the 9x9 board in place so that every
// row, column, and 3x3 box contains the digits 1-9 exactly once. Per the
// problem statement above, the board contains only '1'-'9' and '.', and
// has exactly one solution. (The previous version was an empty stub.)
//
// Implementation: backtracking over the empty cells, with one 9-bit mask
// per row/column/box so each candidate check is O(1).
func SolveSudoku(board [][]byte) {
	var rows, cols, boxes [9]uint16 // bit d set => digit d+1 already used
	type cell struct{ r, c int }
	var empty []cell
	for r := 0; r < 9; r++ {
		for c := 0; c < 9; c++ {
			if board[r][c] == '.' {
				empty = append(empty, cell{r, c})
				continue
			}
			bit := uint16(1) << (board[r][c] - '1')
			rows[r] |= bit
			cols[c] |= bit
			boxes[r/3*3+c/3] |= bit
		}
	}
	var solve func(i int) bool
	solve = func(i int) bool {
		if i == len(empty) {
			return true // all empty cells filled consistently
		}
		r, c := empty[i].r, empty[i].c
		b := r/3*3 + c/3
		for d := uint16(0); d < 9; d++ {
			bit := uint16(1) << d
			if rows[r]&bit != 0 || cols[c]&bit != 0 || boxes[b]&bit != 0 {
				continue
			}
			rows[r] |= bit
			cols[c] |= bit
			boxes[b] |= bit
			board[r][c] = byte('1' + d)
			if solve(i + 1) {
				return true
			}
			// Undo and try the next digit.
			rows[r] &^= bit
			cols[c] &^= bit
			boxes[b] &^= bit
			board[r][c] = '.'
		}
		return false
	}
	solve(0)
}
|
package utils
import (
"testing"
_ "gin-vue-admin/config"
)
// TestSendSMS exercises SendShotMessage with a hard-coded phone number and
// code. NOTE(review): this presumably hits the real SMS provider and asserts
// nothing about the result — consider checking the return value and gating
// the test behind a flag so CI does not send live messages.
func TestSendSMS(t *testing.T) {
	SendShotMessage("13718320428", "123456")
}
|
package controllers
import (
"context"
"github.com/superbet-group/code-cadets-2021/homework_4/02_bets_api/internal/api/controllers/models"
)
// BetResponse provides read access to bets for the API controllers.
type BetResponse interface {
	// GetBetById returns the bet with the given id; the bool reports
	// whether a bet was found.
	GetBetById(ctx context.Context, id string) (models.BetResponseDto, bool, error)
	// GetBetsByUser returns all bets placed by the given user.
	GetBetsByUser(ctx context.Context, userId string) ([]models.BetResponseDto, error)
	// GetBetsByStatus returns all bets currently in the given status.
	GetBetsByStatus(ctx context.Context, status string) ([]models.BetResponseDto, error)
}
|
package mhfpacket
import (
"errors"
"github.com/Andoryuuta/Erupe/network"
"github.com/Andoryuuta/Erupe/network/clientctx"
"github.com/Andoryuuta/byteframe"
)
// MsgMhfGetEarthValue represents the MSG_MHF_GET_EARTH_VALUE packet.
// Field meanings come from a reverse-engineered protocol; the UnkN fields
// are read/written in Parse/Build but their semantics are unknown.
type MsgMhfGetEarthValue struct {
	AckHandle uint32 // handle echoed back in the server's ack response
	Unk0      uint32 // unknown
	Unk1      uint32 // unknown
	ReqType   uint32 // request type selector — exact values unknown; confirm against server handler
	Unk3      uint32 // unknown
	Unk4      uint32 // unknown
	Unk5      uint32 // unknown
	Unk6      uint32 // unknown
}
// Opcode returns the packet ID associated with this packet type
// (MSG_MHF_GET_EARTH_VALUE).
func (m *MsgMhfGetEarthValue) Opcode() network.PacketID {
	return network.MSG_MHF_GET_EARTH_VALUE
}
// Parse decodes the packet fields from bf. The fields are read as eight
// consecutive uint32 values in wire order.
func (m *MsgMhfGetEarthValue) Parse(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
	// Destinations listed in wire order; each is filled from the next
	// uint32 in the frame.
	fields := []*uint32{
		&m.AckHandle,
		&m.Unk0,
		&m.Unk1,
		&m.ReqType,
		&m.Unk3,
		&m.Unk4,
		&m.Unk5,
		&m.Unk6,
	}
	for _, dst := range fields {
		*dst = bf.ReadUint32()
	}
	return nil
}
// Build builds a binary packet from the current data.
// It is not yet implemented and always returns an error.
func (m *MsgMhfGetEarthValue) Build(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
	// Error strings are lowercase per Go convention (staticcheck ST1005);
	// the previous message was capitalized.
	return errors.New("not implemented")
}
|
package main
import "code.google.com/p/go-tour/pic"
// Pic returns a dy-by-dx grid of uint8 values, all zero, which pic.Show
// renders as a uniform image.
func Pic(dx, dy int) [][]uint8 {
	rows := make([][]uint8, dy)
	for y := range rows {
		rows[y] = make([]uint8, dx)
	}
	return rows
}
// main renders the image produced by Pic using the Go tour's pic helper.
func main() {
	pic.Show(Pic)
}
|
package main
import (
"fmt"
"log"
//"sync"
"os"
"time"
"os/exec"
"strconv"
"bufio"
)
// usage is the help text printed by UsageAndExit.
var usage = ` Usage: autoscaler [options] <url>
Options:
-h Custom Web Cluster Address, name1:value1
-r request rate (sent per second) threshold
-c cpu usage threshold
-t response time (average in one second) threshold
`
// Shared state and channels wiring the monitor/alerter/modeler/scaler
// goroutines together. NOTE(review): totalServer and the thresholds are
// written from multiple goroutines without synchronization — data race;
// confirm and guard if this matters.
var totalServer int   // current number of backend servers
var webAddress string // target web cluster address (from argv)
var kupper float32    // growth-rate threshold used by alerter
var rateupper float32 // request-rate threshold (scaled by totalServer in CalNum)
var ttlupper float32  // response-time upper threshold (scale up above this)
var ttllower float32  // response-time lower threshold (scale down below this)
//var modelType chan int
var curTTL chan float32   // measured response-time samples (monitorttl -> alerter)
var curRate chan float32  // measured request-rate samples (monitorrate -> alerter)
var proRate chan float32  // predicted request rate (modeler -> alerter)
var scaleType chan int    // scaling action: 1=VM up, 2=Docker up, 3=down (alerter -> scaler)
var monRate chan float32  // buffered; not read in visible code — confirm whether still needed
var numServer chan int    // number of servers to add (CalNum -> scaler)
//var kRate chan float32
var hWin []float32 // history window of rate samples (cap 10)
var pWin []float32 // prediction window of rate samples (cap 4)
// calave returns the mean of win[0:len(win)-1], i.e. every sample except
// the final one. NOTE(review): excluding the last element may be intended
// (a still-filling slot) — confirm before "fixing" the off-by-one look.
func calave(win []float32) (ave float32) {
	last := len(win) - 1
	var total float32
	for _, v := range win[:last] {
		total += v
	}
	return total / float32(last)
}
// monitorttl launches watchttlave.sh in the background to collect average
// response times, then reads the ./ttlave file it produces line by line,
// pushing each parsed sample onto the curTTL channel. Blocks while samples
// arrive; the sends block until alerter receives.
func monitorttl() {
	// Run the collector script concurrently; its output file is consumed below.
	go func() {
		cmd := exec.Command("/bin/bash", "-C", "./watchttlave.sh")
		if _, err := cmd.Output(); err != nil {
			log.Println(err)
		}
	}()
	// Give the script a moment to create ./ttlave before opening it.
	time.Sleep(time.Duration(1) * time.Second)
	fTTL, err := os.Open("./ttlave")
	if err != nil {
		// NOTE(review): on failure fTTL is nil and the scanner below simply
		// yields nothing — confirm whether this should be fatal instead.
		log.Println(err)
	}
	reader := bufio.NewReader(fTTL)
	scanner := bufio.NewScanner(reader)
	scanner.Split(bufio.ScanLines)
	/*	ttlWin := make([]float32, 0, 2700)
		var count int64
	*/
	var ave float32
	for scanner.Scan() {
		str := scanner.Text()
		// Parse errors are deliberately ignored; n stays 0 on bad input.
		n, _ := strconv.ParseFloat(str, 64)
		//ttlWin = append(ttlWin, float32(n))
		/*	ttlWin[count] = float32(n)
			count++
			if count == 2700 {
				ave = calave(ttlWin)
				count = 0
				curTTL<-ave
				time.Sleep(time.Duration(freq) * time.Second)
			}
		*/
		ave = float32(n)
		curTTL <- ave
	}
}
// monitorrate tails the ./rate file line by line and pushes each parsed
// request-rate sample onto the curRate channel. The sends block until
// alerter receives.
func monitorrate() {
	// Brief delay so the producer of ./rate has time to create it.
	time.Sleep(time.Duration(1) * time.Second)
	fRate, err := os.Open("./rate")
	if err != nil {
		// NOTE(review): on failure fRate is nil and the loop below reads
		// nothing — confirm whether this should abort instead.
		log.Println(err)
	}
	reader := bufio.NewReader(fRate)
	scanner := bufio.NewScanner(reader)
	scanner.Split(bufio.ScanLines)
	rateWin := make([]float32, 1)
	for scanner.Scan() {
		str := scanner.Text()
		// Parse errors are deliberately ignored; n stays 0 on bad input.
		n, _ := strconv.ParseFloat(str, 64)
		rateWin[0] = float32(n)
		curRate <- rateWin[0]
		//		time.Sleep(time.Duration(freq) * time.Second)
	}
}
// monitor starts the two measurement loops (response time and request
// rate) as goroutines and returns immediately.
func monitor() {
	log.Println("Monitor is Running")
	//var cr float64
	//var ct float64
	go func() {
		monitorttl()
		/*	cmd := exec.Command("./Monitorttl")
			re, err := cmd.Output()
			if err != nil {
				log.Println(err)
			}
			ct, _ = strconv.ParseFloat(re, 32)
			curTTL<- float32(ct)
		*/
	}()
	go func() {
		/*	cmd := exec.Command("./Monitorate")
			re, err := cmd.Output()
			if err != nil {
				log.Println(err)
			}
			cr, _ = strconv.ParseFloat(string(re), 32)
			curRate<- float32(cr)
		*/
		monitorrate()
	}()
}
// Calk returns the relative growth of the most recent sample in the global
// pWin window over the previous one: (last - prev) / prev.
// NOTE(review): panics or divides by zero while pWin holds fewer than two
// samples or prev is 0 — confirm callers guarantee otherwise.
func Calk() (k float32) {
	n := len(pWin)
	prev := pWin[n-2]
	return (pWin[n-1] - prev) / prev
}
// CalNum computes how many servers are needed for the current rate cr,
// given the per-server threshold rateupper scaled by the cluster size, and
// sends the result on numServer (blocks until scaler receives).
func CalNum(cr float32) {
	// BUG FIX: the previous version assigned the scaled value back to the
	// global rateupper, so every call compounded the threshold
	// (rateupper *= totalServer), inflating the comparisons in alerter too.
	// Use a local threshold instead.
	threshold := rateupper * float32(totalServer)
	n := int(cr / threshold)
	numServer <- n
}
// addToSlice appends n to win, keeps at most the last size elements, and
// returns the updated slice.
//
// BUG FIX: the previous version mutated only its local slice header, so
// callers never observed the append (and win[1:size] could expose stale
// capacity contents). Callers should assign the result:
//
//	hWin = addToSlice(hWin, 10, cr)
//
// Returning the slice is backward-compatible: existing call statements
// that discard the result still compile (though they should be updated).
func addToSlice(win []float32, size int, n float32) []float32 {
	win = append(win, n)
	if len(win) > size {
		// Sliding window: drop the oldest elements.
		win = win[len(win)-size:]
	}
	return win
}
// alerter is the decision loop. It receives predicted rate (proRate),
// measured rate (curRate) and measured response time (curTTL), keeps every
// freq-th sample as the "current" value, and when a threshold is crossed
// requests a scaling action on scaleType (1 = slow VM scale-up, 2 = fast
// Docker scale-up chosen when growth k exceeds kupper, 3 = scale down).
func alerter(freq int64) {
	log.Println("Alerter is Running")
	var k float32
	var pr float32 // latest predicted rate sample
	var cr float32 // latest measured rate sample
	var ct float32 // latest measured response-time sample
	var tpr float32 // retained predicted rate (every freq-th sample)
	var tcr float32 // retained measured rate (every freq-th sample)
	var tct float32 // retained response time (every freq-th sample)
	var prcnt int64
	var crcnt int64
	var ctcnt int64
	for {
		select {
		//case: k = <-kRate
		case pr = <-proRate:
			{
				prcnt++
				if prcnt == freq {
					tpr = pr
					prcnt = 0
				}
			}
		case cr = <-curRate:
			{
				crcnt++
				if crcnt == freq {
					// NOTE(review): addToSlice mutates only a local slice
					// header, so hWin/pWin are never actually updated here
					// — confirm; modeler/Calk depend on these windows.
					addToSlice(hWin, 10, cr)
					addToSlice(pWin, 4, cr)
					tcr = cr
					crcnt = 0
				}
			}
		case ct = <-curTTL:
			{
				ctcnt++
				if ctcnt == freq {
					tct = ct
					ctcnt = 0
				}
			}
		}
		//hWin = append(hWin, cr)
		//pWin = append(pWin, cr)
		if tcr > rateupper || tpr > rateupper || tct > ttlupper {
			// NOTE(review): passes the raw cr rather than the retained tcr
			// used in the comparison above — confirm which was intended.
			// Also, CalNum blocks sending on numServer while scaler is
			// waiting to receive scaleType first — looks like a deadlock;
			// verify the channel ordering.
			CalNum(cr)
			k = Calk()
			if k > kupper {
				scaleType <- 2
			} else {
				scaleType <- 1
			}
		}
		if tct < ttllower {
			scaleType <- 3
		}
	}
}
// modeler predicts the next request rate from the hWin history window and
// sends it on proRate. t selects the model: 1 = moving average of the last
// two samples, 2 = AR(3) with hard-coded coefficients.
// NOTE(review): this runs once and returns even though it is started as a
// long-lived goroutine — if periodic prediction was intended, a loop is
// missing. It also indexes hWin[len-1]/hWin[9] and will panic while the
// window holds too few samples — confirm the startup ordering.
func modeler(t int) {
	log.Println("Moderler is Running")
	var pr float32
	//t = 1 // 1: MA 2: AR
	if t == 1 {
		i := len(hWin) - 1
		pr = (hWin[i] + hWin[i-1]) / float32(2)
		proRate <- pr
	}
	// AR(3) coefficients; provenance unknown — presumably fitted offline.
	p := 384.7019274542
	a1 := -0.1576960777
	a2 := -0.5020923248
	a3 := -0.6671023113
	if t == 2 {
		pr = float32(float64(hWin[9])*a1 + float64(hWin[8])*a2 + float64(hWin[7])*a3 + p)
		proRate <- pr
	}
}
// scale executes the requested scaling action by shelling out to helper
// scripts and updates the totalServer count: t=1 adds n VMs (slow, 60s
// settle), t=2 adds n Docker containers (fast, 15s settle), t=3 removes
// one server. The sleeps throttle further scaling while capacity changes
// take effect.
func scale(t int, n int) {
	if t == 1 {
		cmd := exec.Command("./ScaleVmUp", strconv.Itoa(n))
		if _, err := cmd.Output(); err != nil {
			// Best-effort: log and continue; totalServer is still updated
			// below even on failure — NOTE(review): confirm that is intended.
			log.Println(err)
		}
		totalServer += n
		time.Sleep(time.Duration(60) * time.Second)
	}
	if t == 2 {
		cmd := exec.Command("./ScaleDockerUp", strconv.Itoa(n))
		if _, err := cmd.Output(); err != nil {
			log.Println(err)
		}
		time.Sleep(time.Duration(15) * time.Second)
		totalServer += n
	}
	if t == 3 {
		cmd := exec.Command("./ScaleDown", strconv.Itoa(1))
		if _, err := cmd.Output(); err != nil {
			log.Println(err)
		}
		totalServer -= 1
		time.Sleep(time.Duration(15) * time.Second)
	}
}
// scaler consumes scaling requests: for every action received on
// scaleType it then receives the server count from numServer and runs
// the corresponding scale action. Loops forever (the channels are never
// closed).
func scaler() {
	log.Println("Scaler is Running")
	for t := range scaleType {
		n := <-numServer
		scale(t, n)
	}
}
// UsageAndExit prints the usage text to stderr and terminates the process
// with exit status 1.
func UsageAndExit() {
	fmt.Fprint(os.Stderr, usage)
	os.Exit(1)
}
// main validates arguments, initializes thresholds, channels and windows,
// starts the modeler, scaler, alerter and monitor goroutines, and then
// blocks forever so they can run.
func main() {
	if len(os.Args) != 3 {
		UsageAndExit()
	}
	totalServer = 1
	// BUG FIX: with exactly 3 arguments the valid indices are 0..2, so the
	// previous os.Args[3] always panicked. The URL is the last argument.
	webAddress = os.Args[2]
	kupper = 0.5
	rateupper = 6000.0
	ttlupper = 1.0
	ttllower = 0.3
	curTTL = make(chan float32)
	curRate = make(chan float32)
	proRate = make(chan float32)
	scaleType = make(chan int)
	monRate = make(chan float32, 10)
	numServer = make(chan int)
	hWin = make([]float32, 0, 10)
	pWin = make([]float32, 0, 4)
	go modeler(1)
	go scaler()
	go alerter(15)
	go monitor()
	// BUG FIX: main previously returned immediately, which terminates the
	// process and kills every goroutine just started. Block forever.
	select {}
}
|
package tccpoutputs
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/giantswarm/microerror"
"github.com/giantswarm/aws-operator/service/controller/legacy/v25/cloudformation"
"github.com/giantswarm/aws-operator/service/controller/legacy/v25/controllercontext"
"github.com/giantswarm/aws-operator/service/controller/legacy/v25/key"
)
// Names of the tenant cluster CloudFormation stack outputs read by
// EnsureCreated below (the remaining keys come from the key package).
const (
	HostedZoneNameServersKey  = "HostedZoneNameServers"
	VPCIDKey                  = "VPCID"
	VPCPeeringConnectionIDKey = "VPCPeeringConnectionID"
	WorkerASGNameKey          = "WorkerASGName"
)
// EnsureCreated reads the tenant cluster's main CloudFormation stack
// outputs and copies them into the controller context status. A missing
// stack or outputs that are inaccessible due to the stack status cancel
// the resource without error (the latter also marks TCCP as
// transitioning). Two outputs (VPC ID and peering connection ID) fall
// back to EC2 lookups for clusters upgrading from older releases.
func (r *Resource) EnsureCreated(ctx context.Context, obj interface{}) error {
	cr, err := key.ToCustomObject(obj)
	if err != nil {
		return microerror.Mask(err)
	}
	cc, err := controllercontext.FromContext(ctx)
	if err != nil {
		return microerror.Mask(err)
	}
	// Build a CloudFormation client from the tenant cluster credentials.
	var cloudFormation *cloudformation.CloudFormation
	{
		c := cloudformation.Config{
			Client: cc.Client.TenantCluster.AWS.CloudFormation,
		}
		cloudFormation, err = cloudformation.New(c)
		if err != nil {
			return microerror.Mask(err)
		}
	}
	// Fetch the stack outputs, tolerating a stack that does not exist yet
	// or whose outputs cannot be read in its current status.
	var outputs []cloudformation.Output
	{
		r.logger.LogCtx(ctx, "level", "debug", "message", "finding the tenant cluster cloud formation stack outputs")
		o, s, err := cloudFormation.DescribeOutputsAndStatus(key.MainGuestStackName(cr))
		if cloudformation.IsStackNotFound(err) {
			r.logger.LogCtx(ctx, "level", "debug", "message", "did not find the tenant cluster cloud formation stack outputs")
			r.logger.LogCtx(ctx, "level", "debug", "message", "the tenant cluster cloud formation stack does not exist")
			r.logger.LogCtx(ctx, "level", "debug", "message", "canceling resource")
			return nil
		} else if cloudformation.IsOutputsNotAccessible(err) {
			r.logger.LogCtx(ctx, "level", "debug", "message", "did not find the tenant cluster cloud formation stack outputs")
			r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("the tenant cluster main cloud formation stack output values are not accessible due to stack status %#q", s))
			r.logger.LogCtx(ctx, "level", "debug", "message", "canceling resource")
			cc.Status.TenantCluster.TCCP.IsTransitioning = true
			return nil
		} else if err != nil {
			return microerror.Mask(err)
		}
		outputs = o
		r.logger.LogCtx(ctx, "level", "debug", "message", "found the tenant cluster cloud formation stack outputs")
	}
	// Copy each output value of interest into the controller context.
	{
		v, err := cloudFormation.GetOutputValue(outputs, key.DockerVolumeResourceNameKey)
		if err != nil {
			return microerror.Mask(err)
		}
		cc.Status.TenantCluster.MasterInstance.DockerVolumeResourceName = v
	}
	// Hosted zone name servers only exist when Route53 is enabled.
	if r.route53Enabled {
		v, err := cloudFormation.GetOutputValue(outputs, HostedZoneNameServersKey)
		if err != nil {
			return microerror.Mask(err)
		}
		cc.Status.TenantCluster.HostedZoneNameServers = v
	}
	{
		v, err := cloudFormation.GetOutputValue(outputs, key.MasterImageIDKey)
		if err != nil {
			return microerror.Mask(err)
		}
		cc.Status.TenantCluster.MasterInstance.Image = v
	}
	{
		v, err := cloudFormation.GetOutputValue(outputs, key.MasterInstanceResourceNameKey)
		if err != nil {
			return microerror.Mask(err)
		}
		cc.Status.TenantCluster.MasterInstance.ResourceName = v
	}
	{
		v, err := cloudFormation.GetOutputValue(outputs, key.MasterInstanceTypeKey)
		if err != nil {
			return microerror.Mask(err)
		}
		cc.Status.TenantCluster.MasterInstance.Type = v
	}
	{
		v, err := cloudFormation.GetOutputValue(outputs, key.MasterCloudConfigVersionKey)
		if err != nil {
			return microerror.Mask(err)
		}
		cc.Status.TenantCluster.MasterInstance.CloudConfigVersion = v
	}
	{
		v, err := cloudFormation.GetOutputValue(outputs, WorkerASGNameKey)
		if err != nil {
			return microerror.Mask(err)
		}
		cc.Status.TenantCluster.TCCP.ASG.Name = v
	}
	{
		v, err := cloudFormation.GetOutputValue(outputs, key.VersionBundleVersionKey)
		if err != nil {
			return microerror.Mask(err)
		}
		cc.Status.TenantCluster.VersionBundleVersion = v
	}
	{
		v, err := cloudFormation.GetOutputValue(outputs, VPCIDKey)
		if cloudformation.IsOutputNotFound(err) {
			// TODO this exception is necessary for clusters upgrading from v24 to
			// v25. The code can be cleaned up in v26 and the controller context value
			// assignment can be managed like the other examples below.
			//
			//     https://github.com/giantswarm/giantswarm/issues/5570
			//
			v, err := searchVPCID(cc.Client.TenantCluster.AWS.EC2, key.ClusterID(cr))
			if err != nil {
				return microerror.Mask(err)
			}
			cc.Status.TenantCluster.TCCP.VPC.ID = v
		} else if err != nil {
			return microerror.Mask(err)
		} else {
			cc.Status.TenantCluster.TCCP.VPC.ID = v
		}
	}
	{
		v, err := cloudFormation.GetOutputValue(outputs, VPCPeeringConnectionIDKey)
		if cloudformation.IsOutputNotFound(err) {
			// TODO this exception is necessary for clusters upgrading from v23 to
			// v24. The code can be cleaned up in v25 and the controller context value
			// assignment can be managed like the other examples below.
			//
			//     https://github.com/giantswarm/giantswarm/issues/5496
			//
			v, err := searchPeeringConnectionID(cc.Client.TenantCluster.AWS.EC2, key.ClusterID(cr))
			if err != nil {
				return microerror.Mask(err)
			}
			cc.Status.TenantCluster.TCCP.VPC.PeeringConnectionID = v
		} else if err != nil {
			return microerror.Mask(err)
		} else {
			cc.Status.TenantCluster.TCCP.VPC.PeeringConnectionID = v
		}
	}
	{
		v, err := cloudFormation.GetOutputValue(outputs, key.WorkerCloudConfigVersionKey)
		if err != nil {
			return microerror.Mask(err)
		}
		cc.Status.TenantCluster.WorkerInstance.CloudConfigVersion = v
	}
	{
		v, err := cloudFormation.GetOutputValue(outputs, key.WorkerDockerVolumeSizeKey)
		if err != nil {
			return microerror.Mask(err)
		}
		cc.Status.TenantCluster.WorkerInstance.DockerVolumeSizeGB = v
	}
	{
		v, err := cloudFormation.GetOutputValue(outputs, key.WorkerImageIDKey)
		if err != nil {
			return microerror.Mask(err)
		}
		cc.Status.TenantCluster.WorkerInstance.Image = v
	}
	{
		v, err := cloudFormation.GetOutputValue(outputs, key.WorkerInstanceTypeKey)
		if err != nil {
			return microerror.Mask(err)
		}
		cc.Status.TenantCluster.WorkerInstance.Type = v
	}
	return nil
}
// searchPeeringConnectionID returns the ID of the single active VPC
// peering connection whose Name tag equals clusterID. It is an error for
// the lookup to match anything but exactly one connection.
func searchPeeringConnectionID(client EC2, clusterID string) (string, error) {
	input := &ec2.DescribeVpcPeeringConnectionsInput{
		Filters: []*ec2.Filter{
			{
				Name:   aws.String("status-code"),
				Values: []*string{aws.String("active")},
			},
			{
				Name:   aws.String("tag:Name"),
				Values: []*string{aws.String(clusterID)},
			},
		},
	}
	out, err := client.DescribeVpcPeeringConnections(input)
	if err != nil {
		return "", microerror.Mask(err)
	}
	if n := len(out.VpcPeeringConnections); n != 1 {
		return "", microerror.Maskf(executionFailedError, "expected one vpc peering connection, got %d", n)
	}
	return *out.VpcPeeringConnections[0].VpcPeeringConnectionId, nil
}
// searchVPCID returns the ID of the single VPC whose Name tag equals
// clusterID. It is an error for the lookup to match anything but exactly
// one VPC.
func searchVPCID(client EC2, clusterID string) (string, error) {
	input := &ec2.DescribeVpcsInput{
		Filters: []*ec2.Filter{
			{
				Name:   aws.String("tag:Name"),
				Values: []*string{aws.String(clusterID)},
			},
		},
	}
	out, err := client.DescribeVpcs(input)
	if err != nil {
		return "", microerror.Mask(err)
	}
	if n := len(out.Vpcs); n != 1 {
		return "", microerror.Maskf(executionFailedError, "expected one vpc, got %d", n)
	}
	return *out.Vpcs[0].VpcId, nil
}
|
package core
// Language enumerates the natural languages the indexer understands.
type Language uint8

const (
	English Language = iota
)

// toStopwordsCode returns the stopwords-list code for the language
// ("en" for English), or nil when the language is not supported.
func (lang Language) toStopwordsCode() *string {
	if lang == English {
		code := "en"
		return &code
	}
	return nil
}

// toSnowballCode returns the Snowball stemmer code for the language
// ("english" for English), or nil when the language is not supported.
func (lang Language) toSnowballCode() *string {
	if lang == English {
		code := "english"
		return &code
	}
	return nil
}
|
package logs
import (
	"strings"
	"unicode/utf8"
)
// Application identifies the application emitting the given log.
// Application identifies the application emitting the given log by the
// first recognized marker rune it contains, or "default" when none of
// the markers appear.
func Application(log string) string {
	markers := map[rune]string{
		'❗': "recommendation",
		'🔍': "search",
		'☀': "weather",
	}
	for _, char := range log {
		if app, ok := markers[char]; ok {
			return app
		}
	}
	return "default"
}
// Replace replaces all occurrences of old with new, returning the
// modified log to the caller.
func Replace(log string, old, new rune) string {
	// Use strings.Builder instead of repeated string concatenation, which
	// is quadratic in the length of the log.
	var b strings.Builder
	b.Grow(len(log))
	for _, char := range log {
		if char == old {
			b.WriteRune(new)
		} else {
			b.WriteRune(char)
		}
	}
	return b.String()
}
// WithinLimit reports whether the number of characters (runes, not
// bytes) in log does not exceed limit.
func WithinLimit(log string, limit int) bool {
	count := utf8.RuneCountInString(log)
	return count <= limit
}
|
package oidcsdk
import "gopkg.in/square/go-jose.v2"
// IClient describes a registered OAuth2/OIDC client as consumed by the
// SDK: its identity and secret, redirect URIs, and the scopes and grant
// types it has been approved for.
type IClient interface {
	GetID() string                                  // unique client identifier
	GetSecret() string                              // client secret; presumably empty/unused for public clients — confirm
	IsPublic() bool                                 // true for public (non-confidential) clients
	GetIDTokenSigningAlg() jose.SignatureAlgorithm  // algorithm used to sign ID tokens for this client
	GetRedirectURIs() []string                      // allowed redirect URIs for the authorization flow
	GetPostLogoutRedirectURIs() []string            // allowed post-logout redirect URIs
	GetApprovedScopes() Arguments                   // scopes the client may request
	GetApprovedGrantTypes() Arguments               // grant types the client may use
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.