text
stringlengths 11
4.05M
|
|---|
package engine
import (
"github.com/stretchr/testify/assert"
"github.com/zhenghaoz/gorse/core"
"os"
"path"
"testing"
)
// TestDB_InsertGetFeedback verifies that feedback triples inserted one by one
// can be counted and read back in insertion order.
func TestDB_InsertGetFeedback(t *testing.T) {
	// Create a fresh database file under the shared temp directory.
	db, err := Open(path.Join(core.TempDir, "/test_feedback.db"))
	if err != nil {
		t.Fatal(err)
	}
	// Insert one feedback record per (user, item, value) triple.
	users := []int{0, 1, 2, 3, 4}
	items := []int{0, 2, 4, 6, 8}
	feedback := []float64{0, 3, 6, 9, 12}
	for i := range users {
		if err := db.InsertFeedback(users[i], items[i], feedback[i]); err != nil {
			t.Fatal(err)
		}
	}
	// The feedback count must match the number of inserts.
	count, err := db.CountFeedback()
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, 5, count)
	// Reading everything back must reproduce the inserted triples in order.
	retUsers, retItems, retFeedback, err := db.GetFeedback()
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, users, retUsers)
	assert.Equal(t, items, retItems)
	assert.Equal(t, feedback, retFeedback)
	// Remove the database file so later runs start clean.
	if err = os.Remove(path.Join(core.TempDir, "/test_feedback.db")); err != nil {
		t.Fatal(err)
	}
}
func TestDB_InsertGetItem(t *testing.T) {
// Create database
db, err := Open(path.Join(core.TempDir, "/test_items.db"))
if err != nil {
t.Fatal(err)
}
// Insert feedback
items := []int{0, 2, 4, 6, 8}
for _, itemId := range items {
if err := db.InsertItem(itemId); err != nil {
t.Fatal(err)
}
}
// Count feedback
count, err := db.CountItems()
if err != nil {
t.Fatal(err)
}
assert.Equal(t, 5, count)
// Get feedback
retItems, err := db.GetItems()
if err != nil {
t.Fatal(err)
}
assert.Equal(t, items, retItems)
// Clean database
if err = os.Remove(path.Join(core.TempDir, "/test_items.db")); err != nil {
t.Fatal(err)
}
}
// TestDB_SetGetMeta verifies storing a metadata key, reading it back, and
// that a missing key yields an empty string without an error.
func TestDB_SetGetMeta(t *testing.T) {
	// Create database
	db, err := Open(path.Join(core.TempDir, "/test_meta.db"))
	if err != nil {
		t.Fatal(err)
	}
	// Store key "1" with value "2".
	if err = db.SetMeta("1", "2"); err != nil {
		t.Fatal(err)
	}
	// Read the stored key back.
	value, err := db.GetMeta("1")
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, "2", value)
	// A key that was never set returns "" and no error.
	value, err = db.GetMeta("NULL")
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, "", value)
	// Clean database
	if err = os.Remove(path.Join(core.TempDir, "/test_meta.db")); err != nil {
		t.Fatal(err)
	}
}
// TestDB_GetRandom verifies random item sampling: asking for more items than
// exist returns everything, and asking for fewer returns a sample of the
// requested size.
func TestDB_GetRandom(t *testing.T) {
	// Create database
	db, err := Open(path.Join(core.TempDir, "/test_random.db"))
	if err != nil {
		t.Fatal(err)
	}
	// Insert the item pool to sample from.
	items := []int{0, 2, 4, 6, 8}
	for _, itemId := range items {
		if err := db.InsertItem(itemId); err != nil {
			t.Fatal(err)
		}
	}
	// Repeat to reduce the chance of a lucky pass.
	for i := 0; i < 3; i++ {
		// Requesting more than the pool size returns all items.
		retItems, err := db.GetRandom(10)
		if err != nil {
			t.Fatal(err)
		}
		assert.Equal(t, []RecommendedItem{{ItemId: 0}, {ItemId: 2}, {ItemId: 4}, {ItemId: 6}, {ItemId: 8}}, retItems)
		// Requesting fewer returns exactly that many items.
		items1, err := db.GetRandom(3)
		if err != nil {
			t.Fatal(err)
		}
		items2, err := db.GetRandom(3)
		if err != nil {
			t.Fatal(err)
		}
		assert.Equal(t, 3, len(items1))
		assert.Equal(t, 3, len(items2))
		// NOTE(review): two independent random samples of 3 out of 5 items can
		// legitimately be identical, so this assertion is flaky by construction
		// — consider seeding or retrying before asserting inequality.
		assert.NotEqual(t, items1, items2)
	}
	// Clean database
	if err = os.Remove(path.Join(core.TempDir, "/test_random.db")); err != nil {
		t.Fatal(err)
	}
}
// TestDB_SetGetRecommends verifies storing a user's recommendation list,
// reading it fully or truncated, and that an unknown user is an error.
func TestDB_SetGetRecommends(t *testing.T) {
	// Create database
	db, err := Open(path.Join(core.TempDir, "/test_recommends.db"))
	if err != nil {
		t.Fatal(err)
	}
	// Store the recommendation list for user 0.
	items := []RecommendedItem{{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}}
	if err = db.SetRecommends(0, items); err != nil {
		t.Fatal(err)
	}
	// n == 0 returns the full list.
	retItems, err := db.GetRecommends(0, 0)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, items, retItems)
	// n == 3 returns the first three entries.
	nItems, err := db.GetRecommends(0, 3)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, items[:3], nItems)
	// A user without stored recommendations must yield an error.
	if _, err = db.GetRecommends(1, 0); err == nil {
		t.Fatal("error is expected for new user")
	}
	// Clean database
	if err = os.Remove(path.Join(core.TempDir, "/test_recommends.db")); err != nil {
		t.Fatal(err)
	}
}
// TestDB_SetGetNeighbors verifies storing an item's neighbor list, reading
// it fully or truncated, and that an unknown item is an error.
func TestDB_SetGetNeighbors(t *testing.T) {
	// Create database
	db, err := Open(path.Join(core.TempDir, "/test_neighbors.db"))
	if err != nil {
		t.Fatal(err)
	}
	// Store the neighbor list for item 0.
	items := []RecommendedItem{{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}}
	if err = db.SetNeighbors(0, items); err != nil {
		t.Fatal(err)
	}
	// n == 0 returns the full list.
	retItems, err := db.GetNeighbors(0, 0)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, items, retItems)
	// n == 3 returns the first three entries.
	nItems, err := db.GetNeighbors(0, 3)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, items[:3], nItems)
	// An item without stored neighbors must yield an error.
	if _, err = db.GetNeighbors(1, 0); err == nil {
		t.Fatal("error is expected for new user")
	}
	// Clean database
	if err = os.Remove(path.Join(core.TempDir, "/test_neighbors.db")); err != nil {
		t.Fatal(err)
	}
}
// TestDB_SetGetPopular verifies storing the global popular-item list and
// reading it fully or truncated.
func TestDB_SetGetPopular(t *testing.T) {
	// Create database
	db, err := Open(path.Join(core.TempDir, "/test_popular.db"))
	if err != nil {
		t.Fatal(err)
	}
	// Store the popular-item list (comment fixed: was "Put neighbors").
	items := []RecommendedItem{{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}}
	if err = db.SetPopular(items); err != nil {
		t.Fatal(err)
	}
	// n == 0 returns the full list.
	retItems, err := db.GetPopular(0)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, items, retItems)
	// n == 3 returns the first three entries.
	nItems, err := db.GetPopular(3)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, items[:3], nItems)
	// Clean database
	if err = os.Remove(path.Join(core.TempDir, "/test_popular.db")); err != nil {
		t.Fatal(err)
	}
}
// TestDB_ToDataSet verifies that feedback loaded from CSV converts to a
// DataSet with the expected record, user, and item counts.
func TestDB_ToDataSet(t *testing.T) {
	// Create database
	db, err := Open(path.Join(core.TempDir, "/test_to_dataset.db"))
	if err != nil {
		t.Fatal(err)
	}
	// Load the 5-row explicit-feedback fixture (comma separated, with header).
	if err = db.LoadFeedbackFromCSV("../example/file_data/feedback_explicit_header.csv", ",", true); err != nil {
		t.Fatal(err)
	}
	// Convert the stored feedback into an in-memory dataset.
	dataSet, err := db.ToDataSet()
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, 5, dataSet.Count())
	assert.Equal(t, 5, dataSet.UserCount())
	assert.Equal(t, 5, dataSet.ItemCount())
	// Clean database
	if err = os.Remove(path.Join(core.TempDir, "/test_to_dataset.db")); err != nil {
		t.Fatal(err)
	}
}
// TestDB_LoadFeedbackFromCSV verifies that loading the explicit-feedback CSV
// fixture populates both the feedback table and the item table, and that the
// stored rows follow the fixture's pattern (user i, item 2i, value 3i).
func TestDB_LoadFeedbackFromCSV(t *testing.T) {
	// Create database
	db, err := Open(path.Join(core.TempDir, "/test_load_feedback.db"))
	if err != nil {
		t.Fatal(err)
	}
	// Load the 5-row fixture (comma separated, with header).
	if err = db.LoadFeedbackFromCSV("../example/file_data/feedback_explicit_header.csv", ",", true); err != nil {
		t.Fatal(err)
	}
	// Count feedback
	count, err := db.CountFeedback()
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, 5, count)
	// Each row i must be (user i, item 2i, value 3i).
	users, items, feedback, err := db.GetFeedback()
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < count; i++ {
		assert.Equal(t, i, users[i])
		assert.Equal(t, 2*i, items[i])
		assert.Equal(t, 3*i, int(feedback[i]))
	}
	// Count items (comment fixed: was "Count feedback").
	count, err = db.CountItems()
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, 5, count)
	// Item ids must be the even numbers seen in the feedback.
	items, err = db.GetItems()
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < count; i++ {
		assert.Equal(t, 2*i, items[i])
	}
	// Clean database
	if err = os.Remove(path.Join(core.TempDir, "/test_load_feedback.db")); err != nil {
		t.Fatal(err)
	}
}
// TestDB_LoadItemsFromCSV verifies that loading the item CSV fixture
// ("::"-separated, no header) stores the expected ids 1..5.
func TestDB_LoadItemsFromCSV(t *testing.T) {
	// Create database
	db, err := Open(path.Join(core.TempDir, "/test_load_items.db"))
	if err != nil {
		t.Fatal(err)
	}
	// Load the item fixture.
	if err = db.LoadItemsFromCSV("../example/file_data/items.csv", "::", false); err != nil {
		t.Fatal(err)
	}
	// Count items (comment fixed: was "Count feedback").
	count, err := db.CountItems()
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, 5, count)
	// Item ids are 1..5 in file order.
	items, err := db.GetItems()
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < count; i++ {
		assert.Equal(t, 1+i, items[i])
	}
	// Clean database
	if err = os.Remove(path.Join(core.TempDir, "/test_load_items.db")); err != nil {
		t.Fatal(err)
	}
}
// TestDB_SaveFeedbackToCSV verifies a CSV round trip: feedback loaded from
// the fixture and written back out can be re-read with identical rows.
func TestDB_SaveFeedbackToCSV(t *testing.T) {
	// Create database
	db, err := Open(path.Join(core.TempDir, "/test_save_feedback.db"))
	if err != nil {
		t.Fatal(err)
	}
	// Load the 5-row fixture (comma separated, with header).
	if err = db.LoadFeedbackFromCSV("../example/file_data/feedback_explicit_header.csv", ",", true); err != nil {
		t.Fatal(err)
	}
	// Write the stored feedback back out without a header.
	if err = db.SaveFeedbackToCSV(path.Join(core.TempDir, "test_save_feedback.csv"), ",", false); err != nil {
		t.Fatal(err)
	}
	// Re-read the exported file and check each row follows the fixture
	// pattern (user i, item 2i, value 3i) with dense indices.
	data := core.LoadDataFromCSV(path.Join(core.TempDir, "test_save_feedback.csv"), ",", false)
	assert.Equal(t, 5, data.Count())
	for i := 0; i < data.Count(); i++ {
		userId, itemId, value := data.Get(i)
		userIndex, itemIndex, _ := data.GetWithIndex(i)
		assert.Equal(t, i, userId)
		assert.Equal(t, 2*i, itemId)
		assert.Equal(t, 3*i, int(value))
		assert.Equal(t, i, userIndex)
		assert.Equal(t, i, itemIndex)
	}
	// Clean database
	if err = os.Remove(path.Join(core.TempDir, "/test_save_feedback.db")); err != nil {
		t.Fatal(err)
	}
}
// TestDB_SaveItemsToCSV verifies an item CSV round trip: items loaded from
// the fixture and exported again parse back to ids 1..5.
func TestDB_SaveItemsToCSV(t *testing.T) {
	// Create database
	db, err := Open(path.Join(core.TempDir, "/test_save_items.db"))
	if err != nil {
		t.Fatal(err)
	}
	// Load the item fixture ("::"-separated, no header).
	if err = db.LoadItemsFromCSV("../example/file_data/items.csv", "::", false); err != nil {
		t.Fatal(err)
	}
	// Export the stored items with the same separator and no header.
	if err = db.SaveItemsToCSV(path.Join(core.TempDir, "test_save_items.csv"), "::", false); err != nil {
		t.Fatal(err)
	}
	// Re-read the exported file as entities keyed by "ItemId".
	entities := core.LoadEntityFromCSV(path.Join(core.TempDir, "test_save_items.csv"), "::", "|", false,
		[]string{"ItemId"}, 0)
	expected := []map[string]interface{}{
		{"ItemId": 1},
		{"ItemId": 2},
		{"ItemId": 3},
		{"ItemId": 4},
		{"ItemId": 5},
	}
	assert.Equal(t, expected, entities)
	// Clean database
	if err = os.Remove(path.Join(core.TempDir, "/test_save_items.db")); err != nil {
		t.Fatal(err)
	}
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rollout
import (
"context"
"fmt"
"io"
"github.com/pkg/errors"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/pkg/oam"
kruisev1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/resourcetracker"
velaerrors "github.com/oam-dev/kubevela/pkg/utils/errors"
)
// ClusterRollout pairs a kruise Rollout object with the name of the cluster
// it lives in, so multi-cluster callers know where to send updates.
type ClusterRollout struct {
	*kruisev1alpha1.Rollout
	// Cluster is the managed-cluster name the rollout was fetched from.
	Cluster string
}
// getAssociatedRollouts collects every kruise Rollout tracked by the
// application's resource trackers (root + current, plus history trackers when
// withHistoryRTs is true). Rollouts that no longer exist, live in missing
// clusters, or carry the skip-resume annotation set to "true" are skipped.
func getAssociatedRollouts(ctx context.Context, cli client.Client, app *v1beta1.Application, withHistoryRTs bool) ([]*ClusterRollout, error) {
	rootRT, currentRT, historyRTs, _, err := resourcetracker.ListApplicationResourceTrackers(ctx, cli, app)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to list resource trackers")
	}
	if !withHistoryRTs {
		// Drop history trackers so only the root/current ones are scanned.
		historyRTs = []*v1beta1.ResourceTracker{}
	}
	var rollouts []*ClusterRollout
	for _, rt := range append(historyRTs, rootRT, currentRT) {
		if rt == nil {
			// root/current trackers may be absent.
			continue
		}
		for _, mr := range rt.Spec.ManagedResources {
			if mr.APIVersion == kruisev1alpha1.SchemeGroupVersion.String() && mr.Kind == "Rollout" {
				rollout := &kruisev1alpha1.Rollout{}
				if err = cli.Get(multicluster.ContextWithClusterName(ctx, mr.Cluster), k8stypes.NamespacedName{Namespace: mr.Namespace, Name: mr.Name}, rollout); err != nil {
					// Tolerate deleted objects, vanished clusters, and
					// clusters where the Rollout CRD is not installed.
					if multicluster.IsNotFoundOrClusterNotExists(err) || velaerrors.IsCRDNotExists(err) {
						continue
					}
					return nil, errors.Wrapf(err, "failed to get kruise rollout %s/%s in cluster %s", mr.Namespace, mr.Name, mr.Cluster)
				}
				// Honor the opt-out annotation.
				if value, ok := rollout.Annotations[oam.AnnotationSkipResume]; ok && value == "true" {
					continue
				}
				rollouts = append(rollouts, &ClusterRollout{Rollout: rollout, Cluster: mr.Cluster})
			}
		}
	}
	return rollouts, nil
}
// SuspendRollout finds all rollouts associated with the application
// (including history RTs) and suspends the ones that are actively
// progressing by setting Spec.Strategy.Paused. (Comment fixed: the original
// said "resume them".) Progress messages are written to writer when non-nil.
func SuspendRollout(ctx context.Context, cli client.Client, app *v1beta1.Application, writer io.Writer) error {
	rollouts, err := getAssociatedRollouts(ctx, cli, app, true)
	if err != nil {
		return err
	}
	for i := range rollouts {
		rollout := rollouts[i]
		// Only touch rollouts that are progressing and not already paused.
		if rollout.Status.Phase == kruisev1alpha1.RolloutPhaseProgressing && !rollout.Spec.Strategy.Paused {
			_ctx := multicluster.ContextWithClusterName(ctx, rollout.Cluster)
			rolloutKey := client.ObjectKeyFromObject(rollout.Rollout)
			// Retry on conflict: re-fetch, re-check the condition, then update.
			if err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
				if err = cli.Get(_ctx, rolloutKey, rollout.Rollout); err != nil {
					return err
				}
				if rollout.Status.Phase == kruisev1alpha1.RolloutPhaseProgressing && !rollout.Spec.Strategy.Paused {
					rollout.Spec.Strategy.Paused = true
					if err = cli.Update(_ctx, rollout.Rollout); err != nil {
						return err
					}
					if writer != nil {
						_, _ = fmt.Fprintf(writer, "Rollout %s/%s in cluster %s suspended.\n", rollout.Namespace, rollout.Name, rollout.Cluster)
					}
					return nil
				}
				return nil
			}); err != nil {
				return errors.Wrapf(err, "failed to suspend rollout %s/%s in cluster %s", rollout.Namespace, rollout.Name, rollout.Cluster)
			}
		}
	}
	return nil
}
// ResumeRollout finds all rollouts associated with the application (in the
// current RT) and resumes them: it clears Spec.Strategy.Paused and, for
// canary rollouts waiting at a paused step, advances CurrentStepState to
// ready. Returns true if any rollout was actually modified.
func ResumeRollout(ctx context.Context, cli client.Client, app *v1beta1.Application, writer io.Writer) (bool, error) {
	rollouts, err := getAssociatedRollouts(ctx, cli, app, false)
	if err != nil {
		return false, err
	}
	modified := false
	for i := range rollouts {
		rollout := rollouts[i]
		// A rollout needs resuming if the spec is paused or a canary step is
		// waiting for manual confirmation.
		if rollout.Spec.Strategy.Paused || (rollout.Status.CanaryStatus != nil && rollout.Status.CanaryStatus.CurrentStepState == kruisev1alpha1.CanaryStepStatePaused) {
			_ctx := multicluster.ContextWithClusterName(ctx, rollout.Cluster)
			rolloutKey := client.ObjectKeyFromObject(rollout.Rollout)
			resumed := false
			// Step 1: clear the spec-level pause (regular Update).
			if err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
				if err = cli.Get(_ctx, rolloutKey, rollout.Rollout); err != nil {
					return err
				}
				if rollout.Spec.Strategy.Paused {
					rollout.Spec.Strategy.Paused = false
					if err = cli.Update(_ctx, rollout.Rollout); err != nil {
						return err
					}
					resumed = true
					return nil
				}
				return nil
			}); err != nil {
				return false, errors.Wrapf(err, "failed to resume rollout %s/%s in cluster %s", rollout.Namespace, rollout.Name, rollout.Cluster)
			}
			// Step 2: advance a paused canary step (requires a Status update).
			if err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
				if err = cli.Get(_ctx, rolloutKey, rollout.Rollout); err != nil {
					return err
				}
				if rollout.Status.CanaryStatus != nil && rollout.Status.CanaryStatus.CurrentStepState == kruisev1alpha1.CanaryStepStatePaused {
					rollout.Status.CanaryStatus.CurrentStepState = kruisev1alpha1.CanaryStepStateReady
					if err = cli.Status().Update(_ctx, rollout.Rollout); err != nil {
						return err
					}
					resumed = true
					return nil
				}
				return nil
			}); err != nil {
				return false, errors.Wrapf(err, "failed to resume rollout %s/%s in cluster %s", rollout.Namespace, rollout.Name, rollout.Cluster)
			}
			if resumed {
				modified = true
				if writer != nil {
					_, _ = fmt.Fprintf(writer, "Rollout %s/%s in cluster %s resumed.\n", rollout.Namespace, rollout.Name, rollout.Cluster)
				}
			}
		}
	}
	return modified, nil
}
// RollbackRollout finds all rollouts associated with the application (in the
// current RT) and disables the Spec.Strategy.Paused field so a rollback can
// proceed. Unlike ResumeRollout it does not advance paused canary steps.
// Returns true if any rollout was actually modified.
func RollbackRollout(ctx context.Context, cli client.Client, app *v1beta1.Application, writer io.Writer) (bool, error) {
	rollouts, err := getAssociatedRollouts(ctx, cli, app, false)
	if err != nil {
		return false, err
	}
	modified := false
	for i := range rollouts {
		rollout := rollouts[i]
		if rollout.Spec.Strategy.Paused || (rollout.Status.CanaryStatus != nil && rollout.Status.CanaryStatus.CurrentStepState == kruisev1alpha1.CanaryStepStatePaused) {
			_ctx := multicluster.ContextWithClusterName(ctx, rollout.Cluster)
			rolloutKey := client.ObjectKeyFromObject(rollout.Rollout)
			resumed := false
			// Retry on conflict: re-fetch, re-check, then clear the pause flag.
			if err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
				if err = cli.Get(_ctx, rolloutKey, rollout.Rollout); err != nil {
					return err
				}
				if rollout.Spec.Strategy.Paused {
					rollout.Spec.Strategy.Paused = false
					if err = cli.Update(_ctx, rollout.Rollout); err != nil {
						return err
					}
					resumed = true
					return nil
				}
				return nil
			}); err != nil {
				return false, errors.Wrapf(err, "failed to rollback rollout %s/%s in cluster %s", rollout.Namespace, rollout.Name, rollout.Cluster)
			}
			if resumed {
				modified = true
				if writer != nil {
					_, _ = fmt.Fprintf(writer, "Rollout %s/%s in cluster %s rollback.\n", rollout.Namespace, rollout.Name, rollout.Cluster)
				}
			}
		}
	}
	return modified, nil
}
|
// Copyright (C) 2019 Cisco Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"fmt"
"net"
"strings"
"golang.org/x/sys/unix"
vppip "github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/ip"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/ip_types"
)
// IPProto is a compact wrapper over VPP's ip_types.IPProto values.
type IPProto uint8

// Protocol constants mapped from the generated VPP API bindings.
const (
	TCP   IPProto = IPProto(ip_types.IP_API_PROTO_TCP)
	UDP   IPProto = IPProto(ip_types.IP_API_PROTO_UDP)
	SCTP  IPProto = IPProto(ip_types.IP_API_PROTO_SCTP)
	ICMP  IPProto = IPProto(ip_types.IP_API_PROTO_ICMP)
	ICMP6 IPProto = IPProto(ip_types.IP_API_PROTO_ICMP6)
	// INVALID marks an unset/unsupported protocol (VPP "reserved").
	INVALID IPProto = IPProto(ip_types.IP_API_PROTO_RESERVED)
)
// UnmarshalText implements encoding.TextUnmarshaler. It recognizes "tcp"
// and "udp"; every other value falls back to TCP and never errors.
func (mode *IPProto) UnmarshalText(text []byte) error {
	if string(text) == "udp" {
		*mode = UDP
		return nil
	}
	// "tcp" and any unrecognized value both map to TCP.
	*mode = TCP
	return nil
}
// IPFlowHash is a bit-flag selector for the fields VPP hashes when
// load-balancing flows, mirroring the generated vppip constants.
type IPFlowHash uint8

// Flow-hash field flags mapped from the generated VPP API bindings.
const (
	FlowHashSrcIP     IPFlowHash = IPFlowHash(vppip.IP_API_FLOW_HASH_SRC_IP)
	FlowHashDstIP     IPFlowHash = IPFlowHash(vppip.IP_API_FLOW_HASH_DST_IP)
	FlowHashSrcPort   IPFlowHash = IPFlowHash(vppip.IP_API_FLOW_HASH_SRC_PORT)
	FlowHashDstPort   IPFlowHash = IPFlowHash(vppip.IP_API_FLOW_HASH_DST_PORT)
	FlowHashProto     IPFlowHash = IPFlowHash(vppip.IP_API_FLOW_HASH_PROTO)
	FlowHashReverse   IPFlowHash = IPFlowHash(vppip.IP_API_FLOW_HASH_REVERSE)
	FlowHashSymetric  IPFlowHash = IPFlowHash(vppip.IP_API_FLOW_HASH_SYMETRIC)
	FlowHashFlowLabel IPFlowHash = IPFlowHash(vppip.IP_API_FLOW_HASH_FLOW_LABEL)
)
const (
	// Address family selectors matching the platform socket constants.
	FAMILY_ALL = unix.AF_UNSPEC
	FAMILY_V4  = unix.AF_INET
	FAMILY_V6  = unix.AF_INET6
)
// IfAddress binds an IP network to the VPP software interface it is
// configured on.
type IfAddress struct {
	IPNet     net.IPNet
	SwIfIndex uint32 // VPP software interface index
}
// IpPuntRedirect describes where punted packets received on RxSwIfIndex
// should be redirected.
type IpPuntRedirect struct {
	RxSwIfIndex uint32 // receiving interface; packets punted here are redirected
	IsIP6       bool
	Paths       []RoutePath // redirect destinations
}
// VRF identifies a VPP routing table by id, name, and address family.
type VRF struct {
	Name  string
	VrfID uint32
	IsIP6 bool
}
// GetIPFamily classifies ip as FAMILY_V4 or FAMILY_V6. A buffer no longer
// than 4 bytes, or any address with a valid IPv4 form, counts as IPv4.
func GetIPFamily(ip net.IP) int {
	isV4 := len(ip) <= net.IPv4len || ip.To4() != nil
	if isV4 {
		return FAMILY_V4
	}
	return FAMILY_V6
}
// GetBoolIPFamily converts an is-IPv6 flag to the VPP address family enum.
func GetBoolIPFamily(isIP6 bool) ip_types.AddressFamily {
	family := ip_types.ADDRESS_IP4
	if isIP6 {
		family = ip_types.ADDRESS_IP6
	}
	return family
}
// IsIP4 reports whether GetIPFamily classifies ip as IPv4.
func IsIP4(ip net.IP) bool {
	return GetIPFamily(ip) == FAMILY_V4
}
// IsIP6 reports whether GetIPFamily classifies ip as IPv6.
func IsIP6(ip net.IP) bool {
	return GetIPFamily(ip) == FAMILY_V6
}
// String returns the upper-case protocol name, or "???" for values without
// a known name (including INVALID).
func (proto IPProto) String() string {
	switch proto {
	case TCP:
		return "TCP"
	case UDP:
		return "UDP"
	case SCTP:
		return "SCTP"
	case ICMP:
		return "ICMP"
	case ICMP6:
		return "ICMP6"
	}
	return "???"
}
// UnformatProto parses a case-insensitive protocol name into an IPProto,
// returning an error for unknown names.
func UnformatProto(proto string) (IPProto, error) {
	var parsed IPProto
	switch strings.ToUpper(proto) {
	case "UDP":
		parsed = UDP
	case "TCP":
		parsed = TCP
	case "SCTP":
		parsed = SCTP
	case "ICMP":
		parsed = ICMP
	case "ICMP6":
		parsed = ICMP6
	default:
		return IPProto(0), fmt.Errorf("unknown proto %s", proto)
	}
	return parsed, nil
}
// ToVppIPProto converts our IPProto back to the generated VPP enum type.
func ToVppIPProto(proto IPProto) ip_types.IPProto {
	return ip_types.IPProto(proto)
}
// ToVppIP4Address converts addr to a fixed 4-byte VPP IPv4 address.
// Make sure you really call this with an IPv4 address: if To4() returns nil
// the result is the zero address.
func ToVppIP4Address(addr net.IP) ip_types.IP4Address {
	ip := [4]uint8{}
	copy(ip[:], addr.To4())
	return ip
}
// ToVppIP6Address converts addr to a fixed 16-byte VPP IPv6 address.
// NOTE(review): addr is copied as-is; a 4-byte IPv4 slice would land in the
// first 4 bytes rather than being mapped — callers presumably pass 16-byte
// addresses. TODO confirm.
func ToVppIP6Address(addr net.IP) ip_types.IP6Address {
	ip := [16]uint8{}
	copy(ip[:], addr)
	return ip
}
// ToVppAddress converts a net.IP into a VPP Address union, choosing the
// IPv4 representation whenever addr has a valid 4-byte form.
func ToVppAddress(addr net.IP) ip_types.Address {
	var a ip_types.Address
	if ip4 := addr.To4(); ip4 != nil {
		a.Af = ip_types.ADDRESS_IP4
		var buf [4]uint8
		copy(buf[:], ip4)
		a.Un = ip_types.AddressUnionIP4(buf)
		return a
	}
	a.Af = ip_types.ADDRESS_IP6
	var buf [16]uint8
	copy(buf[:], addr)
	a.Un = ip_types.AddressUnionIP6(buf)
	return a
}
// FromVppIpAddressUnion extracts a net.IP from a VPP address union; isv6
// selects which arm of the union to read.
func FromVppIpAddressUnion(Un ip_types.AddressUnion, isv6 bool) net.IP {
	if !isv6 {
		v4 := Un.GetIP4()
		return net.IP(v4[:])
	}
	v6 := Un.GetIP6()
	return net.IP(v6[:])
}
// FromVppAddress converts a VPP Address (family + union) into a net.IP.
func FromVppAddress(addr ip_types.Address) net.IP {
	return FromVppIpAddressUnion(addr.Un, addr.Af == ip_types.ADDRESS_IP6)
}
// ToVppAddressWithPrefix converts a net.IPNet to VPP's AddressWithPrefix,
// which is just a renamed Prefix.
func ToVppAddressWithPrefix(prefix *net.IPNet) ip_types.AddressWithPrefix {
	return ip_types.AddressWithPrefix(ToVppPrefix(prefix))
}
// ToVppPrefix converts a net.IPNet into a VPP Prefix (address + mask length).
func ToVppPrefix(prefix *net.IPNet) ip_types.Prefix {
	ones, _ := prefix.Mask.Size()
	return ip_types.Prefix{
		Address: ToVppAddress(prefix.IP),
		Len:     uint8(ones),
	}
}
// FromVppAddressWithPrefix converts VPP's AddressWithPrefix back to a
// net.IPNet via FromVppPrefix.
func FromVppAddressWithPrefix(prefix ip_types.AddressWithPrefix) *net.IPNet {
	return FromVppPrefix(ip_types.Prefix(prefix))
}
// FromVppPrefix converts a VPP Prefix into a net.IPNet, sizing the mask by
// the prefix's address family (32 bits for IPv4, 128 for IPv6).
func FromVppPrefix(prefix ip_types.Prefix) *net.IPNet {
	bits := 32
	if prefix.Address.Af == ip_types.ADDRESS_IP6 {
		bits = 128
	}
	mask := net.CIDRMask(int(prefix.Len), bits)
	return &net.IPNet{
		IP:   FromVppAddress(prefix.Address),
		Mask: mask,
	}
}
// ToVppAddressFamily converts an is-IPv6 flag to the VPP address family
// enum. NOTE(review): duplicates GetBoolIPFamily above — consider merging.
func ToVppAddressFamily(isv6 bool) ip_types.AddressFamily {
	if isv6 {
		return ip_types.ADDRESS_IP6
	}
	return ip_types.ADDRESS_IP4
}
|
package main
import (
"Lab1/internal/pgk/Person/Delivery"
"Lab1/internal/pgk/Person/Repository"
"Lab1/internal/pgk/Person/Usecase"
"Lab1/internal/pgk/middleware"
"context"
"github.com/gorilla/mux"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/joho/godotenv"
"log"
"net/http"
"os"
"time"
)
// init loads environment variables from a .env file if one exists; a missing
// file is logged but not fatal, since the variables may already be set.
func init() {
	if err := godotenv.Load(); err != nil {
		log.Print("No .env file found")
	}
}
// main wires the Person repository/usecase/handler stack onto a mux router
// and serves it on :5000. The database DSN comes from the "forDataBase"
// environment variable.
func main() {
	dsn, found := os.LookupEnv("forDataBase")
	if !found {
		log.Fatal("connection string was not found")
	}
	pool, err := pgxpool.Connect(context.Background(), dsn)
	if err != nil {
		log.Fatal("database connection not established")
	}
	// Assemble the layers: repository -> usecase -> HTTP delivery.
	repo := Repository.NewPersonRepository(*pool)
	uc := Usecase.NewPersonUsecase(repo)
	handler := Delivery.NewPersonHandler(uc)
	router := mux.NewRouter()
	router.Use(middleware.InternalServerError)
	router.HandleFunc("/persons/{personID}", handler.Read).Methods("GET")
	router.HandleFunc("/persons", handler.ReadAll).Methods("GET")
	router.HandleFunc("/persons", handler.Create).Methods("POST")
	router.HandleFunc("/persons/{personID}", handler.Update).Methods("PATCH")
	router.HandleFunc("/persons/{personID}", handler.Delete).Methods("DELETE")
	srv := &http.Server{
		Handler:      router,
		Addr:         ":5000",
		WriteTimeout: 15 * time.Second,
		ReadTimeout:  15 * time.Second,
	}
	log.Print("Server running at ", srv.Addr)
	log.Fatal(srv.ListenAndServe())
}
|
// ˅
package main
// ˄
// IData is a Composite-pattern container: it embeds Item (so a container is
// itself an Item) and can hold child Items via Add.
type IData interface {
	Item
	// Add appends a child item to this container.
	Add(item Item)
	// ˅
	// ˄
}
// ˅
// ˄
|
package resolver
import (
"context"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
)
// Probe observes the connectivity state of a single gRPC endpoint.
type Probe struct {
	addr   string             // target address that was dialed
	conn   *grpc.ClientConn   // connection whose state transitions are watched
	ctx    context.Context    // governs the probe's lifetime
	cancel context.CancelFunc // stops the probe; invoked by close()
}
// newProbe dials addr (non-blocking, insecure transport) and returns a Probe
// whose context expires after timeout. The caller must release it via close().
//
// Fixes two defects in the original:
//   - timeout was accepted but ignored (context.WithCancel), which made the
//     DeadlineExceeded branch in exec() unreachable; use WithTimeout so a
//     stalled endpoint eventually reports TransientFailure.
//   - the CancelFunc leaked when DialContext failed; cancel before returning.
func newProbe(addr string, timeout time.Duration) (*Probe, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure())
	if err != nil {
		cancel() // release the context's resources on the error path
		return nil, err
	}
	return &Probe{
		addr:   addr,
		conn:   conn,
		ctx:    ctx,
		cancel: cancel,
	}, nil
}
// exec starts a goroutine that emits the connection's state, then every
// subsequent state transition, on the returned (unbuffered) channel. The
// channel is closed when the probe's context ends; if the context ended with
// DeadlineExceeded, a final TransientFailure is sent first.
func (p *Probe) exec() chan connectivity.State {
	out := make(chan connectivity.State)
	go func() {
		defer close(out)
		for {
			current := p.conn.GetState()
			out <- current
			// WaitForStateChange returns false once p.ctx is done.
			ok := p.conn.WaitForStateChange(p.ctx, current)
			if !ok {
				if p.ctx.Err() == context.DeadlineExceeded {
					// NOTE(review): this branch only fires when the probe's
					// context carries a deadline; with a plain WithCancel
					// context it is unreachable.
					out <- connectivity.TransientFailure
				}
				return
			}
		}
	}()
	return out
}
// close stops the probe by canceling its context and closing the underlying
// connection, if one was established.
func (p *Probe) close() {
	p.cancel()
	if conn := p.conn; conn != nil {
		_ = conn.Close() // best-effort; probe is being torn down anyway
	}
}
|
package main
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"strings"
"time"
"github.com/BurntSushi/toml"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/upload/azure"
"github.com/osbuild/osbuild-composer/internal/upload/koji"
"github.com/osbuild/osbuild-composer/internal/worker"
)
const configFile = "/etc/osbuild-worker/osbuild-worker.toml"
// connectionConfig holds the file paths for mutual-TLS material used when
// connecting to osbuild-composer.
type connectionConfig struct {
	CACertFile     string // PEM file with the root CA certificate(s)
	ClientKeyFile  string // PEM file with the client private key
	ClientCertFile string // PEM file with the client certificate
}
// JobImplementation represents the implementation of a job type as defined
// by the worker API: Run executes the given job to completion.
type JobImplementation interface {
	Run(job worker.Job) error
}
// createTLSConfig builds a client-side mutual-TLS configuration from the CA
// certificate and client key pair referenced by config.
func createTLSConfig(config *connectionConfig) (*tls.Config, error) {
	caCertPEM, err := ioutil.ReadFile(config.CACertFile)
	if err != nil {
		return nil, err
	}
	// The CA bundle must contain at least one parsable certificate.
	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(caCertPEM) {
		return nil, errors.New("failed to append root certificate")
	}
	clientCert, err := tls.LoadX509KeyPair(config.ClientCertFile, config.ClientKeyFile)
	if err != nil {
		return nil, err
	}
	cfg := &tls.Config{
		RootCAs:      roots,
		Certificates: []tls.Certificate{clientCert},
	}
	return cfg, nil
}
// WatchJob regularly (every 15s) asks osbuild-composer if the compose we're
// currently working on was canceled and exits the process if it was.
// It would be cleaner to kill the osbuild process using (`exec.CommandContext`
// or similar), but osbuild does not currently support this. Exiting here will
// make systemd clean up the whole cgroup and restart this service.
// Cancel ctx to stop watching (used when the job finishes normally).
func WatchJob(ctx context.Context, job worker.Job) {
	for {
		select {
		case <-time.After(15 * time.Second):
			canceled, err := job.Canceled()
			// Poll errors are ignored: the next tick retries.
			if err == nil && canceled {
				log.Println("Job was canceled. Exiting.")
				os.Exit(0)
			}
		case <-ctx.Done():
			return
		}
	}
}
// main parses flags and the worker TOML config, connects to osbuild-composer
// (unix socket, OAuth, or mutual TLS), loads cloud credentials early, then
// loops forever requesting and running jobs.
func main() {
	// Optional configuration loaded from /etc/osbuild-worker/osbuild-worker.toml.
	var config struct {
		KojiServers map[string]struct {
			Kerberos *struct {
				Principal string `toml:"principal"`
				KeyTab    string `toml:"keytab"`
			} `toml:"kerberos,omitempty"`
		} `toml:"koji"`
		GCP *struct {
			Credentials string `toml:"credentials"`
		} `toml:"gcp"`
		Azure *struct {
			Credentials string `toml:"credentials"`
		} `toml:"azure"`
		Authentication *struct {
			OAuthURL         string `toml:"oauth_url"`
			OfflineTokenPath string `toml:"offline_token"`
		} `toml:"authentication"`
	}
	var unix bool
	flag.BoolVar(&unix, "unix", false, "Interpret 'address' as a path to a unix domain socket instead of a network address")
	flag.Usage = func() {
		fmt.Fprintf(flag.CommandLine.Output(), "Usage: %s [-unix] address\n", os.Args[0])
		flag.PrintDefaults()
		os.Exit(0)
	}
	flag.Parse()
	// The single positional argument is the composer address; an empty one
	// triggers Usage, which exits.
	address := flag.Arg(0)
	if address == "" {
		flag.Usage()
	}
	// A missing config file is fine (defaults apply); any other error is fatal.
	_, err := toml.DecodeFile(configFile, &config)
	if err == nil {
		log.Println("Composer configuration:")
		encoder := toml.NewEncoder(log.Writer())
		err := encoder.Encode(&config)
		if err != nil {
			log.Fatalf("Could not print config: %v", err)
		}
	} else if !os.IsNotExist(err) {
		log.Fatalf("Could not load config file '%s': %v", configFile, err)
	}
	// CACHE_DIRECTORY is provided by systemd's CacheDirectory= directive.
	cacheDirectory, ok := os.LookupEnv("CACHE_DIRECTORY")
	if !ok {
		log.Fatal("CACHE_DIRECTORY is not set. Is the service file missing CacheDirectory=?")
	}
	store := path.Join(cacheDirectory, "osbuild-store")
	output := path.Join(cacheDirectory, "output")
	_ = os.Mkdir(output, os.ModeDir)
	// Collect Kerberos credentials per koji server.
	kojiServers := make(map[string]koji.GSSAPICredentials)
	for server, creds := range config.KojiServers {
		if creds.Kerberos == nil {
			// For now we only support Kerberos authentication.
			continue
		}
		kojiServers[server] = koji.GSSAPICredentials{
			Principal: creds.Kerberos.Principal,
			KeyTab:    creds.Kerberos.KeyTab,
		}
	}
	// Pick the transport: unix socket, OAuth offline token, or mutual TLS.
	var client *worker.Client
	if unix {
		client = worker.NewClientUnix(address)
	} else if config.Authentication != nil && config.Authentication.OfflineTokenPath != "" {
		t, err := ioutil.ReadFile(config.Authentication.OfflineTokenPath)
		if err != nil {
			log.Fatalf("Could not read offline token: %v", err)
		}
		token := strings.TrimSpace(string(t))
		if config.Authentication.OAuthURL == "" {
			log.Fatal("OAuth URL should be specified together with the offline token")
		}
		client, err = worker.NewClient("https://"+address, nil, &token, &config.Authentication.OAuthURL)
		if err != nil {
			log.Fatalf("Error creating worker client: %v", err)
		}
	} else {
		conf, err := createTLSConfig(&connectionConfig{
			CACertFile:     "/etc/osbuild-composer/ca-crt.pem",
			ClientKeyFile:  "/etc/osbuild-composer/worker-key.pem",
			ClientCertFile: "/etc/osbuild-composer/worker-crt.pem",
		})
		if err != nil {
			log.Fatalf("Error creating TLS config: %v", err)
		}
		client, err = worker.NewClient("https://"+address, conf, nil, nil)
		if err != nil {
			log.Fatalf("Error creating worker client: %v", err)
		}
	}
	// Load Azure credentials early. If the credentials file is malformed,
	// we can report the issue early instead of waiting for the first osbuild
	// job with the org.osbuild.azure.image target.
	var azureCredentials *azure.Credentials
	if config.Azure != nil {
		azureCredentials, err = azure.ParseAzureCredentialsFile(config.Azure.Credentials)
		if err != nil {
			log.Fatalf("cannot load azure credentials: %v", err)
		}
	}
	// Check if the credentials file was provided in the worker configuration,
	// and load it early to prevent potential failure due to issues with the file.
	// Note that the content validity of the provided file is not checked and
	// can not be reasonable checked with GCP other than by making real API calls.
	var gcpCredentials []byte
	if config.GCP != nil {
		gcpCredentials, err = ioutil.ReadFile(config.GCP.Credentials)
		if err != nil {
			log.Fatalf("cannot load GCP credentials: %v", err)
		}
	}
	// Registry of job types this worker can execute.
	jobImpls := map[string]JobImplementation{
		"osbuild": &OSBuildJobImpl{
			Store:       store,
			Output:      output,
			KojiServers: kojiServers,
			GCPCreds:    gcpCredentials,
			AzureCreds:  azureCredentials,
		},
		"osbuild-koji": &OSBuildKojiJobImpl{
			Store:       store,
			Output:      output,
			KojiServers: kojiServers,
		},
		"koji-init": &KojiInitJobImpl{
			KojiServers: kojiServers,
		},
		"koji-finalize": &KojiFinalizeJobImpl{
			KojiServers: kojiServers,
		},
	}
	acceptedJobTypes := []string{}
	for jt := range jobImpls {
		acceptedJobTypes = append(acceptedJobTypes, jt)
	}
	// Main loop: request a job, run it with a cancellation watcher, repeat.
	for {
		fmt.Println("Waiting for a new job...")
		job, err := client.RequestJob(acceptedJobTypes, common.CurrentArch())
		if err != nil {
			log.Fatal(err)
		}
		impl, exists := jobImpls[job.Type()]
		if !exists {
			log.Printf("Ignoring job with unknown type %s", job.Type())
			continue
		}
		fmt.Printf("Running '%s' job %v\n", job.Type(), job.Id())
		// WatchJob exits the process if composer cancels the job mid-run.
		ctx, cancelWatcher := context.WithCancel(context.Background())
		go WatchJob(ctx, job)
		err = impl.Run(job)
		cancelWatcher()
		if err != nil {
			log.Printf("Job %s failed: %v", job.Id(), err)
			continue
		}
		log.Printf("Job %s finished", job.Id())
	}
}
|
package tstune
import (
"bytes"
"fmt"
"io"
"os"
"strings"
"testing"
"github.com/timescale/timescaledb-tune/pkg/pgtune"
"github.com/timescale/timescaledb-tune/pkg/pgutils"
)
// stringSliceToBytesReader joins lines with newline separators and wraps the
// result in a readable bytes.Buffer for use as test input.
func stringSliceToBytesReader(lines []string) *bytes.Buffer {
	var buf bytes.Buffer
	for i, line := range lines {
		if i > 0 {
			buf.WriteByte('\n')
		}
		buf.WriteString(line)
	}
	return &buf
}
// TestRemoveDuplicatesProcessor verifies that the processor marks every
// earlier occurrence of a key for removal, keeping only the latest one.
func TestRemoveDuplicatesProcessor(t *testing.T) {
	lines := []*configLine{
		{content: "foo = 'bar'"},
		{content: "foo = 'baz'"},
		{content: "foo = 'quaz'"},
	}
	p := &removeDuplicatesProcessor{regex: keyToRegexQuoted("foo")}
	// The first (and so far only) occurrence must be kept.
	p.Process(lines[0])
	if lines[0].remove {
		t.Errorf("first instance incorrectly marked for remove")
	}
	// check processes line idx and asserts the previous occurrence is now
	// marked for removal while the newest one is kept.
	check := func(idx int) {
		err := p.Process(lines[idx])
		if err != nil {
			t.Errorf("unexpected error on test %d: %v", idx, err)
		}
		if !lines[idx-1].remove {
			t.Errorf("configLine not marked to remove on test %d", idx)
		}
		if lines[idx].remove {
			t.Errorf("configLine incorrectly marked to remove on test %d", idx)
		}
	}
	check(1)
	check(2)
}
// TestGetRemoveDuplicatesProcessors verifies that one processor is created
// per key and each carries the quoted-key regex for its key.
func TestGetRemoveDuplicatesProcessors(t *testing.T) {
	cases := []struct {
		desc string
		keys []string
	}{
		{
			desc: "no keys",
			keys: []string{},
		},
		{
			desc: "one key",
			keys: []string{"foo"},
		},
		{
			desc: "two keys",
			keys: []string{"foo", "bar"},
		},
	}
	for _, c := range cases {
		procs := getRemoveDuplicatesProcessors(c.keys)
		if got := len(procs); got != len(c.keys) {
			t.Errorf("%s: incorrect length: got %d want %d", c.desc, got, len(c.keys))
		} else {
			// Processor i must hold the regex built from key i.
			for i, key := range c.keys {
				rdp := procs[i].(*removeDuplicatesProcessor)
				want := keyToRegexQuoted(key).String()
				if got := rdp.regex.String(); got != want {
					t.Errorf("%s: incorrect proc at %d: got %s want %s", c.desc, i, got, want)
				}
			}
		}
	}
}
// TestGetConfigFilePath exercises config-file discovery across operating
// systems and packaging layouts by stubbing out osStatFn so only the files
// listed in each case appear to exist.
func TestGetConfigFilePath(t *testing.T) {
	cases := []struct {
		desc      string
		os        string
		pgVersion string
		files     []string
		wantFile  string
		shouldErr bool
	}{
		{
			desc:      "mac - yes",
			os:        osMac,
			files:     []string{fileNameMac},
			wantFile:  fileNameMac,
			shouldErr: false,
		},
		{
			desc:      "mac - no",
			os:        osMac,
			files:     []string{"/etc"},
			wantFile:  "",
			shouldErr: true,
		},
		{
			desc:      "linux - pg10+debian",
			os:        osLinux,
			pgVersion: pgutils.MajorVersion10,
			files:     []string{fmt.Sprintf(fileNameDebianFmt, "10")},
			wantFile:  fmt.Sprintf(fileNameDebianFmt, "10"),
			shouldErr: false,
		},
		{
			desc:      "linux - pg9.6+debian",
			os:        osLinux,
			pgVersion: pgutils.MajorVersion96,
			files:     []string{fmt.Sprintf(fileNameDebianFmt, "9.6")},
			wantFile:  fmt.Sprintf(fileNameDebianFmt, "9.6"),
			shouldErr: false,
		},
		{
			desc:      "linux - mismatch+debian",
			os:        osLinux,
			pgVersion: pgutils.MajorVersion96,
			files:     []string{fmt.Sprintf(fileNameDebianFmt, "10")},
			wantFile:  "",
			shouldErr: true,
		},
		{
			desc:      "linux - pg10+rpm",
			os:        osLinux,
			pgVersion: pgutils.MajorVersion10,
			files:     []string{fmt.Sprintf(fileNameRPMFmt, "10")},
			wantFile:  fmt.Sprintf(fileNameRPMFmt, "10"),
			shouldErr: false,
		},
		{
			// FIX: this case previously used fileNameDebianFmt, making it a
			// duplicate of "linux - pg9.6+debian" and leaving the 9.6 RPM
			// layout untested.
			desc:      "linux - pg9.6+rpm",
			os:        osLinux,
			pgVersion: pgutils.MajorVersion96,
			files:     []string{fmt.Sprintf(fileNameRPMFmt, "9.6")},
			wantFile:  fmt.Sprintf(fileNameRPMFmt, "9.6"),
			shouldErr: false,
		},
		{
			desc:      "linux - mismatch+rpm",
			os:        osLinux,
			pgVersion: pgutils.MajorVersion96,
			files:     []string{fmt.Sprintf(fileNameRPMFmt, "10")},
			wantFile:  "",
			shouldErr: true,
		},
		{
			desc:      "linux - arch",
			os:        osLinux,
			files:     []string{fileNameArch},
			wantFile:  fileNameArch,
			shouldErr: false,
		},
		{
			desc:      "linux - alpine",
			os:        osLinux,
			files:     []string{fileNameAlpine},
			wantFile:  fileNameAlpine,
			shouldErr: false,
		},
		{
			desc:      "linux - no",
			os:        osLinux,
			files:     []string{fmt.Sprintf(fileNameDebianFmt, "9.0")},
			wantFile:  "",
			shouldErr: true,
		},
	}
	// FIX: restore the real stat function via defer so a mid-loop failure
	// cannot leak the stub into other tests.
	oldOSStatFn := osStatFn
	defer func() { osStatFn = oldOSStatFn }()
	for _, c := range cases {
		c := c // capture for the closure below (pre-Go 1.22 loop semantics)
		// Pretend exactly the files listed in the case exist on disk.
		osStatFn = func(fn string) (os.FileInfo, error) {
			for _, s := range c.files {
				if fn == s {
					return nil, nil
				}
			}
			return nil, os.ErrNotExist
		}
		filename, err := getConfigFilePath(c.os, c.pgVersion)
		if err != nil && !c.shouldErr {
			t.Errorf("%s: unexpected error: %v", c.desc, err)
		} else if err == nil && c.shouldErr {
			t.Errorf("%s: unexpected lack of error", c.desc)
		}
		if c.shouldErr && filename != "" {
			t.Errorf("%s: unexpected filename in error case: got %s", c.desc, filename)
		}
		if got := filename; got != c.wantFile {
			t.Errorf("%s: incorrect filename: got %s want %s", c.desc, got, c.wantFile)
		}
	}
}
// newConfigFileStateFromSlice parses the given lines as a config file and
// returns the resulting state, failing the test immediately on parse errors.
func newConfigFileStateFromSlice(t *testing.T, lines []string) *configFileState {
	reader := stringSliceToBytesReader(lines)
	state, err := getConfigFileState(reader)
	if err != nil {
		t.Fatalf("could not parse config lines: %v\nlines: %v", err, lines)
	}
	return state
}
// TestGetConfigFileState checks that parsing a config file captures every
// line verbatim, records tunable-parameter parse results by key, and
// detects the shared_preload_libraries line.
func TestGetConfigFileState(t *testing.T) {
	sharedLibLine := "shared_preload_libraries = 'timescaledb' # comment"
	memoryLine := "#shared_buffers = 64MB"
	walLine := "min_wal_size = 0GB # weird"
	cases := []struct {
		desc  string
		lines []string
		want  *configFileState
	}{
		{
			desc:  "empty file",
			lines: []string{},
			want: &configFileState{
				lines:            []*configLine{},
				tuneParseResults: make(map[string]*tunableParseResult),
				sharedLibResult:  nil,
			},
		},
		{
			desc:  "single irrelevant line",
			lines: []string{"foo"},
			want: &configFileState{
				lines:            []*configLine{{content: "foo"}},
				tuneParseResults: make(map[string]*tunableParseResult),
				sharedLibResult:  nil,
			},
		},
		{
			desc:  "shared lib line only",
			lines: []string{sharedLibLine},
			want: &configFileState{
				lines:            []*configLine{{content: sharedLibLine}},
				tuneParseResults: make(map[string]*tunableParseResult),
				sharedLibResult: &sharedLibResult{
					idx:          0,
					commented:    false,
					hasTimescale: true,
					commentGroup: "",
					libs:         "timescaledb",
				},
			},
		},
		{
			desc:  "multi-line",
			lines: []string{"foo", sharedLibLine, "bar", memoryLine, walLine, "baz"},
			want: &configFileState{
				lines: []*configLine{
					{content: "foo"},
					{content: sharedLibLine},
					{content: "bar"},
					{content: memoryLine},
					{content: walLine},
					{content: "baz"},
				},
				// idx values are the 0-based positions of memoryLine (3)
				// and walLine (4) in the lines slice above.
				tuneParseResults: map[string]*tunableParseResult{
					pgtune.SharedBuffersKey: {
						idx:       3,
						commented: true,
						key:       pgtune.SharedBuffersKey,
						value:     "64MB",
						extra:     "",
					},
					pgtune.MinWALKey: {
						idx:       4,
						commented: false,
						key:       pgtune.MinWALKey,
						value:     "0GB",
						extra:     " # weird",
					},
				},
				sharedLibResult: &sharedLibResult{
					idx:          1,
					commented:    false,
					hasTimescale: true,
					commentGroup: "",
					libs:         "timescaledb",
				},
			},
		},
	}
	for _, c := range cases {
		cfs := newConfigFileStateFromSlice(t, c.lines)
		// Lines must round-trip exactly and in order.
		if got := len(cfs.lines); got != len(c.want.lines) {
			t.Errorf("%s: incorrect number of cfs lines: got %d want %d", c.desc, got, len(c.want.lines))
		} else {
			for i, got := range cfs.lines {
				if want := c.want.lines[i].content; got.content != want {
					t.Errorf("%s: incorrect line at %d: got\n%s\nwant\n%s", c.desc, i, got.content, want)
				}
			}
		}
		// Structs are compared via their %v formatting to avoid spelling
		// out field-by-field comparisons.
		if c.want.sharedLibResult != nil {
			if cfs.sharedLibResult == nil {
				t.Errorf("%s: unexpected nil shared lib result", c.desc)
			} else {
				want := fmt.Sprintf("%v", c.want.sharedLibResult)
				if got := fmt.Sprintf("%v", cfs.sharedLibResult); got != want {
					t.Errorf("%s: incorrect sharedLibResult: got %s want %s", c.desc, got, want)
				}
			}
		}
		if len(c.want.tuneParseResults) > 0 {
			if got := len(cfs.tuneParseResults); got != len(c.want.tuneParseResults) {
				t.Errorf("%s: incorrect tuneParseResults size: got %d want %d", c.desc, got, len(c.want.tuneParseResults))
			} else {
				for k, v := range c.want.tuneParseResults {
					want := fmt.Sprintf("%v", v)
					if got, ok := cfs.tuneParseResults[k]; fmt.Sprintf("%v", got) != want || !ok {
						t.Errorf("%s: incorrect tuneParseResults for %s: got %s want %s", c.desc, k, fmt.Sprintf("%v", got), want)
					}
				}
			}
		}
	}
}
// errReader is an io.Reader stub: its first two reads each write a single
// newline into the final byte of p and report one byte read; every call
// after that fails with an error.
type errReader struct {
	count uint64
}

func (r *errReader) Read(p []byte) (int, error) {
	if r.count > 1 {
		return 0, fmt.Errorf("erroring")
	}
	r.count++
	p[len(p)-1] = '\n'
	return 1, nil
}
func TestGetConfigFileStateErr(t *testing.T) {
r := &errReader{}
cfs, err := getConfigFileState(r)
if cfs != nil {
t.Errorf("cfs not nil: %v", cfs)
}
if err == nil {
t.Errorf("err is nil")
}
}
// errProcess is the message returned by a countProcessor configured to fail.
const errProcess = "process error"

// countProcessor is a configLineProcessor stub that tallies how many lines
// it has processed, or fails every call when shouldErr is set.
type countProcessor struct {
	count     int
	shouldErr bool
}

func (p *countProcessor) Process(_ *configLine) error {
	if !p.shouldErr {
		p.count++
		return nil
	}
	return fmt.Errorf(errProcess)
}
// TestConfigFileStateProcessLines verifies that ProcessLines feeds every
// line to every processor, and that a failing processor's error is
// surfaced to the caller.
func TestConfigFileStateProcessLines(t *testing.T) {
	countProc1 := &countProcessor{}
	countProc2 := &countProcessor{}
	procs := []configLineProcessor{countProc1, countProc2}
	lines := []string{"foo", "bar", "baz"}
	wantCount := len(lines)
	cfs := newConfigFileStateFromSlice(t, lines)
	err := cfs.ProcessLines(procs...)
	if err != nil {
		t.Errorf("unexpected error in processing: %v", err)
	}
	// Each processor should have seen every line exactly once.
	if got := countProc1.count; got != wantCount {
		t.Errorf("incorrect count for countProc1: got %d want %d", got, wantCount)
	}
	if got := countProc2.count; got != wantCount {
		t.Errorf("incorrect count for countProc2: got %d want %d", got, wantCount)
	}
	// Append an erroring processor and confirm its error propagates.
	badCountProc := &countProcessor{shouldErr: true}
	procs = append(procs, badCountProc)
	err = cfs.ProcessLines(procs...)
	if err == nil {
		t.Errorf("unexpected lack of error")
	}
	if got := err.Error(); got != errProcess {
		t.Errorf("unexpected error: got %s want %s", got, errProcess)
	}
}
// Error messages produced by testTruncateWriter's stubbed methods.
const (
	errTestTruncate = "truncate error"
	errTestSeek     = "seek error"
)

// testTruncateWriter wraps testWriter with stubbed Seek/Truncate methods
// whose failures are controlled by the corresponding flags.
type testTruncateWriter struct {
	*testWriter
	seekErr     bool
	truncateErr bool
}

func (w *testTruncateWriter) Seek(_ int64, _ int) (int64, error) {
	if !w.seekErr {
		return 0, nil
	}
	return 0, fmt.Errorf(errTestSeek)
}

func (w *testTruncateWriter) Truncate(_ int64) error {
	if !w.truncateErr {
		return nil
	}
	return fmt.Errorf(errTestTruncate)
}
// TestConfigFileStateWriteTo checks that WriteTo emits every non-removed
// line with a trailing newline, drives Seek/Truncate when the writer
// supports them, and propagates write/seek/truncate errors.
func TestConfigFileStateWriteTo(t *testing.T) {
	cases := []struct {
		desc      string
		lines     []string
		removeIdx int
		errMsg    string
		w         io.Writer
	}{
		{
			desc:      "empty",
			lines:     []string{},
			removeIdx: -1,
			w:         &testWriter{false, []string{}},
		},
		{
			desc:      "one line",
			lines:     []string{"foo"},
			removeIdx: -1,
			w:         &testWriter{false, []string{}},
		},
		{
			desc:      "many lines",
			lines:     []string{"foo", "bar", "baz", "quaz"},
			removeIdx: -1,
			w:         &testWriter{false, []string{}},
		},
		{
			desc:      "many lines w/ truncating",
			lines:     []string{"foo", "bar", "baz", "quaz"},
			removeIdx: -1,
			w:         &testTruncateWriter{&testWriter{false, []string{}}, false, false},
		},
		{
			desc:      "many lines, remove middle line",
			lines:     []string{"foo", "bar", "baz"},
			removeIdx: 1,
			w:         &testWriter{false, []string{}},
		},
		{
			desc:      "error in truncate",
			lines:     []string{"foo"},
			removeIdx: -1,
			errMsg:    errTestTruncate,
			w:         &testTruncateWriter{&testWriter{true, []string{}}, false, true},
		},
		{
			desc:      "error in seek",
			lines:     []string{"foo"},
			removeIdx: -1,
			errMsg:    errTestSeek,
			w:         &testTruncateWriter{&testWriter{true, []string{}}, true, false},
		},
		{
			desc:      "error in write w/o truncating",
			lines:     []string{"foo"},
			removeIdx: -1,
			errMsg:    errTestWriter,
			w:         &testWriter{true, []string{}},
		},
		{
			desc:      "error in write w/ truncating",
			lines:     []string{"foo"},
			removeIdx: -1,
			errMsg:    errTestWriter,
			w:         &testTruncateWriter{&testWriter{true, []string{}}, false, false},
		},
	}
	for _, c := range cases {
		cfs := newConfigFileStateFromSlice(t, c.lines)
		if c.removeIdx >= 0 {
			cfs.lines[c.removeIdx].remove = true
		}
		_, err := cfs.WriteTo(c.w)
		if c.errMsg == "" && err != nil {
			t.Errorf("%s: unexpected error: %v", c.desc, err)
		} else if c.errMsg != "" {
			if err == nil {
				t.Errorf("%s: unexpected lack of error", c.desc)
			} else if got := err.Error(); got != c.errMsg {
				t.Errorf("%s: unexpected type of error: %v", c.desc, err)
			}
		}
		// Unwrap the underlying testWriter to inspect what was written.
		var w *testWriter
		switch temp := c.w.(type) {
		case *testWriter:
			w = temp
		case *testTruncateWriter:
			w = temp.testWriter
		}
		// A removed line shrinks the expected output length by one.
		lineCntModifier := 0
		if c.removeIdx >= 0 {
			lineCntModifier = 1
		}
		if len(c.lines) > 0 && c.errMsg == "" {
			if got := len(w.lines); got != len(c.lines)-lineCntModifier {
				t.Errorf("%s: incorrect output len: got %d want %d", c.desc, got, len(c.lines)-lineCntModifier)
			}
			// idxModifier shifts expected indices after the removed line.
			idxModifier := 0
			for i, want := range c.lines {
				if i == c.removeIdx {
					idxModifier = 1
					continue
				}
				if got := w.lines[i-idxModifier]; got != want+"\n" {
					t.Errorf("%s: incorrect line at %d: got %s want %s", c.desc, i, got, want+"\n")
				}
			}
		}
	}
}
|
package main
import "fmt"
// main demonstrates basic slice introspection: it prints the slice itself,
// its capacity and length, and then each index/value pair.
func main() {
	words := []string{"sun", "moon", "star", "rocket", "foot", "face"}
	fmt.Println(words)
	fmt.Println(cap(words))
	fmt.Println(len(words))
	for index, word := range words {
		fmt.Println(index, word)
	}
}
|
package ruleguard
import (
"go/ast"
"go/token"
"go/types"
"io"
)
// Context carries the type information, position data and reporting
// callback needed to run rules over a single file.
type Context struct {
	// Types holds type-checking results for the inspected file.
	Types *types.Info
	// Fset maps AST nodes back to their source positions.
	Fset *token.FileSet
	// Report is invoked for every rule match with the offending node and message.
	Report func(n ast.Node, msg string)
}
// ParseRules reads rule definitions from r (attributed to filename for
// position reporting) and returns the resulting rule set.
func ParseRules(filename string, fset *token.FileSet, r io.Reader) (*GoRuleSet, error) {
	parser := newRulesParser()
	return parser.ParseFile(filename, fset, r)
}
// RunRules applies every rule in rules to the file f, reporting matches
// through ctx.Report.
func RunRules(ctx *Context, f *ast.File, rules *GoRuleSet) {
	runner := rulesRunner{ctx: ctx, rules: rules}
	runner.run(f)
}
// GoRuleSet groups parsed rules by scope: universal rules apply to any
// package, local rules only to the package that defined them.
type GoRuleSet struct {
	universal *scopedGoRuleSet
	local *scopedGoRuleSet
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package driver
import (
"context"
"crypto/tls"
"fmt"
"math/rand"
"net/url"
"strings"
"sync"
"time"
"github.com/pingcap/errors"
deadlockpb "github.com/pingcap/kvproto/pkg/deadlock"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/executor/importer"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/store/copr"
derr "github.com/pingcap/tidb/store/driver/error"
txn_driver "github.com/pingcap/tidb/store/driver/txn"
"github.com/pingcap/tidb/store/gcworker"
"github.com/pingcap/tidb/util/logutil"
"github.com/tikv/client-go/v2/config"
"github.com/tikv/client-go/v2/tikv"
"github.com/tikv/client-go/v2/tikvrpc"
"github.com/tikv/client-go/v2/util"
pd "github.com/tikv/pd/client"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
)
// storeCache guards a process-wide cache of opened tikvStore instances,
// keyed by cluster UUID, so each cluster is opened at most once.
type storeCache struct {
	sync.Mutex
	cache map[string]*tikvStore
}

// mc is the singleton store cache shared by all drivers in this process.
var mc storeCache
func init() {
	mc.cache = make(map[string]*tikvStore)
	// Seed the global PRNG so code relying on math/rand is not deterministic.
	rand.Seed(time.Now().UnixNano())
	// Setup the Hooks to dynamic control global resource controller.
	variable.EnableGlobalResourceControlFunc = tikv.EnableResourceControl
	variable.DisableGlobalResourceControlFunc = tikv.DisableResourceControl
	// cannot use this package directly, it causes import cycle
	importer.GetKVStore = getKVStore
}
// Option is a function that changes some config of Driver before it is
// used to open a store.
type Option func(*TiKVDriver)
// WithSecurity changes the config.Security used by tikv driver.
func WithSecurity(s config.Security) Option {
	return func(d *TiKVDriver) {
		d.security = s
	}
}
// WithTiKVClientConfig changes the config.TiKVClient used by tikv driver.
func WithTiKVClientConfig(client config.TiKVClient) Option {
	return func(d *TiKVDriver) {
		d.tikvConfig = client
	}
}
// WithTxnLocalLatches changes the config.TxnLocalLatches used by tikv driver.
func WithTxnLocalLatches(t config.TxnLocalLatches) Option {
	return func(d *TiKVDriver) {
		d.txnLocalLatches = t
	}
}
// WithPDClientConfig changes the config.PDClient used by tikv driver.
func WithPDClientConfig(client config.PDClient) Option {
	return func(d *TiKVDriver) {
		d.pdConfig = client
	}
}
// getKVStore opens a TiKV-backed kv.Storage at path with the supplied TLS
// security settings; it is assigned to importer.GetKVStore in init to
// avoid an import cycle.
func getKVStore(path string, tls config.Security) (kv.Storage, error) {
	driver := TiKVDriver{}
	return driver.OpenWithOptions(path, WithSecurity(tls))
}
// TiKVDriver implements engine TiKV. Its fields default to the global TiDB
// config and can be overridden per-open through Options.
type TiKVDriver struct {
	pdConfig config.PDClient
	security config.Security
	tikvConfig config.TiKVClient
	txnLocalLatches config.TxnLocalLatches
}
// Open opens or creates an TiKV storage with given path using global config.
// Path example: tikv://etcd-node1:port,etcd-node2:port?cluster=1&disableGC=false
func (d TiKVDriver) Open(path string) (kv.Storage, error) {
	// Delegates with no options, so all settings come from the global config.
	return d.OpenWithOptions(path)
}
// setDefaultAndOptions seeds the driver's configuration from the global
// TiDB config and then applies each supplied Option on top of it.
func (d *TiKVDriver) setDefaultAndOptions(options ...Option) {
	globalCfg := config.GetGlobalConfig()
	d.pdConfig = globalCfg.PDClient
	d.security = globalCfg.Security
	d.tikvConfig = globalCfg.TiKVClient
	d.txnLocalLatches = globalCfg.TxnLocalLatches
	for _, apply := range options {
		apply(d)
	}
}
// OpenWithOptions is used by other program that use tidb as a library, to avoid modifying GlobalConfig
// unspecified options will be set to global config
func (d TiKVDriver) OpenWithOptions(path string, options ...Option) (resStore kv.Storage, err error) {
mc.Lock()
defer mc.Unlock()
d.setDefaultAndOptions(options...)
etcdAddrs, disableGC, keyspaceName, err := config.ParsePath(path)
if err != nil {
return nil, errors.Trace(err)
}
var (
pdCli pd.Client
spkv *tikv.EtcdSafePointKV
s *tikv.KVStore
)
defer func() {
if err != nil {
if s != nil {
// if store is created, it will close spkv and pdCli inside
_ = s.Close()
return
}
if spkv != nil {
_ = spkv.Close()
}
if pdCli != nil {
pdCli.Close()
}
}
}()
pdCli, err = pd.NewClient(etcdAddrs, pd.SecurityOption{
CAPath: d.security.ClusterSSLCA,
CertPath: d.security.ClusterSSLCert,
KeyPath: d.security.ClusterSSLKey,
},
pd.WithGRPCDialOptions(
grpc.WithKeepaliveParams(keepalive.ClientParameters{
Time: time.Duration(d.tikvConfig.GrpcKeepAliveTime) * time.Second,
Timeout: time.Duration(d.tikvConfig.GrpcKeepAliveTimeout) * time.Second,
}),
),
pd.WithCustomTimeoutOption(time.Duration(d.pdConfig.PDServerTimeout)*time.Second),
pd.WithForwardingOption(config.GetGlobalConfig().EnableForwarding))
if err != nil {
return nil, errors.Trace(err)
}
pdCli = util.InterceptedPDClient{Client: pdCli}
// FIXME: uuid will be a very long and ugly string, simplify it.
uuid := fmt.Sprintf("tikv-%v", pdCli.GetClusterID(context.TODO()))
if store, ok := mc.cache[uuid]; ok {
pdCli.Close()
return store, nil
}
tlsConfig, err := d.security.ToTLSConfig()
if err != nil {
return nil, errors.Trace(err)
}
spkv, err = tikv.NewEtcdSafePointKV(etcdAddrs, tlsConfig)
if err != nil {
return nil, errors.Trace(err)
}
// ---------------- keyspace logic ----------------
var (
pdClient *tikv.CodecPDClient
)
if keyspaceName == "" {
logutil.BgLogger().Info("using API V1.")
pdClient = tikv.NewCodecPDClient(tikv.ModeTxn, pdCli)
} else {
logutil.BgLogger().Info("using API V2.", zap.String("keyspaceName", keyspaceName))
pdClient, err = tikv.NewCodecPDClientWithKeyspace(tikv.ModeTxn, pdCli, keyspaceName)
if err != nil {
return nil, errors.Trace(err)
}
}
codec := pdClient.GetCodec()
rpcClient := tikv.NewRPCClient(
tikv.WithSecurity(d.security),
tikv.WithCodec(codec),
)
s, err = tikv.NewKVStore(uuid, pdClient, spkv, rpcClient, tikv.WithPDHTTPClient(tlsConfig, etcdAddrs))
if err != nil {
return nil, errors.Trace(err)
}
// ---------------- keyspace logic ----------------
if d.txnLocalLatches.Enabled {
s.EnableTxnLocalLatches(d.txnLocalLatches.Capacity)
}
coprCacheConfig := &config.GetGlobalConfig().TiKVClient.CoprCache
coprStore, err := copr.NewStore(s, coprCacheConfig)
if err != nil {
return nil, errors.Trace(err)
}
store := &tikvStore{
KVStore: s,
etcdAddrs: etcdAddrs,
tlsConfig: tlsConfig,
memCache: kv.NewCacheDB(),
enableGC: !disableGC,
coprStore: coprStore,
codec: codec,
}
mc.cache[uuid] = store
return store, nil
}
// tikvStore wraps tikv.KVStore with TiDB-specific facilities: the
// coprocessor store, an in-memory cache, and optional GC worker.
type tikvStore struct {
	*tikv.KVStore
	etcdAddrs []string
	tlsConfig *tls.Config
	memCache kv.MemManager // this is used to query from memory
	enableGC bool
	gcWorker *gcworker.GCWorker
	coprStore *copr.Store
	codec tikv.Codec
}
// Name gets the name of the storage engine.
func (s *tikvStore) Name() string {
	return "TiKV"
}
// Describe returns a brief introduction of the storage engine.
func (s *tikvStore) Describe() string {
	return "TiKV is a distributed transactional key-value database"
}
// ldflagGetEtcdAddrsFromConfig can be overridden via -ldflags in automated
// tests; "1" makes EtcdAddrs read addresses from the global config Path
// instead of querying PD members.
var ldflagGetEtcdAddrsFromConfig = "0" // 1:Yes, otherwise:No

// getAllMembersBackoff is the backoff budget used when retrying GetAllMembers.
const getAllMembersBackoff = 5000
// EtcdAddrs returns etcd server addresses.
func (s *tikvStore) EtcdAddrs() ([]string, error) {
	if s.etcdAddrs == nil {
		return nil, nil
	}
	if ldflagGetEtcdAddrsFromConfig == "1" {
		// For automated test purpose.
		// To manipulate connection to etcd by mandatorily setting path to a proxy.
		cfg := config.GetGlobalConfig()
		return strings.Split(cfg.Path, ","), nil
	}
	ctx := context.Background()
	bo := tikv.NewBackoffer(ctx, getAllMembersBackoff)
	etcdAddrs := make([]string, 0)
	pdClient := s.GetPDClient()
	if pdClient == nil {
		return nil, errors.New("Etcd client not found")
	}
	// Retry GetAllMembers with backoff until it succeeds or the backoff
	// budget is exhausted (Backoff then returns an error).
	for {
		members, err := pdClient.GetAllMembers(ctx)
		if err != nil {
			err := bo.Backoff(tikv.BoRegionMiss(), err)
			if err != nil {
				return nil, err
			}
			continue
		}
		// Collect the host portion of each member's first client URL.
		for _, member := range members {
			if len(member.ClientUrls) > 0 {
				u, err := url.Parse(member.ClientUrls[0])
				if err != nil {
					logutil.BgLogger().Error("fail to parse client url from pd members", zap.String("client_url", member.ClientUrls[0]), zap.Error(err))
					return nil, err
				}
				etcdAddrs = append(etcdAddrs, u.Host)
			}
		}
		return etcdAddrs, nil
	}
}
// TLSConfig returns the tls config to connect to etcd.
func (s *tikvStore) TLSConfig() *tls.Config {
	return s.tlsConfig
}
// StartGCWorker starts GC worker, it's called in BootstrapSession, don't call this function more than once.
// It is a no-op when GC was disabled via the store path (disableGC=true).
func (s *tikvStore) StartGCWorker() error {
	if !s.enableGC {
		return nil
	}
	gcWorker, err := gcworker.NewGCWorker(s, s.GetPDClient())
	if err != nil {
		return derr.ToTiDBErr(err)
	}
	gcWorker.Start()
	// Keep a reference so Close can shut the worker down.
	s.gcWorker = gcWorker
	return nil
}
// GetClient returns the coprocessor client used for distributed SQL requests.
func (s *tikvStore) GetClient() kv.Client {
	return s.coprStore.GetClient()
}
// GetMPPClient returns the coprocessor store's MPP client.
func (s *tikvStore) GetMPPClient() kv.MPPClient {
	return s.coprStore.GetMPPClient()
}
// Close and unregister the store.
func (s *tikvStore) Close() error {
	// Remove the store from the cache first so concurrent opens create a
	// fresh store instead of handing out the one being closed.
	mc.Lock()
	defer mc.Unlock()
	delete(mc.cache, s.UUID())
	if s.gcWorker != nil {
		s.gcWorker.Close()
	}
	s.coprStore.Close()
	err := s.KVStore.Close()
	return derr.ToTiDBErr(err)
}
// GetMemCache return memory manager of the storage.
func (s *tikvStore) GetMemCache() kv.MemManager {
	return s.memCache
}
// Begin a global transaction.
func (s *tikvStore) Begin(opts ...tikv.TxnOption) (kv.Transaction, error) {
	txn, err := s.KVStore.Begin(opts...)
	if err != nil {
		return nil, derr.ToTiDBErr(err)
	}
	// err is necessarily nil here; return an explicit nil instead of the
	// stale variable so a reader cannot mistake it for a partial-error path.
	return txn_driver.NewTiKVTxn(txn), nil
}
// GetSnapshot gets a snapshot that is able to read any data which data is <= ver.
// if ver is MaxVersion or > current max committed version, we will use current version for this snapshot.
func (s *tikvStore) GetSnapshot(ver kv.Version) kv.Snapshot {
	return txn_driver.NewSnapshot(s.KVStore.GetSnapshot(ver.Ver))
}
// CurrentVersion returns current max committed version with the given txnScope (local or global).
func (s *tikvStore) CurrentVersion(txnScope string) (kv.Version, error) {
	ver, err := s.KVStore.CurrentTimestamp(txnScope)
	return kv.NewVersion(ver), derr.ToTiDBErr(err)
}
// ShowStatus returns the specified status of the storage.
// Not implemented for TiKV; always returns kv.ErrNotImplemented.
func (s *tikvStore) ShowStatus(ctx context.Context, key string) (interface{}, error) {
	return nil, kv.ErrNotImplemented
}
// GetLockWaits get return lock waits info
func (s *tikvStore) GetLockWaits() ([]*deadlockpb.WaitForEntry, error) {
	stores := s.GetRegionCache().GetStoresByType(tikvrpc.TiKV)
	//nolint: prealloc
	var result []*deadlockpb.WaitForEntry
	// Query every TiKV store; failures are logged and skipped so one bad
	// store does not hide lock-wait info from the others.
	for _, store := range stores {
		resp, err := s.GetTiKVClient().SendRequest(context.TODO(), store.GetAddr(), tikvrpc.NewRequest(tikvrpc.CmdLockWaitInfo, &kvrpcpb.GetLockWaitInfoRequest{}), time.Second*30)
		if err != nil {
			logutil.BgLogger().Warn("query lock wait info failed", zap.Error(err))
			continue
		}
		if resp.Resp == nil {
			logutil.BgLogger().Warn("lock wait info from store is nil")
			continue
		}
		entries := resp.Resp.(*kvrpcpb.GetLockWaitInfoResponse).Entries
		result = append(result, entries...)
	}
	return result, nil
}
// GetCodec returns the codec used to encode/decode keys for this store.
func (s *tikvStore) GetCodec() tikv.Codec {
	return s.codec
}
|
package download
// Options holds the configurable settings for a download.
type Options struct {
	// Destination is the local path the download is written to.
	Destination string
}

// SetDestination sets the destination path.
//
// FIX: the receiver must be a pointer — with the original value receiver
// the assignment mutated a copy of the struct and was silently lost.
func (options *Options) SetDestination(destination string) {
	options.Destination = destination
}
|
package make_tree
import "io"
// Actions are the callbacks to use to
// make a particular filesystem tree.
type Action interface {
	// Do executes the direct action in currentDirectory, writing output to
	// dump and reporting each executed step via logRan. It must
	// return an error on failure.
	Do(currentDirectory string, dump io.Writer, logRan func(string, Action)) error
	// Rollback executes the inverse action. It should
	// not return error on failure, but silently
	// forgive.
	Rollback(currentDirectory string, dump io.Writer)
}
|
package mongo
import (
"context"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// Connect is a wrapped function to the original mongo.Connect for avoiding
// package name conflict; it forwards ctx and all client options unchanged.
func Connect(ctx context.Context, opts ...*options.ClientOptions) (*mongo.Client, error) {
	return mongo.Connect(ctx, opts...)
}
|
package main
import (
"github.com/nsf/termbox-go"
)
// Commander renders the command area of the terminal UI.
type Commander struct {
}
// Draw renders a horizontal separator two rows above the bottom of a w×h
// terminal, starting at column 1 (column 0 is left untouched).
// FIX: receiver renamed from "self" to the idiomatic short form — Go
// convention discourages this/self receiver names.
func (c *Commander) Draw(w, h int) {
	for i := 1; i < w; i++ {
		// 0x31/0x00 are the foreground/background attributes — presumably
		// a fixed color pair for the separator; confirm against the theme.
		termbox.SetCell(i, h-2, '─', 0x31, 0x00)
	}
}
|
package modules
import "fmt"
// Executor is a runnable module: it can describe itself (fmt.Stringer)
// and execute with a list of string arguments.
type Executor interface {
	fmt.Stringer
	Execute([]string) error
}
// SupportedModules maps module names to their constructor functions.
var SupportedModules = map[string]func() Executor{
	"backend": NewBackendModule,
}
// ModuleFactory returns a fresh instance of the named module, or an error
// if the name is not registered in SupportedModules.
func ModuleFactory(module string) (Executor, error) {
	// Idiomatic early return keeps the happy path unindented instead of
	// burying it in an else branch.
	moduleInitFunc, ok := SupportedModules[module]
	if !ok {
		return nil, fmt.Errorf("module not supported")
	}
	return moduleInitFunc(), nil
}
|
package parsing_test
import (
"testing"
. "github.com/s2gatev/sqlmorph/ast"
)
// TestSelectParsing covers SELECT parsing: field lists, aliases, WHERE
// conditions, LIMIT/OFFSET, and the four join forms — then the error
// messages produced for malformed queries.
func TestSelectParsing(t *testing.T) {
	// Well-formed queries and their expected ASTs.
	runSuccessTests(t, []successTest{
		{
			Query: `SELECT Name FROM User`,
			Expected: &Select{
				Fields: []*Field{
					&Field{Name: "Name"},
				},
				Table: &Table{Name: "User"},
			},
		},
		{
			Query: `SELECT Name, Location, Age FROM User`,
			Expected: &Select{
				Fields: []*Field{
					&Field{Name: "Name"}, &Field{Name: "Location"}, &Field{Name: "Age"},
				},
				Table: &Table{Name: "User"},
			},
		},
		{
			Query: `SELECT * FROM User`,
			Expected: &Select{
				Fields: []*Field{
					&Field{Name: "*"},
				},
				Table: &Table{Name: "User"},
			},
		},
		{
			Query: `SELECT u.Name, u.Location, u.Age FROM User u`,
			Expected: &Select{
				Fields: []*Field{
					&Field{Target: "u", Name: "Name"},
					&Field{Target: "u", Name: "Location"},
					&Field{Target: "u", Name: "Age"},
				},
				Table: &Table{Name: "User", Alias: "u"},
			},
		},
		{
			Query: `SELECT u.Name FROM User u WHERE u.Age=21`,
			Expected: &Select{
				Fields: []*Field{
					&Field{Target: "u", Name: "Name"},
				},
				Conditions: []*EqualsCondition{
					&EqualsCondition{
						Field: &Field{Target: "u", Name: "Age"},
						Value: "21",
					},
				},
				Table: &Table{Name: "User", Alias: "u"},
			},
		},
		{
			Query: `SELECT u.Name, u.Location, u.Age FROM User u LIMIT 10`,
			Expected: &Select{
				Fields: []*Field{
					&Field{Target: "u", Name: "Name"},
					&Field{Target: "u", Name: "Location"},
					&Field{Target: "u", Name: "Age"},
				},
				Limit: "10",
				Table: &Table{Name: "User", Alias: "u"},
			},
		},
		{
			Query: `SELECT u.Name, u.Location, u.Age FROM User u LIMIT 10 OFFSET 20`,
			Expected: &Select{
				Fields: []*Field{
					&Field{Target: "u", Name: "Name"},
					&Field{Target: "u", Name: "Location"},
					&Field{Target: "u", Name: "Age"},
				},
				Limit: "10",
				Offset: "20",
				Table: &Table{Name: "User", Alias: "u"},
			},
		},
		{
			Query: `SELECT Name, Location FROM User INNER JOIN Address ON ID=UserID`,
			Expected: &Select{
				Fields: []*Field{
					&Field{Name: "Name"},
					&Field{Name: "Location"},
				},
				Table: &Table{Name: "User"},
				JoinTables: []Join{
					&InnerJoin{
						Table: &Table{Name: "Address"},
						Left: &Field{Name: "ID"},
						Right: &Field{Name: "UserID"},
					},
				},
			},
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u INNER JOIN Address a ON u.ID=a.UserID`,
			Expected: &Select{
				Fields: []*Field{
					&Field{Target: "u", Name: "Name"},
					&Field{Target: "a", Name: "Location"},
				},
				Table: &Table{Name: "User", Alias: "u"},
				JoinTables: []Join{
					&InnerJoin{
						Table: &Table{Name: "Address", Alias: "a"},
						Left: &Field{Target: "u", Name: "ID"},
						Right: &Field{Target: "a", Name: "UserID"},
					},
				},
			},
		},
		{
			Query: `SELECT u.Name FROM User u CROSS JOIN Client`,
			Expected: &Select{
				Fields: []*Field{
					&Field{Target: "u", Name: "Name"},
				},
				Table: &Table{Name: "User", Alias: "u"},
				JoinTables: []Join{
					&CrossJoin{
						Table: &Table{Name: "Client"},
					},
				},
			},
		},
		{
			Query: `SELECT u.Name, c.Name FROM User u CROSS JOIN Client c`,
			Expected: &Select{
				Fields: []*Field{
					&Field{Target: "u", Name: "Name"},
					&Field{Target: "c", Name: "Name"},
				},
				Table: &Table{Name: "User", Alias: "u"},
				JoinTables: []Join{
					&CrossJoin{
						Table: &Table{Name: "Client", Alias: "c"},
					},
				},
			},
		},
		{
			Query: `SELECT Name, Location FROM User LEFT JOIN Address ON ID=UserID`,
			Expected: &Select{
				Fields: []*Field{
					&Field{Name: "Name"},
					&Field{Name: "Location"},
				},
				Table: &Table{Name: "User"},
				JoinTables: []Join{
					&LeftJoin{
						Table: &Table{Name: "Address"},
						Left: &Field{Name: "ID"},
						Right: &Field{Name: "UserID"},
					},
				},
			},
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u LEFT JOIN Address a ON u.ID=a.UserID`,
			Expected: &Select{
				Fields: []*Field{
					&Field{Target: "u", Name: "Name"},
					&Field{Target: "a", Name: "Location"},
				},
				Table: &Table{Name: "User", Alias: "u"},
				JoinTables: []Join{
					&LeftJoin{
						Table: &Table{Name: "Address", Alias: "a"},
						Left: &Field{Target: "u", Name: "ID"},
						Right: &Field{Target: "a", Name: "UserID"},
					},
				},
			},
		},
		{
			Query: `SELECT Name, Location FROM User RIGHT JOIN Address ON ID=UserID`,
			Expected: &Select{
				Fields: []*Field{
					&Field{Name: "Name"},
					&Field{Name: "Location"},
				},
				Table: &Table{Name: "User"},
				JoinTables: []Join{
					&RightJoin{
						Table: &Table{Name: "Address"},
						Left: &Field{Name: "ID"},
						Right: &Field{Name: "UserID"},
					},
				},
			},
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u RIGHT JOIN Address a ON u.ID=a.UserID`,
			Expected: &Select{
				Fields: []*Field{
					&Field{Target: "u", Name: "Name"},
					&Field{Target: "a", Name: "Location"},
				},
				Table: &Table{Name: "User", Alias: "u"},
				JoinTables: []Join{
					&RightJoin{
						Table: &Table{Name: "Address", Alias: "a"},
						Left: &Field{Target: "u", Name: "ID"},
						Right: &Field{Target: "a", Name: "UserID"},
					},
				},
			},
		},
	})
	// Malformed queries and the exact parser error expected for each.
	runErrorTests(t, []errorTest{
		{
			Query: `SELECT u.Name, u.Location, u.Age FROM LIMIT 10`,
			ErrorMessage: "FROM statement must be followed by a target class. Found LIMIT.",
		},
		{
			Query: `SELECT u.Name, u.Location, u.Age FROM User u LIMIT OFFSET`,
			ErrorMessage: "LIMIT statement must be followed by a number. Found OFFSET.",
		},
		{
			Query: `SELECT u.Name, u.Location, u.Age FROM User u LIMIT 10 OFFSET`,
			ErrorMessage: "OFFSET statement must be followed by a number.",
		},
		{
			Query: `SELECT FROM User`,
			ErrorMessage: "SELECT statement must be followed by field list. Found FROM.",
		},
		{
			Query: `SELECT u.Name FROM User u WHERE LIMIT`,
			ErrorMessage: "WHERE statement must be followed by condition list. Found LIMIT.",
		},
		{
			Query: `SELECT u.Name FROM User u WHERE a!=2 LIMIT`,
			ErrorMessage: "WHERE statement must be followed by condition list. Found !.",
		},
		{
			Query: `SELECT u.Name FROM User u WHERE a=! LIMIT`,
			ErrorMessage: "WHERE statement must be followed by condition list. Found !.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u INNER Address a ON u.ID=a.UserID`,
			ErrorMessage: "Expected JOIN following INNER. Found Address.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u INNER JOIN ON u.ID=a.UserID`,
			ErrorMessage: "INNER JOIN statement must be followed by a target class. Found ON.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u INNER JOIN Address a`,
			ErrorMessage: "INNER JOIN statement must have an ON clause.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u INNER JOIN Address a ON !u=a.UserID`,
			ErrorMessage: "Wrong join fields in INNER JOIN statement. Found !.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u INNER JOIN Address a ON u.ID!=a.UserID`,
			ErrorMessage: "Wrong join fields in INNER JOIN statement. Found !.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u INNER JOIN Address a ON u.ID=!a.UserID`,
			ErrorMessage: "Wrong join fields in INNER JOIN statement. Found !.",
		},
		{
			Query: `SELECT u.Name, c.Name FROM User u CROSS Client c`,
			ErrorMessage: "Expected JOIN following CROSS. Found Client.",
		},
		{
			Query: `SELECT u.Name, c.Name FROM User u CROSS JOIN`,
			ErrorMessage: "CROSS JOIN statement must be followed by a target class.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u LEFT Address a ON u.ID=a.UserID`,
			ErrorMessage: "Expected JOIN following LEFT. Found Address.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u LEFT JOIN ON u.ID=a.UserID`,
			ErrorMessage: "LEFT JOIN statement must be followed by a target class. Found ON.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u LEFT JOIN Address a`,
			ErrorMessage: "LEFT JOIN statement must have an ON clause.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u LEFT JOIN Address a ON !u=a.UserID`,
			ErrorMessage: "Wrong join fields in LEFT JOIN statement. Found !.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u LEFT JOIN Address a ON u.ID!=a.UserID`,
			ErrorMessage: "Wrong join fields in LEFT JOIN statement. Found !.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u LEFT JOIN Address a ON u.ID=!a.UserID`,
			ErrorMessage: "Wrong join fields in LEFT JOIN statement. Found !.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u RIGHT Address a ON u.ID=a.UserID`,
			ErrorMessage: "Expected JOIN following RIGHT. Found Address.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u RIGHT JOIN ON u.ID=a.UserID`,
			ErrorMessage: "RIGHT JOIN statement must be followed by a target class. Found ON.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u RIGHT JOIN Address a`,
			ErrorMessage: "RIGHT JOIN statement must have an ON clause.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u RIGHT JOIN Address a ON !u=a.UserID`,
			ErrorMessage: "Wrong join fields in RIGHT JOIN statement. Found !.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u RIGHT JOIN Address a ON u.ID!=a.UserID`,
			ErrorMessage: "Wrong join fields in RIGHT JOIN statement. Found !.",
		},
		{
			Query: `SELECT u.Name, a.Location FROM User u RIGHT JOIN Address a ON u.ID=!a.UserID`,
			ErrorMessage: "Wrong join fields in RIGHT JOIN statement. Found !.",
		},
	})
}
|
// https://eli.thegreenplace.net/2020/pubsub-using-channels-in-go/
package main
import (
"fmt"
"sync"
)
// Pubsub is a simple topic-based publish/subscribe broker backed by
// channels: Publish fans a message out to every subscriber of a topic.
type Pubsub struct {
	mu     sync.RWMutex
	subs   map[string][]chan string
	closed bool
}

// NewPubsub returns a ready-to-use broker with no subscriptions.
func NewPubsub() *Pubsub {
	return &Pubsub{subs: make(map[string][]chan string)}
}
func (ps *Pubsub) Subscribe(topic string) <-chan string {
ps.mu.Lock()
defer ps.mu.Unlock()
ch := make(chan string, 1) // think hard about buffer size
ps.subs[topic] = append(ps.subs[topic], ch)
return ch
}
func (ps *Pubsub) Publish(topic string, msg string) {
ps.mu.RLock()
defer ps.mu.RUnlock()
if ps.closed {
return
}
for _, ch := range ps.subs[topic] {
ch <- msg
}
}
func (ps *Pubsub) Close() {
ps.mu.Lock()
defer ps.mu.Unlock()
if !ps.closed {
ps.closed = true
for _, subs := range ps.subs {
for _, ch := range subs {
close(ch)
}
}
}
}
func main() {
ps := NewPubsub()
ch := ps.Subscribe("stuff")
ps.Publish("stuff", "a message about stuff")
fmt.Println(<-ch) // a message about stuff
}
|
package model
import (
"github.com/feast-dev/feast/go/protos/feast/core"
)
// FeatureViewProjection is a selection of features from a single feature
// view, optionally renamed via NameAlias, with optional join-key remapping.
type FeatureViewProjection struct {
	Name       string
	NameAlias  string
	Features   []*Field
	JoinKeyMap map[string]string
}

// NameToUse returns NameAlias when one is set, falling back to Name.
func (fv *FeatureViewProjection) NameToUse() string {
	if fv.NameAlias != "" {
		return fv.NameAlias
	}
	return fv.Name
}
// NewFeatureViewProjectionFromProto converts the proto representation into a
// model FeatureViewProjection, translating each feature column to a Field.
func NewFeatureViewProjectionFromProto(proto *core.FeatureViewProjection) *FeatureViewProjection {
	fields := make([]*Field, len(proto.FeatureColumns))
	for i, column := range proto.FeatureColumns {
		fields[i] = NewFieldFromProto(column)
	}
	return &FeatureViewProjection{
		Name:       proto.FeatureViewName,
		NameAlias:  proto.FeatureViewNameAlias,
		JoinKeyMap: proto.JoinKeyMap,
		Features:   fields,
	}
}
// NewFeatureViewProjectionFromDefinition derives the default projection of a
// base feature view: all of its features, no alias, no join-key remapping.
func NewFeatureViewProjectionFromDefinition(base *BaseFeatureView) *FeatureViewProjection {
	return &FeatureViewProjection{
		Name:       base.Name,
		NameAlias:  "",
		Features:   base.Features,
		JoinKeyMap: map[string]string{},
	}
}
|
// Package russian implements functions to manipulate russian words.
package russian
import (
"fmt"
"github.com/POSOlSoft/go/mathhelper"
)
// GrammaticalGender - russian grammatical gender.
type GrammaticalGender int
const (
	// Neuter - neuter gender (средний род).
	// NOTE(review): these constants are untyped (iota without an explicit
	// GrammaticalGender type annotation); confirm no caller relies on that.
	Neuter = iota
	// Masculine - masculine gender (мужской род).
	Masculine
	// Feminine - feminine gender (женский род).
	Feminine
)
// numeralNumberCase identifies the Russian number/case form that must
// accompany a numeral.
type numeralNumberCase int

// numeralNumberCase choices.
const (
	// singular nominative: 1, 21 час (but 11 часов)
	singularNominative = iota
	// singular genitive: 2, 3, 4, 22 часа (but 12, 13, 14 часов)
	singularGenitive
	// plural genitive: 0, 5 and everything else — часов
	pluralGenitive
)

// getNumeralNumberCasePrim maps the last two digits of a quantity
// (0 <= last2 <= 99) to the number/case form required in Russian.
func getNumeralNumberCasePrim(last2 int64) numeralNumberCase {
	// 11-14 are exceptional: they always take the plural genitive.
	if last2 >= 11 && last2 <= 14 {
		return pluralGenitive
	}
	// Otherwise only the final digit decides.
	switch last2 % 10 {
	case 1:
		return singularNominative
	case 2, 3, 4:
		return singularGenitive
	default:
		return pluralGenitive
	}
}
// getNumeralNumberCase maps any integer to its Russian number/case form,
// based on the last two digits of its absolute value.
func getNumeralNumberCase(i int64) numeralNumberCase {
	return getNumeralNumberCasePrim(mathhelper.AbsInt(i) % 100)
}
// numberAndRussianItems renders "<items> <russianItems>"; when items is zero
// and showZero is false it renders nothing at all.
func numberAndRussianItems(items int64, showZero bool, russianItems string) string {
	if items == 0 && !showZero {
		return ""
	}
	return fmt.Sprintf("%d %s", items, russianItems)
}
|
package core
import (
"net/http"
"strconv"
"strings"
"github.com/gin-gonic/gin"
"github.com/textileio/go-textile/pb"
)
// lsThreadFeed godoc
// @Summary Paginates post and annotation block types
// @Description Paginates post (join|leave|files|message) and annotation (comment|like) block types
// @Description The mode option dictates how the feed is displayed:
// @Description "chrono": All feed block types are shown. Annotations always nest their target post,
// @Description i.e., the post a comment is about.
// @Description "annotated": Annotations are nested under post targets, but are not shown in the
// @Description top-level feed.
// @Description "stacks": Related blocks are chronologically grouped into "stacks". A new stack is
// @Description started if an unrelated block breaks continuity. This mode is used by Textile
// @Description Photos. Stacks may include:
// @Description * The initial post with some nested annotations. Newer annotations may have already
// @Description been listed.
// @Description * One or more annotations about a post. The newest annotation assumes the "top"
// @Description position in the stack. Additional annotations are nested under the target.
// @Description Newer annotations may have already been listed in the case as well.
// @Tags feed
// @Produce application/json
// @Param X-Textile-Opts header string false "thread: Thread ID (can also use 'default'), offset: Offset ID to start listing from (omit for latest), limit: List page size (default: 5), mode: Feed mode (one of 'chrono', 'annotated', or 'stacks')" default(thread=,offset=,limit=5,mode="chrono")
// @Success 200 {object} pb.FeedItemList "feed"
// @Failure 400 {string} string "Bad Request"
// @Failure 404 {string} string "Not Found"
// @Failure 500 {string} string "Internal Server Error"
// @Router /feed [get]
func (a *api) lsThreadFeed(g *gin.Context) {
	opts, err := a.readOpts(g)
	if err != nil {
		a.abort500(g, err)
		return
	}
	// Normalize the user-supplied mode; the generated enum-value map is
	// keyed by upper-case names.
	mode := strings.ToUpper(opts["mode"])
	req := &pb.FeedRequest{
		Thread: opts["thread"],
		Offset: opts["offset"],
		Limit:  5, // default page size
		Mode:   pb.FeedRequest_Mode(pb.FeedRequest_Mode_value[mode]),
	}
	// Reject unknown thread IDs up front.
	if req.Thread != "" && a.node.Thread(req.Thread) == nil {
		g.String(http.StatusNotFound, ErrThreadNotFound.Error())
		return
	}
	if raw := opts["limit"]; raw != "" {
		limit, err := strconv.Atoi(raw)
		if err != nil {
			g.String(http.StatusBadRequest, err.Error())
			return
		}
		req.Limit = int32(limit)
	}
	list, err := a.node.Feed(req)
	if err != nil {
		g.String(http.StatusBadRequest, err.Error())
		return
	}
	pbJSON(g, http.StatusOK, list)
}
|
package utils
import (
"strings"
"github.com/gosimple/slug"
"encoding/base64"
)
// Slugify converts raw into a URL-safe slug. When raw contains only
// characters outside the sluggable set, the slug would come back empty and
// break URLs, so as a failsafe the raw string is rendered as unpadded
// URL-alphabet base64 instead, truncated to slug.MaxLength when that limit
// is configured.
func Slugify(raw string) string {
	if s := slug.Make(strings.ToLower(raw)); s != "" {
		return s
	}
	encoded := base64.RawURLEncoding.EncodeToString([]byte(raw))
	if slug.MaxLength != 0 && len(encoded) > slug.MaxLength {
		encoded = encoded[:slug.MaxLength]
	}
	return encoded
}
|
package main
// Display a character string with a decorative frame (Decorator pattern demo).
func main() {
	// A plain message, then progressively decorated variants of it.
	plain := NewMessageDisplay("Nice to meet you.")
	plain.Show(plain)

	sideFramed := NewSideFrame(plain, "!")
	sideFramed.Show(sideFramed)

	fullFramed := NewFullFrame(sideFramed)
	fullFramed.Show(fullFramed)

	// Decorators can be stacked arbitrarily deep.
	inner := NewFullFrame(NewMessageDisplay("See you again."))
	layered := NewSideFrame(
		NewFullFrame(
			NewFullFrame(
				NewSideFrame(
					NewSideFrame(inner, "#"),
					"#"))),
		"#")
	layered.Show(layered)
}
|
package shortenertest
import (
"testing"
"github.com/go-playground/validator"
"github.com/toms1441/urlsh/internal/shortener"
)
// invalidconfig holds shortener configs that must each fail validation:
// the zero value, a length without characters, and characters without a length.
var invalidconfig = [3]shortener.Config{
	{},
	{Length: 1},
	{Characters: "1"},
}
// validconfig is a minimal configuration expected to pass validation.
var validconfig = shortener.Config{
	Length: 4,
	Characters: "abcdef",
}
// validate is created lazily by the tests.
var validate *validator.Validate
// TestConfigValidation checks that every invalid config is rejected and that
// the valid config is accepted by the struct validator.
func TestConfigValidation(t *testing.T) {
	if validate == nil {
		validate = validator.New()
	}
	for i, cfg := range invalidconfig {
		if validate.Struct(cfg) == nil {
			t.Fatalf("validate.Struct == nil - %d", i)
		}
	}
	if err := validate.Struct(validconfig); err != nil {
		t.Fatalf("validate.Struct: %v", err)
	}
}
|
package main
import (
"bufio"
"fmt"
"strings"
"os"
"strconv"
)
// sliceAtoi converts a slice of decimal strings to ints, returning the
// partially converted slice together with the first conversion error, if any.
func sliceAtoi(sa []string) ([]int, error) {
	si := make([]int, 0, len(sa))
	for _, a := range sa {
		i, err := strconv.Atoi(a)
		if err != nil {
			return si, err
		}
		si = append(si, i)
	}
	return si, nil
}

// ReadArray reads the first line of the file at filePath and parses it as a
// comma-separated list of integers.
//
// The original ignored the os.Open error (leading to confusing downstream
// failures on a missing file) and never checked the scanner's error; both are
// now reported to the caller.
func ReadArray(filePath string) ([]int, error) {
	fileHandle, err := os.Open(filePath)
	if err != nil {
		return nil, err
	}
	defer fileHandle.Close()
	fileScanner := bufio.NewScanner(fileHandle)
	fileScanner.Scan()
	if err := fileScanner.Err(); err != nil {
		return nil, err
	}
	return sliceAtoi(strings.Split(fileScanner.Text(), ","))
}
// main runs a minimal Intcode interpreter (Advent of Code day 2) over the
// program read from the file named by the first argument, printing the
// memory before and after execution.
func main() {
	program, _ := ReadArray(os.Args[1])
	fmt.Println(program)
	// Instructions are 4 cells wide: opcode, in1, in2, dst. Opcode 99 halts.
run:
	for pc := 0; pc < len(program); pc += 4 {
		switch program[pc] {
		case 1:
			program[program[pc+3]] = program[program[pc+1]] + program[program[pc+2]]
		case 2:
			program[program[pc+3]] = program[program[pc+1]] * program[program[pc+2]]
		case 99:
			break run
		}
	}
	fmt.Println(program)
}
|
package main
import(
"fmt"
"os"
"net"
)
// main listens for TCP connections on localhost:1300 and serves each
// accepted connection in its own goroutine; a failed accept is retried.
func main() {
	const address = "127.0.0.1:1300"
	tcpAddr, err := net.ResolveTCPAddr("tcp4", address)
	checkError(err)
	listener, err := net.ListenTCP("tcp", tcpAddr)
	checkError(err)
	for {
		conn, err := listener.Accept()
		if err != nil {
			continue
		}
		go handleClient(conn)
	}
}
//Function handleClient closes connection on exit and reads a buffer of max 512 bytes and writes that buffer back
func handleClient(conn net.Conn) {
defer conn.Close()
var buffer [512] byte
for {
nBytes , err := conn.Read(buffer[0:])
if err != nil {
return
}
_, err2 := conn.Write(buffer[0:nBytes])
if err2 != nil {
return
}
}
}
// checkError reports err on stderr and terminates the process with status 1.
// It is a no-op for a nil error.
func checkError(err error) {
	if err == nil {
		return
	}
	fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
	os.Exit(1)
}
|
package app
import "github.com/bryanl/dolb/entity"
// AgentBuilder creates and configures agents.
type AgentBuilder interface {
	// Create builds a new agent with the given identifier.
	Create(id int) (*entity.Agent, error)
	// Configure applies configuration to an existing agent.
	Configure(agent *entity.Agent) error
}
|
package problem0315
// countSmaller returns, for each element of nums, how many elements to its
// right are strictly smaller (LeetCode 315). It merge-sorts an array of
// indexes into nums and, during each merge, credits every left-half element
// with the number of right-half elements that were emitted before it.
func countSmaller(nums []int) []int {
	count := make([]int, len(nums))
	indexes := make([]int, len(nums))
	for i := range indexes {
		indexes[i] = i
	}
	mergeSort(nums, &count, &indexes, 0, len(nums)-1)
	return count
}

// mergeSort sorts indexes[left..right] by the values they reference in nums,
// accumulating per-element counts along the way.
func mergeSort(nums []int, count, indexes *[]int, left, right int) {
	if left >= right {
		return
	}
	mid := left + (right-left)/2
	mergeSort(nums, count, indexes, left, mid)
	mergeSort(nums, count, indexes, mid+1, right)
	merge(nums, count, indexes, left, right)
}

// merge combines the two sorted index halves [left..mid] and [mid+1..right],
// adding, for each left-half element, the number of right-half elements that
// jumped ahead of it (those are exactly the smaller elements to its right).
func merge(nums []int, count, indexes *[]int, left, right int) {
	mid := left + (right-left)/2
	tmp := make([]int, 0, right-left+1) // merged index order
	l, r := left, mid+1                 // cursors into the two halves
	movedFromRight := 0                 // right-half elements emitted so far
	for l <= mid || r <= right {
		switch {
		case l > mid:
			// Left half exhausted: drain the right half.
			movedFromRight++
			tmp = append(tmp, (*indexes)[r])
			r++
		case r > right:
			// Right half exhausted: drain the left half, crediting counts.
			(*count)[(*indexes)[l]] += movedFromRight
			tmp = append(tmp, (*indexes)[l])
			l++
		case nums[(*indexes)[l]] <= nums[(*indexes)[r]]:
			(*count)[(*indexes)[l]] += movedFromRight
			tmp = append(tmp, (*indexes)[l])
			l++
		default:
			movedFromRight++
			tmp = append(tmp, (*indexes)[r])
			r++
		}
	}
	copy((*indexes)[left:], tmp)
}
|
package cron
import (
"errors"
"github.com/spf13/cobra"
"github.com/wish/ctl/cmd/util/parsing"
"github.com/wish/ctl/pkg/client"
)
// unsuspendCmd builds the "unsuspend" subcommand, which clears the suspend
// flag on exactly one cron job matching the given criteria.
func unsuspendCmd(c *client.Client) *cobra.Command {
	return &cobra.Command{
		Use:   "unsuspend cronjob [flags]",
		Short: "Unsuspend a cron job",
		Long: `Unsuspends the specified cron job.
If the cron job is not suspended, does nothing.`,
		Args: cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			ctxs, _ := cmd.Flags().GetStringSlice("context")
			namespace, _ := cmd.Flags().GetString("namespace")
			options, err := parsing.ListOptions(cmd, args)
			if err != nil {
				return err
			}
			matches, err := c.ListCronJobsOverContexts(ctxs, namespace, options)
			if err != nil {
				return err
			}
			// Refuse to act unless the criteria identify exactly one job.
			switch {
			case len(matches) == 0:
				return errors.New("no cronjobs found")
			case len(matches) > 1:
				return errors.New("too many cronjobs match the criteria")
			}
			job := matches[0]
			changed, err := c.SetCronJobSuspend(job.Context, job.Namespace, job.Name, false)
			if err != nil {
				return err
			}
			if changed {
				cmd.Println("Successfully unsuspended cron job", args[0])
			} else {
				cmd.Printf("Cron job \"%s\" was already unsuspended\n", args[0])
			}
			return nil
		},
	}
}
|
package storage
import (
"io"
"os"
"github.com/mlmhl/gcrawler/types"
)
// fileStorageName is the identifier reported by FileStorage.Name.
const fileStorageName = "File"

// Compile-time check that FileStorage satisfies the Storage interface.
var _ Storage = FileStorage{}

// FileStorage write all Item to a file on local disk.
type FileStorage struct {
	file *os.File
}

// NewFileStorage opens the file at path for appending (creating it if
// necessary) and wraps it in a FileStorage.
func NewFileStorage(path string) (FileStorage, error) {
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600)
	if err != nil {
		return FileStorage{}, err
	}
	return FileStorage{file: f}, nil
}

// Close releases the underlying file, if one was opened.
func (s FileStorage) Close() {
	if s.file == nil {
		return
	}
	s.file.Close()
}

// Put appends the item's content plus a trailing newline, reporting a short
// write as io.ErrShortWrite.
func (s FileStorage) Put(item types.Item) error {
	line := item.Content() + "\n"
	n, err := s.file.WriteString(line)
	if err == nil && n < len(line) {
		return io.ErrShortWrite
	}
	return err
}

// Name returns the storage backend identifier.
func (s FileStorage) Name() string {
	return fileStorageName
}
|
/*
Copyright 2021 CodeNotary, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package singleapp
import (
"os"
"github.com/codenotary/immudb/embedded/appendable"
)
// DefaultFileMode is the permission mode applied to newly created files.
const DefaultFileMode = os.FileMode(0644)
// Default compression settings, re-exported from the appendable package.
const DefaultCompressionFormat = appendable.DefaultCompressionFormat
const DefaultCompressionLevel = appendable.DefaultCompressionLevel
// Options bundles the configuration for a single-file appendable:
// access mode, durability, file permissions, compression and metadata.
type Options struct {
	readOnly bool
	synced bool
	fileMode os.FileMode
	compressionFormat int
	compressionLevel int
	metadata []byte
}
// DefaultOptions returns an Options initialized with sane defaults:
// writable, synced, default file mode and default compression settings.
func DefaultOptions() *Options {
	return &Options{
		readOnly:          false,
		synced:            true,
		fileMode:          DefaultFileMode,
		compressionFormat: DefaultCompressionFormat,
		compressionLevel:  DefaultCompressionLevel,
	}
}

// validOptions reports whether opts is usable at all.
func validOptions(opts *Options) bool {
	return opts != nil
}

// WithReadOnly toggles read-only mode and returns opts for chaining.
func (opts *Options) WithReadOnly(readOnly bool) *Options {
	opts.readOnly = readOnly
	return opts
}

// WithSynced toggles synchronous writes and returns opts for chaining.
func (opts *Options) WithSynced(synced bool) *Options {
	opts.synced = synced
	return opts
}

// WithFileMode sets the mode used for the underlying file and returns opts
// for chaining.
func (opts *Options) WithFileMode(fileMode os.FileMode) *Options {
	opts.fileMode = fileMode
	return opts
}

// WithCompressionFormat sets the compression format and returns opts for
// chaining.
func (opts *Options) WithCompressionFormat(compressionFormat int) *Options {
	opts.compressionFormat = compressionFormat
	return opts
}

// WithCompressionLevel sets the compression level and returns opts for
// chaining.
func (opts *Options) WithCompressionLevel(compressionLevel int) *Options {
	opts.compressionLevel = compressionLevel
	return opts
}

// WithCompresionLevel sets the compression level and returns opts for
// chaining.
//
// Deprecated: the name is misspelled; use WithCompressionLevel instead. This
// wrapper is kept only for backward compatibility with existing callers.
func (opts *Options) WithCompresionLevel(compressionLevel int) *Options {
	return opts.WithCompressionLevel(compressionLevel)
}

// WithMetadata attaches arbitrary metadata bytes and returns opts for
// chaining.
func (opts *Options) WithMetadata(metadata []byte) *Options {
	opts.metadata = metadata
	return opts
}
|
package main
import (
"fmt"
"log"
"os"
"github.com/square/p2/pkg/kp"
"github.com/square/p2/pkg/kp/flags"
"github.com/square/p2/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2"
"github.com/square/p2/pkg/version"
)
var (
	// nodeName selects the node whose manifests are watched; defaults to the
	// local hostname when left empty.
	nodeName = kingpin.Flag("node", "The node to do the scheduling on. Uses the hostname by default.").String()
	// watchReality switches the watch from the intent store to the reality store.
	watchReality = kingpin.Flag("reality", "Watch the reality store instead of the intent store. False by default").Default("false").Bool()
	// hooks switches the watch to the hook tree instead.
	hooks = kingpin.Flag("hook", "Watch hooks.").Bool()
)
// main watches the chosen pod-manifest tree (intent, reality or hooks) for
// this node and prints every manifest update until an error arrives on the
// watch channel.
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)
	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}
	// --reality wins over --hook; the default is the intent tree.
	podPrefix := kp.INTENT_TREE
	if *watchReality {
		podPrefix = kp.REALITY_TREE
	} else if *hooks {
		podPrefix = kp.HOOK_TREE
	}
	log.Printf("Watching manifests at %s/%s/\n", podPrefix, *nodeName)
	quit := make(chan struct{})
	errChan := make(chan error)
	podCh := make(chan []kp.ManifestResult)
	go store.WatchPods(podPrefix, *nodeName, quit, errChan, podCh)
	for {
		select {
		case results := <-podCh:
			if len(results) == 0 {
				// Was fmt.Println(fmt.Sprintf(...)); collapsed into a single
				// Printf call.
				fmt.Printf("No manifests exist for %s under %s (they may have been deleted)\n", *nodeName, podPrefix)
			} else {
				for _, result := range results {
					fmt.Println("")
					// NOTE(review): Write's error is ignored here — confirm
					// stdout failures are acceptable to drop.
					result.Manifest.Write(os.Stdout)
				}
			}
		case err := <-errChan:
			log.Fatalf("Error occurred while listening to pods: %s", err)
		}
	}
}
|
// Copyright 2020 The Reed Developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
package discover
import (
"github.com/reed/common/byteutil/byteconv"
"github.com/reed/log"
"github.com/sirupsen/logrus"
"math/rand"
"sort"
"sync"
"time"
)
// References
// Kademlia: A Peer-to-peer Information System Based on the XOR Metric
// http://www.scs.stanford.edu/~dm/home/papers/kpos.pdf
const (
	// kBucketSize (k) is the maximum number of node entries kept per bucket.
	kBucketSize = 16
	// The most important procedure a Kademlia participant must perform is to
	// locate the k closest nodes to some given node ID. We call this procedure
	// a node lookup. Kademlia employs a recursive algorithm for node lookups.
	// The lookup initiator starts by picking α nodes remoteAddr its closest non-empty
	// k-bucket (or, if that bucket has fewer than α entries, it just takes the
	// α closest nodes it knows of). The initiator then sends parallel, asynchronous
	// FIND NODE RPCs to the α nodes it has chosen.
	alpha = 3
	// IDBits is the node ID width in bits (ID length in bytes times 8).
	IDBits = len(NodeID{}) * 8
)
// Table is the Kademlia routing table: one bucket per XOR-logarithm distance
// from our own node, each holding up to kBucketSize entries.
type Table struct {
	mutex sync.Mutex
	Bucket [IDBits][]*bNode
	OurNode *Node
}
// bNode is a bucket entry: a node plus the time we last heard from it.
type bNode struct {
	node *Node
	lastConnAt time.Time
}
// NewTable returns an empty routing table centered on ourNode.
func NewTable(ourNode *Node) (*Table, error) {
	t := &Table{
		Bucket: [IDBits][]*bNode{},
		OurNode: ourNode,
	}
	return t, nil
}
// getNodeAccurate looks up the node with exactly the given ID. It returns
// the bucket entry, the bucket index (XOR-log distance) and the position
// inside that bucket, or (nil, 0, 0) when the ID is unknown.
func (t *Table) getNodeAccurate(id NodeID) (bNode *bNode, dist int, index int) {
	d := logarithmDist(t.OurNode.ID, id)
	entry, i := getNodeFromKbs(t.Bucket[d], id)
	if entry == nil {
		return nil, 0, 0
	}
	return entry, d, i
}
// delete removes the node with the given ID from its k-bucket, if present.
// Only the single bucket at the XOR-log distance needs to be scanned.
//
// NOTE(review): this method does not take t.mutex; confirm all callers
// already hold it or otherwise serialize access.
func (t *Table) delete(id NodeID) {
	dist := logarithmDist(t.OurNode.ID, id)
	for i, bn := range t.Bucket[dist] {
		if bn.node.ID == id {
			t.Bucket[dist] = append(t.Bucket[dist][:i], t.Bucket[dist][i+1:]...)
			// Log on the success path. Previously the removal returned
			// before logging, so "delete node complete" was only printed
			// when the node was NOT found.
			log.Logger.Info("delete node complete")
			t.printLog()
			return
		}
	}
}
// Add inserts n into the routing table unless it is our own node, already
// present, or its target bucket is full.
func (t *Table) Add(n *Node) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	if n == t.OurNode {
		return
	}
	for _, b := range t.Bucket {
		if contains(b, n.ID) {
			log.Logger.Debug("node exists in table")
			return
		}
	}
	dist := logarithmDist(t.OurNode.ID, n.ID)
	if len(t.Bucket[dist]) < kBucketSize {
		t.Bucket[dist] = append(t.Bucket[dist], &bNode{node: n, lastConnAt: time.Now().UTC()})
		// Log inside the branch: previously "added node" was emitted even
		// when the bucket was full and the node was silently dropped.
		log.Logger.WithFields(logrus.Fields{"ID": n.ID.ToString(), "IP": n.IP.String()}).Info("added node")
	}
	// TODO when len(kBucket) >= kBucketSize
	// do something... (e.g. ping the least-recently seen entry and evict it
	// if unresponsive, per the Kademlia paper)
}
// updateConnTimeAndRemoveToLast refreshes node n's last-contact timestamp and
// moves its entry to the tail of its k-bucket, keeping each bucket ordered
// from least- to most-recently seen. It is a no-op when n is not in the table.
func (t *Table) updateConnTimeAndRemoveToLast(n *Node) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	bn, dist, index := t.getNodeAccurate(n.ID)
	if bn != nil {
		bn.lastConnAt = time.Now().UTC()
		if len(t.Bucket[dist]) != index+1 {
			// move to last: shift the entries after index one slot left,
			// then place this entry in the freed tail slot
			copy(t.Bucket[dist][index:], t.Bucket[dist][index+1:])
			t.Bucket[dist][len(t.Bucket[dist])-1] = bn
		}
	}
}
// closest collects every node in the table into a distance-ordered list
// relative to target (capacity-limited by nodesByDistance.push).
func (t *Table) closest(target NodeID) *nodesByDistance {
	result := &nodesByDistance{target: target}
	for _, bucket := range t.Bucket {
		for _, entry := range bucket {
			result.push(entry.node)
		}
	}
	return result
}
// chooseRandomNode picks a random node that has not been contacted within
// the last hour; if every node is fresh, it falls back to any random node.
func (t *Table) chooseRandomNode() *Node {
	cutoff := time.Now().Add(-1 * time.Hour)
	stale := t.getRandomOne(func(bn *bNode) bool {
		return cutoff.After(bn.lastConnAt)
	})
	if stale != nil {
		return stale
	}
	return t.getRandomOne(func(bn *bNode) bool { return true })
}
// getRandomOne returns a uniformly random node among those matching
// condition, or nil when none match. Bucket scanning stops early once
// kBucketSize candidates have been gathered.
func (t *Table) getRandomOne(condition func(bn *bNode) bool) *Node {
	var candidates []*Node
	for _, bucket := range t.Bucket {
		for _, entry := range bucket {
			if condition(entry) {
				candidates = append(candidates, entry.node)
			}
		}
		if len(candidates) >= kBucketSize {
			break
		}
	}
	if len(candidates) == 0 {
		return nil
	}
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	return candidates[rng.Intn(len(candidates))]
}
// GetRandNodes returns up to count nodes drawn at random from the table,
// skipping any ID present in excludePeerIDs.
//
// NOTE(review): this reads t.Bucket without t.mutex; confirm callers
// serialize access with writers.
func (t *Table) GetRandNodes(count int, excludePeerIDs []NodeID) []*Node {
	exclude := func(id NodeID) bool {
		for _, eID := range excludePeerIDs {
			if eID == id {
				return true
			}
		}
		return false
	}
	var nodes []*Node
	for _, b := range t.Bucket {
		for _, n := range b {
			nodes = append(nodes, n.node)
		}
	}
	if len(nodes) == 0 {
		return nodes
	}
	// Fisher-Yates shuffle. The swap index must be drawn from [0, i], i.e.
	// randUint32(i + 1): the previous randUint32(i) could never leave
	// element i in place, yielding only cyclic permutations (Sattolo's
	// algorithm) and biasing the selection.
	for i := uint32(len(nodes) - 1); i > 0; i-- {
		j := randUint32(i + 1)
		nodes[i], nodes[j] = nodes[j], nodes[i]
	}
	var chooses []*Node
	for _, n := range nodes {
		if exclude(n.ID) {
			continue
		}
		chooses = append(chooses, n)
		if len(chooses) >= count {
			break
		}
	}
	return chooses
}
// printLog dumps every non-empty k-bucket and its entries at debug level.
func (t *Table) printLog() {
	for i, bucket := range t.Bucket {
		if len(bucket) == 0 {
			continue
		}
		log.Logger.Debugf("K-Bucket No:%d", i)
		for _, entry := range bucket {
			log.Logger.Debugf("---Addr:%s:%d ID:%s LastConnAt:%v", entry.node.IP, entry.node.UDPPort, entry.node.ID.ToString(), entry.lastConnAt)
		}
	}
}
// nodesByDistance is a list of nodes kept sorted by XOR distance to target
// and capped at kBucketSize entries.
type nodesByDistance struct {
	entries []*Node
	target NodeID
}
// push inserts n into the distance-ordered list, ignoring duplicate IDs.
// When the list is already full, a node closer than an existing entry
// displaces the current farthest entry (the tail is overwritten by the
// shift below), and a node farther than all entries is dropped.
func (nd *nodesByDistance) push(n *Node) {
	for _, entry := range nd.entries {
		if entry.ID == n.ID {
			return
		}
	}
	// ix is the insertion point: the first entry farther from target than n.
	ix := sort.Search(len(nd.entries), func(i int) bool {
		return computeDist(nd.target, nd.entries[i].ID, n.ID) > 0
	})
	if len(nd.entries) < kBucketSize {
		nd.entries = append(nd.entries, n)
	}
	if ix == len(nd.entries) {
		// farther away than all nodes we already have.
		// if there was room for it, the node is now the last element.
	} else {
		// slide existing entries down to make room
		// this will overwrite the entry we just appended.
		copy(nd.entries[ix+1:], nd.entries[ix:])
		nd.entries[ix] = n
	}
}
// computeDist compares which of a and b is XOR-closer to target, returning
// -1 when a is closer, 1 when b is closer and 0 when they are equidistant.
func computeDist(target, a, b NodeID) int {
	for i := range target {
		da, db := a[i]^target[i], b[i]^target[i]
		switch {
		case da > db:
			return 1
		case da < db:
			return -1
		}
	}
	return 0
}
// getNodeFromKbs scans a bucket for the given ID and returns the matching
// entry with its position, or (nil, 0) when absent.
func getNodeFromKbs(ns []*bNode, id NodeID) (*bNode, int) {
	for i, entry := range ns {
		if entry.node.ID == id {
			return entry, i
		}
	}
	return nil, 0
}
// contains reports whether a bucket holds a node with the given ID.
func contains(ns []*bNode, id NodeID) bool {
	entry, _ := getNodeFromKbs(ns, id)
	return entry != nil
}
// randUint32 returns a pseudo-random value in [0, max), or 0 when max < 2.
// NOTE(review): the plain modulo is slightly biased toward small values
// unless max divides 2^32, and math/rand.Read (deprecated since Go 1.20)
// never fails, which is why its error is ignored here — consider
// rand.Intn/rand.Uint32 instead.
func randUint32(max uint32) uint32 {
	if max < 2 {
		return 0
	}
	var b [4]byte
	rand.Read(b[:])
	return byteconv.ByteToUint32(b[:]) % max
}
// logarithmDist return distance between a and b
// return log2(a^b), i.e. the index of the k-bucket the pair belongs to.
// k-bucket distance description:
// 0 [2^0,2^1) holds distance 1: first 255 bits equal, differing from bit 256
// 1 [2^1,2^2) holds distance 2~3: first 254 bits equal, differing from bit 255
// 2 [2^2,2^3) holds distance 4~7: first 253 bits equal, differing from bit 254
// ...
// MEMO:
// IDs are 32 bytes, i.e. 256 bits.
// The loop below XORs the IDs byte by byte; a zero bit means "same" and a
// one bit means "different", so a leading-zero count of 255 means the first
// 255 bits are identical.
func logarithmDist(a, b NodeID) int {
	for i := range a {
		x := a[i] ^ b[i]
		if x != 0 {
			lz := i*8 + lzcount[x] // 256bit leading zero counts
			return IDBits - 1 - lz
		}
	}
	return 0
}
// table of leading zero counts for bytes [0..255]: lzcount[b] is the number
// of leading zero bits in the 8-bit representation of b (lzcount[0] == 8).
var lzcount = [256]int{
	8, 7, 6, 6, 5, 5, 5, 5,
	4, 4, 4, 4, 4, 4, 4, 4,
	3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3,
	2, 2, 2, 2, 2, 2, 2, 2,
	2, 2, 2, 2, 2, 2, 2, 2,
	2, 2, 2, 2, 2, 2, 2, 2,
	2, 2, 2, 2, 2, 2, 2, 2,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
}
|
package chanqueue
import (
"sync"
)
// CQueue is a fixed-capacity ring buffer of empty tokens used as a
// spin-waiting queue: Push busy-waits while the ring is full and Pop
// busy-waits while it is empty. One slot is always kept free so that a full
// ring can be distinguished from an empty one. All methods are safe for
// concurrent use.
type CQueue struct {
	sync.RWMutex
	data []struct{} // Size should remain constant throughout.
	out  int        // Where to pop.
	in   int        // Where to push.
	done bool
}

// NewCQueue returns a queue backed by a fixed ring of 1000 slots.
func NewCQueue() *CQueue {
	return &CQueue{data: make([]struct{}, 1000)}
}

// Done marks queue as done.
func (q *CQueue) Done() {
	q.Lock()
	q.done = true
	q.Unlock()
}

// IsDone reports whether the queue is done and fully drained.
func (q *CQueue) IsDone() bool {
	q.RLock()
	defer q.RUnlock()
	return q.done && q.in == q.out
}

// tryPush attempts to advance the write cursor; it fails when the ring is full.
func (q *CQueue) tryPush() bool {
	q.Lock()
	defer q.Unlock()
	next := (q.in + 1) % len(q.data)
	if next == q.out {
		return false
	}
	// q.data[q.in] = new element.
	q.in = next
	return true
}

// Push blocks (spinning) until a slot becomes available.
func (q *CQueue) Push() {
	for !q.tryPush() {
	}
}

// IsEmpty reports whether there is nothing to pop.
func (q *CQueue) IsEmpty() bool {
	q.RLock()
	defer q.RUnlock()
	return q.in == q.out
}

// tryPop attempts to advance the read cursor; it fails when the ring is empty.
func (q *CQueue) tryPop() bool {
	q.Lock()
	defer q.Unlock()
	if q.in == q.out {
		// Queue is empty.
		return false
	}
	// Element to return is q.data[q.out].
	q.out = (q.out + 1) % len(q.data)
	return true
}

// Pop blocks (spinning) until an element becomes available.
func (q *CQueue) Pop() {
	for !q.tryPop() {
	}
}
|
package main
import "fmt"
func main() {
	// Q1: what does the following code print?
	var fn1 = func() {
	}
	var fn2 = func() {
	}
	// if fn1 != fn2 {
	// println("fn1 not equal fn2")
	// }
	// Answer: compile error — function values are not comparable to each
	// other; they can only be compared against nil.
	if fn1 != nil {
		println("fn1 not equal nil")
	}
	if fn2 != nil {
		println("fn2 not equal nil")
	}
	// Both comparisons above are valid and print normally.
	// Q2: what does the following code print?
	type T struct {
		n int
	}
	// m := make(map[int]T)
	// m[0].n = 1
	// fmt.Println(m[0].n)
	// Compile error: cannot assign to struct field m[0].n in map
	// (map values are not addressable).
	// Fixed version — replace the whole map value:
	m := make(map[int]T)
	t := T{1}
	m[0] = t
	fmt.Println(m[0].n)
}
|
package main
import (
"bytes"
"encoding/hex"
"log"
"github.com/syndtr/goleveldb/leveldb"
)
//Blockchain Variable
const (
	// BlockChainLast is the LevelDB key under which the hash of the newest
	// block (the tip) is stored.
	BlockChainLast = "tip"
	// BlockchainFile is the on-disk path of the LevelDB block database.
	BlockchainFile = "lubit.db.block"
)
// BlockChain struct holds the chain state: the hash of the latest block and
// the LevelDB handle used for persistence.
type BlockChain struct {
	tip []byte
	lvl *leveldb.DB
}
// NewBlockChain opens (or creates) the chain database and returns a chain
// positioned at the stored tip. An empty database is bootstrapped with the
// genesis block. Storage failures abort via log.Panic, matching the error
// style used throughout this file; previously the two Put calls below had
// their errors silently dropped.
func NewBlockChain() *BlockChain {
	lvl, err := leveldb.OpenFile(BlockchainFile, nil)
	if err != nil {
		log.Panic(err)
	}
	tip, err := lvl.Get([]byte(BlockChainLast), nil)
	if err != nil {
		// empty chain
		if leveldb.ErrNotFound == err {
			block := NewGenesisBlock()
			tip = block.CurrHash
			if err := lvl.Put(block.CurrHash, block.Serialize(), nil); err != nil {
				log.Panic(err)
			}
			if err := lvl.Put([]byte(BlockChainLast), block.CurrHash, nil); err != nil {
				log.Panic(err)
			}
		} else {
			log.Panic("LevelDB get tip failed")
		}
	}
	chain := &BlockChain{
		tip: tip,
		lvl: lvl,
	}
	return chain
}
// NewGenesisBlock builds the first block of the chain: no predecessor hash
// and a single genesis transaction.
func NewGenesisBlock() *Block {
	genesisTx := NewGenesisTransaction()
	return NewBlock("lubitcoin genesis block", nil, []Transaction{*genesisTx})
}
// AddBlock links block to the current tip, persists it and advances the tip.
// Storage failures abort via log.Panic (previously both Put errors were
// silently dropped, which could desynchronize the stored tip from disk).
func (chain *BlockChain) AddBlock(block *Block) {
	block.PrevHash = chain.tip
	if err := chain.lvl.Put(block.CurrHash, block.Serialize(), nil); err != nil {
		log.Panic(err)
	}
	if err := chain.lvl.Put([]byte(BlockChainLast), block.CurrHash, nil); err != nil {
		log.Panic(err)
	}
	chain.tip = block.CurrHash
}
// ListBlocks walks the chain from the tip back to the genesis block, dumping
// each block along the way.
func (chain *BlockChain) ListBlocks() {
	for iter := chain.tip; iter != nil; {
		enc, _ := chain.lvl.Get(iter, nil)
		block := DeserializeBlock(enc)
		block.Dump()
		if block.PrevHash == nil {
			return
		}
		iter = block.PrevHash
	}
}
// FindUTXOByAddress walks the chain from tip to genesis collecting unspent
// outputs belonging to addr. It returns the map of transaction ID (hex) to
// output amount, the accumulated balance, and a nil error. When amount is
// not -1, scanning of a block's transactions stops early once the balance
// covers the requested amount.
//
// NOTE(review): STXI is keyed by the spending transaction's own ID and
// consulted per transaction, so spent-output tracking relies on the
// newest-to-oldest iteration order below; confirm this matches the
// simplified transaction model used by this chain.
func (chain *BlockChain) FindUTXOByAddress(addr []byte, amount int) (map[string]int, int, error) {
	balance := 0
	UTXO := make(map[string]int) // Unspent Transaction Output
	STXI := make(map[string]bool) // spent transaction input
	iter := chain.tip
	for {
		enc, _ := chain.lvl.Get([]byte(iter), nil)
		block := DeserializeBlock(enc)
		txs := block.Transactions
		for _, tx := range txs {
			txid := hex.EncodeToString(tx.TXID)
			if (amount != -1) && (balance >= amount) {
				break
			}
			// TXOUT : check previous inputs
			if _, exist := STXI[txid]; exist {
				log.Println("STXI exist", STXI, txid)
			} else {
				for _, out := range tx.TXOutputs {
					// check address
					if !bytes.Equal([]byte(out.Address), addr) {
						continue
					}
					UTXO[txid] = out.Amount
					balance += out.Amount
				}
			}
			// TXINPUT: record inputs spent by addr so older outputs they
			// reference are skipped when reached
			for _, in := range tx.TXInputs {
				id := hex.EncodeToString(in.TXID)
				if false == bytes.Equal([]byte(in.Address), addr) {
					continue
				} else {
					id = hex.EncodeToString(in.TXID)
					STXI[id] = true
				}
			}
		}
		// genesis block reached: stop walking
		if block.PrevHash == nil {
			log.Println("genesis block arrived")
			break
		}
		iter = block.PrevHash
	}
	return UTXO, balance, nil
}
// FindUTXO walks the chain from tip to genesis and returns every unspent
// transaction output, keyed by transaction ID (hex).
//
// NOTE(review): spent outputs are matched by address (STXI maps a spent
// transaction ID to the addresses that spent it) rather than by output
// index; confirm this is adequate for the simplified transaction model.
func (chain *BlockChain) FindUTXO() map[string][]TXOutput {
	UTXO := make(map[string][]TXOutput)
	STXI := make(map[string][]string)
	iter := chain.tip
	for {
		enc, err := chain.lvl.Get([]byte(iter), nil)
		if err != nil {
			log.Panic(err)
		}
		block := DeserializeBlock(enc)
		for _, tx := range block.Transactions {
			txid := hex.EncodeToString(tx.TXID)
			// UTXO collect: keep outputs not already marked spent for their address
			for _, out := range tx.TXOutputs {
				exist := false
				if STXI[txid] != nil {
					for _, addr := range STXI[txid] {
						if addr == out.Address {
							exist = true
						}
					}
				}
				if !exist {
					UTXO[txid] = append(UTXO[txid], out)
					log.Printf("UTXO: %+v \n", UTXO)
				}
			}
			// STXI: record which addresses spent which transactions
			for _, in := range tx.TXInputs {
				id := hex.EncodeToString(in.TXID)
				STXI[id] = append(STXI[id], in.Address)
			}
		}
		if block.PrevHash == nil {
			break
		} else {
			iter = block.PrevHash
		}
	}
	return UTXO
}
// GenerateUTXO is a placeholder for building a persistent UTXO index.
// TODO: implement.
func (chain *BlockChain) GenerateUTXO() {
}
|
package main
import (
"flag"
"io"
_ "github.com/go-sql-driver/mysql"
"net/http"
)
// silentHandle mutes a user in a live room. It expects "liveid" and "userid"
// form values, records the user in the room's silent list and flips the
// silent flag on any of the user's active connections.
func silentHandle(w http.ResponseWriter, req *http.Request) {
	liveid := req.FormValue("liveid")
	userid := req.FormValue("userid")
	// Reject the request when EITHER parameter is malformed. The original
	// nested ifs only failed when both were too short, letting a bad liveid
	// with a good userid (or vice versa) slip through.
	if len(liveid) < 6 || len(userid) < 3 {
		io.WriteString(w, "parameter error!\n")
		return
	}
	live, ok := live_map[liveid]
	if !ok {
		io.WriteString(w, "no liveid!\n")
		return
	}
	live.silentusers = append(live.silentusers, userid)
	for _, conn := range live.connections {
		if conn.userid == userid {
			conn.silent = true
		}
	}
	io.WriteString(w, "ok!\n")
}
// main wires up the chat server: background workers, static file routes, the
// websocket hub and the HTTP endpoints, then blocks serving on *addr.
func main() {
	flag.Parse()
	live_map = make(map[string]*Live)
	initLogger()
	//initRedis()
	// Background workers: ticker, hub event loop and presence checker.
	go timer()
	hub := newMainhub()
	go hub.run()
	go check_persons()
	/* start unix server */
	//go unixServer()
	http.Handle("/wspage/", http.FileServer(http.Dir("./js")))
	http.Handle("/", http.StripPrefix("/", http.FileServer(http.Dir("./js"))))
	http.Handle("/ws", wsHandler{h: hub})
	// http.Handle("/testapi/", testapiHandler{})
	// File upload and moderation endpoints.
	http.HandleFunc("/image", uploadHandle)
	http.HandleFunc("/silent", silentHandle)
	if err := http.ListenAndServe(*addr, nil); err != nil {
		logger.Println("wschat ListenAndServe:", err)
	}
}
|
package languages
const (
	// LanguagesGetLanguages is a string representation of the current endpoint
	// for getting languages, relative to the API base URL.
	LanguagesGetLanguages = "v1/metadata/getLanguages"
)
// Language is a struct containing matching data for a language found in text.
type Language struct {
	// Name - the language identified
	Name string `json:"name"`
	// Confidence - a float value from 0.0 to 1.0 of our trust in the result
	Confidence float32 `json:"confidence"`
}
|
package main
import (
"code.google.com/p/go.net/websocket"
"log"
)
// Msg is a routed websocket message: a route name plus string key/value data.
type Msg struct {
	Route string            `json:"route"`
	Data  map[string]string `json:"data"`
}

// Send writes the message to ws as JSON, logging (but not returning) any
// transport error.
func (m *Msg) Send(ws *websocket.Conn) {
	err := websocket.JSON.Send(ws, m)
	if err != nil {
		log.Println("send err", err)
	}
}
|
package main
import (
"fmt"
"github.com/sunmi-OS/gocore/utils"
)
// main demonstrates a URL encode/decode round trip using gocore's utils.
func main() {
	urls := "https://www.sunmi.com/"
	e, err := utils.UrlEncode(urls)
	if err != nil {
		fmt.Println("UrlEncode failed error", err)
	}
	fmt.Println("UrlEncode", e)
	// Decode the ENCODED value so the output demonstrates a true round trip;
	// the original decoded the raw URL instead of the encoded one.
	r, err := utils.UrlDecode(e)
	if err != nil {
		fmt.Println("UrlDecode failed error", err)
	}
	fmt.Println("UrlDecode", r)
}
|
package commands
import (
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rsa"
"crypto/x509"
"fmt"
"strings"
"time"
"github.com/spf13/cobra"
"github.com/authelia/authelia/v4/internal/utils"
)
// newCryptoCmd builds the top-level "crypto" command, which only groups the
// rand, certificate, hash and pair subcommands.
func newCryptoCmd(ctx *CmdCtx) (cmd *cobra.Command) {
	cmd = &cobra.Command{
		Use:               cmdUseCrypto,
		Short:             cmdAutheliaCryptoShort,
		Long:              cmdAutheliaCryptoLong,
		Example:           cmdAutheliaCryptoExample,
		Args:              cobra.NoArgs,
		DisableAutoGenTag: true,
	}
	for _, sub := range []*cobra.Command{
		newCryptoRandCmd(ctx),
		newCryptoCertificateCmd(ctx),
		newCryptoHashCmd(ctx),
		newCryptoPairCmd(ctx),
	} {
		cmd.AddCommand(sub)
	}
	return cmd
}
// newCryptoRandCmd builds "crypto rand", which generates random strings; the
// character set, an explicit character list and the length are flags.
func newCryptoRandCmd(ctx *CmdCtx) (cmd *cobra.Command) {
	cmd = &cobra.Command{
		Use:               cmdUseRand,
		Short:             cmdAutheliaCryptoRandShort,
		Long:              cmdAutheliaCryptoRandLong,
		Example:           cmdAutheliaCryptoRandExample,
		Args:              cobra.NoArgs,
		RunE:              ctx.CryptoRandRunE,
		DisableAutoGenTag: true,
	}
	flags := cmd.Flags()
	flags.StringP(cmdFlagNameCharSet, "x", cmdFlagValueCharSet, cmdFlagUsageCharset)
	flags.String(cmdFlagNameCharacters, "", cmdFlagUsageCharacters)
	flags.IntP(cmdFlagNameLength, "n", 72, cmdFlagUsageLength)
	return cmd
}
// newCryptoCertificateCmd builds "crypto certificate", a grouping command
// with one child per supported key algorithm.
func newCryptoCertificateCmd(ctx *CmdCtx) (cmd *cobra.Command) {
	cmd = &cobra.Command{
		Use:               cmdUseCertificate,
		Short:             cmdAutheliaCryptoCertificateShort,
		Long:              cmdAutheliaCryptoCertificateLong,
		Example:           cmdAutheliaCryptoCertificateExample,
		Args:              cobra.NoArgs,
		DisableAutoGenTag: true,
	}
	for _, algorithm := range []string{cmdUseRSA, cmdUseECDSA, cmdUseEd25519} {
		cmd.AddCommand(newCryptoCertificateSubCmd(ctx, algorithm))
	}
	return cmd
}
// newCryptoCertificateSubCmd returns the "certificate <algorithm>" command
// for one of rsa/ecdsa/ed25519, attaching the generate and request subcommands.
func newCryptoCertificateSubCmd(ctx *CmdCtx, use string) (cmd *cobra.Command) {
	useFmt := fmtCryptoCertificateUse(use)
	cmd = &cobra.Command{
		Use:               use,
		Short:             fmt.Sprintf(fmtCmdAutheliaCryptoCertificateSubShort, useFmt),
		Long:              fmt.Sprintf(fmtCmdAutheliaCryptoCertificateSubLong, useFmt, useFmt),
		Example:           fmt.Sprintf(fmtCmdAutheliaCryptoCertificateSubExample, use),
		Args:              cobra.NoArgs,
		DisableAutoGenTag: true,
	}
	cmd.AddCommand(newCryptoGenerateCmd(ctx, cmdUseCertificate, use), newCryptoCertificateRequestCmd(ctx, use))
	return cmd
}
// newCryptoCertificateRequestCmd returns the "certificate <algorithm> request"
// command, which generates a private key plus a CSR. The shared key/cert/CSR
// flags are registered first; algorithm-specific flags, example text, and
// help strings are filled in by the switch below.
func newCryptoCertificateRequestCmd(ctx *CmdCtx, algorithm string) (cmd *cobra.Command) {
	cmd = &cobra.Command{
		Use:               cmdUseRequest,
		Args:              cobra.NoArgs,
		RunE:              ctx.CryptoCertificateRequestRunE,
		DisableAutoGenTag: true,
	}
	// Flags common to every algorithm.
	cmdFlagsCryptoPrivateKey(cmd)
	cmdFlagsCryptoCertificateCommon(cmd)
	cmdFlagsCryptoCertificateRequest(cmd)
	algorithmFmt := fmtCryptoCertificateUse(algorithm)
	cmd.Short = fmt.Sprintf(fmtCmdAutheliaCryptoCertificateGenerateRequestShort, algorithmFmt, cryptoCertCSROut)
	cmd.Long = fmt.Sprintf(fmtCmdAutheliaCryptoCertificateGenerateRequestLong, algorithmFmt, cryptoCertCSROut, algorithmFmt, cryptoCertCSROut)
	// Per-algorithm example text and key flags (e.g. bits for RSA, curve for ECDSA).
	switch algorithm {
	case cmdUseRSA:
		cmd.Example = cmdAutheliaCryptoCertificateRSARequestExample
		cmdFlagsCryptoPrivateKeyRSA(cmd)
	case cmdUseECDSA:
		cmd.Example = cmdAutheliaCryptoCertificateECDSARequestExample
		cmdFlagsCryptoPrivateKeyECDSA(cmd)
	case cmdUseEd25519:
		cmd.Example = cmdAutheliaCryptoCertificateEd25519RequestExample
		cmdFlagsCryptoPrivateKeyEd25519(cmd)
	}
	return cmd
}
// newCryptoPairCmd returns the "crypto pair" command with one subcommand per
// supported key algorithm (rsa, ecdsa, ed25519).
func newCryptoPairCmd(ctx *CmdCtx) (cmd *cobra.Command) {
	cmd = &cobra.Command{
		Use:               cmdUsePair,
		Short:             cmdAutheliaCryptoPairShort,
		Long:              cmdAutheliaCryptoPairLong,
		Example:           cmdAutheliaCryptoPairExample,
		Args:              cobra.NoArgs,
		DisableAutoGenTag: true,
	}
	cmd.AddCommand(
		newCryptoPairSubCmd(ctx, cmdUseRSA),
		newCryptoPairSubCmd(ctx, cmdUseECDSA),
		newCryptoPairSubCmd(ctx, cmdUseEd25519),
	)
	return cmd
}
// newCryptoPairSubCmd returns the "pair <algorithm>" command for one of
// rsa, ecdsa, or ed25519, and attaches its generate subcommand.
func newCryptoPairSubCmd(ctx *CmdCtx, use string) (cmd *cobra.Command) {
	useFmt := fmtCryptoCertificateUse(use)

	// Pick the per-algorithm example text shown in help output.
	var example string

	switch use {
	case cmdUseRSA:
		example = cmdAutheliaCryptoPairRSAExample
	case cmdUseECDSA:
		example = cmdAutheliaCryptoPairECDSAExample
	case cmdUseEd25519:
		example = cmdAutheliaCryptoPairEd25519Example
	}

	cmd = &cobra.Command{
		Use:               use,
		Short:             fmt.Sprintf(cmdAutheliaCryptoPairSubShort, useFmt),
		Long:              fmt.Sprintf(cmdAutheliaCryptoPairSubLong, useFmt, useFmt),
		Example:           example,
		Args:              cobra.NoArgs,
		RunE:              ctx.CryptoGenerateRunE,
		DisableAutoGenTag: true,
	}

	cmd.AddCommand(newCryptoGenerateCmd(ctx, cmdUsePair, use))

	return cmd
}
// newCryptoGenerateCmd returns the shared "generate" subcommand used by both
// the certificate and pair command trees. category selects which flag set,
// short/long text, and examples are installed; algorithm selects the
// algorithm-specific private-key flags.
func newCryptoGenerateCmd(ctx *CmdCtx, category, algorithm string) (cmd *cobra.Command) {
	cmd = &cobra.Command{
		Use:               cmdUseGenerate,
		Args:              cobra.NoArgs,
		RunE:              ctx.CryptoGenerateRunE,
		DisableAutoGenTag: true,
	}
	// Private-key flags are common to both categories.
	cmdFlagsCryptoPrivateKey(cmd)
	algorithmFmt := fmtCryptoCertificateUse(algorithm)
	switch category {
	case cmdUseCertificate:
		// Certificate generation: add cert flags plus per-algorithm key flags.
		cmdFlagsCryptoCertificateCommon(cmd)
		cmdFlagsCryptoCertificateGenerate(cmd)
		cmd.Short = fmt.Sprintf(fmtCmdAutheliaCryptoCertificateGenerateRequestShort, algorithmFmt, cryptoCertPubCertOut)
		cmd.Long = fmt.Sprintf(fmtCmdAutheliaCryptoCertificateGenerateRequestLong, algorithmFmt, cryptoCertPubCertOut, algorithmFmt, cryptoCertPubCertOut)
		switch algorithm {
		case cmdUseRSA:
			cmd.Example = cmdAutheliaCryptoCertificateRSAGenerateExample
			cmdFlagsCryptoPrivateKeyRSA(cmd)
		case cmdUseECDSA:
			cmd.Example = cmdAutheliaCryptoCertificateECDSAGenerateExample
			cmdFlagsCryptoPrivateKeyECDSA(cmd)
		case cmdUseEd25519:
			cmd.Example = cmdAutheliaCryptoCertificateEd25519GenerateExample
			cmdFlagsCryptoPrivateKeyEd25519(cmd)
		}
	case cmdUsePair:
		// Key-pair generation: pair flags plus per-algorithm key flags.
		cmdFlagsCryptoPairGenerate(cmd)
		cmd.Short = fmt.Sprintf(fmtCmdAutheliaCryptoPairGenerateShort, algorithmFmt)
		cmd.Long = fmt.Sprintf(fmtCmdAutheliaCryptoPairGenerateLong, algorithmFmt, algorithmFmt)
		switch algorithm {
		case cmdUseRSA:
			cmd.Example = cmdAutheliaCryptoPairRSAGenerateExample
			cmdFlagsCryptoPrivateKeyRSA(cmd)
		case cmdUseECDSA:
			cmd.Example = cmdAutheliaCryptoPairECDSAGenerateExample
			cmdFlagsCryptoPrivateKeyECDSA(cmd)
		case cmdUseEd25519:
			cmd.Example = cmdAutheliaCryptoPairEd25519GenerateExample
			cmdFlagsCryptoPrivateKeyEd25519(cmd)
		}
	}
	return cmd
}
// CryptoRandRunE is the RunE for the authelia crypto rand command. It reads
// the length and character-set flags, builds a random string from them, and
// prints the result to stdout.
func (ctx *CmdCtx) CryptoRandRunE(cmd *cobra.Command, args []string) (err error) {
	random, err := flagsGetRandomCharacters(cmd.Flags(), cmdFlagNameLength, cmdFlagNameCharSet, cmdFlagNameCharacters)
	if err != nil {
		return err
	}

	fmt.Printf("Random Value: %s\n", random)

	return nil
}
// CryptoGenerateRunE is the RunE for the authelia crypto [pair|certificate] [rsa|ecdsa|ed25519] commands.
// It derives the private key from the command flags and then dispatches to
// the certificate generator when the grandparent command is "certificate",
// otherwise to the key-pair generator.
func (ctx *CmdCtx) CryptoGenerateRunE(cmd *cobra.Command, args []string) (err error) {
	privateKey, err := ctx.cryptoGenPrivateKeyFromCmd(cmd)
	if err != nil {
		return err
	}

	if cmd.Parent().Parent().Use == cmdUseCertificate {
		return ctx.CryptoCertificateGenerateRunE(cmd, args, privateKey)
	}

	return ctx.CryptoPairGenerateRunE(cmd, args, privateKey)
}
// CryptoCertificateRequestRunE is the RunE for the authelia crypto certificate request command.
// It builds a private key and CSR template from the flags, prints a summary
// of what will be generated, then writes the key and the PEM-encoded CSR.
func (ctx *CmdCtx) CryptoCertificateRequestRunE(cmd *cobra.Command, _ []string) (err error) {
	var (
		template                *x509.CertificateRequest
		privateKey              any
		csr                     []byte
		privateKeyPath, csrPath string
		pkcs8                   bool
	)
	if privateKey, err = ctx.cryptoGenPrivateKeyFromCmd(cmd); err != nil {
		return err
	}
	// pkcs8 selects the private key PEM encoding used when writing it out.
	if pkcs8, err = cmd.Flags().GetBool(cmdFlagNamePKCS8); err != nil {
		return err
	}
	if template, err = cryptoGetCSRFromCmd(cmd); err != nil {
		return err
	}
	// Assemble the human-readable summary before doing any work.
	b := strings.Builder{}
	b.WriteString("Generating Certificate Request\n\n")
	b.WriteString("Subject:\n")
	b.WriteString(fmt.Sprintf("\tCommon Name: %s, Organization: %s, Organizational Unit: %s\n", template.Subject.CommonName, template.Subject.Organization, template.Subject.OrganizationalUnit))
	b.WriteString(fmt.Sprintf("\tCountry: %v, Province: %v, Street Address: %v, Postal Code: %v, Locality: %v\n\n", template.Subject.Country, template.Subject.Province, template.Subject.StreetAddress, template.Subject.PostalCode, template.Subject.Locality))
	b.WriteString("Properties:\n")
	b.WriteString(fmt.Sprintf("\tSignature Algorithm: %s, Public Key Algorithm: %s", template.SignatureAlgorithm, template.PublicKeyAlgorithm))
	// Key-size detail is only meaningful for RSA/ECDSA; Ed25519 has none.
	switch k := privateKey.(type) {
	case *rsa.PrivateKey:
		b.WriteString(fmt.Sprintf(", Bits: %d", k.N.BitLen()))
	case *ecdsa.PrivateKey:
		b.WriteString(fmt.Sprintf(", Elliptic Curve: %s", k.Curve.Params().Name))
	}
	b.WriteString(fmt.Sprintf("\n\tSubject Alternative Names: %s\n\n", strings.Join(cryptoSANsToString(template.DNSNames, template.IPAddresses), ", ")))
	if _, privateKeyPath, csrPath, err = cryptoGetWritePathsFromCmd(cmd); err != nil {
		return err
	}
	b.WriteString("Output Paths:\n")
	b.WriteString(fmt.Sprintf("\tPrivate Key: %s\n", privateKeyPath))
	b.WriteString(fmt.Sprintf("\tCertificate Request: %s\n\n", csrPath))
	// Print the summary before generation so the user sees it even if
	// CSR creation fails below.
	fmt.Print(b.String())
	b.Reset()
	if csr, err = x509.CreateCertificateRequest(ctx.providers.Random, template, privateKey); err != nil {
		return fmt.Errorf("failed to create certificate request: %w", err)
	}
	if err = utils.WriteKeyToPEM(privateKey, privateKeyPath, pkcs8); err != nil {
		return err
	}
	if err = utils.WriteCertificateBytesAsPEMToPath(csrPath, true, csr); err != nil {
		return err
	}
	return nil
}
// CryptoCertificateGenerateRunE is the RunE for the authelia crypto certificate [rsa|ecdsa|ed25519] commands.
// Given the already-generated private key, it builds the certificate
// template, signs it (self-signed, or by the CA when one is supplied via
// flags), writes the key and certificate to disk, and optionally writes
// bundle files.
func (ctx *CmdCtx) CryptoCertificateGenerateRunE(cmd *cobra.Command, _ []string, privateKey any) (err error) {
	var (
		template, caCertificate, parent       *x509.Certificate
		publicKey, caPrivateKey, signatureKey any
		pkcs8                                 bool
	)
	if pkcs8, err = cmd.Flags().GetBool(cmdFlagNamePKCS8); err != nil {
		return err
	}
	if publicKey = utils.PublicKeyFromPrivateKey(privateKey); publicKey == nil {
		return fmt.Errorf("failed to obtain public key from private key")
	}
	if caPrivateKey, caCertificate, err = cryptoGetCAFromCmd(cmd); err != nil {
		return err
	}
	// Sign with our own key unless a CA key was provided.
	signatureKey = privateKey
	if caPrivateKey != nil {
		signatureKey = caPrivateKey
	}
	if template, err = ctx.cryptoGetCertificateFromCmd(cmd); err != nil {
		return err
	}
	// Assemble the human-readable summary.
	b := &strings.Builder{}
	b.WriteString("Generating Certificate\n\n")
	b.WriteString(fmt.Sprintf("\tSerial: %x\n\n", template.SerialNumber))
	// The signing parent is the template itself (self-signed) or the CA cert.
	switch caCertificate {
	case nil:
		parent = template
		b.WriteString("Signed By:\n\tSelf-Signed\n")
	default:
		parent = caCertificate
		b.WriteString(fmt.Sprintf("Signed By:\n\t%s\n", caCertificate.Subject.CommonName))
		b.WriteString(fmt.Sprintf("\tSerial: %x, Expires: %s\n", caCertificate.SerialNumber, caCertificate.NotAfter.Format(time.RFC3339)))
	}
	b.WriteString("\nSubject:\n")
	b.WriteString(fmt.Sprintf("\tCommon Name: %s, Organization: %s, Organizational Unit: %s\n", template.Subject.CommonName, template.Subject.Organization, template.Subject.OrganizationalUnit))
	b.WriteString(fmt.Sprintf("\tCountry: %v, Province: %v, Street Address: %v, Postal Code: %v, Locality: %v\n\n", template.Subject.Country, template.Subject.Province, template.Subject.StreetAddress, template.Subject.PostalCode, template.Subject.Locality))
	b.WriteString("Properties:\n")
	b.WriteString(fmt.Sprintf("\tNot Before: %s, Not After: %s\n", template.NotBefore.Format(time.RFC3339), template.NotAfter.Format(time.RFC3339)))
	b.WriteString(fmt.Sprintf("\tCA: %v, CSR: %v, Signature Algorithm: %s, Public Key Algorithm: %s", template.IsCA, false, template.SignatureAlgorithm, template.PublicKeyAlgorithm))
	// Key-size detail is only meaningful for RSA/ECDSA; Ed25519 has none.
	switch k := privateKey.(type) {
	case *rsa.PrivateKey:
		b.WriteString(fmt.Sprintf(", Bits: %d", k.N.BitLen()))
	case *ecdsa.PrivateKey:
		b.WriteString(fmt.Sprintf(", Elliptic Curve: %s", k.Curve.Params().Name))
	}
	b.WriteString(fmt.Sprintf("\n\tSubject Alternative Names: %s\n\n", strings.Join(cryptoSANsToString(template.DNSNames, template.IPAddresses), ", ")))
	var (
		dir, privateKeyPath, certificatePath string
		certificate                          []byte
	)
	if dir, privateKeyPath, certificatePath, err = cryptoGetWritePathsFromCmd(cmd); err != nil {
		return err
	}
	b.WriteString("Output Paths:\n")
	b.WriteString(fmt.Sprintf("\tPrivate Key: %s\n", privateKeyPath))
	b.WriteString(fmt.Sprintf("\tCertificate: %s\n", certificatePath))
	if certificate, err = x509.CreateCertificate(ctx.providers.Random, template, parent, publicKey, signatureKey); err != nil {
		return fmt.Errorf("failed to create certificate: %w", err)
	}
	if err = utils.WriteKeyToPEM(privateKey, privateKeyPath, pkcs8); err != nil {
		return err
	}
	if err = utils.WriteCertificateBytesAsPEMToPath(certificatePath, false, certificate); err != nil {
		return err
	}
	// Bundle output is opt-in via the bundles flag; it may append more
	// path lines to the summary.
	if cmd.Flags().Changed(cmdFlagNameBundles) {
		if err = cryptoGenerateCertificateBundlesFromCmd(cmd, b, dir, caCertificate, certificate, privateKey); err != nil {
			return err
		}
	}
	b.WriteString("\n")
	// The summary is printed only after everything succeeded.
	fmt.Print(b.String())
	b.Reset()
	return nil
}
// CryptoPairGenerateRunE is the RunE for the authelia crypto pair [rsa|ecdsa|ed25519] commands.
// It prints a summary of the pair being generated, derives the public key
// from the supplied private key, and writes both to their PEM paths.
func (ctx *CmdCtx) CryptoPairGenerateRunE(cmd *cobra.Command, _ []string, privateKey any) (err error) {
	var (
		privateKeyPath, publicKeyPath string
		pkcs8                         bool
	)
	if pkcs8, err = cmd.Flags().GetBool(cmdFlagNamePKCS8); err != nil {
		return err
	}
	if _, privateKeyPath, publicKeyPath, err = cryptoGetWritePathsFromCmd(cmd); err != nil {
		return err
	}
	b := strings.Builder{}
	b.WriteString("Generating key pair\n\n")
	// Describe the algorithm from the concrete key type.
	// NOTE(review): for RSA this prints "RSA-<bytes> <bits> bits" since
	// k.Size() is the modulus size in bytes — confirm the label is intended.
	switch k := privateKey.(type) {
	case *rsa.PrivateKey:
		b.WriteString(fmt.Sprintf("\tAlgorithm: RSA-%d %d bits\n\n", k.Size(), k.N.BitLen()))
	case *ecdsa.PrivateKey:
		b.WriteString(fmt.Sprintf("\tAlgorithm: ECDSA Curve %s\n\n", k.Curve.Params().Name))
	case ed25519.PrivateKey:
		b.WriteString("\tAlgorithm: Ed25519\n\n")
	}
	b.WriteString("Output Paths:\n")
	b.WriteString(fmt.Sprintf("\tPrivate Key: %s\n", privateKeyPath))
	b.WriteString(fmt.Sprintf("\tPublic Key: %s\n\n", publicKeyPath))
	// Summary is printed before writing, so it appears even if a write fails.
	fmt.Print(b.String())
	b.Reset()
	if err = utils.WriteKeyToPEM(privateKey, privateKeyPath, pkcs8); err != nil {
		return err
	}
	var publicKey any
	if publicKey = utils.PublicKeyFromPrivateKey(privateKey); publicKey == nil {
		return fmt.Errorf("failed to obtain public key from private key")
	}
	if err = utils.WriteKeyToPEM(publicKey, publicKeyPath, pkcs8); err != nil {
		return err
	}
	return nil
}
|
package gui
import "tetra/lib/glman"
// TestPane is a pane for testing: it hosts a single button whose label
// doubles as the pane's persisted state.
type TestPane struct {
	Pane
	btn IButton
}

// Init creates the button, assigns it a font, and inserts it into the pane
// (index -1; presumably "append at end" — confirm against Pane.Insert).
func (pn *TestPane) Init() {
	pn.btn = NewButton()
	pn.btn.SetFont(glman.LoadFont("WQY-ZenHei", 20))
	pn.Insert(-1, pn.btn)
}

// State serializes the pane as the button's current label text. It never
// returns an error.
func (pn *TestPane) State() ([]byte, error) {
	return []byte(pn.btn.Text()), nil
}

// SetState restores the pane by setting the button label from data. It never
// returns an error.
func (pn *TestPane) SetState(data []byte) error {
	pn.btn.SetText(string(data))
	return nil
}
|
package hot100
// plusOne increments the decimal number represented by digits (most
// significant digit first) by one, mutating the slice in place. Only when
// every digit is 9 does it allocate a longer slice (e.g. 999 -> 1000).
// A nil result is returned for empty input.
func plusOne(digits []int) []int {
	if len(digits) == 0 {
		return nil
	}
	// Walk from the least-significant digit, propagating the carry.
	for i := len(digits) - 1; i >= 0; i-- {
		if digits[i] < 9 {
			// No carry past this digit: bump it and stop.
			digits[i]++
			return digits
		}
		// 9 + 1 rolls over to 0 and the carry continues leftward.
		digits[i] = 0
	}
	// Every digit was 9, so the result needs one extra digit: a leading 1
	// followed by zeros.
	result := make([]int, len(digits)+1)
	result[0] = 1
	return result
}
|
package storage
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
// S3Storage persists repositories as individual JSON objects in one S3
// bucket, keyed "<repository id>.json".
type S3Storage struct {
	// Bucket is the name of the target S3 bucket.
	Bucket string
}

// NewS3Storage returns an S3Storage targeting the given bucket. AWS
// sessions are created lazily in Store/Load using the AWS_PROFILE
// environment variable.
func NewS3Storage(bucket string) S3Storage {
	return S3Storage{
		Bucket: bucket,
	}
}
// Store writes r to the bucket as "<ID>.json" (indented JSON). A repository
// with no plugins is treated as a deletion: its object is removed instead.
func (f S3Storage) Store(r Repository) error {
	// New session per call; credentials come from the AWS_PROFILE env var.
	sess, err := session.NewSessionWithOptions(session.Options{
		Profile: os.Getenv("AWS_PROFILE"),
	})
	if err != nil {
		return err
	}
	svc := s3.New(sess)
	// Empty plugin list: delete the stored object rather than persist it.
	if len(r.Plugins) == 0 {
		params := &s3.DeleteObjectInput{
			Bucket: aws.String(f.Bucket),
			Key:    aws.String(fmt.Sprintf("%s.json", r.ID)),
		}
		_, err := svc.DeleteObject(params)
		return err
	}
	bs, err := json.MarshalIndent(&r, "", "\t")
	if err != nil {
		return err
	}
	params := &s3.PutObjectInput{
		Bucket: aws.String(f.Bucket),
		Key:    aws.String(fmt.Sprintf("%s.json", r.ID)),
		Body:   bytes.NewReader(bs),
	}
	_, err = svc.PutObject(params)
	return err
}
// Load lists every object in the bucket and unmarshals each one into a
// Repository. It fails fast on the first listing, fetch, read, or decode
// error.
func (f S3Storage) Load() ([]Repository, error) {
	sess, err := session.NewSessionWithOptions(session.Options{
		Profile: os.Getenv("AWS_PROFILE"),
	})
	if err != nil {
		return nil, fmt.Errorf("Unable to get S3 session: %q", err)
	}
	svc := s3.New(sess)
	params := &s3.ListObjectsInput{
		Bucket: aws.String(f.Bucket),
	}
	resp, err := svc.ListObjects(params)
	if err != nil {
		return nil, fmt.Errorf("Unable to list objects: %q", err)
	}
	var repos []Repository
	for _, o := range resp.Contents {
		params := &s3.GetObjectInput{
			Bucket: aws.String(f.Bucket),
			Key:    aws.String(*o.Key),
		}
		obj, err := svc.GetObject(params)
		if err != nil {
			return nil, err
		}
		b, err := ioutil.ReadAll(obj.Body)
		// Close the body as soon as it is consumed; the original never
		// closed it, leaking a connection per object. Closed explicitly
		// (not deferred) so the loop does not accumulate open bodies.
		obj.Body.Close()
		if err != nil {
			return nil, err
		}
		var r Repository
		// The original ignored this error, silently yielding zero-valued
		// repositories for corrupt objects.
		if err := json.Unmarshal(b, &r); err != nil {
			return nil, fmt.Errorf("Unable to parse %s: %q", *o.Key, err)
		}
		repos = append(repos, r)
	}
	return repos, nil
}
|
package main
import (
"github.com/micro/go-micro/client"
"github.com/plexmediamanager/micro-redis/proto"
"github.com/plexmediamanager/micro-redis/redis"
"github.com/plexmediamanager/micro-redis/resolver"
"github.com/plexmediamanager/service"
"github.com/plexmediamanager/service/log"
"time"
)
// main boots the redis micro-service: connect to Redis, load configuration,
// initialize the micro service and its RPC client, register the gRPC
// handler, then serve until an OS signal arrives.
func main() {
	application := service.CreateApplication()
	// Connect to Redis first; every failure during startup is fatal.
	redisClient, err := redis.Initialize().Connect()
	if err != nil {
		log.Panic(err)
	}
	err = application.InitializeConfiguration()
	if err != nil {
		log.Panic(err)
	}
	err = application.InitializeMicroService()
	if err != nil {
		log.Panic(err)
	}
	// RPC client tuning: connection pool of 10, up to 30 retries, 1s
	// per-request timeout.
	err = application.Service().Client().Init(
		client.PoolSize(10),
		client.Retries(30),
		client.RequestTimeout(1 * time.Second),
	)
	if err != nil {
		log.Panic(err)
	}
	err = proto.RegisterRedisServiceHandler(application.Service().Server(), resolver.RedisService{ Redis: redisClient })
	if err != nil {
		log.Panic(err)
	}
	// Serve in the background; block here until the first OS signal.
	go application.StartMicroService()
	service.WaitForOSSignal(1)
	// Best-effort Redis shutdown; a failure is logged, not fatal.
	err = redisClient.Disconnect()
	if err != nil {
		log.Printf("Connection to Redis was closed with the following error: %v", err)
	}
}
|
package cmd_test
import (
"testing"
"opendev.org/airship/airshipctl/cmd"
"opendev.org/airship/airshipctl/testutil"
)
func TestVersion(t *testing.T) {
versionCmd := cmd.NewVersionCommand()
cmdTests := []*testutil.CmdTest{
{
Name: "version",
CmdLine: "",
Cmd: versionCmd,
},
{
Name: "version-help",
CmdLine: "--help",
Cmd: versionCmd,
},
}
for _, tt := range cmdTests {
testutil.RunTest(t, tt)
}
}
|
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/etcd-io/etcd/clientv3"
"github.com/gin-gonic/gin"
)
// node mirrors the JSON a gmq node writes to etcd when it registers.
type node struct {
	Id       string `json:"node_id"`
	TcpAddr  string `json:"tcp_addr"`
	HttpAddr string `json:"http_addr"`
	Weight   string `json:"weight"`
	JoinTime string `json:"join_time"`
}

// topicData is the per-topic statistics block reported by a node.
type topicData struct {
	Name      string `json:"name"`
	PopNum    int64  `json:"pop_num"`
	PushNum   int64  `json:"push_num"`
	BucketNum int    `json:"bucket_num"`
	DeadNum   int    `json:"dead_num"`
	StartTime string `json:"start_time"`
}

// respStruct is the JSON envelope relayed between gmq nodes and web clients.
type respStruct struct {
	Code int         `json:"code"`
	Data interface{} `json:"data"`
	Msg  string      `json:"msg"`
}

// msg is the payload pushed to a node.
// NOTE(review): routeKey is unexported, so encoding/json IGNORES it — the
// route_key tag never takes effect and push() silently drops the route key
// when marshaling. Exporting the field (RouteKey) would fix serialization;
// left unchanged here because push() also references the field by name.
type msg struct {
	Topic    string `json:"topic"`
	Body     string `json:"body"`
	Delay    int    `json:"delay"`
	routeKey string `json:"route_key"`
}

// webAddr is the listen address of this admin UI (set by -web_addr).
var webAddr string

// etcdCli is the shared etcd client created in connectToEtcd.
var etcdCli *clientv3.Client

// registerAddr is the base URL of the register center used by registerNode
// and editNodeWeight.
// NOTE(review): it is never assigned anywhere in this file, so those
// handlers currently target an empty host — confirm where it should be set.
var registerAddr string
// main parses the command-line options, connects the shared etcd client,
// and then runs the gin web server until it stops.
func main() {
	// Command-line options (note: the endpoints flag name keeps the
	// original "ectd_endpoints" spelling for compatibility).
	var endpoints string
	flag.StringVar(&endpoints, "ectd_endpoints", "127.0.0.1:2379", "etcd endpoints")
	flag.StringVar(&webAddr, "web_addr", ":8080", "the address of gmq-web")
	flag.Parse()

	// The etcd client is required by every node/topic handler.
	if err := connectToEtcd(strings.Split(endpoints, ",")); err != nil {
		log.Fatalf("connect to etcd failed, %s", err)
	}

	// Hand the server a cancellable context so it can be shut down.
	ctx, cancel := context.WithCancel(context.Background())
	run(ctx, cancel)
}
// connectToEtcd creates the package-wide etcd client against the given
// endpoints with a 3s dial timeout.
func connectToEtcd(endpoints []string) error {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: 3 * time.Second,
	})
	if err != nil {
		// Include the underlying error; the original discarded it, which
		// made dial failures impossible to diagnose.
		return fmt.Errorf("can't create etcd client: %v", err)
	}
	etcdCli = cli
	return nil
}
// run registers all gin routes, serves HTTP on webAddr, and shuts the
// server down when ctx is cancelled. cancel is accepted but not currently
// used by this function.
func run(ctx context.Context, cancel context.CancelFunc) {
	// gin.SetMode(gin.ReleaseMode)
	r := gin.New()
	r.StaticFS("/static", http.Dir("static"))
	r.LoadHTMLGlob("views/*")
	r.GET("/", index)
	r.GET("/login", login)
	r.GET("/home", home)
	// Node management.
	r.GET("/nodeList", nodeList)
	r.GET("/addNode", addNode)
	r.GET("/getNodes", getNodes)
	r.POST("/registerNode", registerNode)
	r.GET("/unRegisterNode", unRegisterNode)
	r.POST("/editNodeWeight", editNodeWeight)
	// Topic management.
	r.GET("/topicList", topicList)
	r.GET("/removeTopic", removeTopic)
	r.GET("/getTopics", getTopics)
	r.GET("/setIsAutoAck", setIsAutoAck)
	// Message management.
	r.GET("/msgDemo", msgDemo)
	r.GET("/declare", declareQueue)
	r.POST("/push", push)
	r.GET("/pop", pop)
	r.GET("/ack", ack)
	// r.GET("/dead", dead)
	// r.GET("/mpush", mpush)
	serv := &http.Server{
		Addr:    webAddr,
		Handler: r,
	}
	// Shut the server down when the context is cancelled.
	// NOTE(review): Shutdown is given the already-cancelled ctx, so it gets
	// no grace period for in-flight requests — confirm that is intended.
	go func() {
		<-ctx.Done()
		if err := serv.Shutdown(ctx); err != nil {
			log.Fatalln("web exit:", err)
		}
		log.Println("web exist")
	}()
	// Blocks until the listener fails or Shutdown is called.
	if err := serv.ListenAndServe(); err != nil {
		return
	}
}
// index renders the entry page shell with site name, version, and the
// displayed login user.
func index(c *gin.Context) {
	c.HTML(http.StatusOK, "entry.html", gin.H{
		"siteName":      "gmq-web管理",
		"version":       "v3.0",
		"loginUserName": "wuzhc",
	})
}

// home renders the dashboard home page.
func home(c *gin.Context) {
	c.HTML(http.StatusOK, "home.html", gin.H{
		"title": "主页",
	})
}

// login renders the login page.
func login(c *gin.Context) {
	c.HTML(http.StatusOK, "login.html", gin.H{
		"title": "登录页面",
	})
}

// nodeList renders the node management page.
func nodeList(c *gin.Context) {
	c.HTML(http.StatusOK, "node_list.html", gin.H{
		"title": "节点管理",
	})
}

// addNode renders the node registration form page.
func addNode(c *gin.Context) {
	c.HTML(http.StatusOK, "add_node.html", gin.H{
		"title": "注册节点",
	})
}
// topicList renders the topic management page, preloading the registered
// nodes for the node selector. Falls back to the error page when etcd is
// unreachable.
func topicList(c *gin.Context) {
	nodes, err := _getNodes()
	if err != nil {
		c.HTML(http.StatusBadGateway, "error.html", gin.H{
			"error": err,
		})
		return
	}
	c.HTML(http.StatusOK, "topic_list.html", gin.H{
		"title": "topic管理",
		"nodes": nodes,
	})
}
// getTopics proxies the selected node's /getAllTopicStat endpoint, which
// returns statistics for every running topic.
func getTopics(c *gin.Context) {
	addr := c.Query("addr")
	if len(addr) == 0 {
		c.JSON(http.StatusBadRequest, rspErr("please select a node."))
		return
	}
	gmqApi("get", "http://"+addr+"/getAllTopicStat", nil, c)
}

// removeTopic asks the selected node to stop and remove a topic via its
// /exitTopic endpoint.
func removeTopic(c *gin.Context) {
	addr := c.Query("addr")
	topic := c.Query("topic")
	if len(addr) == 0 {
		c.JSON(http.StatusBadRequest, rspErr("please select a node."))
		return
	}
	if len(topic) == 0 {
		c.JSON(http.StatusBadRequest, rspErr("topic is empty."))
		return
	}
	v := url.Values{}
	v.Set("topic", topic)
	gmqApi("get", "http://"+addr+"/exitTopic", v, c)
}

// setIsAutoAck toggles a topic's auto-acknowledge mode on the selected node.
func setIsAutoAck(c *gin.Context) {
	addr := c.Query("addr")
	topic := c.Query("topic")
	if len(addr) == 0 {
		c.JSON(http.StatusBadRequest, rspErr("please select a node."))
		return
	}
	if len(topic) == 0 {
		c.JSON(http.StatusBadRequest, rspErr("topic is empty."))
		return
	}
	v := url.Values{}
	v.Set("topic", topic)
	gmqApi("get", "http://"+addr+"/setIsAutoAck", v, c)
}
// getNodes returns every node registered in etcd as JSON.
// NOTE(review): on the error path the raw error value is marshaled directly
// (most error types serialize to {}), and the success payload relies on
// respStruct's zero Code/Msg — confirm this matches the API contract.
func getNodes(c *gin.Context) {
	nodes, err := _getNodes()
	if err != nil {
		c.JSON(http.StatusBadGateway, err)
		return
	}
	var rspData respStruct
	rspData.Data = nodes
	c.JSON(http.StatusOK, rspData)
}
// unRegisterNode removes a node's registration from etcd. The node is
// looked up by its TCP address among the currently registered nodes, and
// its etcd key is deleted.
func unRegisterNode(c *gin.Context) {
	addr := c.Query("addr")
	if len(addr) == 0 {
		c.JSON(http.StatusBadRequest, rspErr("addr is empty"))
		return
	}
	nodes, err := _getNodes()
	if err != nil {
		c.JSON(http.StatusBadGateway, rspErr(err.Error()))
		return
	}
	// Find the etcd key of the node whose TCP address matches.
	var nodeKey string
	for k, n := range nodes {
		if n.TcpAddr == addr {
			nodeKey = k
			break
		}
	}
	if len(nodeKey) == 0 {
		c.JSON(http.StatusBadGateway, rspErr("addr can't match node."))
		return
	}
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	// Delete the registration key. The original issued a Get here (which
	// removes nothing) and used `_, err :=`, which does not even compile
	// since err was already declared ("no new variables on left side of :=").
	_, err = etcdCli.Delete(ctx, nodeKey)
	cancel()
	if err != nil {
		c.JSON(http.StatusBadGateway, rspErr(err.Error()))
		return
	}
	c.JSON(http.StatusOK, rspSuccess("success"))
}
// registerNode validates the submitted node fields and forwards them to the
// register center's /register endpoint.
// NOTE(review): registerAddr is never assigned in this file, so the target
// URL currently has no host — confirm where it should be configured.
func registerNode(c *gin.Context) {
	tcpAddr := c.PostForm("tcp_addr")
	if len(tcpAddr) == 0 {
		c.JSON(http.StatusBadRequest, rspErr("tcp_add is empty"))
		return
	}
	httpAddr := c.PostForm("http_addr")
	if len(httpAddr) == 0 {
		c.JSON(http.StatusBadRequest, rspErr("http_add is empty"))
		return
	}
	weight := c.PostForm("weight")
	if len(weight) == 0 {
		c.JSON(http.StatusBadRequest, rspErr("weight must be greater than 0."))
		return
	}
	// Node ids must parse to an integer in [1, 1024]; Atoi failure yields 0,
	// which also fails this range check.
	id := c.PostForm("node_id")
	nodeId, _ := strconv.Atoi(id)
	if nodeId > 1024 || nodeId < 1 {
		c.JSON(http.StatusBadRequest, rspErr("node_id must be between 1 and 1024."))
		return
	}
	gmqApi("get", registerAddr+"/register?node_id="+id+"&tcp_addr="+tcpAddr+"&http_addr="+httpAddr+"&weight="+weight, nil, c)
}
// editNodeWeight forwards a node weight change to the register center's
// /editWeight endpoint.
// NOTE(review): registerAddr is never assigned in this file — see registerNode.
func editNodeWeight(c *gin.Context) {
	tcpAddr := c.PostForm("addr")
	if len(tcpAddr) == 0 {
		c.JSON(http.StatusBadRequest, rspErr("tcp_add is empty"))
		return
	}
	weight := c.PostForm("weight")
	if len(weight) == 0 {
		c.JSON(http.StatusBadRequest, rspErr("weight must be greater than 0."))
		return
	}
	gmqApi("get", registerAddr+"/editWeight?tcp_addr="+tcpAddr+"&weight="+weight, nil, c)
}
// msgDemo renders the message test page, preloading the registered nodes
// for the node selector.
func msgDemo(c *gin.Context) {
	nodes, err := _getNodes()
	if err != nil {
		c.HTML(http.StatusBadGateway, "error.html", gin.H{
			"error": err,
		})
		return
	}
	c.HTML(http.StatusOK, "msg_demo.html", gin.H{
		"title": "消息测试",
		"nodes": nodes,
	})
}
// push validates the form fields, packs them into a msg, and forwards the
// JSON-encoded message to the selected node's /push endpoint.
// NOTE(review): msg.routeKey is unexported, so json.Marshal drops it — the
// route key validated below never reaches the node. See the msg type.
func push(c *gin.Context) {
	addr := c.PostForm("addr")
	topic := c.PostForm("topic")
	content := c.PostForm("content")
	routeKey := c.PostForm("routeKey")
	delay := c.DefaultPostForm("delay", "0")
	if len(addr) == 0 {
		c.JSON(http.StatusBadRequest, "please select a node.")
		return
	}
	if len(topic) == 0 {
		c.JSON(http.StatusBadRequest, "topic is empty")
		return
	}
	if len(content) == 0 {
		c.JSON(http.StatusBadRequest, "content is empty")
		return
	}
	if len(routeKey) == 0 {
		c.JSON(http.StatusBadRequest, "routeKey is empty")
		return
	}
	m := msg{}
	m.Topic = topic
	m.Body = content
	// A non-numeric delay silently becomes 0 (Atoi error ignored).
	m.Delay, _ = strconv.Atoi(delay)
	m.routeKey = routeKey
	data, err := json.Marshal(m)
	if err != nil {
		c.JSON(http.StatusBadGateway, "encode message failed.")
		return
	}
	v := url.Values{}
	v.Set("data", string(data))
	gmqApi("POST", "http://"+addr+"/push", v, c)
}
// pop consumes one message from a topic on the selected node.
// Example: curl "http://127.0.0.1:9504/pop?topic=xxx&bindKey=xxx"
func pop(c *gin.Context) {
	topic := c.Query("topic")
	addr := c.Query("addr")
	bindKey := c.Query("bindKey")
	if len(topic) == 0 {
		c.JSON(http.StatusBadRequest, "topic is empty")
		return
	}
	if len(addr) == 0 {
		c.JSON(http.StatusBadRequest, "please select a node.")
		return
	}
	if len(bindKey) == 0 {
		c.JSON(http.StatusBadRequest, "bindKey is empty.")
		return
	}
	v := url.Values{}
	v.Set("topic", topic)
	v.Set("bindKey", bindKey)
	gmqApi("get", "http://"+addr+"/pop", v, c)
}

// declareQueue declares a queue bound to a topic on the selected node.
// Example: curl "http://127.0.0.1:9504/declareQueue?topic=xxx&bindKey=kkk"
func declareQueue(c *gin.Context) {
	addr := c.Query("addr")
	bindKey := c.Query("bindKey")
	topic := c.Query("topic")
	if len(addr) == 0 {
		c.JSON(http.StatusBadRequest, "addr is empty")
		return
	}
	if len(bindKey) == 0 {
		c.JSON(http.StatusBadRequest, "bindKey is empty")
		return
	}
	if len(topic) == 0 {
		c.JSON(http.StatusBadRequest, "topic is empty")
		return
	}
	v := url.Values{}
	v.Set("topic", topic)
	v.Set("bindKey", bindKey)
	gmqApi("get", "http://"+addr+"/declareQueue", v, c)
}

// ack acknowledges a consumed message on the selected node.
func ack(c *gin.Context) {
	addr := c.Query("addr")
	msgId := c.Query("msgId")
	topic := c.Query("topic")
	bindKey := c.Query("bindKey")
	if len(addr) == 0 {
		c.JSON(http.StatusBadRequest, "addr is empty")
		return
	}
	if len(msgId) == 0 {
		c.JSON(http.StatusBadRequest, "msgId is empty")
		return
	}
	if len(topic) == 0 {
		c.JSON(http.StatusBadRequest, "topic is empty")
		return
	}
	if len(bindKey) == 0 {
		c.JSON(http.StatusBadRequest, "bindKey is empty")
		return
	}
	v := url.Values{}
	v.Set("topic", topic)
	v.Set("msgId", msgId)
	v.Set("bindKey", bindKey)
	gmqApi("get", "http://"+addr+"/ack", v, c)
}
// mpush is the batch message push endpoint; it is not implemented yet and
// always replies "unsupported" (the original response contained the typo
// "unsport").
func mpush(c *gin.Context) {
	c.JSON(http.StatusOK, "unsupported")
}
// gmqApi proxies a GET or POST request to a gmq node/register endpoint
// (form-encoding data) and relays the decoded JSON envelope to the gin
// client. Any transport, status, read, or decode failure is reported as a
// 502 (or the upstream status) with the standard error payload.
func gmqApi(method string, addr string, data url.Values, c *gin.Context) {
	// Bounded client so a hung node cannot wedge the handler forever.
	client := &http.Client{Timeout: 10 * time.Second}
	method = strings.ToUpper(method)
	var (
		req *http.Request
		err error
	)
	if method == "POST" {
		req, err = http.NewRequest(method, addr, strings.NewReader(data.Encode()))
		if err != nil {
			c.JSON(http.StatusBadGateway, rspErr(err))
			return
		}
		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	} else if method == "GET" {
		req, err = http.NewRequest("GET", addr+"?"+data.Encode(), nil)
		if err != nil {
			c.JSON(http.StatusBadGateway, rspErr(err))
			return
		}
	} else {
		c.JSON(http.StatusBadRequest, rspErr("unknown request method."))
		return
	}
	resp, err := client.Do(req)
	if err != nil {
		c.JSON(http.StatusBadGateway, rspErr(err))
		return
	}
	// Close the body on every path; the original deferred the close only
	// after the status check, leaking the connection on non-200 responses.
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		c.JSON(resp.StatusCode, rspErr("request failed."))
		return
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		c.JSON(http.StatusBadGateway, rspErr(err))
		return
	}
	var rspData respStruct
	if err := json.Unmarshal(body, &rspData); err != nil {
		c.JSON(http.StatusBadGateway, rspErr(err))
		return
	}
	c.JSON(http.StatusOK, rspData)
}
// _getNodes reads every key under the /gmq/node prefix from etcd and
// returns the decoded nodes keyed by their etcd key.
func _getNodes() (map[string]node, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	resp, err := etcdCli.Get(ctx, "/gmq/node", clientv3.WithPrefix())
	cancel()
	if err != nil {
		return nil, err
	}
	nodes := make(map[string]node)
	for _, ev := range resp.Kvs {
		// Decode into a fresh struct each iteration. The original reused a
		// single struct, so fields absent from a later value silently kept
		// the previous node's data. (A leftover debug Printf of every
		// key/value was also removed.)
		var n node
		if err := json.Unmarshal(ev.Value, &n); err != nil {
			return nil, err
		}
		nodes[string(ev.Key)] = n
	}
	return nodes, nil
}
// rspErr builds the standard failure payload (code 1) carrying msg.
func rspErr(msg interface{}) gin.H {
	return gin.H{"code": 1, "msg": msg, "data": nil}
}

// rspData builds the standard success payload (code 0) carrying data and an
// empty message.
func rspData(data interface{}) gin.H {
	return gin.H{"code": 0, "msg": "", "data": data}
}

// rspSuccess builds the standard success payload (code 0) carrying msg and
// no data.
func rspSuccess(msg interface{}) gin.H {
	return gin.H{"code": 0, "msg": msg, "data": nil}
}
|
package main
import (
"encoding/json"
"errors"
"strings"
"time"
"bytes"
"fmt"
"os/exec"
"github.com/k0kubun/pp"
log "github.com/sirupsen/logrus"
)
// FindCommandPath resolves the path of an executable by running
// `command -v <name>` through /bin/sh with the supplied environment.
// It returns (true, path) when the lookup succeeds and (false, "") when it
// fails (after logging the failure).
func FindCommandPath(name string, env []string) (bool, string) {
	cmd := exec.Command("/bin/sh", "-c", fmt.Sprintf("command -v %s", name))
	cmd.Env = env
	// Output runs the command to completion. The original additionally
	// called cmd.Start() and deferred cmd.Wait() afterwards, which always
	// fails with "exec: already started" on an already-run Cmd.
	stdout, err := cmd.Output()
	if err != nil && !strings.Contains(err.Error(), `: no child processes`) {
		msg := fmt.Sprintf("\n** Failed to find Command %s: %s\n\ncmd=%s | env=%v | stdout=%s | \n", name, err.Error(), cmd, cmd.Env, stdout)
		log.Error(msg)
		return false, ``
	}
	// `command -v` prints the path followed by a newline; strip newlines.
	return true, strings.Replace(string(stdout), "\n", "", -1)
}
// execute_speedtest_cli runs `<exec_path> --json` and returns its stdout.
// Failures are returned to the caller; the original called log.Fatalf here,
// which terminated the process and made the error return value unreachable.
func execute_speedtest_cli(exec_path string) (string, error) {
	cmd := exec.Command(exec_path, "--json")
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	if err := cmd.Run(); err != nil {
		return "", fmt.Errorf("cmd.Run() failed with %s", err)
	}
	return stdout.String(), nil
}
// main runs one speedtest via the speedtest-cli binary and pretty-prints
// the parsed result. Errors inside get_speedtest_result are fatal, so the
// err check here is effectively a formality.
func main() {
	res, err := get_speedtest_result()
	if err != nil {
		log.Fatal(err)
	}
	pp.Println(res)
}
// get_speedtest_result locates speedtest-cli on the PATH, runs it with
// --json, unmarshals the output into a SpeedTestResult, and records the
// wall-clock duration of the run. Every failure terminates the process via
// log.Fatal, so a non-nil error is never actually returned.
func get_speedtest_result() (*SpeedTestResult, error) {
	var res SpeedTestResult
	// Note: the lookup runs with an EMPTY environment (no PATH inherited).
	ok, exec_path := FindCommandPath(`speedtest-cli`, []string{})
	if !ok {
		log.Fatal(`speedtest-cli not found`)
	}
	started := time.Now()
	speedtest_json, err := execute_speedtest_cli(exec_path)
	if err != nil {
		log.Fatal(err)
	}
	uerr := json.Unmarshal([]byte(speedtest_json), &res)
	if uerr != nil {
		log.Fatal(uerr)
	}
	// Duration covers the external run plus JSON decoding.
	res.Duration = time.Since(started)
	return &res, nil
}
|
package counter
import (
"sync"
"time"
)
// bucket is one slot of the rolling window: it accumulates a value and
// links to the following slot, so the slots form a ring.
type bucket struct {
	val  int64
	next *bucket
}

// Add folds delta into the bucket's running value.
func (b *bucket) Add(delta int64) {
	b.val = b.val + delta
}

// Value reports the bucket's accumulated value.
func (b *bucket) Value() int64 {
	return b.val
}

// Reset clears the bucket back to zero.
func (b *bucket) Reset() {
	b.val = 0
}
// Compile-time check that rollingCounter satisfies the Counter interface.
var _ Counter = new(rollingCounter)

// rollingCounter is a time-bucketed rolling-window counter: writes land in
// the current bucket, and reads sum the buckets still inside the window.
type rollingCounter struct {
	mu         sync.RWMutex
	buckets    []bucket // backing storage; entries are linked into a ring
	bucketTime int64    // nanoseconds covered by one bucket
	lastAccess int64    // UnixNano of the last bucket rotation (see lastBucket)
	cur        *bucket  // bucket currently receiving writes
}
// NewRolling creates a new window. windowTime is the time covering the entire
// window. windowBuckets is the number of buckets the window is divided into.
// An example: a 10 second window with 10 buckets will have 10 buckets covering
// 1 second each.
func NewRolling(window time.Duration, winBucket int) Counter {
	buckets := make([]bucket, winBucket)
	bucket := &buckets[0]
	// Link the buckets into a ring: each points to the next, and the last
	// wraps back to the first.
	for i := 1; i < winBucket; i++ {
		bucket.next = &buckets[i]
		bucket = bucket.next
	}
	bucket.next = &buckets[0]
	// Each bucket covers an equal slice of the window.
	bucketTime := time.Duration(window.Nanoseconds() / int64(winBucket))
	return &rollingCounter{
		cur:        &buckets[0],
		buckets:    buckets,
		bucketTime: int64(bucketTime),
		lastAccess: time.Now().UnixNano(),
	}
}
// Add increments the counter by value and return new value.
// It rotates stale buckets forward (via lastBucket) before adding.
func (r *rollingCounter) Add(val int64) {
	r.mu.Lock()
	r.lastBucket().Add(val)
	r.mu.Unlock()
}

// Value get the counter value.
// It sums every bucket except those that have expired since the last
// rotation (elapsed reports how many buckets are stale).
func (r *rollingCounter) Value() (sum int64) {
	now := time.Now().UnixNano()
	r.mu.RLock()
	b := r.cur
	i := r.elapsed(now)
	for j := 0; j < len(r.buckets); j++ {
		// skip all future reset bucket.
		if i > 0 {
			i--
		} else {
			sum += b.Value()
		}
		b = b.next
	}
	r.mu.RUnlock()
	return
}

// Reset reset the counter.
// All buckets are zeroed; cur and lastAccess are left untouched.
func (r *rollingCounter) Reset() {
	r.mu.Lock()
	for i := range r.buckets {
		r.buckets[i].Reset()
	}
	r.mu.Unlock()
}
// elapsed reports how many whole buckets have expired between the last
// rotation (lastAccess) and now, capped at the total bucket count.
// A span within a single bucketTime yields 0.
func (r *rollingCounter) elapsed(now int64) (i int) {
	var e int64
	if e = now - r.lastAccess; e <= r.bucketTime {
		return
	}
	if i = int(e / r.bucketTime); i > len(r.buckets) {
		i = len(r.buckets)
	}
	return
}

// lastBucket returns the bucket that should receive the current write,
// rotating the ring forward by the elapsed bucket count and resetting every
// bucket it passes. Callers must hold the write lock.
func (r *rollingCounter) lastBucket() (b *bucket) {
	now := time.Now().UnixNano()
	b = r.cur
	// reset the buckets between now and number of buckets ago. If
	// that is more that the existing buckets, reset all.
	if i := r.elapsed(now); i > 0 {
		r.lastAccess = now
		for ; i > 0; i-- {
			// replace the next used bucket.
			b = b.next
			b.Reset()
		}
	}
	r.cur = b
	return
}
|
package database
import (
"database/sql"
"gotodo/database/todos"
)
// Database is the root gotodo data-access object: it bundles the
// domain-specific stores so callers receive a single handle.
type Database struct {
	// Todos is the store backed by the todos tables.
	Todos *todos.Database
}

// New wraps an open *sql.DB in a gotodo Database, passing the connection
// down to each domain store (currently only todos).
func New(db *sql.DB) *Database {
	return &Database{
		Todos: todos.New(db),
	}
}
|
package controllers
import (
"github.com/astaxie/beego"
"github.com/astaxie/beego/context"
)
// success records the outcome of the last login attempt for the Get page:
// Get treats 1 as success and 2 as failure (Post writes 0 or 2).
// NOTE(review): package-level mutable state shared by all requests and not
// goroutine-safe — confirm whether per-session state is intended.
var success = 0

// LoginController serves the login page and processes login submissions.
type LoginController struct {
	beego.Controller
}
// Get renders the login form, exposing to the template whether the previous
// attempt succeeded (success == 1) or failed (success == 2).
func (c *LoginController) Get() {
	c.TplName = "login.html"
	c.Data["IsSuccess"] = success == 1
	c.Data["IsFailed"] = success == 2
}
// Post validates the submitted credentials against the app config and
// redirects to /mis on success or back to /mis/login on failure.
//
// NOTE(review): the raw username and password are stored in plain
// cookies below — readable by the client and, without TLS, on the wire.
// Consider a signed session token instead.
func (c *LoginController) Post() {
	uname := c.Input().Get("uname")
	pwd := c.Input().Get("pwd")
	autoLogin := c.Input().Get("autoLogin") == "on"
	if beego.AppConfig.String("uname") == uname &&
		beego.AppConfig.String("pwd") == pwd {
		// "Remember me" keeps the cookies; maxAge 0 makes them
		// session cookies otherwise.
		maxAge := 0
		if autoLogin {
			maxAge = 1<<32 - 1
		}
		c.Ctx.SetCookie("uname", uname, maxAge, "/")
		c.Ctx.SetCookie("pwd", pwd, maxAge, "/")
		c.Data["IsSuccess"] = true
		c.Data["IsFailed"] = false
		success = 0
		// 302, not 301: browsers cache a 301 permanently, which would
		// bypass this handler on every later login attempt.
		c.Redirect("/mis", 302)
		beego.Info("Login Successful! suc: ", success)
		return
	}
	c.Data["IsSuccess"] = false
	c.Data["IsFailed"] = true
	success = 2
	beego.Info("Login Failed! suc: ", success)
	c.Redirect("/mis/login", 302)
}
// checkAccount reports whether the request carries uname/pwd cookies
// that match the credentials configured for the application.
func checkAccount(ctx *context.Context) bool {
	unameCookie, err := ctx.Request.Cookie("uname")
	if err != nil {
		return false
	}
	pwdCookie, err := ctx.Request.Cookie("pwd")
	if err != nil {
		return false
	}
	uname := unameCookie.Value
	pwd := pwdCookie.Value
	beego.Info("Get from Cookie: uname - pwd: ", uname, pwd)
	return beego.AppConfig.String("uname") == uname &&
		beego.AppConfig.String("pwd") == pwd
}
|
package jarviscore
import (
"context"
// "encoding/json"
"net/http"
"time"
// "github.com/zhs007/dtdataserv/proto"
// "github.com/zhs007/jarviscore/base"
// "go.uber.org/zap"
)
// func replyDTReport(w http.ResponseWriter, report *dtdatapb.DTReport) {
// jsonBytes, err := json.Marshal(report)
// if err != nil {
// jarvisbase.Warn("replyDTReport:Marshal", zap.Error(err))
// return
// }
// w.Write(jsonBytes)
// }
// HTTPServer serves the node's HTTP endpoints.
type HTTPServer struct {
	addr string       // listen address passed to newHTTPServer
	serv *http.Server // underlying server; nil until start runs
	node JarvisNode   // owning node, kept for handlers
}
// onTasks handles /task/tasks. It currently only acknowledges the
// request with "OK" and a permissive CORS header; the commented-out code
// below is a sketch of the intended task-report implementation.
func (s *HTTPServer) onTasks(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Write([]byte("OK"))
	// token := r.URL.Query().Get("token")
	// cache, err := s.db.getCache(r.Context(), token)
	// if err != nil {
	// 	replyDTReport(w, newErrorDTReport(err))
	// 	return
	// }
	// replyDTReport(w, cache)
}
// newHTTPServer builds an HTTPServer bound to addr for the given node.
// The underlying *http.Server is created lazily in start.
func newHTTPServer(addr string, node JarvisNode) (*HTTPServer, error) {
	return &HTTPServer{
		addr: addr,
		node: node,
	}, nil
}
// start builds the HTTP mux and runs the server on s.addr. It blocks
// until the server stops and returns the error from ListenAndServe.
//
// NOTE(review): ctx is currently unused — the server is not shut down
// on context cancellation. Confirm whether graceful shutdown via
// s.serv.Shutdown(ctx) is wanted here.
func (s *HTTPServer) start(ctx context.Context) error {
	mux := http.NewServeMux()
	mux.HandleFunc("/task/tasks", func(w http.ResponseWriter, r *http.Request) {
		s.onTasks(w, r)
	})
	// fsh := http.FileServer(http.Dir("./www/static"))
	// mux.Handle("/", http.StripPrefix("/", fsh))
	server := &http.Server{
		Addr:         s.addr,
		ReadTimeout:  60 * time.Second,
		WriteTimeout: 60 * time.Second,
		Handler:      mux,
	}
	s.serv = server
	// Return the error directly instead of the redundant
	// if err != nil { return err } / return nil dance.
	return server.ListenAndServe()
}
// stop closes the HTTP server if it was ever started.
func (s *HTTPServer) stop() {
	if s.serv == nil {
		return
	}
	s.serv.Close()
}
|
package main
import (
"fmt"
"net/http"
"net"
)
func getIP(w http.ResponseWriter, req *http.Request) string {
returnValue := "window.ipaddr="
ip, port, err := net.SplitHostPort(req.RemoteAddr)
_ = port
if err == nil {
userIP := net.ParseIP(ip)
if userIP == nil {
return returnValue + "'';"
}
forward := req.Header.Get("X-Forwarded-For")
if len(forward) > 0 {
return returnValue + "'" + forward + "';"
}
if len(ip) > 0 {
return returnValue + "'" + ip + "';"
}
}
return returnValue + "'';"
}
// handler writes the window.ipaddr JS snippet for the requesting client.
func handler(w http.ResponseWriter, r *http.Request) {
	// Fprint, not Fprintf: the snippet is data, not a format string;
	// a stray '%' in it must not be interpreted as a verb.
	fmt.Fprint(w, getIP(w, r))
}
// main serves the IP snippet over HTTPS on :443.
func main() {
	http.HandleFunc("/", handler)
	// ListenAndServeTLS always returns a non-nil error on exit; surface
	// it instead of discarding it silently.
	if err := http.ListenAndServeTLS(":443", "SSL.cer", "measure.agilemeasure.com.key", nil); err != nil {
		fmt.Println("server error:", err)
	}
}
|
// Copyright © 2020. All rights reserved.
// Author: Ilya Stroy.
// Contacts: qioalice@gmail.com, https://github.com/qioalice
// License: https://opensource.org/licenses/MIT
package privet
type (
	// _SpecialTranslationClass is a distinct string type for the special
	// "error marker" strings that sptr() produces in place of a real
	// translation when Tr() fails.
	_SpecialTranslationClass string
)
//goland:noinspection GoSnakeCaseUsage
const (
	// Shared prefix/suffix wrapped around every marker class.
	__SPTR_PREFIX = _SpecialTranslationClass("i18nErr: ")
	__SPTR_SUFFIX = _SpecialTranslationClass(". Key: ")
	// Marker classes; sptr() appends the original translation key.
	_SPTR_TRANSLATION_NOT_FOUND = __SPTR_PREFIX +
		_SpecialTranslationClass("TranslationNotFound") + __SPTR_SUFFIX
	_SPTR_LOCALE_IS_NIL = __SPTR_PREFIX +
		_SpecialTranslationClass("LocaleIsNil") + __SPTR_SUFFIX
	_SPTR_TRANSLATION_KEY_IS_EMPTY = __SPTR_PREFIX +
		_SpecialTranslationClass("TranslationKeyIsEmpty") + __SPTR_SUFFIX
	_SPTR_TRANSLATION_KEY_IS_INCORRECT = __SPTR_PREFIX +
		_SpecialTranslationClass("TranslationKeyIsIncorrect") + __SPTR_SUFFIX
)
/*
Trivia:

Locale.Tr() or Client.Tr() may fail: a missing or empty translation key,
an uninitialized Client, errors while interpolating a language phrase
with its arguments, and so on. We need a way to tell the caller that
something went wrong.

Returning *ekaerr.Error as a second value from Locale.Tr() or
Client.Tr() would make every call site noisy and hard to read.

So instead we return a special, recognizable string in place of the
language phrase. Users would just see it as a visual translation bug —
an "easter egg" — with no worse consequences.

sptr() is the generator of that special string, and the "_SPTR_"-prefixed
constants are the classes it is built from.
*/
// sptr builds the special marker string for class, tagged with the
// translation key that triggered it.
func sptr(class _SpecialTranslationClass, originalKey string) string {
	return string(class) + originalKey
}
|
package fasdas
import (
"bufio"
"fmt"
"log"
"os"
"strings"
)
// Zadanie4 reads a line from stdin and prints it reversed.
func Zadanie4() {
	fmt.Println(zapola())
}
// zapola returns the stdin line with its characters in reverse order.
func zapola() string {
	chars := strings.Split(getInputText(), "")
	naoborot(chars)
	return strings.Join(chars, "")
}
// getInputText reads a single line from standard input, aborting the
// program on a scanner error.
func getInputText() string {
	scanner := bufio.NewScanner(bufio.NewReader(os.Stdin))
	scanner.Scan()
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	return scanner.Text()
}
// naoborot reverses x in place.
func naoborot(x []string) {
	for i, j := 0, len(x)-1; i < j; i, j = i+1, j-1 {
		x[i], x[j] = x[j], x[i]
	}
}
// User holds the name and age read interactively by GoUser/setUser.
type User struct {
	name string
	age int
}
// GoUser prompts for a user's details on stdin and echoes the result.
func GoUser() {
	u := &User{}
	setUser(u)
	fmt.Println(u)
}
// setUser fills u by prompting on stdout and reading name and age from
// stdin. NOTE(review): Scanln errors are ignored — a failed read leaves
// the field at its zero value.
func setUser(u *User) {
	fmt.Print("Давай сюда имя: ")
	fmt.Scanln(&u.name)
	fmt.Print("Давай сюда старость: ")
	fmt.Scanln(&u.age)
}
// wordReversSimple returns s with its characters in reverse order.
func wordReversSimple(s string) string {
	parts := strings.Split(s, "")
	for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
		parts[i], parts[j] = parts[j], parts[i]
	}
	return strings.Join(parts, "")
}
// y returns s reversed rune-by-rune.
//
// Bug fix: the previous implementation indexed bytes (s[size]) while
// strings.Map iterated runes, so any multi-byte (non-ASCII) input
// produced garbage. Reversing an explicit rune slice handles all of
// UTF-8 correctly.
func y(s string) string {
	r := []rune(s)
	for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
		r[i], r[j] = r[j], r[i]
	}
	return string(r)
}
|
package main
import "fmt"
func main() {
	ch := make(chan int)
	// Producer goroutine: tries to send 0..9, but only the first send
	// ever completes because main receives just once.
	go func() {
		for n := 0; n < 10; n++ {
			ch <- n
		}
		// close(ch)
	}()
	fmt.Println(<-ch)
	// // My solution :
	// for i := 0; i < 10; i++ {
	// 	fmt.Println(<-ch)
	// }
}
// Why does this only print zero?
// And what can you do to get it to print all 0 - 9 numbers?
// My answer:
// → main blocks at the receive (<-c) until a value arrives on the channel.
// Once the first value arrives, we print it and the program exits.
// Nothing remains to receive and print the rest of the values.
|
package unionfind
import (
"fmt"
"testing"
)
// TestNewUnionFind3 smoke-tests UnionFind3: it prints Find(1) and the
// connectivity of 0 and 3 before and after Union(0, 3). Results are
// printed for manual inspection rather than asserted.
func TestNewUnionFind3(t *testing.T) {
	fmt.Println("============UF3==========")
	uf := NewUnionFind3(5)
	fmt.Println("uf:", uf)
	fmt.Println("1的parent:", uf.Find(1))
	fmt.Println("0和3的连接关系:", uf.IsConnected(0, 3))
	fmt.Println("0和3连接中...")
	uf.Union(0, 3)
	// After the union, 0 and 3 must report as connected.
	fmt.Println("0和3的连接关系:", uf.IsConnected(0, 3))
	fmt.Println("uf:", uf)
}
|
package daemons
import (
"encoding/binary"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"docktor/server/storage"
"docktor/server/types"
"github.com/labstack/echo/v4"
log "github.com/sirupsen/logrus"
"golang.org/x/net/websocket"
)
// getContainers get containers from daemon
func getContainers(c echo.Context) error {
	db := c.Get("DB").(*storage.Docktor)
	daemonID := c.Param(types.DAEMON_ID_PARAM)
	daemon, err := db.Daemons().FindByID(daemonID)
	if err != nil {
		log.WithFields(log.Fields{
			"daemonID": daemonID,
			"error":    err,
		}).Error("Error when retrieving daemon")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	log.WithFields(log.Fields{
		"daemon": daemon.Name,
	}).Info("Daemon retrieved")
	containers, err := daemon.GetContainers()
	if err != nil {
		log.WithFields(log.Fields{
			"daemon": daemon.Name,
			"error":  err,
		}).Error("Error when retrieving daemon containers")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	log.Info("Daemon containers retrieved")
	return c.JSON(http.StatusOK, containers)
}
// getSavedContainers get saved containers from group of a daemon
func getSavedContainers(c echo.Context) error {
	db := c.Get("DB").(*storage.Docktor)
	daemonID := c.Param(types.DAEMON_ID_PARAM)
	containers, err := db.Groups().FindContainersByDaemonID(daemonID)
	if err != nil {
		log.WithFields(log.Fields{
			"daemonID": daemonID,
			"error":    err,
		}).Error("Error when retrieving groups containers of daemon")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, containers)
}
// updateContainersStatus change the status of containers param split by ','
// The target status comes from the "status" query parameter
// (start|stop|remove|restart|create|destroy); per-container failures are
// collected into errs and returned together as a 400 when non-empty.
func updateContainersStatus(c echo.Context) error {
	db := c.Get("DB").(*storage.Docktor)
	daemon, err := db.Daemons().FindByID(c.Param(types.DAEMON_ID_PARAM))
	if err != nil {
		log.WithFields(log.Fields{
			"daemonID": c.Param(types.DAEMON_ID_PARAM),
			"error":    err,
		}).Error("Error when retrieving daemon")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	// Split the comma-separated container list from the query string.
	splitFn := func(c rune) bool {
		return c == ','
	}
	containers := strings.FieldsFunc(c.QueryParam("containers"), splitFn)
	errs := make(map[string]string)
	switch c.QueryParam("status") {
	case "start":
		errs = daemon.StartContainers(containers...)
	case "stop":
		errs = daemon.StopContainers(containers...)
	case "remove":
		errs = daemon.RemoveContainers(containers...)
	case "restart":
		errs = daemon.RestartContainers(containers...)
	case "create":
		// Find groups of daemon
		groups, err := db.Groups().FindByDaemonIDBson(daemon.ID)
		if err != nil {
			log.WithFields(log.Fields{
				"daemonID": daemon.ID,
				"error":    err,
			}).Error("Error when retrieving groups")
			return c.JSON(http.StatusBadRequest, err.Error())
		}
		// Create each requested container that has a saved definition.
		for _, group := range groups {
			for _, container := range group.FindContainersByNameOrID(containers) {
				if container.ContainerJSONBase != nil {
					err = daemon.CreateContainer(container, false)
					if err != nil {
						errs[container.Name] = err.Error()
						log.WithFields(log.Fields{
							"daemon":    daemon.Name,
							"status":    c.QueryParam("status"),
							"err":       err,
							"container": container,
						}).Error("Error when create this container")
					}
				}
			}
		}
	case "destroy":
		// Find groups of daemon
		groups, err := db.Groups().FindByDaemonIDBson(daemon.ID)
		if err != nil {
			log.WithFields(log.Fields{
				"daemonID": daemon.ID,
				"error":    err,
			}).Error("Error when retrieving groups")
			return c.JSON(http.StatusBadRequest, err.Error())
		}
		for keyGroup := 0; keyGroup < len(groups); keyGroup++ {
			// NOTE(review): `c` here shadows the echo.Context, and
			// keyContainer indexes the slice returned by
			// FindContainersByNameOrID — not group.Containers — while the
			// append-based deletion also shifts later indices. Destroying
			// more than one container per group looks buggy; confirm.
			for keyContainer, c := range groups[keyGroup].FindContainersByNameOrID(containers) {
				if c.ContainerJSONBase != nil {
					groups[keyGroup].Containers = append(groups[keyGroup].Containers[:keyContainer], groups[keyGroup].Containers[keyContainer+1:]...)
				}
			}
		}
		// Persist the trimmed groups.
		for _, group := range groups {
			_, err = db.Groups().Save(group)
			if err != nil {
				errs[group.Name] = err.Error()
				log.WithFields(log.Fields{
					"status": c.QueryParam("status"),
					"err":    err,
					"group":  group.Name,
				}).Error("Error when destroy containers")
			}
		}
	default:
		log.WithFields(log.Fields{
			"daemon": daemon.Name,
			"status": c.QueryParam("status"),
			"error":  "Wrong status",
		}).Error("Wrong status")
		return c.JSON(http.StatusBadRequest, "Wrong status")
	}
	if len(errs) > 0 {
		log.WithFields(log.Fields{
			"daemon":     daemon.Name,
			"status":     c.QueryParam("status"),
			"containers": containers,
			"errors":     errs,
		}).Error("Error when changing containers status")
		return c.JSON(http.StatusBadRequest, errs)
	}
	return c.JSON(http.StatusOK, c.QueryParam("status"))
}
// execContainer exec commands in container from daemon
// Pipeline: load the daemon and the command template, bind the
// caller-supplied variables, render the command, then execute it in the
// target container and return the captured output as JSON.
func execContainer(c echo.Context) error {
	db := c.Get("DB").(*storage.Docktor)
	// Find the daemon
	daemon, err := db.Daemons().FindByID(c.Param(types.DAEMON_ID_PARAM))
	if err != nil {
		log.WithFields(log.Fields{
			"daemonID": c.Param(types.DAEMON_ID_PARAM),
			"error":    err,
		}).Error("Error when retrieving daemon")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	log.WithFields(log.Fields{
		"daemon": daemon.Name,
	}).Info("Daemon retrieved")
	// Find the command
	command, err := db.Images().FindCommandByID(c.Param(types.COMMAND_ID_PARAM))
	if err != nil {
		log.WithFields(log.Fields{
			"commandID": c.Param(types.COMMAND_ID_PARAM),
			"error":     err,
		}).Error("Error when retrieving command")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	log.WithFields(log.Fields{
		"command": command.Title,
	}).Info("Command retrieved")
	// Here you can put default variables such as daemon if needed
	variables := map[string]interface{}{}
	// Get the body variables ton replace in the go template
	var cmdVars []types.CommandVariable
	err = c.Bind(&cmdVars)
	if err != nil {
		log.WithFields(log.Fields{
			"variables": c.Request().Body,
			"error":     err,
		}).Error("Error when parsing variables")
		return c.JSON(http.StatusBadRequest, err)
	}
	// Copy of variables; optional ones are namespaced with "optional_".
	for _, v := range cmdVars {
		if v.Optional {
			variables[fmt.Sprintf("optional_%s", v.Name)] = v.Value
		} else {
			variables[v.Name] = v.Value
		}
	}
	log.WithFields(log.Fields{
		"variables": variables,
	}).Info("Variables parsed")
	// Apply the variables in the go template
	cmd, err := command.SetVariables(variables)
	if err != nil {
		log.WithFields(log.Fields{
			"variables": variables,
			"command":   command.Command,
			"error":     err,
		}).Error("Error when replacing variables")
		return c.JSON(http.StatusBadRequest, err)
	}
	// Get the container name (URL-encoded in the path parameter)
	container, err := url.QueryUnescape(c.Param(types.CONTAINER_ID_PARAM))
	if err != nil {
		log.WithFields(log.Fields{
			"container": c.Param(types.CONTAINER_ID_PARAM),
			"error":     err,
		}).Error("Error when parsing container name")
		return c.JSON(http.StatusBadRequest, err)
	}
	// Launch the cmd
	logs, err := daemon.ExecContainer(container, []string{cmd})
	if err != nil {
		log.WithFields(log.Fields{
			"container": container,
			"commands":  cmd,
			"daemon":    daemon.Name,
			"error":     err,
		}).Error("Error when executing commands on containers")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, string(logs))
}
// getContainerLog is a ws which send container log
//
// It streams docker's multiplexed log format over a websocket: each
// frame is an 8-byte header whose last 4 bytes hold the big-endian
// payload length, followed by the payload itself.
// https://stackoverflow.com/questions/46428721/how-to-stream-docker-container-logs-via-the-go-sdk
func getContainerLog(c echo.Context) error {
	websocket.Handler(func(ws *websocket.Conn) {
		defer ws.Close()
		db := c.Get("DB").(*storage.Docktor)
		daemon, err := db.Daemons().FindByID(c.Param(types.DAEMON_ID_PARAM))
		if err != nil {
			log.WithFields(log.Fields{
				"daemonID": c.Param(types.DAEMON_ID_PARAM),
				"error":    err,
			}).Error("Error when retrieving daemon")
			return
		}
		reader, err := daemon.GetContainerLogFollow(c.Param(types.CONTAINER_ID_PARAM))
		if err != nil {
			log.WithFields(log.Fields{
				"daemon":      daemon.Name,
				"containerID": c.Param(types.CONTAINER_ID_PARAM),
				"error":       err,
			}).Error("Error when retrieving logs socket")
			return
		}
		defer reader.Close()
		// ignore the 8 first bytes of each frame (multiplexing header)
		hdr := make([]byte, 8)
		for {
			// io.ReadFull instead of a bare Read: a short read would
			// desynchronize the stream. Bug fix: on a header read error
			// (including EOF) we now break instead of spinning forever
			// on a dead reader.
			if _, err := io.ReadFull(reader, hdr); err != nil {
				log.WithFields(log.Fields{
					"daemon": daemon.Name,
					"error":  err,
				}).Error("Error when reading 8 first bytes")
				break
			}
			count := binary.BigEndian.Uint32(hdr[4:])
			dat := make([]byte, count)
			if _, err := io.ReadFull(reader, dat); err != nil {
				log.WithFields(log.Fields{
					"daemon": daemon.Name,
					"error":  err,
				}).Error("Error when reading")
				break
			}
			if err := websocket.Message.Send(ws, string(dat)); err != nil {
				log.Info("client close the connection")
				break
			}
		}
	}).ServeHTTP(c.Response(), c.Request())
	return nil
}
// getContainerTerm is a ws which provide an ssh term inside the container
// Based on https://github.com/bitbull-team/docker-exec-web-console
func getContainerTerm(c echo.Context) error {
	websocket.Handler(func(ws *websocket.Conn) {
		defer ws.Close()
		db := c.Get("DB").(*storage.Docktor)
		daemon, err := db.Daemons().FindByID(c.Param(types.DAEMON_ID_PARAM))
		if err != nil {
			log.WithFields(log.Fields{
				"daemonID": c.Param(types.DAEMON_ID_PARAM),
				"error":    err,
			}).Error("Error when retrieving daemon")
			return
		}
		log.WithFields(log.Fields{
			"daemon": daemon.Name,
		}).Info("Daemon retrieved")
		hij, err := daemon.GetContainerTerm(c.Param(types.CONTAINER_ID_PARAM))
		if err != nil {
			log.WithFields(log.Fields{
				"daemon":      daemon.Name,
				"containerID": c.Param(types.CONTAINER_ID_PARAM),
				"error":       err,
			}).Error("Error when retrieving container term socket")
			return
		}
		log.Info("hij response connected")
		defer hij.Close()
		// Bug fix: this channel used to be declared but never made, so
		// the final receive blocked forever on a nil channel and the
		// output-copy error was dropped. Buffered so the sender never
		// blocks even if the handler has already bailed out.
		receiveStdout := make(chan error, 1)
		// redirect output to ws
		go func() {
			log.Info("redirect output to ws")
			var err error
			if ws != nil {
				_, err = io.Copy(ws, hij.Reader)
			}
			receiveStdout <- err
		}()
		// redirect ws input to input
		go func() error {
			log.Info("redirect input to ws")
			if ws != nil {
				io.Copy(hij.Conn, ws)
			}
			if conn, ok := hij.Conn.(interface {
				CloseWrite() error
			}); ok {
				log.Info("Close the connection")
				if err := conn.CloseWrite(); err != nil {
					log.WithFields(log.Fields{
						"daemon": daemon.Name,
						"error":  err,
					}).Error("Error when closing container term socket")
				}
			}
			return nil
		}()
		log.Info("Ws ready !")
		// Block until the output copy finishes, then log its error.
		if err := <-receiveStdout; err != nil {
			log.WithFields(log.Fields{
				"daemon": daemon.Name,
				"error":  err,
			}).Error("Error in container term")
		}
	}).ServeHTTP(c.Response(), c.Request())
	return nil
}
// getImages get docker images from daemon
func getImages(c echo.Context) error {
	db := c.Get("DB").(*storage.Docktor)
	daemonID := c.Param(types.DAEMON_ID_PARAM)
	daemon, err := db.Daemons().FindByID(daemonID)
	if err != nil {
		log.WithFields(log.Fields{
			"daemonID": daemonID,
			"error":    err,
		}).Error("Error when retrieving daemon")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	log.WithFields(log.Fields{
		"daemon": daemon.Name,
	}).Info("Daemon retrieved")
	images, err := daemon.GetDockerImages()
	if err != nil {
		log.WithFields(log.Fields{
			"daemon": daemon.Name,
			"error":  err,
		}).Error("Error when retrieving docker images")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	log.Info("Docker images retrieved")
	return c.JSON(http.StatusOK, images)
}
// deleteImages delete docker images from daemon
func deleteImages(c echo.Context) error {
	db := c.Get("DB").(*storage.Docktor)
	daemonID := c.Param(types.DAEMON_ID_PARAM)
	daemon, err := db.Daemons().FindByID(daemonID)
	if err != nil {
		log.WithFields(log.Fields{
			"daemonID": daemonID,
			"error":    err,
		}).Error("Error when retrieving daemon")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	log.WithFields(log.Fields{
		"daemon": daemon.Name,
	}).Info("Daemon retrieved")
	image := c.Param(types.DOCKER_IMAGE_PARAM)
	result, err := daemon.RemoveDockerImages(image)
	if err != nil {
		log.WithFields(log.Fields{
			"image":  image,
			"daemon": daemon.Name,
			"error":  err,
		}).Error("Error when deleting docker image")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	log.Info("Docker image deleted")
	return c.JSON(http.StatusOK, result)
}
|
package xdominion
/*
The XGroup is an array of XGroupBy structures
*/
type XGroup []XGroupBy

// CreateGroup concatenates the group clause of every XGroupBy entry for
// the given table and database driver.
func (g *XGroup) CreateGroup(table *XTable, DB string) string {
	var clause string
	for i := range *g {
		clause += (*g)[i].GetGroup(table, DB)
	}
	return clause
}
/*
The XGroupBy structure: one GROUP BY entry, referencing a field name.
*/
type XGroupBy struct {
	Field string
}
// GetGroup builds the GROUP BY fragment for this entry.
// NOTE(review): this looks like a placeholder — it ignores table, DB and
// g.Field and always returns the literal "Group By --"; confirm the real
// implementation before relying on it.
func (g *XGroupBy) GetGroup(table *XTable, DB string) string {
	return "Group By --"
}
|
package proxy
import (
"encoding/json"
"errors"
"fmt"
"html/template"
"io"
"net"
"net/http"
"net/http/httputil"
"net/url"
"reflect"
"regexp"
"strings"
"time"
"github.com/buzzfeed/sso/internal/pkg/aead"
log "github.com/buzzfeed/sso/internal/pkg/logging"
"github.com/buzzfeed/sso/internal/proxy/collector"
"github.com/buzzfeed/sso/internal/proxy/providers"
"github.com/18F/hmacauth"
"github.com/datadog/datadog-go/statsd"
)
// SignatureHeader is the header name where the signed request header is stored.
const SignatureHeader = "Gap-Signature"
// SignatureHeaders are the headers that are valid in the request.
// They are included, in this order, when HMAC-signing proxied requests.
var SignatureHeaders = []string{
	"Content-Length",
	"Content-Md5",
	"Content-Type",
	"Date",
	"Authorization",
	"X-Forwarded-User",
	"X-Forwarded-Email",
	"X-Forwarded-Groups",
	"Cookie",
}
// Errors returned by the proxy's authentication/authorization path.
var (
	ErrLifetimeExpired = errors.New("user lifetime expired")
	ErrUserNotAuthorized = errors.New("user not authorized")
	ErrUnknownHost = errors.New("unknown host")
	ErrRefreshCookie = errors.New("stale cookie, refresh")
)
// statusInvalidHost (421 Misdirected Request) is sent for hosts that no
// configured upstream matches.
const statusInvalidHost = 421
// EmailValidatorFn function type for validating email addresses.
type EmailValidatorFn func(string) bool
// OAuthProxy stores all the information associated with proxying the request.
type OAuthProxy struct {
	CookieCipher aead.Cipher
	OldCookieCipher aead.Cipher
	CookieDomain string
	CookieExpire time.Duration
	CookieHTTPOnly bool
	CookieName string
	CookieSecure bool
	CookieSeed string
	CSRFCookieName string
	EmailValidator EmailValidatorFn
	redirectURL *url.URL // the url to receive requests at
	provider providers.Provider
	skipAuthPreflight bool
	templates *template.Template
	StatsdClient *statsd.Client
	mux map[string]*route
	regexRoutes []*route
}
// route pairs an upstream's handler with its config and metric tags.
type route struct {
	upstreamConfig *UpstreamConfig
	handler http.Handler
	tags []string
	// only used for ones that have regex
	regex *regexp.Regexp
}
// StateParameter holds the redirect id along with the session id.
type StateParameter struct {
	SessionID string `json:"session_id"`
	RedirectURI string `json:"redirect_uri"`
}
// UpstreamProxy stores information necessary for proxying the request back to the upstream.
type UpstreamProxy struct {
	cookieName string
	handler http.Handler
	auth hmacauth.HmacAuth
}
// deleteSSOCookieHeader deletes the session cookie from the request header string.
func deleteSSOCookieHeader(req *http.Request, cookieName string) {
headers := []string{}
for _, cookie := range req.Cookies() {
if cookie.Name != cookieName {
headers = append(headers, cookie.String())
}
}
req.Header.Set("Cookie", strings.Join(headers, ";"))
}
// ServeHTTP signs the http request and deletes cookie headers
// before calling the upstream's ServeHTTP function.
func (u *UpstreamProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Strip the SSO session cookie so it never leaks to the upstream.
	deleteSSOCookieHeader(r, u.cookieName)
	// Optionally HMAC-sign the request so the upstream can verify it.
	if u.auth != nil {
		u.auth.SignRequest(r)
	}
	u.handler.ServeHTTP(w, r)
}
// upstreamTransport is used to ensure that upstreams cannot override the
// security headers applied by sso_proxy
type upstreamTransport struct{}
// RoundTrip round trips the request and deletes security headers before returning the response.
func (t *upstreamTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := http.DefaultTransport.RoundTrip(req)
	if err != nil {
		logger := log.NewLogEntry()
		logger.Error(err, "error in upstreamTransport RoundTrip")
		return nil, err
	}
	// Drop any security header the upstream set; sso_proxy re-applies
	// its own via setSecurityHeaders.
	for key := range securityHeaders {
		resp.Header.Del(key)
	}
	return resp, err
}
// NewReverseProxy creates a reverse proxy to a specified url.
// It adds an X-Forwarded-Host header that is the request's host.
func NewReverseProxy(to *url.URL) *httputil.ReverseProxy {
	rp := httputil.NewSingleHostReverseProxy(to)
	rp.Transport = &upstreamTransport{}
	defaultDirector := rp.Director
	rp.Director = func(req *http.Request) {
		req.Header.Add("X-Forwarded-Host", req.Host)
		defaultDirector(req)
		req.Host = to.Host
	}
	return rp
}
// NewRewriteReverseProxy creates a reverse proxy that is capable of creating upstream
// urls on the fly based on a from regex and a templated to field.
// It adds an X-Forwarded-Host header to the the upstream's request.
func NewRewriteReverseProxy(route *RewriteRoute) *httputil.ReverseProxy {
	proxy := &httputil.ReverseProxy{}
	proxy.Transport = &upstreamTransport{}
	proxy.Director = func(req *http.Request) {
		// we do this to rewrite requests
		rewritten := route.FromRegex.ReplaceAllString(req.Host, route.ToTemplate.Opaque)
		// we use to favor scheme's used in the regex, else we use the default passed in via the template
		target, err := urlParse(route.ToTemplate.Scheme, rewritten)
		if err != nil {
			logger := log.NewLogEntry()
			// we aren't in an error handling context so we have to fake it(thanks stdlib!)
			logger.WithRequestHost(req.Host).WithRewriteRoute(route).Error(
				err, "unable to parse and replace rewrite url")
			req.URL = nil // this will raise an error in http.RoundTripper
			return
		}
		// Delegate to the stock single-host director for the rewritten
		// target, then pin Host (order matters: header first, Host last).
		director := httputil.NewSingleHostReverseProxy(target).Director
		req.Header.Add("X-Forwarded-Host", req.Host)
		director(req)
		req.Host = target.Host
	}
	return proxy
}
// NewReverseProxyHandler creates a new http.Handler given a httputil.ReverseProxy
func NewReverseProxyHandler(reverseProxy *httputil.ReverseProxy, opts *Options, config *UpstreamConfig) (http.Handler, []string) {
	up := &UpstreamProxy{
		handler:    reverseProxy,
		auth:       config.HMACAuth,
		cookieName: opts.CookieName,
	}
	// Streaming upstreams get a flushing handler; everything else is
	// wrapped with a timeout.
	if config.FlushInterval == 0 {
		return NewTimeoutHandler(up, opts, config), []string{"handler:timeout"}
	}
	return NewStreamingHandler(up, opts, config), []string{"handler:streaming"}
}
// NewTimeoutHandler creates a new handler with a configure timeout.
func NewTimeoutHandler(handler http.Handler, opts *Options, config *UpstreamConfig) http.Handler {
	// Per-upstream timeout wins over the global default.
	timeout := config.Timeout
	if timeout == 0 {
		timeout = opts.DefaultUpstreamTimeout
	}
	msg := fmt.Sprintf(
		"%s failed to respond within the %s timeout period", config.Service, timeout)
	return http.TimeoutHandler(handler, timeout, msg)
}
// NewStreamingHandler creates a new handler capable of proxying a stream
func NewStreamingHandler(handler http.Handler, opts *Options, config *UpstreamConfig) http.Handler {
	up := handler.(*UpstreamProxy)
	up.handler.(*httputil.ReverseProxy).FlushInterval = config.FlushInterval
	return up
}
// generateHmacAuth builds an HmacAuth from an "algorithm:secret" spec.
func generateHmacAuth(signatureKey string) (hmacauth.HmacAuth, error) {
	parts := strings.Split(signatureKey, ":")
	if len(parts) != 2 {
		return nil, fmt.Errorf("invalid signature hash:key spec")
	}
	algorithm := parts[0]
	secret := parts[1]
	hash, err := hmacauth.DigestNameToCryptoHash(algorithm)
	if err != nil {
		return nil, fmt.Errorf("unsupported signature hash algorithm: %s", algorithm)
	}
	return hmacauth.NewHmacAuth(hash, []byte(secret), SignatureHeader, SignatureHeaders), nil
}
// NewOAuthProxy creates a new OAuthProxy struct.
// It wires the cookie ciphers, the datadog runtime collector, any
// functional options, and one route per configured upstream.
func NewOAuthProxy(opts *Options, optFuncs ...func(*OAuthProxy) error) (*OAuthProxy, error) {
	logger := log.NewLogEntry()
	logger.WithProvider(opts.provider.Data().ProviderName).WithClientID(opts.ClientID).Info(
		"OAuthProxy configured")
	domain := opts.CookieDomain
	if domain == "" {
		domain = "<default>"
	}
	logger.WithCookieName(opts.CookieName).WithCookieSecure(
		opts.CookieSecure).WithCookieHTTPOnly(opts.CookieHTTPOnly).WithCookieExpire(
		opts.CookieExpire).WithCookieDomain(domain).Info()
	cipher, err := aead.NewMiscreantCipher(secretBytes(opts.CookieSecret))
	if err != nil {
		return nil, fmt.Errorf("cookie-secret error: %s", err.Error())
	}
	// We have an old cookie secret because we do not want stale cookies to error because they are using the old cipher
	// TODO: Remove this logic after the CookieExpire duration passes, since cookies will be refreshed by then.
	if opts.OldCookieSecret == "" {
		opts.OldCookieSecret = opts.CookieSecret
	}
	oldCipher, err := aead.NewOldCipher(secretBytes(opts.OldCookieSecret))
	if err != nil {
		return nil, fmt.Errorf("cookie-secret error: %s", err.Error())
	}
	// we setup a runtime collector to emit stats to datadog
	go func() {
		c := collector.New(opts.StatsdClient, 30*time.Second)
		c.Run()
	}()
	p := &OAuthProxy{
		CookieCipher:    cipher,
		OldCookieCipher: oldCipher,
		CookieDomain:    opts.CookieDomain,
		CookieExpire:    opts.CookieExpire,
		CookieHTTPOnly:  opts.CookieHTTPOnly,
		CookieName:      opts.CookieName,
		CookieSecure:    opts.CookieSecure,
		CookieSeed:      opts.CookieSecret,
		CSRFCookieName:  fmt.Sprintf("%v_%v", opts.CookieName, "csrf"),
		StatsdClient:    opts.StatsdClient,
		// these fields make up the routing mechanism
		mux:         make(map[string]*route),
		regexRoutes: make([]*route, 0),

		provider:          opts.provider,
		redirectURL:       &url.URL{Path: "/oauth2/callback"},
		skipAuthPreflight: opts.SkipAuthPreflight,
		templates:         getTemplates(),
	}
	for _, optFunc := range optFuncs {
		err := optFunc(p)
		if err != nil {
			return nil, err
		}
	}
	for _, upstreamConfig := range opts.upstreamConfigs {
		switch route := upstreamConfig.Route.(type) {
		case *SimpleRoute:
			reverseProxy := NewReverseProxy(route.ToURL)
			handler, tags := NewReverseProxyHandler(reverseProxy, opts, upstreamConfig)
			p.Handle(route.FromURL.Host, handler, tags, upstreamConfig)
		case *RewriteRoute:
			reverseProxy := NewRewriteReverseProxy(route)
			handler, tags := NewReverseProxyHandler(reverseProxy, opts, upstreamConfig)
			p.HandleRegex(route.FromRegex, handler, tags, upstreamConfig)
		default:
			// Fixed typo in the error message ("unkown" -> "unknown").
			return nil, fmt.Errorf("unknown route type")
		}
	}
	return p, nil
}
// Handler returns a http handler for an OAuthProxy
func (p *OAuthProxy) Handler() http.Handler {
	mux := http.NewServeMux()
	mux.HandleFunc("/favicon.ico", p.Favicon)
	mux.HandleFunc("/robots.txt", p.RobotsTxt)
	mux.HandleFunc("/oauth2/sign_out", p.SignOut)
	mux.HandleFunc("/oauth2/callback", p.OAuthCallback)
	mux.HandleFunc("/oauth2/auth", p.AuthenticateOnly)
	mux.HandleFunc("/", p.Proxy)
	// Global middleware, applied innermost-first so host validation runs
	// before anything else when a request is processed.
	var h http.Handler = mux
	if p.CookieSecure {
		h = requireHTTPS(h)
	}
	h = p.setResponseHeaderOverrides(h)
	h = setSecurityHeaders(h)
	h = p.validateHost(h)
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		// Skip host validation for /ping requests because they hit the LB directly.
		if req.URL.Path == "/ping" {
			p.PingPage(rw, req)
			return
		}
		h.ServeHTTP(rw, req)
	})
}
// UnknownHost returns an http error for unknown or invalid hosts
func (p *OAuthProxy) UnknownHost(rw http.ResponseWriter, req *http.Request) {
	logger := log.NewLogEntry()
	p.StatsdClient.Incr("application_error", []string{
		fmt.Sprintf("action:%s", GetActionTag(req)),
		"error:unknown_host",
	}, 1.0)
	logger.WithRequestHost(req.Host).Error("unknown host")
	http.Error(rw, "", statusInvalidHost)
}
// Handle constructs a route from the given host string and matches it to the provided http.Handler and UpstreamConfig
func (p *OAuthProxy) Handle(host string, handler http.Handler, tags []string, upstreamConfig *UpstreamConfig) {
	p.mux[host] = &route{
		handler:        handler,
		upstreamConfig: upstreamConfig,
		tags:           append(tags, "route:simple"),
	}
}
// HandleRegex constructs a route from the given regexp and matches it to the provided http.Handler and UpstreamConfig
func (p *OAuthProxy) HandleRegex(regex *regexp.Regexp, handler http.Handler, tags []string, upstreamConfig *UpstreamConfig) {
	p.regexRoutes = append(p.regexRoutes, &route{
		regex:          regex,
		handler:        handler,
		upstreamConfig: upstreamConfig,
		tags:           append(tags, "route:rewrite"),
	})
}
// router attempts to find a route for a request. If a route matches, it
// is returned along with true; otherwise the route is nil and the bool
// value is false.
func (p *OAuthProxy) router(req *http.Request) (*route, bool) {
	if rt, ok := p.mux[req.Host]; ok {
		return rt, true
	}
	for _, rt := range p.regexRoutes {
		if rt.regex.MatchString(req.Host) {
			return rt, true
		}
	}
	return nil, false
}
// GetRedirectURL returns the redirect url for a given OAuthProxy,
// setting the scheme to be https if CookieSecure is true.
func (p *OAuthProxy) GetRedirectURL(host string) *url.URL {
	// TODO: Ensure that we only allow valid upstream hosts in redirect URIs
	u := *p.redirectURL
	// Build redirect URI from request host
	if u.Scheme == "" {
		u.Scheme = "http"
		if p.CookieSecure {
			u.Scheme = "https"
		}
	}
	u.Host = host
	return &u
}
// redeemCode exchanges an OAuth authorization code for a session state,
// filling in the email address if the provider did not return one.
func (p *OAuthProxy) redeemCode(host, code string) (*providers.SessionState, error) {
	if code == "" {
		return nil, errors.New("missing code")
	}
	redirectURL := p.GetRedirectURL(host)
	session, err := p.provider.Redeem(redirectURL.String(), code)
	if err != nil {
		return session, err
	}
	if session.Email == "" {
		session.Email, err = p.provider.GetEmailAddress(session)
	}
	return session, err
}
// MakeSessionCookie constructs a session cookie given the request, an expiration time and the current time.
func (p *OAuthProxy) MakeSessionCookie(req *http.Request, value string, expiration time.Duration, now time.Time) *http.Cookie {
	return p.makeCookie(req, p.CookieName, value, expiration, now)
}
// MakeCSRFCookie creates a CSRF cookie given the request, an expiration time, and the current time.
func (p *OAuthProxy) MakeCSRFCookie(req *http.Request, value string, expiration time.Duration, now time.Time) *http.Cookie {
	return p.makeCookie(req, p.CSRFCookieName, value, expiration, now)
}
// makeCookie builds a cookie scoped to the request's host (port stripped).
// A configured CookieDomain always overrides the request host; a warning is
// logged when the request host is not within that configured domain.
func (p *OAuthProxy) makeCookie(req *http.Request, name string, value string, expiration time.Duration, now time.Time) *http.Cookie {
	logger := log.NewLogEntry()
	cookieDomain := req.Host
	if host, _, err := net.SplitHostPort(cookieDomain); err == nil {
		cookieDomain = host
	}
	if p.CookieDomain != "" {
		if !strings.HasSuffix(cookieDomain, p.CookieDomain) {
			logger.WithRequestHost(cookieDomain).WithCookieDomain(p.CookieDomain).Warn(
				"using configured cookie domain")
		}
		cookieDomain = p.CookieDomain
	}
	return &http.Cookie{
		Name:     name,
		Value:    value,
		Path:     "/",
		Domain:   cookieDomain,
		HttpOnly: p.CookieHTTPOnly,
		Secure:   p.CookieSecure,
		Expires:  now.Add(expiration),
	}
}
// ClearCSRFCookie clears the CSRF cookie by setting it to an empty value
// that expired an hour ago.
func (p *OAuthProxy) ClearCSRFCookie(rw http.ResponseWriter, req *http.Request) {
	http.SetCookie(rw, p.MakeCSRFCookie(req, "", time.Hour*-1, time.Now()))
}

// SetCSRFCookie sets a CSRF cookie with the given value, expiring after
// p.CookieExpire.
func (p *OAuthProxy) SetCSRFCookie(rw http.ResponseWriter, req *http.Request, val string) {
	http.SetCookie(rw, p.MakeCSRFCookie(req, val, p.CookieExpire, time.Now()))
}

// ClearSessionCookie clears the session cookie by setting it to an empty
// value that expired an hour ago.
func (p *OAuthProxy) ClearSessionCookie(rw http.ResponseWriter, req *http.Request) {
	http.SetCookie(rw, p.MakeSessionCookie(req, "", time.Hour*-1, time.Now()))
}

// SetSessionCookie sets a session cookie with the given value, expiring
// after p.CookieExpire.
func (p *OAuthProxy) SetSessionCookie(rw http.ResponseWriter, req *http.Request, val string) {
	http.SetCookie(rw, p.MakeSessionCookie(req, val, p.CookieExpire, time.Now()))
}
// LoadCookiedSession returns a SessionState decoded from the session cookie
// in the request. If the cookie only decodes with the old cipher, the
// session is returned together with ErrRefreshCookie so the caller can
// re-issue the cookie under the current cipher.
func (p *OAuthProxy) LoadCookiedSession(req *http.Request) (*providers.SessionState, error) {
	logger := log.NewLogEntry()
	c, err := req.Cookie(p.CookieName)
	if err != nil {
		// always http.ErrNoCookie
		return nil, err
	}
	session, err := providers.UnmarshalSession(c.Value, p.CookieCipher)
	if err != nil {
		logger.Info("using old cookie cipher...")
		// invalid cookie session, try using the old cookie cipher
		session, err = providers.UnmarshalSession(c.Value, p.OldCookieCipher)
		if err != nil {
			// fix: log message previously misspelled "sesion"
			logger.Error(err, "error loading session with old cookie cipher")
			return nil, err
		}
		// Decoded with the old cipher: signal the caller to refresh the cookie.
		return session, ErrRefreshCookie
	}
	return session, nil
}
// SaveSession marshals the session state with the current cookie cipher and
// stores it in the response's session cookie. Returns an error only if
// marshaling fails.
func (p *OAuthProxy) SaveSession(rw http.ResponseWriter, req *http.Request, s *providers.SessionState) error {
	value, err := providers.MarshalSession(s, p.CookieCipher)
	if err != nil {
		return err
	}
	p.SetSessionCookie(rw, req, value)
	return nil
}
// RobotsTxt writes a robots.txt body disallowing all crawlers from all
// paths. (The previous doc comment incorrectly claimed this set a
// User-Agent header; it only writes the response body.)
func (p *OAuthProxy) RobotsTxt(rw http.ResponseWriter, _ *http.Request) {
	rw.WriteHeader(http.StatusOK)
	// Fprint, not Fprintf: the payload is a fixed string, not a format.
	fmt.Fprint(rw, "User-agent: *\nDisallow: /")
}
// Favicon will proxy the request as usual if the user is already authenticated
// but responds with a 404 otherwise, to avoid spurious and confusing
// authentication attempts when a browser automatically requests the favicon on
// an error page.
func (p *OAuthProxy) Favicon(rw http.ResponseWriter, req *http.Request) {
	err := p.Authenticate(rw, req)
	if err != nil {
		// Unauthenticated: 404 instead of kicking off an OAuth flow for an icon.
		rw.WriteHeader(http.StatusNotFound)
		return
	}
	p.Proxy(rw, req)
}

// PingPage sends back a 200 OK response with an "OK" body, for health checks.
func (p *OAuthProxy) PingPage(rw http.ResponseWriter, _ *http.Request) {
	rw.WriteHeader(http.StatusOK)
	fmt.Fprintf(rw, "OK")
}
// ErrorPage renders an error page with a given status code, title, and message.
// XHR (XMLHttpRequest) callers receive a JSON body {"error": message};
// everyone else gets the HTML "error.html" template.
func (p *OAuthProxy) ErrorPage(rw http.ResponseWriter, req *http.Request, code int, title string, message string) {
	if p.isXMLHTTPRequest(req) {
		// Content-Type must be set before WriteHeader to take effect.
		rw.Header().Set("Content-Type", "application/json")
		rw.WriteHeader(code)
		err := json.NewEncoder(rw).Encode(struct {
			Error string `json:"error"`
		}{
			Error: message,
		})
		if err != nil {
			// Headers are already sent; best effort is to dump the encode error.
			io.WriteString(rw, err.Error())
		}
	} else {
		logger := log.NewLogEntry()
		logger.WithHTTPStatus(code).WithPageTitle(title).WithPageMessage(message).Info(
			"error page")
		rw.WriteHeader(code)
		t := struct {
			Code    int
			Title   string
			Message string
		}{
			Code:    code,
			Title:   title,
			Message: message,
		}
		// NOTE(review): template execution error is silently ignored here.
		p.templates.ExecuteTemplate(rw, "error.html", t)
	}
}
// IsWhitelistedRequest reports whether authentication may be skipped for
// this request: either it is a preflight OPTIONS request (when
// skipAuthPreflight is enabled), or its path matches one of the matched
// route's skip-auth regexes. Unknown hosts are never whitelisted.
func (p *OAuthProxy) IsWhitelistedRequest(req *http.Request) bool {
	if p.skipAuthPreflight && req.Method == "OPTIONS" {
		return true
	}
	matched, found := p.router(req)
	if !found {
		// This proxy host doesn't exist, so not allowed
		return false
	}
	for _, skipRegex := range matched.upstreamConfig.SkipAuthCompiledRegex {
		if skipRegex.MatchString(req.URL.Path) {
			// This upstream has a matching skip auth regex
			return true
		}
	}
	return false
}
// isXMLHTTPRequest reports whether the request came from an XHR client,
// based on the conventional X-Requested-With header.
func (p *OAuthProxy) isXMLHTTPRequest(req *http.Request) bool {
	return req.Header.Get("X-Requested-With") == "XMLHttpRequest"
}

// SignOut clears the session cookie and redirects the request to the
// provider's sign out url, which in turn redirects back to the request host's
// root over https.
func (p *OAuthProxy) SignOut(rw http.ResponseWriter, req *http.Request) {
	p.ClearSessionCookie(rw, req)
	redirectURL := &url.URL{
		Scheme: "https",
		Host:   req.Host,
		Path:   "/",
	}
	fullURL := p.provider.GetSignOutURL(redirectURL)
	http.Redirect(rw, req, fullURL.String(), http.StatusFound)
}
// OAuthStart begins the authentication flow, encrypting the redirect url in a request to the provider's sign in endpoint.
func (p *OAuthProxy) OAuthStart(rw http.ResponseWriter, req *http.Request, tags []string) {
	// The proxy redirects to the authenticator, and provides it with redirectURI (which points
	// back to the sso proxy).
	logger := log.NewLogEntry()
	if p.isXMLHTTPRequest(req) {
		// XHR clients can't follow an OAuth redirect dance; reply 401 instead.
		p.ErrorPage(rw, req, http.StatusUnauthorized, "Unauthorized", "user not authorized")
		// Bug fix: must return here. Previously execution fell through and
		// also issued a 302 redirect on the already-written 401 response.
		return
	}
	requestURI := req.URL.String()
	callbackURL := p.GetRedirectURL(req.Host)
	// We redirect the browser to the authenticator with a 302 status code. The target URL is
	// constructed using the GetSignInURL() method, which encodes the following data:
	//
	// * client_id: Defined by the OAuth2 RFC https://tools.ietf.org/html/rfc6749.
	//              Identifies the application requesting authentication information,
	//              from our perspective this will always be static since the client
	//              will always be sso proxy
	//
	// * redirect_uri: Defined by the OAuth2 RFC https://tools.ietf.org/html/rfc6749.
	//                 Informs the authenticator _where_ to redirect the user back to once
	//                 they have authenticated with the auth provider and given us permission
	//                 to access their auth information
	//
	// * response_type: Defined by the OAuth2 RFC https://tools.ietf.org/html/rfc6749.
	//                  Required by the spec and must be set to "code"
	//
	// * scope: Defined by the OAuth2 RFC https://tools.ietf.org/html/rfc6749.
	//          Used to offer different auth scopes, but will be unnecessary in the context of SSO.
	//
	// * state: Defined by the OAuth2 RFC https://tools.ietf.org/html/rfc6749.
	//          Used to prevent cross site forgery and maintain state across the client and server.
	key := aead.GenerateKey()
	state := &StateParameter{
		SessionID:   fmt.Sprintf("%x", key),
		RedirectURI: requestURI,
	}
	// we encrypt this value to be opaque to the browser cookie
	// this value will be unique since we always use a randomized nonce as part of marshaling
	encryptedCSRF, err := p.CookieCipher.Marshal(state)
	if err != nil {
		tags = append(tags, "csrf_token_error")
		p.StatsdClient.Incr("application_error", tags, 1.0)
		logger.Error(err, "failed to marshal state parameter for CSRF token")
		p.ErrorPage(rw, req, http.StatusInternalServerError, "Internal Error", err.Error())
		return
	}
	p.SetCSRFCookie(rw, req, encryptedCSRF)
	// we encrypt this value to be opaque to the uri query value
	// this value will be unique since we always use a randomized nonce as part of marshaling
	encryptedState, err := p.CookieCipher.Marshal(state)
	if err != nil {
		tags = append(tags, "error_marshalling_state_parameter")
		p.StatsdClient.Incr("application_error", tags, 1.0)
		logger.Error(err, "failed to marshal state parameter for state query parameter")
		p.ErrorPage(rw, req, http.StatusInternalServerError, "Internal Error", err.Error())
		return
	}
	signinURL := p.provider.GetSignInURL(callbackURL, encryptedState)
	logger.WithSignInURL(signinURL).Info("starting OAuth flow")
	http.Redirect(rw, req, signinURL.String(), http.StatusFound)
}
// OAuthCallback validates the cookie sent back from the provider, then validates
// the user information, and if authorized, redirects the user back to the original
// application. The flow is order-sensitive: redeem code, verify the state/CSRF
// pair, then authorize by email and group membership before saving the session.
func (p *OAuthProxy) OAuthCallback(rw http.ResponseWriter, req *http.Request) {
	// We receive the callback from the SSO Authenticator. This request will either contain an
	// error, or it will contain a `code`; the code can be used to fetch an access token, and
	// other metadata, from the authenticator.
	logger := log.NewLogEntry()
	remoteAddr := getRemoteAddr(req)
	tags := []string{"action:callback"}
	// finish the oauth cycle
	err := req.ParseForm()
	if err != nil {
		p.StatsdClient.Incr("application_error", tags, 1.0)
		p.ErrorPage(rw, req, http.StatusInternalServerError, "Internal Error", err.Error())
		return
	}
	// The authenticator reports its own failures via the "error" form value.
	errorString := req.Form.Get("error")
	if errorString != "" {
		tags = append(tags, "error:callback_error_exists")
		p.StatsdClient.Incr("application_error", tags, 1.0)
		p.ErrorPage(rw, req, http.StatusForbidden, "Permission Denied", errorString)
		return
	}
	// We begin the process of redeeming the code for an access token.
	session, err := p.redeemCode(req.Host, req.Form.Get("code"))
	if err != nil {
		tags = append(tags, "error:redeem_code_error")
		p.StatsdClient.Incr("provider_error", tags, 1.0)
		logger.WithRemoteAddress(remoteAddr).Error(
			err, "error redeeming authorization code")
		p.ErrorPage(rw, req, http.StatusInternalServerError, "Internal Error", "Internal Error")
		return
	}
	// Decrypt the state returned in the query string.
	encryptedState := req.Form.Get("state")
	stateParameter := &StateParameter{}
	err = p.CookieCipher.Unmarshal(encryptedState, stateParameter)
	if err != nil {
		tags = append(tags, "error:state_parameter_error")
		p.StatsdClient.Incr("application_error", tags, 1.0)
		logger.WithRemoteAddress(remoteAddr).Error(
			err, "could not unmarshal state parameter value")
		p.ErrorPage(rw, req, http.StatusInternalServerError, "Internal Error", "Internal Error")
		return
	}
	// Decrypt the CSRF value stored in the browser cookie during OAuthStart.
	c, err := req.Cookie(p.CSRFCookieName)
	if err != nil {
		tags = append(tags, "error:csrf_cookie_error")
		p.StatsdClient.Incr("application_error", tags, 1.0)
		p.ErrorPage(rw, req, http.StatusBadRequest, "Bad Request", err.Error())
		return
	}
	// The CSRF cookie is single-use; clear it regardless of the outcome below.
	p.ClearCSRFCookie(rw, req)
	encryptedCSRF := c.Value
	csrfParameter := &StateParameter{}
	err = p.CookieCipher.Unmarshal(encryptedCSRF, csrfParameter)
	if err != nil {
		tags = append(tags, "error:csrf_parameter_error")
		p.StatsdClient.Incr("application_error", tags, 1.0)
		logger.WithRemoteAddress(remoteAddr).Error(
			err, "couldn't unmarshal CSRF parameter value")
		p.ErrorPage(rw, req, http.StatusInternalServerError, "Internal Error", "Internal Error")
		return
	}
	// Each Marshal uses a fresh nonce, so the two ciphertexts must differ even
	// though they encode the same plaintext; equality implies replay/tampering.
	if encryptedState == encryptedCSRF {
		tags = append(tags, "error:equal_encrypted_state_and_csrf")
		p.StatsdClient.Incr("application_error", tags, 1.0)
		logger.WithRemoteAddress(remoteAddr).Info(
			"encrypted state value and encrypted CSRF value are unexpectedly equal")
		p.ErrorPage(rw, req, http.StatusBadRequest, "Bad Request", "Bad Request")
		return
	}
	// The decrypted payloads, however, must match exactly.
	if !reflect.DeepEqual(stateParameter, csrfParameter) {
		tags = append(tags, "error:state_csrf_mismatch")
		p.StatsdClient.Incr("application_error", tags, 1.0)
		logger.WithRemoteAddress(remoteAddr).Info(
			"state parameter and CSRF parameters are unexpectedly not equal")
		p.ErrorPage(rw, req, http.StatusBadRequest, "Bad Request", "Bad Request")
		return
	}
	// We validate the user information, and check that this user has proper authorization
	// for the resources requested. This can be set via the email address or any groups.
	//
	// set cookie, or deny
	if !p.EmailValidator(session.Email) {
		tags = append(tags, "error:invalid_email")
		p.StatsdClient.Incr("application_error", tags, 1.0)
		logger.WithRemoteAddress(remoteAddr).WithUser(session.Email).Info(
			"permission denied: unauthorized")
		p.ErrorPage(rw, req, http.StatusForbidden, "Permission Denied", "Invalid Account")
		return
	}
	route, ok := p.router(req)
	if !ok {
		// this shouldn't happen since we've already matched the host once on this request
		tags = append(tags, "error:unknown_host")
		p.StatsdClient.Incr("application_error", tags, 1.0)
		logger.WithRemoteAddress(remoteAddr).WithUser(session.Email).Info(
			"couldn't resolve route from host name for membership check")
		p.ErrorPage(rw, req, http.StatusInternalServerError, "Internal Error", "Error looking up route for group membership")
		return
	}
	allowedGroups := route.upstreamConfig.AllowedGroups
	inGroups, validGroup, err := p.provider.ValidateGroup(session.Email, allowedGroups)
	if err != nil {
		tags = append(tags, "error:user_group_failed")
		p.StatsdClient.Incr("provider_error", tags, 1.0)
		logger.WithRemoteAddress(remoteAddr).WithUser(session.Email).Info(
			"couldn't fetch user groups")
		p.ErrorPage(rw, req, http.StatusInternalServerError, "Internal Error", "Error validating group membership, please try again")
		return
	}
	if !validGroup {
		tags = append(tags, "error:unauthorized_email")
		p.StatsdClient.Incr("provider_error", tags, 1.0)
		logger.WithRemoteAddress(remoteAddr).WithUser(session.Email).WithAllowedGroups(
			allowedGroups).Info("permission denied: unauthorized")
		p.ErrorPage(rw, req, http.StatusForbidden, "Permission Denied", "Group membership required")
		return
	}
	logger.WithRemoteAddress(remoteAddr).WithUser(session.Email).WithInGroups(inGroups).Info(
		"authentication complete")
	session.Groups = inGroups
	// We store the session in a cookie and redirect the user back to the application
	err = p.SaveSession(rw, req, session)
	if err != nil {
		tags = append(tags, "error:save_session_error")
		p.StatsdClient.Incr("application_error", tags, 1.0)
		logger.WithRemoteAddress(remoteAddr).Error(err, "error saving session")
		p.ErrorPage(rw, req, http.StatusInternalServerError, "Internal Error", "Internal Error")
		return
	}
	// This is the redirect back to the original requested application
	http.Redirect(rw, req, stateParameter.RedirectURI, http.StatusFound)
}
// AuthenticateOnly calls the Authenticate handler and reports the result:
// 202 Accepted when the request is authenticated, 401 Unauthorized otherwise.
func (p *OAuthProxy) AuthenticateOnly(rw http.ResponseWriter, req *http.Request) {
	err := p.Authenticate(rw, req)
	if err != nil {
		p.StatsdClient.Incr("application_error", []string{"action:auth", "error:unauthorized_request"}, 1.0)
		http.Error(rw, "unauthorized request", http.StatusUnauthorized)
		// Bug fix: return here. Previously the code fell through and also
		// called WriteHeader(202) after the 401 had been written, which is a
		// superfluous WriteHeader call.
		return
	}
	rw.WriteHeader(http.StatusAccepted)
}
// Proxy authenticates a request, either proxying the request if it is
// authenticated, or starting the authentication process if not. Whitelisted
// requests (see IsWhitelistedRequest) skip authentication entirely.
func (p *OAuthProxy) Proxy(rw http.ResponseWriter, req *http.Request) {
	// Attempts to validate the user and their cookie.
	logger := log.NewLogEntry()
	start := time.Now()
	tags := []string{"action:proxy"}
	var err error
	// If the request is explicitly whitelisted, we skip authentication
	if p.IsWhitelistedRequest(req) {
		tags = append(tags, "auth_type:whitelisted")
	} else {
		tags = append(tags, "auth_type:authenticated")
		err = p.Authenticate(rw, req)
	}
	// If the authentication is not successful we proceed to start the OAuth Flow with
	// OAuthStart. If authentication is successful, we proceed to proxy to the configured
	// upstream.
	if err != nil {
		switch err {
		case http.ErrNoCookie:
			// No cookie is set, start the oauth flow
			p.OAuthStart(rw, req, tags)
			return
		case ErrUserNotAuthorized:
			tags = append(tags, "error:user_unauthorized")
			p.StatsdClient.Incr("application_error", tags, 1.0)
			// We know the user is not authorized for the request, we show them a forbidden page
			p.ErrorPage(rw, req, http.StatusForbidden, "Forbidden", "You're not authorized to view this page")
			return
		case ErrLifetimeExpired:
			// User's lifetime expired, we trigger the start of the oauth flow
			p.OAuthStart(rw, req, tags)
			return
		default:
			logger.Error(err, "unknown error authenticating user")
			tags = append(tags, "error:internal_error")
			p.StatsdClient.Incr("application_error", tags, 1.0)
			// We don't know exactly what happened, but authenticating the user failed, show an error
			p.ErrorPage(rw, req, http.StatusInternalServerError, "Internal Error", "An unexpected error occurred")
			return
		}
	}
	// We have validated the users request and now proxy their request to the provided upstream.
	route, ok := p.router(req)
	if !ok {
		p.UnknownHost(rw, req)
		return
	}
	if route.tags != nil {
		tags = append(tags, route.tags...)
	}
	// idiomatic form of time.Now().Sub(start)
	overhead := time.Since(start)
	p.StatsdClient.Timing("request_overhead", overhead, tags, 1.0)
	route.handler.ServeHTTP(rw, req)
}
// Authenticate authenticates a request by checking for a session cookie, and validating its expiration,
// clearing the session cookie if it's invalid and returning an error if necessary.
// Three nested time windows are checked, longest first: lifetime (full session
// validity), refresh (access-token validity; triggers a provider refresh), and
// validation (short interval; re-checks the user is still active).
func (p *OAuthProxy) Authenticate(rw http.ResponseWriter, req *http.Request) (err error) {
	logger := log.NewLogEntry()
	// use for logging
	remoteAddr := getRemoteAddr(req)
	route, ok := p.router(req)
	if !ok {
		logger.WithRequestHost(req.Host).Info(
			"error looking up route by host to validate user groups")
		return ErrUnknownHost
	}
	allowedGroups := route.upstreamConfig.AllowedGroups
	// Clear the session cookie if anything goes wrong.
	// Named return err makes this deferred check see the final error value.
	defer func() {
		if err != nil {
			p.ClearSessionCookie(rw, req)
		}
	}()
	session, err := p.LoadCookiedSession(req)
	// TODO: Remove ErrCookieRefresh codepath after completing migration from AES-GCM to AES-SIV.
	// ErrRefreshCookie is not fatal: the session was decoded with the old
	// cipher and still carries a usable session alongside the error.
	if err != nil && err != ErrRefreshCookie {
		// We loaded a cookie but it wasn't valid, clear it, and reject the request
		logger.WithRemoteAddress(remoteAddr).Error(err, "error authenticating user")
		return err
	}
	// Lifetime period is the entire duration in which the session is valid.
	// This should be set to something like 14 to 30 days.
	if session.LifetimePeriodExpired() {
		// session lifetime has expired, we reject the request and clear the cookie
		logger.WithUser(session.Email).Info(
			"lifetime has expired; restarting authentication")
		return ErrLifetimeExpired
	} else if session.RefreshPeriodExpired() || err == ErrRefreshCookie {
		// Refresh period is the period in which the access token is valid. This is ultimately
		// controlled by the upstream provider and tends to be around 1 hour.
		ok, err := p.provider.RefreshSession(session, allowedGroups)
		// We failed to refresh the session successfully
		// clear the cookie and reject the request
		if err != nil {
			logger.WithUser(session.Email).Error(err, "refreshing session failed")
			return err
		}
		if !ok {
			// User is not authorized after refresh
			// clear the cookie and reject the request
			logger.WithUser(session.Email).Info(
				"not authorized after refreshing session")
			return ErrUserNotAuthorized
		}
		// Persist the refreshed session under the current cipher.
		err = p.SaveSession(rw, req, session)
		if err != nil {
			// We refreshed the session successfully, but failed to save it.
			//
			// This could be from failing to encode the session properly.
			// But, we clear the session cookie and reject the request!
			logger.WithUser(session.Email).Error(
				err, "could not save refreshed session")
			return err
		}
	} else if session.ValidationPeriodExpired() {
		// Validation period has expired, this is the shortest interval we use to
		// check for valid requests. This should be set to something like a minute.
		// This calls up the provider chain to validate this user is still active
		// and hasn't been de-authorized.
		ok := p.provider.ValidateSessionState(session, allowedGroups)
		if !ok {
			// This user is now no longer authorized, or we failed to
			// validate the user.
			// Clear the cookie and reject the request
			logger.WithUser(session.Email).Error(
				err, "no longer authorized after validation period")
			return ErrUserNotAuthorized
		}
		err = p.SaveSession(rw, req, session)
		if err != nil {
			// We validated the session successfully, but failed to save it.
			// This could be from failing to encode the session properly.
			// But, we clear the session cookie and reject the request!
			logger.WithUser(session.Email).Error(
				err, "could not save validated session")
			return err
		}
	}
	// Final gate: the email must still satisfy the configured validator.
	if !p.EmailValidator(session.Email) {
		logger.WithUser(session.Email).Error("not authorized")
		return ErrUserNotAuthorized
	}
	// Forward identity to the upstream via headers.
	req.Header.Set("X-Forwarded-User", session.User)
	req.Header.Set("X-Forwarded-Email", session.Email)
	req.Header.Set("X-Forwarded-Groups", strings.Join(session.Groups, ","))
	// stash authenticated user so that it can be logged later (see func logRequest)
	rw.Header().Set(loggingUserHeader, session.Email)
	// This user has been OK'd. Allow the request!
	return nil
}
|
package rtda
import "jvmgo_c/ch11/rtda/heap"
// NewShimFrame builds a frame backed by the shim "return" method, reusing
// the caller-supplied operand stack. Used to bridge native/JVM call returns.
func NewShimFrame(thread *Thread, ops *OperandStack) *Frame {
	frame := &Frame{}
	frame.thread = thread
	frame.method = heap.ShimReturnMethod()
	frame.operandStack = ops
	return frame
}
|
package controller
import (
"github.com/fberrez/forum/model"
"github.com/gin-gonic/gin"
"log"
"net/http"
"strconv"
)
// GetCategory responds with all categories as JSON.
// On failure it responds 500 with the error message and returns; it must not
// terminate the process for a single failed request.
func GetCategory(c *gin.Context) {
	categories, err := model.GetCategory()
	if err != nil {
		// err.Error(): a raw error value marshals to "{}" in JSON.
		c.JSON(http.StatusInternalServerError, gin.H{"executed": false, "message": err.Error()})
		// log.Printf, not log.Fatalf: Fatalf would exit the whole server.
		log.Printf("getCategory error : %v", err)
		return
	}
	c.JSON(http.StatusOK, gin.H{"executed": true, "content": categories})
}
// func GetContent(c *gin.Context) {
// if c.Param("idSub") == "" {
// getSubCategoryByIdCategory(c.Param("idCat"), c)
// } else {
// getPostsByIdSubCategory(c.Param("idSub"), c)
// }
// }
// GetSubCategoryByIdCategory responds with the sub-categories belonging to
// the category identified by the "idCat" path parameter.
// Each failure responds with a JSON error and returns; it must not terminate
// the process for a single failed request.
func GetSubCategoryByIdCategory(c *gin.Context) {
	idCategory, err := strconv.Atoi(c.Param("idCat"))
	if err != nil {
		// A non-numeric id is a client error, reported as err.Error() so it
		// survives JSON marshaling (raw errors marshal to "{}").
		c.JSON(http.StatusInternalServerError, gin.H{"executed": false, "message": err.Error()})
		log.Printf("Parse idCategory to int error : %v", err)
		return
	}
	subCategories, err := model.GetSubCategoryByIdCategory(idCategory)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"executed": false, "message": err.Error()})
		// log.Printf, not log.Fatalf: Fatalf would exit the whole server.
		log.Printf("getSubCategory error : %v", err)
		return
	}
	c.JSON(http.StatusOK, gin.H{"executed": true, "content": subCategories})
}
// GetPostsByIdSubCategory responds with the posts belonging to the
// sub-category identified by the "idSubCat" path parameter.
// Each failure responds with a JSON error and returns; it must not terminate
// the process for a single failed request.
func GetPostsByIdSubCategory(c *gin.Context) {
	idSC, err := strconv.Atoi(c.Param("idSubCat"))
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"executed": false, "message": err.Error()})
		log.Printf("Parse idSubCat to int error : %v", err)
		return
	}
	// Log the parsed id only after a successful parse.
	log.Println(idSC)
	posts, err := model.GetPostByIdSubCat(idSC)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"executed": false, "message": err.Error()})
		// log.Printf, not log.Fatalf: Fatalf would exit the whole server.
		log.Printf("getPostByIdSubCat error : %v", err)
		return
	}
	c.JSON(http.StatusOK, gin.H{"executed": true, "content": posts})
}
|
package server
import (
"log"
"github.com/cswank/quimby/internal/auth"
"github.com/cswank/quimby/internal/config"
"github.com/cswank/quimby/internal/homekit"
"github.com/cswank/quimby/internal/repository"
"github.com/cswank/quimby/internal/router"
"github.com/cswank/quimby/internal/templates"
)
// Start initializes templates, auth, and HomeKit support, then serves the
// router with the given repositories. Any initialization or serve error is
// fatal: the process exits via log.Fatal.
func Start(cfg config.Config, g *repository.Gadget, u *repository.User) {
	if err := templates.Init(); err != nil {
		log.Fatal(err)
	}
	a := auth.New(u)
	hc, err := homekit.New()
	if err != nil {
		log.Fatal(err)
	}
	// Serve blocks until the server stops; a non-nil error ends the process.
	if err := router.Serve(cfg, g, u, a, hc); err != nil {
		log.Fatal(err)
	}
}
|
// Package main contains pomerium
package main
import (
"context"
"errors"
"flag"
"fmt"
"github.com/rs/zerolog"
"github.com/pomerium/pomerium/config"
"github.com/pomerium/pomerium/internal/log"
"github.com/pomerium/pomerium/internal/version"
"github.com/pomerium/pomerium/pkg/cmd/pomerium"
"github.com/pomerium/pomerium/pkg/envoy/files"
)
// Command-line flags.
var (
	versionFlag = flag.Bool("version", false, "prints the version")
	configFile  = flag.String("config", "", "Specify configuration file location")
)
// main parses flags, optionally prints version info, and runs pomerium until
// the context is canceled.
func main() {
	flag.Parse()
	if *versionFlag {
		fmt.Println("pomerium:", version.FullVersion())
		fmt.Println("envoy:", files.FullVersion())
		return
	}
	ctx := context.Background()
	// A context.Canceled error is treated as a clean shutdown.
	// NOTE(review): errors.Is(nil, context.Canceled) is false, so a nil
	// return from run would also reach log.Fatal — confirm run never
	// returns nil on clean exit.
	if err := run(ctx); !errors.Is(err, context.Canceled) {
		log.Fatal().Err(err).Msg("cmd/pomerium")
	}
	log.Info(ctx).Msg("cmd/pomerium: exiting")
}
// run attaches logging context (config file source, bootstrap marker),
// builds a config source from the -config flag or the environment, and
// runs pomerium until it exits.
func run(ctx context.Context) error {
	ctx = log.WithContext(ctx, func(c zerolog.Context) zerolog.Context {
		return c.Str("config_file_source", *configFile).Bool("bootstrap", true)
	})
	var src config.Source
	src, err := config.NewFileOrEnvironmentSource(*configFile, files.FullVersion())
	if err != nil {
		return err
	}
	return pomerium.Run(ctx, src)
}
|
package main
import (
"fmt"
"net/url"
"time"
"github.com/kavenegar/kavenegar-go"
)
// main demonstrates the kavenegar SMS API: a structured Message.Send call
// followed by an equivalent url.Values-based Message.CreateSend call.
// Replace the placeholder API key before running.
func main() {
	api := kavenegar.New(" your apikey ")
	//Message.Send
	sender := ""                  //Sender Line Number(optional)
	receptor := []string{"", ""}  //Recipient numbers
	message := "Hello Go!"        //Text message
	// Optional parameters: schedule delivery 10 minutes out, attach local
	// ids to correlate results, and set the per-recipient message type.
	params := &kavenegar.MessageSendParam{
		Date:    time.Now().Add(time.Duration(10) * time.Minute),
		LocalID: []string{"1000", "1001"},
		Type:    []kavenegar.MessageSendType{kavenegar.Type_MessageSend_AppMemory, kavenegar.Type_MessageSend_AppMemory},
	}
	if res, err := api.Message.Send(sender, receptor, message, params); err != nil {
		// Distinguish API-level from transport-level failures; all are printed.
		switch err := err.(type) {
		case *kavenegar.APIError:
			fmt.Println(err.Error())
		case *kavenegar.HTTPError:
			fmt.Println(err.Error())
		default:
			fmt.Println(err.Error())
		}
	} else {
		for _, r := range res {
			fmt.Println("MessageID = ", r.MessageID)
			fmt.Println("Status = ", r.Status)
			//...
		}
	}
	//Message.CreateSend
	// Same request expressed as raw url.Values; commented lines show the
	// optional fields corresponding to MessageSendParam above.
	v := url.Values{}
	//v.Set("sender", "")
	v.Set("message", "Hello Go!")
	v.Add("receptor", "")
	v.Add("receptor", "")
	//v.Add("type",kavenegar.Type_MessageSend_AppMemory.String())
	//v.Add("type",kavenegar.Type_MessageSend_AppMemory.String())
	//v.Add("localid","1000")
	//v.Add("localid","1001")
	//t := time.Now().Add(time.Duration(10) * time.Minute)
	//v.Set("date", kavenegar.TimeToUnix(t))
	if res, err := api.Message.CreateSend(v); err != nil {
		switch err := err.(type) {
		case *kavenegar.APIError:
			fmt.Println(err.Error())
		case *kavenegar.HTTPError:
			fmt.Println(err.Error())
		default:
			fmt.Println(err.Error())
		}
	} else {
		for _, r := range res {
			fmt.Println("MessageID = ", r.MessageID)
			fmt.Println("Status = ", r.Status)
			//...
		}
	}
}
|
// 15 april 2015
package pgidl
import (
"io"
"text/scanner"
"strconv"
)
// lexerr records a single lexing/parsing error and where it occurred.
type lexerr struct {
	msg string           // human-readable error message
	pos scanner.Position // input position at the time the error was reported
}

// lexer wraps a text/scanner.Scanner to feed tokens to the generated
// parser, accumulating the parsed IDL and any errors encountered.
type lexer struct {
	scanner *scanner.Scanner
	idl     IDL      // parse result, filled in by grammar actions
	errs    []lexerr // errors collected during scanning/parsing
}
// newLexer builds a lexer over r. The scanner recognizes identifiers and
// strings, and consumes comments without emitting them; scanner-internal
// errors are routed into the lexer's error list. filename is used only for
// error positions.
func newLexer(r io.Reader, filename string) *lexer {
	s := new(scanner.Scanner)
	s.Init(r)
	s.Mode = scanner.ScanIdents | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
	s.Position.Filename = filename
	l := &lexer{scanner: s}
	s.Error = func(_ *scanner.Scanner, msg string) {
		l.Error(msg)
	}
	return l
}
// symtypes maps reserved IDL keywords to their parser token values; any
// other identifier is lexed as tokIDENT (see Lex).
var symtypes = map[string]int{
	"package":   tokPACKAGE,
	"func":      tokFUNC,
	"struct":    tokSTRUCT,
	"interface": tokINTERFACE,
	"void":      tokVOID,
	"field":     tokFIELD,
	"from":      tokFROM,
	"raw":       tokRAW,
	"const":     tokCONST,
	"enum":      tokENUM,
}
// Lex returns the next token for the parser, storing the token text in
// lval.String for identifiers and strings. EOF is reported as 0 per the
// goyacc convention; any other rune is returned as itself.
func (l *lexer) Lex(lval *yySymType) int {
	r := l.scanner.Scan()
	switch r {
	case scanner.EOF:
		return 0
	case scanner.Ident:
		lval.String = l.scanner.TokenText()
		// Keywords lex as their dedicated token; everything else is tokIDENT.
		t, ok := symtypes[lval.String]
		if !ok {
			return tokIDENT
		}
		return t
	case scanner.String:
		ss := l.scanner.TokenText()
		// the token text is a Go string in a string!
		// Unquote to recover the literal value; on failure the error is
		// recorded and the zero string is used.
		ss, err := strconv.Unquote(ss)
		if err != nil {
			l.Error(err.Error())
		}
		lval.String = ss
		return tokSTRING
	}
	return int(r)
}
// Error records a lexing/parsing error with the scanner's current position.
// It satisfies the error-reporting interface expected by the generated parser.
func (l *lexer) Error(s string) {
	l.errs = append(l.errs, lexerr{
		msg: s,
		pos: l.scanner.Pos(),
	})
}
|
package events
import (
"encoding/json"
"errors"
"fmt"
)
// Event is an immutable event to be handled
type Event struct {
	typeField uint        // event type discriminator, exposed via Type()
	completed bool        // set via SetCompleted; never cleared once set
	err       error       // first error set via SetError; later calls ignored
	Payload   interface{} // optional caller-supplied payload
}
// NewEvent constructs an event of the given type and optional payload.
// Only the first payload argument, if any, is used.
func NewEvent(typeValue uint, payload ...interface{}) *Event {
	evt := &Event{typeField: typeValue}
	if len(payload) > 0 {
		evt.Payload = payload[0]
	}
	return evt
}
// Type returns the event type
func (evt Event) Type() uint {
	return evt.typeField
}

// Completed returns true if the event is completed
func (evt Event) Completed() bool {
	return evt.completed
}

// SetCompleted sets the event completed flag to true.
// Once called, the event can never be reverted to incomplete.
// SetCompleted can be called any number of times.
func (evt *Event) SetCompleted() {
	evt.completed = true
}

// Error returns the event error, or nil if there is no error.
func (evt Event) Error() error {
	return evt.err
}

// SetError sets the event error.
// SetError only has an effect on the first call, further calls are ignored.
// It is up to each handler to check for errors.
func (evt *Event) SetError(err error) {
	// Only the first error wins; subsequent errors are dropped.
	if evt.err == nil {
		evt.err = err
	}
}
// MarshalJSON marshals an event into a JSON string.
// A non-nil error is flattened to its message string, losing the concrete
// error type.
func (evt Event) MarshalJSON() ([]byte, error) {
	type wire struct {
		Type      uint        `json:"type"`
		Completed bool        `json:"completed"`
		Error     string      `json:"error"`
		Payload   interface{} `json:"payload"`
	}
	out := wire{
		Type:      evt.typeField,
		Completed: evt.completed,
		Payload:   evt.Payload,
	}
	if evt.err != nil {
		out.Error = evt.err.Error()
	}
	return json.Marshal(out)
}
// UnmarshalJSON unmarshals JSON into an event.
// If the json contains an error, it is just a string, and will be unmarshalled using errors.New(string).
// Note the round trip is asymmetric: the payload is kept as raw
// json.RawMessage rather than being decoded back to its original type.
func (evt *Event) UnmarshalJSON(buf []byte) error {
	var u struct {
		Type      uint            `json:"type"`
		Completed bool            `json:"completed"`
		Error     string          `json:"error"`
		Payload   json.RawMessage `json:"payload"`
	}
	if err := json.Unmarshal(buf, &u); err != nil {
		return err
	}
	evt.typeField = u.Type
	evt.completed = u.Completed
	// An empty error string leaves evt.err untouched (stays nil on a fresh event).
	if u.Error != "" {
		evt.err = errors.New(u.Error)
	}
	evt.Payload = u.Payload
	return nil
}
// InvocationPayload represents a payload that invokes a named function with a parameter map.
// The invocation can be implemented via reflection, switch statement, RPC, database call, etc.
type InvocationPayload struct {
	Name   string                 `json:"name"`   // name of the function to invoke
	Params map[string]interface{} `json:"params"` // keyword arguments for the invocation
}
// Handler handles a received event
type Handler interface {
	HandleEvent(*Event)
}

// HandlerFunc type is an adapter to allow the use of
// ordinary functions as event handlers. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler that calls f.
type HandlerFunc func(*Event)

// HandleEvent calls h(evt)
func (h HandlerFunc) HandleEvent(evt *Event) {
	h(evt)
}
// Discriminator determines whether or not a handler should be invoked for an event
type Discriminator interface {
	Applicable(Event) bool
}

// DiscriminatorFunc type is an adapter to allow the use of
// ordinary functions as discriminators, DiscriminatorFunc(f) is a
// Discriminator that calls f.
type DiscriminatorFunc func(Event) bool

// Applicable returns d(evt)
func (d DiscriminatorFunc) Applicable(evt Event) bool {
	return d(evt)
}
// AllEvents always returns true, accepting every event.
func AllEvents(evt Event) bool {
	return true
}

// AllEventsDiscriminator returns a simple event discriminator that always returns true
func AllEventsDiscriminator() Discriminator {
	return DiscriminatorFunc(AllEvents)
}

// EventsMatchingType returns true if the Event is of the given type
func EventsMatchingType(evt Event, evtType uint) bool {
	return evt.Type() == evtType
}
// EventsMatchingTypeDiscriminator returns an event discriminator that returns true when the event matches the given type.
// (Doc comment fixed: it previously named the function "EventsMatchingDiscriminator".)
func EventsMatchingTypeDiscriminator(evtType uint) Discriminator {
	return DiscriminatorFunc(func(evt Event) bool {
		return EventsMatchingType(evt, evtType)
	})
}
// IncompleteEvents returns true until an event is marked completed
func IncompleteEvents(evt Event) bool {
	return !evt.completed
}
// IncompleteEventsDiscriminator returns an event discriminator that accepts
// events until they are marked completed. It wraps IncompleteEvents directly,
// mirroring AllEventsDiscriminator.
func IncompleteEventsDiscriminator() Discriminator {
	return DiscriminatorFunc(IncompleteEvents)
}
// MatcherFunc matches a Handler Discriminator pair for removal
type MatcherFunc func(Handler, Discriminator) bool
// AllMatcher produces a MatcherFunc that always returns true
func AllMatcher() MatcherFunc {
return MatcherFunc(func(mh Handler, md Discriminator) bool {
return true
})
}
// HandlerMatcher produces a MatcherFunc for a Handler instance, comparing
// identity via %p formatting.
// NOTE(review): %p identity only distinguishes pointer-shaped handlers
// (e.g. HandlerFunc, *T); confirm value-type Handlers are never registered.
func HandlerMatcher(h Handler) MatcherFunc {
	return MatcherFunc(func(mh Handler, md Discriminator) bool {
		return fmt.Sprintf("%p", h) == fmt.Sprintf("%p", mh)
	})
}
// DiscriminatorMatcher produces a MatcherFunc for a Discriminator instance,
// comparing identity via %p formatting (same caveat as HandlerMatcher).
func DiscriminatorMatcher(d Discriminator) MatcherFunc {
	return MatcherFunc(func(mh Handler, md Discriminator) bool {
		return fmt.Sprintf("%p", d) == fmt.Sprintf("%p", md)
	})
}
// HandlerDiscriminatorMatcher produces a MatcherFunc that requires both the
// Handler instance and the Discriminator instance to match (by %p identity).
func HandlerDiscriminatorMatcher(h Handler, d Discriminator) MatcherFunc {
	return MatcherFunc(func(mh Handler, md Discriminator) bool {
		return (fmt.Sprintf("%p", h) == fmt.Sprintf("%p", mh)) &&
			(fmt.Sprintf("%p", d) == fmt.Sprintf("%p", md))
	})
}
// handlerAndDiscriminator pairs a registered handler with the discriminator
// that gates its invocation.
type handlerAndDiscriminator struct {
	handler       Handler
	discriminator Discriminator
}
// Registry registers event handlers and forwards received events to them.
// The zero value for Registry is an empty registry ready to use.
type Registry struct {
	eventHandlers []handlerAndDiscriminator
}
// AddHandler registers handler, gated by the optional discriminator.
// If no discriminator is provided, an IncompleteEvents discriminator is used.
func (r *Registry) AddHandler(handler Handler, discriminator ...Discriminator) {
	var d Discriminator
	if len(discriminator) > 0 {
		d = discriminator[0]
	} else {
		d = IncompleteEventsDiscriminator()
	}
	r.eventHandlers = append(r.eventHandlers, handlerAndDiscriminator{handler: handler, discriminator: d})
}
// RemoveHandler removes the first handler/discriminator pair accepted by m,
// or every matching pair when removeAll[0] is true.
func (r *Registry) RemoveHandler(m MatcherFunc, removeAll ...bool) {
	if (len(removeAll) > 0) && removeAll[0] {
		// Remove all matching handlers by building new array of the ones to keep
		var newHandlers []handlerAndDiscriminator
		for _, had := range r.eventHandlers {
			if !m(had.handler, had.discriminator) {
				newHandlers = append(newHandlers, had)
			}
		}
		r.eventHandlers = newHandlers
	} else {
		// Remove first matching handler; the append splices the slice in place
		// and we break immediately, so the mutated iteration state is not reused.
		for i, had := range r.eventHandlers {
			if m(had.handler, had.discriminator) {
				r.eventHandlers = append(r.eventHandlers[:i], r.eventHandlers[i+1:]...)
				break
			}
		}
	}
}
// SendEvent iterates all handlers in the order they were added to the registry;
// every handler whose discriminator returns true for the event is invoked.
func (r *Registry) SendEvent(evt *Event) {
	for _, had := range r.eventHandlers {
		if had.discriminator.Applicable(*evt) {
			had.handler.HandleEvent(evt)
		}
	}
}
|
package starportcmd
import (
"fmt"
"github.com/tendermint/starport/starport/pkg/chaincmd"
"github.com/spf13/cobra"
"github.com/tendermint/starport/starport/services/chain"
)
// NewRelayer creates a new command called chain that holds IBC Relayer related
// sub commands (self info and adding peer chains).
func NewRelayer() *cobra.Command {
	c := &cobra.Command{
		Use:   "chain",
		Short: "Relay connects blockchains via IBC protocol",
	}
	c.AddCommand(NewRelayerInfo())
	c.AddCommand(NewRelayerAdd())
	return c
}
// NewRelayerInfo creates the "me" sub command that shows self chain
// information to share with other chains.
func NewRelayerInfo() *cobra.Command {
	c := &cobra.Command{
		Use:   "me",
		Short: "Retrieves self chain information to share with other chains",
		RunE:  relayerInfoHandler,
	}
	c.Flags().AddFlagSet(flagSetHomes())
	return c
}
// NewRelayerAdd creates the "add" sub command that connects another chain to
// the relayer using its chain information (first positional argument).
func NewRelayerAdd() *cobra.Command {
	c := &cobra.Command{
		Use:   "add [another]",
		Short: "Adds another chain by its chain information",
		Args:  cobra.MinimumNArgs(1),
		RunE:  relayerAddHandler,
	}
	c.Flags().AddFlagSet(flagSetHomes())
	return c
}
// relayerInfoHandler prints this chain's relayer connection info on stdout.
func relayerInfoHandler(cmd *cobra.Command, args []string) error {
	// NOTE(review): the test keyring backend is forced here — presumably to
	// avoid interactive passphrase prompts; confirm against other handlers.
	chainOption := []chain.Option{
		chain.LogLevel(logLevel(cmd)),
		chain.KeyringBackend(chaincmd.KeyringBackendTest),
	}
	c, err := newChainWithHomeFlags(cmd, appPath, chainOption...)
	if err != nil {
		return err
	}
	info, err := c.RelayerInfo()
	if err != nil {
		return err
	}
	fmt.Println(info)
	return nil
}
// relayerAddHandler registers the chain described by the first argument with
// the relayer.
func relayerAddHandler(cmd *cobra.Command, args []string) error {
	c, err := newChainWithHomeFlags(cmd, appPath,
		chain.LogLevel(logLevel(cmd)),
		chain.KeyringBackend(chaincmd.KeyringBackendTest),
	)
	if err != nil {
		return err
	}
	return c.RelayerAdd(args[0])
}
|
package main
import (
"github.com/astaxie/beego"
)
// MainController serves "/" and sets the demo cookies.
type MainController struct {
	beego.Controller
}

// DelController serves "/delete" and expires the name cookie.
type DelController struct {
	beego.Controller
}

// ViewController serves "/view" and expires the age cookie.
type ViewController struct {
	beego.Controller
}
// Get expires the age cookie and writes a plain-text body.
func (c *ViewController) Get() {
	c.Ctx.SetCookie("age", "", -1)
	c.Ctx.WriteString("view world")
}
// Get sets the demo name/age cookies and writes a plain-text body.
func (c *MainController) Get() {
	c.Ctx.SetCookie("name", "zhangkun", 100000)
	c.Ctx.SetCookie("age", "100", 100000)
	c.Ctx.WriteString("hello world")
}
// Get expires the name cookie and redirects the client to /view.
func (c *DelController) Get() {
	c.Ctx.SetCookie("name", "", -1)
	c.Redirect("/view", 302)
	// c.Ctx.WriteString("delete")
}
// main registers the three demo routes and starts the beego HTTP server.
func main() {
	beego.Router("/", &MainController{})
	beego.Router("/delete", &DelController{})
	beego.Router("/view", &ViewController{})
	beego.Run()
}
|
package geoip2
import (
"fmt"
"log"
"net"
"os"
"path/filepath"
"strings"
"sync"
"github.com/abh/geodns/countries"
"github.com/abh/geodns/targeting/geo"
geoip2 "github.com/oschwald/geoip2-golang"
)
// geoType enumerates the kinds of MaxMind databases the provider can open.
type geoType uint8

const (
	// Give the constants the geoType type explicitly; with a bare
	// "= iota" they would be untyped ints rather than geoType values.
	countryDB geoType = iota
	cityDB
	asnDB
)
// dbFiles maps each database kind to its candidate file names (commercial
// GeoIP2 first, free GeoLite2 second); populated in init.
var dbFiles map[geoType][]string
// GeoIP2 contains the geoip implementation of the GeoDNS geo
// targeting interface.
type GeoIP2 struct {
	dir     string
	country *geoip2.Reader
	city    *geoip2.Reader
	asn     *geoip2.Reader
	mu      sync.RWMutex // guards the three reader fields above
}
// init seeds the candidate file names for each database kind.
func init() {
	dbFiles = map[geoType][]string{
		countryDB: {"GeoIP2-Country.mmdb", "GeoLite2-Country.mmdb"},
		asnDB:     {"GeoIP2-ASN.mmdb", "GeoLite2-ASN.mmdb"},
		cityDB:    {"GeoIP2-City.mmdb", "GeoLite2-City.mmdb"},
	}
}
// FindDB returns the first conventional GeoIP data directory that exists,
// or "" when none does.
func FindDB() string {
	dirs := []string{
		"/usr/share/GeoIP/",       // Linux default
		"/usr/share/local/GeoIP/", // source install?
		"/usr/local/share/GeoIP/", // FreeBSD
		"/opt/local/share/GeoIP/", // MacPorts
	}
	for _, dir := range dirs {
		if _, err := os.Stat(dir); err != nil {
			// A missing directory is the normal case and stays quiet; log
			// only unexpected stat failures. The original tested
			// os.IsExist(err), which is never true for a Stat error here.
			if !os.IsNotExist(err) {
				log.Println(err)
			}
			continue
		}
		return dir
	}
	return ""
}
// open opens the reader for database kind t. When db is empty, the
// conventional file names from dbFiles are probed inside g.dir and the first
// existing one is used. The opened reader is cached on g under the write lock
// and returned.
func (g *GeoIP2) open(t geoType, db string) (*geoip2.Reader, error) {
	fileName := filepath.Join(g.dir, db)
	if len(db) == 0 {
		// No explicit file name given: fall back to the first candidate
		// that exists on disk.
		found := false
		for _, f := range dbFiles[t] {
			fileName = filepath.Join(g.dir, f)
			if _, err := os.Stat(fileName); err == nil {
				found = true
				break
			}
		}
		if !found {
			return nil, fmt.Errorf("could not find '%s' in '%s'", dbFiles[t], g.dir)
		}
	}
	n, err := geoip2.Open(fileName)
	if err != nil {
		return nil, err
	}
	g.mu.Lock()
	defer g.mu.Unlock()
	switch t {
	case countryDB:
		g.country = n
	case cityDB:
		g.city = n
	case asnDB:
		g.asn = n
	}
	return n, nil
}
// get returns the cached reader for kind t, lazily opening it on first use.
// Two goroutines racing past the RUnlock may both call open; the last stored
// reader wins, which costs a duplicate open but is otherwise harmless.
func (g *GeoIP2) get(t geoType, db string) (*geoip2.Reader, error) {
	g.mu.RLock()
	var r *geoip2.Reader
	switch t {
	case countryDB:
		r = g.country
	case cityDB:
		r = g.city
	case asnDB:
		r = g.asn
	}
	// unlock so the g.open() call below won't lock
	g.mu.RUnlock()
	if r != nil {
		return r, nil
	}
	return g.open(t, db)
}
// New returns a new GeoIP2 provider rooted at dir. The country database is
// opened eagerly so a missing data directory fails fast; city and ASN
// databases are opened lazily on first use.
func New(dir string) (*GeoIP2, error) {
	g := &GeoIP2{
		dir: dir,
	}
	_, err := g.open(countryDB, "")
	if err != nil {
		return nil, err
	}
	return g, nil
}
// HasASN reports whether an ASN database could be opened.
func (g *GeoIP2) HasASN() (bool, error) {
	r, err := g.get(asnDB, "")
	if err != nil || r == nil {
		return false, err
	}
	return true, nil
}
// GetASN returns the autonomous system number for the IP, both as an
// "as123"-formatted string and as an integer.
func (g *GeoIP2) GetASN(ip net.IP) (string, int, error) {
	r, err := g.get(asnDB, "")
	if err != nil {
		return "", 0, err
	}
	c, err := r.ASN(ip)
	if err != nil {
		return "", 0, fmt.Errorf("lookup ASN for '%s': %s", ip.String(), err)
	}
	asn := c.AutonomousSystemNumber
	// The original returned 0 for the integer form despite the documented
	// contract; return the actual ASN.
	return fmt.Sprintf("as%d", asn), int(asn), nil
}
// HasCountry reports whether the GeoIP country database is available.
func (g *GeoIP2) HasCountry() (bool, error) {
	r, err := g.get(countryDB, "")
	if err != nil || r == nil {
		return false, err
	}
	return true, nil
}
// GetCountry returns the lowercase ISO country code, the continent code and a
// netmask (always 0 here) for the given IP. Any failure is logged and yields
// zero values.
func (g *GeoIP2) GetCountry(ip net.IP) (country, continent string, netmask int) {
	// The original discarded this error and dereferenced a possibly-nil
	// reader; check it before use.
	r, err := g.get(countryDB, "")
	if err != nil {
		log.Printf("Could not open country database: %s", err)
		return "", "", 0
	}
	c, err := r.Country(ip)
	if err != nil {
		log.Printf("Could not lookup country for '%s': %s", ip.String(), err)
		return "", "", 0
	}
	country = c.Country.IsoCode
	if len(country) > 0 {
		country = strings.ToLower(country)
		continent = countries.CountryContinent[country]
	}
	return country, continent, 0
}
// HasLocation reports whether the city database is available to return
// lat/lon information for an IP.
func (g *GeoIP2) HasLocation() (bool, error) {
	r, err := g.get(cityDB, "")
	if err != nil || r == nil {
		return false, err
	}
	return true, nil
}
// GetLocation returns a geo.Location object for the given IP.
func (g *GeoIP2) GetLocation(ip net.IP) (l *geo.Location, err error) {
	// Go through get() rather than touching g.city directly: get() holds the
	// read lock and lazily opens the database, so this no longer panics when
	// the city database was never opened.
	r, err := g.get(cityDB, "")
	if err != nil {
		return nil, err
	}
	c, err := r.City(ip)
	if err != nil {
		log.Printf("Could not lookup CountryRegion for '%s': %s", ip.String(), err)
		return
	}
	l = &geo.Location{
		Latitude:  float64(c.Location.Latitude),
		Longitude: float64(c.Location.Longitude),
		Country:   strings.ToLower(c.Country.IsoCode),
	}
	if len(c.Subdivisions) > 0 {
		l.Region = strings.ToLower(c.Subdivisions[0].IsoCode)
	}
	if len(l.Country) > 0 {
		l.Continent = countries.CountryContinent[l.Country]
		if len(l.Region) > 0 {
			l.Region = l.Country + "-" + l.Region
			l.RegionGroup = countries.CountryRegionGroup(l.Country, l.Region)
		}
	}
	return
}
|
package room
import (
"fmt"
"github.com/mooncaker816/gophercises/poker/deck"
"github.com/veandco/go-sdl2/img"
"github.com/veandco/go-sdl2/sdl"
)
// Table holds the card table state: the seated players, the deck in play and
// an optional win/lose result texture rendered over the scene.
type Table struct {
	result  *sdl.Texture
	Players []*Player
	Deck    *deck.Deck
}
// AddDeck creates a shuffled deck of n combined packs, installs it on the
// table and returns it.
func (t *Table) AddDeck(n int) deck.Deck {
	d := deck.New(deck.Multiple(n), deck.Shuffle)
	t.Deck = &d
	return d
}
// AddPlayer adds new players to the scene.
func (t *Table) AddPlayer(ps ...*Player) {
	// Variadic append replaces the element-by-element loop.
	t.Players = append(t.Players, ps...)
}
// UpdateResult loads the win or lose image into t.result for final rendering.
func (t *Table) UpdateResult(r *sdl.Renderer, win bool) error {
	var path string
	if win {
		path = "../../../res/img/youwin.jpg"
	} else {
		path = "../../../res/img/youlose.jpg"
	}
	res, err := img.LoadTexture(r, path)
	if err != nil {
		return fmt.Errorf("could not load result: %v", err)
	}
	t.result = res
	return nil
}
// paint renders every player and, when present, the result texture in a
// fixed centered rectangle.
func (t *Table) paint(r *sdl.Renderer, s *Scene) error {
	for _, p := range s.Table.Players {
		if err := p.paint(r, s); err != nil {
			return fmt.Errorf("could not paint player: %v", err)
		}
	}
	if s.Table.result != nil {
		rect := &sdl.Rect{X: 200, Y: 150, W: 400, H: 300}
		if err := r.Copy(s.Table.result, nil, rect); err != nil {
			return fmt.Errorf("could not copy card texture: %v", err)
		}
	}
	return nil
}
// destroy releases the result texture, if one was ever loaded. The nil guard
// prevents destroying a texture that UpdateResult never set.
func (t *Table) destroy() {
	if t.result != nil {
		t.result.Destroy()
	}
}
|
package twch
import (
"fmt"
"net/http"
"reflect"
"testing"
)
// TestListEmoticons verifies that Chat.ListEmoticons decodes the emoticon
// payload served by /chat/emoticons.
func TestListEmoticons(t *testing.T) {
	setup()
	defer teardown()
	mux.HandleFunc("/chat/emoticons", func(w http.ResponseWriter, r *http.Request) {
		testMethod(t, r, "GET")
		fmt.Fprint(w, `{ "_links": { "self": "s" }, "emoticons": [ { "regex": "r", "images": [ { "emoticon_set": 1, "height": 1, "width": 1, "url": "u" } ] } ]}`)
	})
	want := []Emoticon{
		{
			Regex: stringPtr("r"),
			Images: []EmoticonImage{
				{
					EmoticonSet: intPtr(1),
					Height:      intPtr(1),
					Width:       intPtr(1),
					URL:         stringPtr("u"),
				},
			},
		},
	}
	got, _, err := client.Chat.ListEmoticons()
	if err != nil {
		t.Errorf("Chat.ListEmoticons: request returned error %+v", err)
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("Chat.ListEmoticons response did not match:\nwant: %+v\ngot: %+v", want, got)
	}
}
|
// compact.
package main
import (
"bytes"
"encoding/json"
"fmt"
)
// j is the indented sample document that main compacts.
var j = []byte(`[
	{
		"name": "lily",
		"age": 11
	},
	{
		"name": "dory",
		"age": 12
	}
]`)
// main compacts the sample JSON document and prints it before and after.
func main() {
	out := new(bytes.Buffer)
	if err := json.Compact(out, j); err != nil {
		panic(err)
	}
	fmt.Printf("json.Compact:\n")
	fmt.Printf("Before:%s\n", j)
	fmt.Printf("After:%s\n", out)
}
|
package main
import "fmt"
// globalVar is package-level state printed by every demo function below.
var globalVar = "This is a global variable and can be used in any function"
// main exercises each demo function in turn.
func main(){
	var x string = "Hello, World"
	var b bool
	fmt.Println(x)
	boo()
	b = tonto()
	fmt.Println("The value received by tonto is:", b)
	fmt.Printf("The value received by tonto is: %t \n", b)
	cadenas("uno", "dos")
	numbers(10, 5)
	numbers(20, 14)
	fmt.Println("\n", globalVar)
}
// boo prints a banner, a boolean AND result and the global variable.
func boo(){
	fmt.Println("#### Boo function ####")
	fmt.Println(true && true)
	fmt.Println("\n", globalVar)
}
// tonto prints its banner plus the global variable and returns the result of
// the boolean AND.
func tonto() bool {
	fmt.Println("\n#### Tonto function ####")
	result := true && true
	fmt.Println("\n", globalVar)
	return result
}
// cadenas demonstrates string formatting, concatenation and comparison.
func cadenas(x string, y string) {
	fmt.Println("\n#### Cadenas function ####")
	fmt.Printf("Hola uno es %s y dos es %s \n", x, y)
	una, dos := x, y
	fmt.Println("La primera es: " + una + ", la segunda es: " + dos)
	thisone := fmt.Sprintf(" Con enteros %d ", 1)
	jo := fmt.Sprintf("jola %d", 1)
	fmt.Println("First this one" + thisone + "then: " + jo)
	fmt.Printf("Is string \"%s\" equal to \"%s\" = %t \n", x, y, x == y)
	fmt.Println("\n", globalVar)
}
// numbers prints the result of the basic integer operations on x and y.
// Note: integer division and remainder — this panics if y is 0 (the callers
// in main pass nonzero values).
func numbers(x int, y int){
	fmt.Println("\n#### numbers function ####")
	fmt.Println("This function prints the result of an operation on screen")
	//fmt.Printf("The sum of %d + %d = %d \n", x, y, x + y)
	fmt.Printf("The sum of %d + %d = %d \n", x, y, x + y)
	fmt.Printf("The subs of %d - %d = %d \n", x, y, x - y)
	fmt.Printf("The mult of %d * %d = %d \n", x, y, x * y)
	fmt.Printf("The div of %d / %d = %d \n", x, y, x / y)
	fmt.Printf("The rem of %d %% %d = %d \n", x, y, x % y)
	mew := 5
	fmt.Println(mew)
	fmt.Println("\n", globalVar)
}
// askAndGive is an empty placeholder. The original declaration omitted the
// parameter list ("func askAndGive{"), which does not compile.
func askAndGive() {
}
|
package humanity
import "fmt"
// Preparer is implemented by anything that can be readied for a mission.
type Preparer interface {
	Prepare() error
}
// Prepare marks the human ready, announcing when it was already ready.
func (h *Human) Prepare() error {
	if h.Ready {
		fmt.Printf("%v is ready !\n", h)
	}
	h.Ready = true
	return nil
}
// PrepareMissionPart prepares every object and returns the first error
// encountered. The original discarded every Prepare error and always
// returned nil.
func PrepareMissionPart(objs ...Preparer) error {
	for _, obj := range objs {
		if err := obj.Prepare(); err != nil {
			return err
		}
	}
	return nil
}
// Checker is implemented by anything that can report mission readiness.
type Checker interface {
	Check() bool
}
// Check reports whether the human is ready.
func (h *Human) Check() bool {
	return h.Ready
}
// CheckMissionPart reports whether every object is ready. The original
// ignored the individual Check results and unconditionally returned true.
func CheckMissionPart(objs ...Checker) bool {
	for _, obj := range objs {
		if !obj.Check() {
			return false
		}
	}
	return true
}
|
package aes
import (
"testing"
"github.com/iGoogle-ink/gotil/xlog"
)
var (
	// secretKey and iv are fixed demo values used by the CBC round-trip tests.
	secretKey = "GYBh3Rmey7nNzR/NpV0vAw=="
	iv        = "JR3unO2glQuMhUx3"
)
// TestDesCBCEncryptDecrypt round-trips a plaintext through DES-CBC with the
// package default IV and asserts the plaintext is recovered. The original
// only logged errors and could never fail.
func TestDesCBCEncryptDecrypt(t *testing.T) {
	originData := "www.gopay.ink"
	xlog.Debug("originData:", originData)
	encryptData, err := DesCBCEncryptData([]byte(originData), []byte(secretKey))
	if err != nil {
		t.Fatalf("DesCBCEncryptData: %v", err)
	}
	xlog.Debug("encryptData:", string(encryptData))
	origin, err := DesCBCDecryptData(encryptData, []byte(secretKey))
	if err != nil {
		t.Fatalf("DesCBCDecryptData: %v", err)
	}
	xlog.Debug("origin:", string(origin))
	if string(origin) != originData {
		t.Fatalf("round trip mismatch: got %q, want %q", origin, originData)
	}
}
// TestDesCBCEncryptDecryptIv round-trips a plaintext through DES-CBC with an
// explicit IV and asserts the plaintext is recovered. The original only
// logged errors and could never fail.
func TestDesCBCEncryptDecryptIv(t *testing.T) {
	originData := "www.gopay.ink"
	xlog.Debug("originData:", originData)
	encryptData, err := DesCBCEncryptIvData([]byte(originData), []byte(secretKey), []byte(iv))
	if err != nil {
		t.Fatalf("DesCBCEncryptIvData: %v", err)
	}
	xlog.Debug("encryptData:", string(encryptData))
	origin, err := DesCBCDecryptIvData(encryptData, []byte(secretKey), []byte(iv))
	if err != nil {
		t.Fatalf("DesCBCDecryptIvData: %v", err)
	}
	xlog.Debug("origin:", string(origin))
	if string(origin) != originData {
		t.Fatalf("round trip mismatch: got %q, want %q", origin, originData)
	}
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-07-21 09:41
# @File : _206_Reverse_Linked_List.go
# @Description : Reverse a singly linked list by iteratively re-pointing each
#                node at its predecessor; no dummy head node is needed.
# @Attention :
Because the reversal proceeds in place, prev ends up as the new head, so it is the return value.
*/
package v0
// reverseList reverses a singly linked list in place and returns the new head.
func reverseList(head *ListNode) *ListNode {
	var prev *ListNode
	for head != nil {
		next := head.Next
		head.Next = prev
		prev, head = head, next
	}
	return prev
}
|
package util
import (
"github.com/umeat/go-gnss/cmd/database/models"
"github.com/geoscienceaustralia/go-rtcm/rtcm3"
)
// ParseSatelliteMask converts a 64-bit satellite mask into PRN numbers:
// the most significant bit corresponds to PRN 1, the least to PRN 64.
func ParseSatelliteMask(satMask uint64) (prns []int) {
	prn := 1
	for bit := 63; bit >= 0; bit-- {
		if satMask&(1<<uint(bit)) != 0 {
			prns = append(prns, prn)
		}
		prn++
	}
	return prns
}
// ParseSignalMask converts a 32-bit signal mask into signal IDs, highest ID
// first: bit 31 yields ID 32, bit 0 yields ID 1.
func ParseSignalMask(sigMask uint32) (ids []int) {
	for id := 32; id > 0; id-- {
		if sigMask&(1<<uint(id-1)) != 0 {
			ids = append(ids, id)
		}
	}
	return ids
}
// Utob reports whether v is nonzero.
func Utob(v uint64) bool {
	return v != 0
}
// ParseCellMask expands the low `length` bits of cellMask into booleans,
// with the highest-numbered bit first: cells[0] is bit length-1, the last
// element is bit 0.
func ParseCellMask(cellMask uint64, length int) (cells []bool) {
	for i := length - 1; i >= 0; i-- {
		cells = append(cells, (cellMask>>uint(i))&0x1 == 1)
	}
	return cells
}
// ObservationMsm7 flattens an RTCM3 MSM7 message into the Observation model.
// The satellite, signal and cell masks define which (satellite, signal)
// combinations are present; sigPos indexes the densely-packed signal arrays
// while cellPos walks the full satellite×signal grid.
func ObservationMsm7(msg rtcm3.MessageMsm7) (obs models.Observation, err error) {
	obs = models.Observation{
		MessageNumber: msg.MessageNumber,
		ReferenceStationId: msg.ReferenceStationId,
		Epoch: msg.Epoch,
		ClockSteeringIndicator: msg.ClockSteeringIndicator,
		ExternalClockIndicator: msg.ExternalClockIndicator,
		SmoothingInterval: msg.SmoothingInterval,
		SatelliteData: []models.SatelliteData{},
	}
	satIDs := ParseSatelliteMask(msg.SatelliteMask)
	sigIDs := ParseSignalMask(msg.SignalMask)
	// One cell flag per satellite/signal combination, in grid order.
	cellIDs := ParseCellMask(msg.CellMask, len(satIDs) * len(sigIDs))
	cellPos := 0
	sigPos := 0
	for i, satID := range satIDs {
		satData := models.SatelliteData{
			SatelliteID: satID,
			Extended: msg.SatelliteData.Extended[i],
			PhaseRangeRates: msg.SatelliteData.PhaseRangeRates[i],
			SignalData: []models.SignalData{},
		}
		for _, sigID := range sigIDs {
			if cellIDs[cellPos] {
				// Only present cells consume an entry from the signal arrays.
				satData.SignalData = append(satData.SignalData, models.SignalData{
					SignalID: sigID,
					Pseudoranges: msg.SignalData.Pseudoranges[sigPos],
					PhaseRanges: msg.SignalData.PhaseRanges[sigPos],
					PhaseRangeLocks: msg.SignalData.PhaseRangeLocks[sigPos],
					HalfCycles: msg.SignalData.HalfCycles[sigPos],
					CNRs: msg.SignalData.Cnrs[sigPos],
					PhaseRangeRates: msg.SignalData.PhaseRangeRates[sigPos],
				})
				sigPos ++
			}
			cellPos ++
		}
		obs.SatelliteData = append(obs.SatelliteData, satData)
	}
	return obs, err
}
|
package main
import (
"net/http"
"net/http/httptest"
"reflect"
"testing"
)
// TestJsonResponseString checks the pretty-printed form of jsonResponse.
func TestJsonResponseString(t *testing.T) {
	j := jsonResponse{"a": "b"}
	expected := `{
"a": "b"
}`
	if j.String() != expected {
		t.Errorf("Expected: %v, Received: %v", expected, j.String())
	}
}
// TestGetIPAddress checks that getIPAddress falls back to RemoteAddr and
// prefers X-Forwarded-For when present.
func TestGetIPAddress(t *testing.T) {
	request, _ := http.NewRequest("GET", "/get", nil)
	remoteAddr := "1.2.3.4"
	request.RemoteAddr = remoteAddr
	ip := getIPAddress(request)
	if ip != remoteAddr {
		t.Errorf("Received incorrect IP address. Expected: %v, received: %v.", remoteAddr, ip)
	}
	headerIP := "5.6.7.8"
	request.Header.Set("X-Forwarded-For", headerIP)
	ip = getIPAddress(request)
	if ip != headerIP {
		t.Errorf("Failed to parse X-Forwarded-For properly. Expected: %v, received: %v", headerIP, ip)
	}
}
// TestFlatten verifies that multi-valued query params are comma-joined.
func TestFlatten(t *testing.T) {
	input := map[string][]string{
		"a": {"b"},
		"c": {"d", "e"},
	}
	want := map[string]string{
		"a": "b",
		"c": "d,e",
	}
	if got := flatten(input); !reflect.DeepEqual(want, got) {
		t.Errorf("Expected %v, received %v", want, got)
	}
}
// TestHandlersReturnExpectedStatusCodes drives each handler directly with a
// recorder and checks the resulting status code.
func TestHandlersReturnExpectedStatusCodes(t *testing.T) {
	type handler struct {
		f      func(http.ResponseWriter, *http.Request)
		method string
		path   string
		status int
	}
	for _, h := range []handler{
		{index, "GET", "/", 200},
		{ip, "GET", "/ip", 200},
		{userAgent, "GET", "/user-agent", 200},
		{headers, "GET", "/headers", 200},
		{get, "GET", "/get", 200},
		{get, "GET", "/robots.txt", 200},
		{get, "GET", "/deny", 200},
	} {
		request, _ := http.NewRequest(h.method, h.path, nil)
		response := httptest.NewRecorder()
		h.f(response, request)
		if response.Code != h.status {
			t.Errorf("Expected status code %v, received %v", h.status, response.Code)
		}
	}
}
|
// test-sort1 project doc.go
/*
test-sort1 document
*/
package main
|
package semt
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document02200101 is the XML document wrapper for the semt.022.001.01
// SecuritiesSettlementTransactionAuditTrailReport message.
type Document02200101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:semt.022.001.01 Document"`
	Message *SecuritiesSettlementTransactionAuditTrailReportV01 `xml:"SctiesSttlmTxAudtTrlRpt"`
}
// AddMessage allocates the report payload, attaches it to the document and
// returns it for population.
func (d *Document02200101) AddMessage() *SecuritiesSettlementTransactionAuditTrailReportV01 {
	d.Message = new(SecuritiesSettlementTransactionAuditTrailReportV01)
	return d.Message
}
// Scope
//
// This message is sent by the Market Infrastructure to the CSD to advise of the history of all the statuses, modifications, replacement and cancellation of a specific transaction during its whole life cycle when the instructing party is a direct participant to the Settlement Infrastructure.
//
//
// Usage
//
// The message may also be used to:
//
// - re-send a message sent by the market infrastructure to the direct participant,
//
// - provide a third party with a copy of a message being sent by the market infrastructure for information,
//
// - re-send to a third party a copy of a message being sent by the market infrastructure for information
// using the relevant elements in the Business Application Header.
//
// ISO 15022 - 20022 Coexistence
// This ISO 20022 message is reverse engineered from ISO 15022. Both standards will coexist for a certain number of years. Until this coexistence period ends, the usage of certain data types is restricted to ensure interoperability between ISO 15022 and 20022 users. Compliance to these rules is mandatory in a coexistence environment. The coexistence restrictions are described in a Textual Rule linked to the Message Items they concern. These coexistence textual rules are clearly identified as follows: “CoexistenceXxxxRule”.
type SecuritiesSettlementTransactionAuditTrailReportV01 struct {
	// Page number of the message (within a statement) and continuation indicator to indicate that the statement is to continue or that the message is the last page of the statement.
	Pagination *iso20022.Pagination `xml:"Pgntn"`
	// Identification of the SecuritiesStatusQuery message sent to request this report.
	QueryReference *iso20022.Identification1 `xml:"QryRef,omitempty"`
	// Provides unambiguous transaction identification information.
	TransactionIdentification *iso20022.TransactionIdentifications15 `xml:"TxId,omitempty"`
	// Account to or from which a securities entry is made.
	SafekeepingAccount *iso20022.SecuritiesAccount13 `xml:"SfkpgAcct"`
	// Party that legally owns the account.
	AccountOwner *iso20022.PartyIdentification36Choice `xml:"AcctOwnr,omitempty"`
	// Provides the history of status and reasons for a pending, posted or cancelled transaction.
	StatusTrail []*iso20022.StatusTrail2 `xml:"StsTrl,omitempty"`
}
// AddPagination allocates the Pagination component, attaches it and returns it.
func (s *SecuritiesSettlementTransactionAuditTrailReportV01) AddPagination() *iso20022.Pagination {
	s.Pagination = new(iso20022.Pagination)
	return s.Pagination
}
// AddQueryReference allocates the QueryReference component, attaches it and returns it.
func (s *SecuritiesSettlementTransactionAuditTrailReportV01) AddQueryReference() *iso20022.Identification1 {
	s.QueryReference = new(iso20022.Identification1)
	return s.QueryReference
}
// AddTransactionIdentification allocates the TransactionIdentification component, attaches it and returns it.
func (s *SecuritiesSettlementTransactionAuditTrailReportV01) AddTransactionIdentification() *iso20022.TransactionIdentifications15 {
	s.TransactionIdentification = new(iso20022.TransactionIdentifications15)
	return s.TransactionIdentification
}
// AddSafekeepingAccount allocates the SafekeepingAccount component, attaches it and returns it.
func (s *SecuritiesSettlementTransactionAuditTrailReportV01) AddSafekeepingAccount() *iso20022.SecuritiesAccount13 {
	s.SafekeepingAccount = new(iso20022.SecuritiesAccount13)
	return s.SafekeepingAccount
}
// AddAccountOwner allocates the AccountOwner component, attaches it and returns it.
func (s *SecuritiesSettlementTransactionAuditTrailReportV01) AddAccountOwner() *iso20022.PartyIdentification36Choice {
	s.AccountOwner = new(iso20022.PartyIdentification36Choice)
	return s.AccountOwner
}
// AddStatusTrail appends a new StatusTrail2 entry and returns it for population.
func (s *SecuritiesSettlementTransactionAuditTrailReportV01) AddStatusTrail() *iso20022.StatusTrail2 {
	newValue := new(iso20022.StatusTrail2)
	s.StatusTrail = append(s.StatusTrail, newValue)
	return newValue
}
|
package atomix
import (
"strconv"
"sync/atomic"
)
// AlignedInt64 is an atomic wrapper around an int64 aligned to a cache line.
type AlignedInt64 struct {
	atomicType
	value int64
	// Padding so adjacent values do not share a cache line.
	// unsafe.Sizeof(int64) == 8; assumes atomicType is zero-sized —
	// TODO(review) confirm.
	_ [CacheLine - 8]byte
}
// NewAlignedInt64 creates an AlignedInt64 holding i.
func NewAlignedInt64(i int64) *AlignedInt64 {
	return &AlignedInt64{value: i}
}
// String returns the current value formatted in base 10.
func (a *AlignedInt64) String() string {
	return strconv.FormatInt(a.Load(), 10)
}
// Load atomically loads and returns the value.
func (a *AlignedInt64) Load() int64 {
	return atomic.LoadInt64(&a.value)
}
// Store atomically stores the given value.
func (a *AlignedInt64) Store(n int64) {
	atomic.StoreInt64(&a.value, n)
}
// Swap atomically stores n and returns the previous value.
func (a *AlignedInt64) Swap(n int64) int64 {
	return atomic.SwapInt64(&a.value, n)
}
// Add atomically adds n and returns the new value.
func (a *AlignedInt64) Add(n int64) int64 {
	return atomic.AddInt64(&a.value, n)
}
// Sub atomically subtracts n and returns the new value.
func (a *AlignedInt64) Sub(n int64) int64 {
	return atomic.AddInt64(&a.value, -n)
}
// Inc atomically increments by one and returns the new value.
func (a *AlignedInt64) Inc() int64 {
	return a.Add(1)
}
// Dec atomically decrements by one and returns the new value.
func (a *AlignedInt64) Dec() int64 {
	return a.Sub(1)
}
// CAS is an atomic Compare-And-Swap operation; it reports whether the swap
// from old to new happened.
func (a *AlignedInt64) CAS(old, new int64) bool {
	return atomic.CompareAndSwapInt64(&a.value, old, new)
}
|
package basic
import "fmt"
// RangeSliceMap demonstrates the one- and two-value forms of range over a
// slice and over a map.
func RangeSliceMap() {
	nums := []int{10, 20, 30}
	for i := range nums {
		fmt.Println(i)
	}
	for i, v := range nums {
		fmt.Println(i, v)
	}
	scores := map[string]int{"top1": 1000, "top2": 500}
	for k, v := range scores {
		fmt.Println(k, v)
	}
}
// RangeByte1 shows the difference between ranging over a string (which
// decodes runes) and ranging over its bytes.
func RangeByte1() {
	data := "A\xfe\x02\xff\x04"
	for _, v := range data { // without a []byte conversion, invalid UTF-8 bytes decode as U+FFFD
		fmt.Printf("%#x ", v)
	}
	// Output: 0x41 0xfffd 0x2 0xfffd 0x4
	fmt.Println()
	for _, v := range []byte(data) { // with the []byte conversion the raw bytes come through
		fmt.Printf("%#x ", v)
	}
	// Output: 0x41 0xfe 0x2 0xff 0x4
}
// Range runs the range demos, bracketing their output with banner lines.
// The banners previously misspelled "Range" as "Rage".
func Range() {
	fmt.Println("<--------------------Range begin------------------> ")
	RangeSliceMap()
	RangeByte1()
	fmt.Println("\n <--------------------Range end------------------> ")
}
|
package philifence
import (
"encoding/json"
"github.com/julienschmidt/httprouter"
"io"
"io/ioutil"
"net/http"
"net/http/pprof"
"strconv"
)
// fences and roads are the package-level indexes queried by the HTTP
// handlers; they are set once by ListenAndServe.
var fences, roads FenceIndex
// ListenAndServe wires the fence/road routes onto addr and blocks serving
// requests. fidx and ridx become the package-level indexes used by the
// handlers. When profile is true the net/http/pprof endpoints are mounted
// under /debug/pprof/.
func ListenAndServe(addr string, fidx, ridx FenceIndex, profile bool) error {
	info("Listening on %s\n", addr)
	defer info("Done Fencing\n")
	fences = fidx
	roads = ridx
	router := httprouter.New()
	router.GET("/fence", getFenceList)
	router.POST("/fence/:name/add", postFenceAdd)
	router.GET("/fence/:name/search", getFenceSearch)
	router.GET("/road", getRoadList)
	router.POST("/road/:name/add", postRoadAdd)
	router.GET("/road/:name/search", getRoadSearch)
	if profile {
		profiler(router)
		info("Profiling available at /debug/pprof/")
	}
	return http.ListenAndServe(addr, router)
}
// respond writes res as a 200 JSON response with the standard headers.
// Any writeJson error is ignored: the status line is already committed.
func respond(w http.ResponseWriter, res interface{}) {
	w.Header().Set("Server", "philifence")
	w.Header().Set("Connection", "keep-alive")
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	writeJson(w, res)
}
// getFenceList responds with the names of all indexed fences.
func getFenceList(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	writeJson(w, fences.Keys())
}
// getRoadList responds with the names of all indexed roads.
func getRoadList(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	writeJson(w, roads.Keys())
}
// postFenceAdd reads a geojson feature from the request body (64 MB cap) and
// indexes it under the fence name from the URL.
func postFenceAdd(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<26)) // 64 MB max
	if err != nil {
		http.Error(w, "Body 64 MB max", http.StatusRequestEntityTooLarge)
		return
	}
	if err := r.Body.Close(); err != nil {
		http.Error(w, "Error closing body", http.StatusInternalServerError)
		return
	}
	name := params.ByName("name")
	g, err := unmarshalFeature(string(body))
	if err != nil {
		http.Error(w, "Unable to read geojson feature", http.StatusBadRequest)
		return
	}
	feature, err := featureAdapter(g)
	if err != nil {
		http.Error(w, "Unable to read geojson feature", http.StatusBadRequest)
		return
	}
	if err := fences.Add(name, feature); err != nil {
		http.Error(w, "Error adding feature "+err.Error(), http.StatusBadRequest)
		// Bug fix: previously fell through and also wrote "success" after
		// the error response.
		return
	}
	respond(w, "success")
}
// postRoadAdd reads a geojson feature from the request body (64 MB cap) and
// indexes it under the road name from the URL.
func postRoadAdd(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<26)) // 64 MB max
	if err != nil {
		http.Error(w, "Body 64 MB max", http.StatusRequestEntityTooLarge)
		return
	}
	if err := r.Body.Close(); err != nil {
		http.Error(w, "Error closing body", http.StatusInternalServerError)
		return
	}
	name := params.ByName("name")
	g, err := unmarshalFeature(string(body))
	if err != nil {
		http.Error(w, "Unable to read geojson feature", http.StatusBadRequest)
		return
	}
	feature, err := featureAdapter(g)
	if err != nil {
		http.Error(w, "Unable to read geojson feature", http.StatusBadRequest)
		return
	}
	if err := roads.Add(name, feature); err != nil {
		http.Error(w, "Error adding feature "+err.Error(), http.StatusBadRequest)
		// Bug fix: previously fell through and also wrote "success" after
		// the error response.
		return
	}
	respond(w, "success")
}
// getFenceSearch finds which fences under :name contain the lat/lon from the
// query string (within an optional tolerance, default ~1m) and responds with
// their properties plus any remaining query params echoed back.
func getFenceSearch(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	query := r.URL.Query()
	lat, err := strconv.ParseFloat(query.Get("lat"), 64)
	if err != nil {
		http.Error(w, "Query param 'lat' required as float", http.StatusBadRequest)
		return
	}
	lon, err := strconv.ParseFloat(query.Get("lon"), 64)
	if err != nil {
		http.Error(w, "Query param 'lon' required as float", http.StatusBadRequest)
		return
	}
	tol, err := strconv.ParseFloat(query.Get("tolerance"), 64)
	if err != nil {
		tol = 1 // ~1m
	}
	// Strip the positional params so only user-supplied extras are echoed.
	query.Del("lat")
	query.Del("lon")
	query.Del("tolerance")
	c := Coordinate{lat: lat, lon: lon}
	name := params.ByName("name")
	matchs, err := fences.Search(name, c, tol)
	if err != nil {
		http.Error(w, "Error search fence "+name, http.StatusBadRequest)
		return
	}
	// Note: this local shadows the package-level fences index below.
	fences := make([]Properties, len(matchs))
	for i, fence := range matchs {
		fences[i] = fence.Properties
	}
	props := make(map[string]interface{}, len(query))
	for k := range query {
		props[k] = query.Get(k)
	}
	respond(w, *newResponseMessage(c, props, fences))
}
// getRoadSearch finds which roads under :name match the lat/lon from the
// query string (within an optional tolerance, default ~1m) and responds with
// their properties plus any remaining query params echoed back.
func getRoadSearch(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	query := r.URL.Query()
	lat, err := strconv.ParseFloat(query.Get("lat"), 64)
	if err != nil {
		http.Error(w, "Query param 'lat' required as float", http.StatusBadRequest)
		return
	}
	lon, err := strconv.ParseFloat(query.Get("lon"), 64)
	if err != nil {
		http.Error(w, "Query param 'lon' required as float", http.StatusBadRequest)
		return
	}
	tol, err := strconv.ParseFloat(query.Get("tolerance"), 64)
	if err != nil {
		tol = 1 // ~1m
	}
	// Strip the positional params so only user-supplied extras are echoed.
	query.Del("lat")
	query.Del("lon")
	query.Del("tolerance")
	c := Coordinate{lat: lat, lon: lon}
	name := params.ByName("name")
	matchs, err := roads.Search(name, c, tol)
	if err != nil {
		http.Error(w, "Error search road "+name, http.StatusBadRequest)
		return
	}
	// Note: this local shadows the package-level roads index below.
	roads := make([]Properties, len(matchs))
	for i, road := range matchs {
		roads[i] = road.Properties
	}
	props := make(map[string]interface{}, len(query))
	for k := range query {
		props[k] = query.Get(k)
	}
	respond(w, *newResponseMessage(c, props, roads))
}
// writeJson marshals msg and writes it to w, returning the first error from
// either step. The original overwrote (and thus discarded) the Marshal error
// before it was ever checked.
func writeJson(w io.Writer, msg interface{}) error {
	buf, err := json.Marshal(&msg)
	if err != nil {
		return err
	}
	_, err = w.Write(buf)
	return err
}
// profiler mounts the net/http/pprof handlers on the router.
func profiler(router *httprouter.Router) {
	pages := map[string]http.HandlerFunc{
		"/debug/pprof/":        pprof.Index,
		"/debug/pprof/cmdline": pprof.Cmdline,
		"/debug/pprof/profile": pprof.Profile,
		"/debug/pprof/symbol":  pprof.Symbol,
	}
	for path, h := range pages {
		router.HandlerFunc("GET", path, h)
		router.HandlerFunc("POST", path, h)
	}
	for _, profile := range []string{"heap", "block", "goroutine", "threadcreate"} {
		router.Handler("GET", "/debug/pprof/"+profile, pprof.Handler(profile))
	}
}
|
// imgListDownLoad
package DaeseongLib
import (
"fmt"
_ "io"
"io/ioutil"
"net/http"
"os"
"regexp"
"strings"
)
var (
	// IMGS accumulates the page URLs that DownloadImgTest scrapes for images.
	IMGS []string
)
// downloadbytes performs a GET against sUrl with a fixed User-Agent and
// returns the full response body.
func downloadbytes(sUrl string) ([]byte, error) {
	req, err := http.NewRequest("GET", sUrl, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("User-Agent", "Daeseonglib")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	bByte, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return bByte, nil
}
func writeImgFile(sFileName string, bByte []byte) bool {
err := ioutil.WriteFile(sFileName, bByte, 0644)
if err != nil {
return false
}
return true
}
// DownloadImgFile fetches the image at sUrl and writes it to sFilePath.
// Stray characters ([ ] ") left over from JSON-ish sources are stripped
// from the URL first. An error is returned when either the download or
// the file write fails.
func DownloadImgFile(sUrl string, sFilePath string) (err error) {
	Replacer := strings.NewReplacer(
		"[", "",
		"]", "",
		"\"", "",
	)
	sUrl = Replacer.Replace(sUrl)
	bytes, err := downloadbytes(sUrl)
	if err != nil {
		return err
	}
	// Previously a failed write was silently ignored and nil returned;
	// surface the failure instead.
	if !writeImgFile(sFilePath, bytes) {
		return fmt.Errorf("writing image to %s failed", sFilePath)
	}
	return nil
}
func createFolder(sfolder string) {
if _, err := os.Stat(sfolder); os.IsNotExist(err) {
os.Mkdir(sfolder, 0777)
}
}
// getFilename extracts the final path segment of linkUrl and strips any
// double quotes from it.
func getFilename(linkUrl string) (sName string) {
	base := linkUrl[strings.LastIndex(linkUrl, "/")+1:]
	return strings.Replace(base, "\"", "", -1)
}
// imgList scans sContent for <img ... src="http(s)://..."> tags and
// downloads each referenced image into D:\Daeseong concurrently.
// Unlike the previous version, it waits for every download goroutine to
// finish before returning instead of leaking them.
func imgList(sContent string) {
	createFolder("D:\\Daeseong")
	var img = regexp.MustCompile(`"(.*?)"`)
	var src = regexp.MustCompile(`src="(.*?)"`)
	var href = regexp.MustCompile(`<img(.*?)src="((http|https)://)(.*?)"`)
	match := href.FindAllString(sContent, -1)
	if match == nil {
		return
	}
	var wg sync.WaitGroup
	for _, val := range match {
		html := src.FindAllString(val, -1)
		linkUrl := img.FindAllString(html[0], -1)
		filename := getFilename(linkUrl[0])
		saveFile := fmt.Sprintf("D:\\Daeseong\\%s", filename)
		wg.Add(1)
		go func(url, dst string) {
			defer wg.Done()
			// Best effort: download errors are intentionally ignored.
			DownloadImgFile(url, dst)
		}(linkUrl[0], saveFile)
	}
	wg.Wait()
}
// DownloadImgTest fetches each page URL in IMGS and downloads the
// images referenced by its HTML, printing "complete" when done.
func DownloadImgTest() {
	IMGS = append(IMGS, "http://www.naver.com")
	IMGS = append(IMGS, "http://www.daum.net")
	for _, url := range IMGS {
		if !strings.HasPrefix(url, "http") {
			url = "http://" + url
		}
		body, ok := fetchPage(url)
		if !ok {
			continue
		}
		imgList(body)
	}
	fmt.Println("complete")
}

// fetchPage GETs url and returns its body; ok is false on any network,
// status, or read error. Moving this into a helper makes the deferred
// Body.Close run once per request instead of accumulating until the
// caller returns, and also closes the body on non-200 responses (the
// old loop leaked the connection in both cases).
func fetchPage(url string) (string, bool) {
	res, err := http.Get(url)
	if err != nil {
		return "", false
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return "", false
	}
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", false
	}
	return string(body), true
}
/*
func main() {
DownloadImgTest()
}
*/
|
package easy349
// intersection returns the distinct values present in both nums1 and
// nums2. The result contains no duplicates; ordering is unspecified.
func intersection(nums1 []int, nums2 []int) []int {
	seen := make(map[int]struct{}, len(nums1))
	for _, n := range nums1 {
		seen[n] = struct{}{}
	}
	result := []int{}
	for _, n := range nums2 {
		if _, found := seen[n]; found {
			result = append(result, n)
			// Remove so duplicates in nums2 are reported once.
			delete(seen, n)
		}
	}
	return result
}
|
package _1_Two_Sum
// twoSum returns the indices of two entries of nums that sum to target.
// It delegates to the hash-map implementation; twoSumForce is the
// brute-force alternative kept for reference.
func twoSum(nums []int, target int) []int {
	return twoSumHash(nums, target)
}
// twoSumHash finds two distinct indices i, j with nums[i]+nums[j] ==
// target using a value-to-index map (two passes). For duplicates the
// map keeps the last index of each value. Returns an empty slice when
// no pair exists.
func twoSumHash(nums []int, target int) []int {
	indexOf := make(map[int]int, len(nums))
	for i, v := range nums {
		indexOf[v] = i
	}
	for i, v := range nums {
		j, ok := indexOf[target-v]
		if ok && j != i {
			return []int{i, j}
		}
	}
	return []int{}
}
// twoSumForce is the O(n^2) brute-force variant: it tries every pair
// (i, j) with i < j in order and returns the first whose values sum to
// target, or an empty slice when none matches.
func twoSumForce(nums []int, target int) []int {
	ret := []int{}
	for i := 0; i < len(nums); i++ {
		for j := i + 1; j < len(nums); j++ {
			if nums[i]+nums[j] == target {
				return append(ret, i, j)
			}
		}
	}
	return ret
}
|
package helpers
import "net/url"
/* IsValidUrl reports whether toTest is a well-formed absolute URL.
The string must both parse as a request URI and yield a non-empty
scheme and host, e.g. `http://www.domain-address.com`.
*/
func IsValidUrl(toTest string) bool {
	if _, err := url.ParseRequestURI(toTest); err != nil {
		return false
	}
	parsed, err := url.Parse(toTest)
	return err == nil && parsed.Scheme != "" && parsed.Host != ""
}
|
package syscallx
/* This is the source file for msync_*.go, to regenerate run
./generate
*/
//sys Msync(b []byte, flags int) (err error)
|
package query
import (
"fmt"
"sort"
"core"
)
// ReverseDeps prints, for each input label, every target that depends
// upon it (including packages that subinclude it), one per line in
// sorted order. The input labels themselves are excluded.
func ReverseDeps(graph *core.BuildGraph, labels []core.BuildLabel) {
	revdeps := make(map[core.BuildLabel]struct{})
	// Direct reverse dependencies, collapsed onto parent targets where
	// one exists.
	for _, label := range labels {
		pkg := graph.PackageOrDie(label.PackageName)
		for _, child := range pkg.AllChildren(graph.TargetOrDie(label)) {
			for _, dep := range graph.ReverseDependencies(child) {
				if parent := dep.Parent(graph); parent != nil {
					revdeps[parent.Label] = struct{}{}
				} else {
					revdeps[dep.Label] = struct{}{}
				}
			}
		}
	}
	// Packages that subinclude any of the labels depend on them too.
	for _, pkg := range graph.PackageMap() {
		for _, label := range labels {
			if pkg.HasSubinclude(label) {
				revdeps[core.BuildLabel{PackageName: pkg.Name, Name: "all"}] = struct{}{}
			}
		}
	}
	// Don't report the inputs themselves.
	for _, label := range labels {
		delete(revdeps, label)
	}
	sorted := make(core.BuildLabels, 0, len(revdeps))
	for label := range revdeps {
		sorted = append(sorted, label)
	}
	sort.Sort(sorted)
	for _, label := range sorted {
		fmt.Printf("%s\n", label)
	}
}
|
package fzb
import (
"testing"
)
// Test_Fzb checks that a minimal Fzb with only its title set can be
// serialized to XML without error.
func Test_Fzb(t *testing.T) {
	fzb := NewFzb()
	fzb.Title = "test"
	xmlBytes, err := fzb.ParseXML()
	if err != nil {
		t.Error(err)
	}
	t.Log(string(xmlBytes))
}
|
package main
/*
* @lc app=leetcode id=236 lang=golang
*
* [236] Lowest Common Ancestor of a Binary Tree
*/
/**
* Definition for TreeNode.
* type TreeNode struct {
* Val int
 * Left *TreeNode
 * Right *TreeNode
* }
*/
// lowestCommonAncestor_PLEASE_REMOVE_THIS finds the lowest common
// ancestor of p and q in the tree rooted at root by materializing the
// root-to-node path for each node and comparing the paths.
// Assumes node values are unique; returns nil if either node is absent.
func lowestCommonAncestor_PLEASE_REMOVE_THIS(root, p, q *TreeNode) *TreeNode {
	pathP := make([]*TreeNode, 0)
	pathQ := make([]*TreeNode, 0)
	pathToVal(&pathP, root, p.Val)
	pathToVal(&pathQ, root, q.Val)
	min := min236(len(pathP), len(pathQ))
	// Walk both paths in lockstep: the LCA is the last node before they
	// diverge, or the end of the shorter path when one node is an
	// ancestor of the other.
	for i := 0; i < min; i++ {
		if i == min-1 {
			// Shorter path exhausted without divergence: its endpoint
			// is the ancestor.
			return pathP[min-1]
		} else if pathP[i+1] != pathQ[i+1] {
			// Paths diverge at i+1, so node i is the LCA.
			return pathP[i]
		}
	}
	return nil
}
// pathToVal appends to *path the nodes on the route from root to the
// node holding val, returning true when found. On failure the
// speculatively appended node is backtracked and false is returned.
func pathToVal(path *[]*TreeNode, root *TreeNode, val int) bool {
	if root == nil {
		return false
	}
	*path = append(*path, root)
	if root.Val == val || pathToVal(path, root.Left, val) || pathToVal(path, root.Right, val) {
		return true
	}
	// val is not in this subtree: undo the append.
	*path = (*path)[:len(*path)-1]
	return false
}
// min236 returns the smaller of a and b.
func min236(a, b int) int {
	if a > b {
		return b
	}
	return a
}
|
package main
// Config is the application configuration loaded from a TOML file.
// NOTE(review): bindAddr and mocksFolder are unexported, so a TOML
// decoder cannot populate them despite the struct tags — only Routes
// can be read from a file. Confirm whether these tags are intended;
// exporting the fields would change the in-package API.
type Config struct {
	bindAddr string `toml:"bind_addr"`
	mocksFolder string `toml:"mocks_folder"`
	Routes []Route `toml:"routes"`
}
// Route maps a request path to the mock file served for it.
type Route struct {
	Path string `toml:"path"`
	Filename string `toml:"filename"`
}
// NewConfig returns a Config with default bind address ":8080", mocks
// folder "mocks", and no routes.
func NewConfig() *Config {
	cfg := &Config{
		bindAddr:    ":8080",
		mocksFolder: "mocks",
	}
	cfg.Routes = []Route{}
	return cfg
}
|
package messagebird
import (
"testing"
"time"
)
var voiceMessageObject []byte = []byte(`{
"id":"430c44a0354aab7ac9553f7a49907463",
"href":"https:\/\/rest.messagebird.com\/voicemessages\/430c44a0354aab7ac9553f7a49907463",
"originator":"MessageBird",
"body":"Hello World",
"reference":null,
"language":"en-gb",
"voice":"female",
"repeat":1,
"ifMachine":"continue",
"scheduledDatetime":null,
"createdDatetime":"2015-01-05T16:11:24+00:00",
"recipients":{
"totalCount":1,
"totalSentCount":1,
"totalDeliveredCount":0,
"totalDeliveryFailedCount":0,
"items":[
{
"recipient":31612345678,
"status":"calling",
"statusDatetime":"2015-01-05T16:11:24+00:00"
}
]
}
}`)
// TestNewVoiceMessage checks that a voice message created with default
// parameters is decoded correctly from the canned API response.
func TestNewVoiceMessage(t *testing.T) {
	SetServerResponse(200, voiceMessageObject)
	message, err := mbClient.NewVoiceMessage([]string{"31612345678"}, "Hello World", nil)
	if err != nil {
		t.Fatalf("Didn't expect error while creating a new voice message: %s", err)
	}
	if message.Id != "430c44a0354aab7ac9553f7a49907463" {
		t.Errorf("Unexpected voice message id: %s", message.Id)
	}
	if message.HRef != "https://rest.messagebird.com/voicemessages/430c44a0354aab7ac9553f7a49907463" {
		t.Errorf("Unexpected voice message href: %s", message.HRef)
	}
	if message.Originator != "MessageBird" {
		t.Errorf("Unexpected voice message originator: %s", message.Originator)
	}
	if message.Body != "Hello World" {
		t.Errorf("Unexpected voice message body: %s", message.Body)
	}
	if message.Reference != "" {
		t.Errorf("Unexpected voice message reference: %s", message.Reference)
	}
	if message.Language != "en-gb" {
		t.Errorf("Unexpected voice message language: %s", message.Language)
	}
	if message.Voice != "female" {
		t.Errorf("Unexpected voice message voice: %s", message.Voice)
	}
	if message.Repeat != 1 {
		t.Errorf("Unexpected voice message repeat: %d", message.Repeat)
	}
	if message.IfMachine != "continue" {
		// IfMachine is a string; the old %d verb printed garbage here.
		t.Errorf("Unexpected voice message ifmachine: %s", message.IfMachine)
	}
	if message.ScheduledDatetime != nil {
		t.Errorf("Unexpected voice message scheduled datetime: %s", message.ScheduledDatetime)
	}
	if message.CreatedDatetime == nil || message.CreatedDatetime.Format(time.RFC3339) != "2015-01-05T16:11:24Z" {
		t.Errorf("Unexpected voice message created datetime: %s", message.CreatedDatetime)
	}
	if message.Recipients.TotalCount != 1 {
		t.Fatalf("Unexpected number of total count: %d", message.Recipients.TotalCount)
	}
	if message.Recipients.TotalSentCount != 1 {
		t.Errorf("Unexpected number of total sent count: %d", message.Recipients.TotalSentCount)
	}
	if message.Recipients.Items[0].Recipient != 31612345678 {
		t.Errorf("Unexpected voice message recipient: %d", message.Recipients.Items[0].Recipient)
	}
	if message.Recipients.Items[0].Status != "calling" {
		t.Errorf("Unexpected voice message recipient status: %s", message.Recipients.Items[0].Status)
	}
	if message.Recipients.Items[0].StatusDatetime == nil || message.Recipients.Items[0].StatusDatetime.Format(time.RFC3339) != "2015-01-05T16:11:24Z" {
		t.Errorf("Unexpected datetime status for voice message recipient: %s", message.Recipients.Items[0].StatusDatetime.Format(time.RFC3339))
	}
	if len(message.Errors) != 0 {
		t.Errorf("Unexpected number of errors in voice message: %d", len(message.Errors))
	}
}
var voiceMessageObjectWithParams []byte = []byte(`{
"id":"430c44a0354aab7ac9553f7a49907463",
"href":"https:\/\/rest.messagebird.com\/voicemessages\/430c44a0354aab7ac9553f7a49907463",
"body":"Hello World",
"reference":"MyReference",
"language":"en-gb",
"voice":"male",
"repeat":5,
"ifMachine":"hangup",
"scheduledDatetime":null,
"createdDatetime":"2015-01-05T16:11:24+00:00",
"recipients":{
"totalCount":1,
"totalSentCount":1,
"totalDeliveredCount":0,
"totalDeliveryFailedCount":0,
"items":[
{
"recipient":31612345678,
"status":"calling",
"statusDatetime":"2015-01-05T16:11:24+00:00"
}
]
}
}`)
// TestNewVoiceMessageWithParams verifies that explicitly supplied voice
// message parameters are echoed back on the created message.
func TestNewVoiceMessageWithParams(t *testing.T) {
	SetServerResponse(200, voiceMessageObjectWithParams)
	params := &VoiceMessageParams{
		Reference: "MyReference",
		Voice:     "male",
		Repeat:    5,
		IfMachine: "hangup",
	}
	message, err := mbClient.NewVoiceMessage([]string{"31612345678"}, "Hello World", params)
	if err != nil {
		t.Fatalf("Didn't expect error while creating a new voice message: %s", err)
	}
	if got := message.Reference; got != "MyReference" {
		t.Errorf("Unexpected voice message reference: %s", got)
	}
	if got := message.Voice; got != "male" {
		t.Errorf("Unexpected voice message voice: %s", got)
	}
	if got := message.Repeat; got != 5 {
		t.Errorf("Unexpected voice message repeat: %d", got)
	}
	if got := message.IfMachine; got != "hangup" {
		t.Errorf("Unexpected voice message ifmachine: %s", got)
	}
}
var voiceMessageObjectWithCreatedDatetime []byte = []byte(`{
"id":"430c44a0354aab7ac9553f7a49907463",
"href":"https:\/\/rest.messagebird.com\/voicemessages\/430c44a0354aab7ac9553f7a49907463",
"body":"Hello World",
"reference":null,
"language":"en-gb",
"voice":"female",
"repeat":1,
"ifMachine":"continue",
"scheduledDatetime":"2015-01-05T16:12:24+00:00",
"createdDatetime":"2015-01-05T16:11:24+00:00",
"recipients":{
"totalCount":1,
"totalSentCount":0,
"totalDeliveredCount":0,
"totalDeliveryFailedCount":0,
"items":[
{
"recipient":31612345678,
"status":"scheduled",
"statusDatetime":null
}
]
}
}`)
// TestNewVoiceMessageWithScheduledDatetime checks that a scheduled
// voice message round-trips its scheduled datetime and recipient state.
func TestNewVoiceMessageWithScheduledDatetime(t *testing.T) {
	SetServerResponse(200, voiceMessageObjectWithCreatedDatetime)
	// Fail loudly on a bad fixture timestamp instead of silently
	// comparing against the zero time (the error was discarded before).
	scheduledDatetime, err := time.Parse(time.RFC3339, "2015-01-05T16:12:24+00:00")
	if err != nil {
		t.Fatalf("Couldn't parse fixture datetime: %s", err)
	}
	params := &VoiceMessageParams{ScheduledDatetime: scheduledDatetime}
	message, err := mbClient.NewVoiceMessage([]string{"31612345678"}, "Hello World", params)
	if err != nil {
		t.Fatalf("Didn't expect error while creating a new voice message: %s", err)
	}
	if message.ScheduledDatetime.Format(time.RFC3339) != scheduledDatetime.Format(time.RFC3339) {
		t.Errorf("Unexpected scheduled datetime: %s", message.ScheduledDatetime.Format(time.RFC3339))
	}
	if message.Recipients.TotalCount != 1 {
		t.Fatalf("Unexpected number of total count: %d", message.Recipients.TotalCount)
	}
	if message.Recipients.TotalSentCount != 0 {
		t.Errorf("Unexpected number of total sent count: %d", message.Recipients.TotalSentCount)
	}
	if message.Recipients.Items[0].Recipient != 31612345678 {
		t.Errorf("Unexpected voice message recipient: %d", message.Recipients.Items[0].Recipient)
	}
	if message.Recipients.Items[0].Status != "scheduled" {
		t.Errorf("Unexpected voice message recipient status: %s", message.Recipients.Items[0].Status)
	}
}
|
package main
import (
"context"
"fmt"
"strconv"
"time"
"example.com/m/global"
"github.com/go-redis/cache/v8"
)
// func rClient() *redis.Client {
// client := redis.NewClient(&redis.Options{
// Addr: "localhost:6379",
// })
// return client
// }
// func ping(client *redis.Client) error {
// pong, err := client.Ping().Result()
// if err != nil {
// return err
// }
// // Output: PONG <nil>
// fmt.Println(pong)
// return nil
// }
// func set(client *redis.Client) error {
// err := client.Set("name", "43636", 0).Err()
// if err != nil {
// return err
// }
// err = client.Set("country", "Philippines", 0).Err()
// if err != nil {
// return err
// }
// return nil
// }
// func get(client *redis.Client) error {
// nameVal, err := client.Get("name").Result()
// if err != nil {
// return (err)
// }
// fmt.Println("name", nameVal)
// countryVal, err := client.Get("country").Result()
// if err == redis.Nil {
// fmt.Println("no value found")
// } else if err != nil {
// panic(err)
// } else {
// fmt.Println("country", countryVal)
// }
// return nil
// }
// func main() {
// // creates a client
// client := rClient()
// // check connection status
// err := ping(client)
// if err != nil {
// fmt.Println(err)
// }
// // Using the SET command to set Key-value pair
// err = set(client)
// if err != nil {
// fmt.Println(err)
// }
// // Using the GET command to get values from keys
// err = get(client)
// if err != nil {
// fmt.Println(err)
// }
// }
// Object is a sample payload type used to exercise the redis cache.
type Object struct {
	Str string
	Num int
}
// mainx demonstrates storing and retrieving a []StatusDetail in the
// shared redis cache, then prints a sample composite cache key.
func mainx() {
	ctx := context.TODO()
	key := "twp"
	var listStatus []global.StatusDetail
	listStatus = append(listStatus, global.StatusDetail{
		Status: "On",
		Time:   time.Now(),
	})
	// Cache the slice for 20 seconds. (The stray ";" that split this
	// if-statement across lines has been folded back per gofmt.)
	if err := global.MyRediscache.Set(&cache.Item{
		Ctx:   ctx,
		Key:   key,
		Value: listStatus,
		TTL:   20 * time.Second,
	}); err != nil {
		panic(err)
	}
	var wanted []global.StatusDetail
	if err := global.MyRediscache.Get(ctx, key, &wanted); err != nil {
		// Previously a cache miss/error was silently ignored.
		fmt.Println("cache get failed:", err)
	} else {
		fmt.Println(wanted)
	}
	keye := "index_" + strconv.Itoa(4) + "_" + strconv.Itoa(4)
	fmt.Println(keye)
}
|
package limiter
import (
"log"
"net"
"net/http"
"strconv"
"strings"
"time"
"github.com/mash/go-limiter/adaptor"
)
const (
	// forwardedForHeader is the request header consulted for the client
	// IP address when the service sits behind a proxy.
	forwardedForHeader = "X-FORWARDED-FOR"
)
type Quota struct {
Limit uint64
Within time.Duration
}
func (q Quota) ResetUnix(now time.Time) int64 {
seconds := now.Unix()
within := int64(q.Within.Seconds())
return (seconds/within + 1) * within
}
// Result captures the outcome of a single rate-limit check.
type Result struct {
	// Denied reports whether the request exceeded the quota.
	Denied bool
	// ResetUnix is when the current window ends (unix seconds).
	ResetUnix int64
	// Remaining is the number of requests left in the window after this
	// one (0 when denied).
	Remaining uint64
	// Identifier is the client identity the check was keyed on.
	Identifier string
	// Counter is the request count observed for this window.
	Counter uint64
}
// Limiter is a redis-backed HTTP rate-limiting middleware. The three
// exported function fields allow callers to customize how a client is
// identified and how errors and denials are reported.
type Limiter struct {
	quota Quota
	redis adaptor.RedisPool
	keyPrefix, keyDelimiter string
	// Identify extracts a client identifier from the request; an empty
	// identifier bypasses limiting.
	Identify func(req *http.Request) (string, error)
	// ErrorHandler responds when identification or the redis check fails.
	ErrorHandler func(w http.ResponseWriter, req *http.Request, err error)
	// DeniedHandler responds when the request is over quota.
	DeniedHandler func(w http.ResponseWriter, req *http.Request, result Result)
}
// NewLimiter builds a Limiter for quota q backed by the given redis
// pool, wired with the default identifier, error, and denial handlers.
func NewLimiter(q Quota, redis adaptor.RedisPool) Limiter {
	l := Limiter{
		quota:        q,
		redis:        redis,
		keyPrefix:    "limiter",
		keyDelimiter: "-",
	}
	l.Identify = IPIdentify
	l.ErrorHandler = DefaultErrorHandler
	l.DeniedHandler = DefaultDeniedHandler
	return l
}
// Handle wraps next with rate limiting: identified requests are
// counted, given X-Rate-Limit-* headers, and rejected via DeniedHandler
// once over quota. Requests with an empty identifier bypass the
// limiter entirely.
func (l Limiter) Handle(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		identifier, err := l.Identify(req)
		if err != nil {
			l.ErrorHandler(w, req, err)
			return
		}
		if identifier == "" {
			next.ServeHTTP(w, req)
			return
		}
		client := l.borrow()
		defer client.Close()
		result, err := l.CheckLimit(identifier, client)
		if err != nil {
			l.ErrorHandler(w, req, err)
			return
		}
		l.SetRateLimitHeaders(w, *result)
		if result.Denied {
			l.DeniedHandler(w, req, *result)
			return
		}
		next.ServeHTTP(w, req)
	})
}
// CheckLimit reads the request counter for identifier in the current
// time window and, when under quota, increments it. The returned
// Result says whether the request should be denied, plus the data
// needed for the rate-limit response headers.
func (l Limiter) CheckLimit(identifier string, client adaptor.RedisClient) (*Result, error) {
	now := time.Now()
	key := l.Key(now, identifier)
	counter, err := client.Get(key)
	if err != nil {
		return nil, err
	}
	result := Result{
		ResetUnix:  l.quota.ResetUnix(now),
		Identifier: identifier,
		Counter:    counter,
	}
	if counter >= l.quota.Limit {
		result.Denied = true
		result.Remaining = 0
		return &result, nil
	}
	// Under quota: count this request; the key expires with the window.
	if err := client.Incrx(key, int(l.quota.Within.Seconds())); err != nil {
		return nil, err
	}
	result.Remaining = l.quota.Limit - counter - 1
	return &result, nil
}
// Key builds the redis key for identifier in the window containing now:
// "<prefix><delim><windowIndex><delim><identifier>".
func (l Limiter) Key(now time.Time, identifier string) string {
	window := now.Unix() / int64(l.quota.Within.Seconds())
	parts := []string{l.keyPrefix, strconv.FormatInt(window, 10), identifier}
	return strings.Join(parts, l.keyDelimiter)
}
// SetRateLimitHeaders writes the X-Rate-Limit-{Limit,Reset,Remaining}
// response headers from the quota and result.
func (l Limiter) SetRateLimitHeaders(w http.ResponseWriter, result Result) {
	h := w.Header()
	h.Set("X-Rate-Limit-Limit", strconv.FormatUint(l.quota.Limit, 10))
	h.Set("X-Rate-Limit-Reset", strconv.FormatInt(result.ResetUnix, 10))
	h.Set("X-Rate-Limit-Remaining", strconv.FormatUint(result.Remaining, 10))
}
// borrow checks a redis client out of the pool; callers must Close it.
func (l Limiter) borrow() adaptor.RedisClient {
	return l.redis.Borrow()
}
func DefaultErrorHandler(w http.ResponseWriter, req *http.Request, err error) {
log.Println(err)
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
}
// DefaultDeniedHandler replies with 429 Too Many Requests. The result
// argument is unused here but is part of the DeniedHandler contract.
func DefaultDeniedHandler(w http.ResponseWriter, req *http.Request, result Result) {
	// Use the named constant rather than the magic number 429.
	http.Error(w, "Too Many Requests", http.StatusTooManyRequests)
}
// IPIdentify derives a client identifier from the request: the first
// valid IP in the X-FORWARDED-FOR header when present, otherwise the
// host portion of the connection's remote address.
func IPIdentify(req *http.Request) (string, error) {
	if forwardedFor := req.Header.Get(forwardedForHeader); forwardedFor != "" {
		// Behind multiple proxies the header is "client, proxy1, ...";
		// the first entry is the originating client. The old code fed
		// the whole header to ParseIP, so multi-proxy chains silently
		// fell back to RemoteAddr.
		first := strings.TrimSpace(strings.SplitN(forwardedFor, ",", 2)[0])
		if ipParsed := net.ParseIP(first); ipParsed != nil {
			return ipParsed.String(), nil
		}
	}
	ip, _, err := net.SplitHostPort(req.RemoteAddr)
	if err != nil {
		return "", err
	}
	return ip, nil
}
|
package library
import (
"errors"
"fmt"
"regexp"
"sort"
"github.com/devinmcgloin/sail/pkg/sketch"
"github.com/devinmcgloin/sail/pkg/sketch/accrew"
"github.com/devinmcgloin/sail/pkg/sketch/delaunay"
"github.com/devinmcgloin/sail/pkg/sketch/gradients"
"github.com/devinmcgloin/sail/pkg/sketch/harmonograph"
"github.com/devinmcgloin/sail/pkg/sketch/primitives"
"github.com/devinmcgloin/sail/pkg/sketch/sampling"
)
// options defines all the sketches that the system can render, keyed
// by "family/name" sketch ID.
var options = map[string]sketch.Renderable{
	"accrew/dot-clouds": accrew.DotCloud{},
	"accrew/disjoint-line-clouds": accrew.DisjointLineCloud{},
	"accrew/joint-line-clouds": accrew.JointLineCloud{},
	"accrew/dot-lines": accrew.DotLines{},
	"delaunay/ring": delaunay.Ring{},
	"delaunay/mesh": delaunay.Mesh{},
	"sampling/uniform-rectangle": sampling.UniformRectangleDot{},
	"sampling/radial-rectangle": sampling.RadialRectangleDot{},
	"sampling/dot-walk": sampling.DotWalk{},
	"primitive/line-coloring": primitives.LineColoring{},
	"primitive/bars": primitives.Bars{},
	"primitive/rotated-lines": primitives.RotatedLines{},
	"primitive/falling-rectangles": primitives.FallingRectangles{},
	"gradients/skyspace": gradients.Skyspace{},
	"harmonograph/single": harmonograph.Single{},
	"harmonograph/dual": harmonograph.Dual{},
	"harmonograph/variable": harmonograph.Variable{},
	"harmonograph/offset": harmonograph.Offset{},
}
// Lookup returns the sketch registered under sketchID, or an error when
// no such sketch exists.
func Lookup(sketchID string) (sketch.Renderable, error) {
	if s, ok := options[sketchID]; ok {
		return s, nil
	}
	return nil, errors.New("invalid sketch ID")
}
// Exists reports whether a sketch with the given ID is registered.
func Exists(sketchID string) bool {
	_, ok := options[sketchID]
	return ok
}
// List prints all available sketch IDs matching the given regular
// expression, one per line in sorted order.
func List(regex string) {
	// Compile once instead of recompiling for every sketch ID; this
	// also reports an invalid pattern once rather than once per sketch.
	re, err := regexp.Compile(regex)
	if err != nil {
		fmt.Printf("invalid pattern %q: %s\n", regex, err)
		return
	}
	sketchIDs := make([]string, 0, len(options))
	for sketchID := range options {
		sketchIDs = append(sketchIDs, sketchID)
	}
	sort.Strings(sketchIDs)
	for _, sketchID := range sketchIDs {
		if re.MatchString(sketchID) {
			fmt.Printf("%s\n", sketchID)
		}
	}
}
|
package pointer
import (
"reflect"
"time"
)
// New converts value to pointer: it returns value unchanged when it is
// already a pointer, otherwise a pointer to a copy of value.
func New(value interface{}) interface{} {
	v := reflect.ValueOf(value)
	if v.Kind() == reflect.Ptr {
		return value
	}
	ptr := reflect.New(v.Type())
	ptr.Elem().Set(v)
	return ptr.Interface()
}
// String converts string to pointer
func String(value string) *string {
return New(value).(*string)
}
// Int converts int to pointer
func Int(value int) *int {
return New(value).(*int)
}
// Uint converts uint to pointer
func Uint(value uint) *uint {
return New(value).(*uint)
}
// Float converts float to pointer
func Float(value float64) *float64 {
return New(value).(*float64)
}
// Time converts time to pointer
func Time(value time.Time) *time.Time {
return New(value).(*time.Time)
}
|
package web_dao
import (
"2021/yunsongcailu/yunsong_server/dial"
"2021/yunsongcailu/yunsong_server/web/web_model"
)
// ConsumerDao defines persistence operations for Consumers records.
type ConsumerDao interface {
	// QueryConsumerById looks up a user by primary key.
	QueryConsumerById(id int64) (consumer web_model.Consumers,err error)
	// QueryConsumerByEmail looks up a user by email address.
	QueryConsumerByEmail(email string) (consumer web_model.Consumers,err error)
	// QueryConsumerByPhone looks up a user by phone number.
	QueryConsumerByPhone(phone string) (consumer web_model.Consumers,err error)
	// InsertConsumer stores a new user and returns its generated ID.
	InsertConsumer(consumer web_model.Consumers) (id int64,err error)
	// UpdateConsumerIcon sets a user's avatar file path by ID.
	UpdateConsumerIcon(id int64,filePath string) (err error)
	// UpdataConsumerInfoById updates a user's profile, keyed on its Id
	// field. (The "Updata" typo is part of the public interface.)
	UpdataConsumerInfoById(consumer web_model.Consumers) (err error)
}
// consumerDao is the xorm-backed implementation of ConsumerDao.
type consumerDao struct {}
// NewConsumerDao returns the default ConsumerDao implementation.
func NewConsumerDao() ConsumerDao {
	return &consumerDao{}
}
// QueryConsumerById looks up a user by primary key. When no row
// matches, consumer is left as the zero value.
func (cd *consumerDao) QueryConsumerById(id int64) (consumer web_model.Consumers,err error) {
	_,err = dial.DB.Where("id = ?",id).Get(&consumer)
	return
}
// QueryConsumerByEmail looks up a user by email address. When no row
// matches, consumer is left as the zero value.
func (cd *consumerDao) QueryConsumerByEmail(email string) (consumer web_model.Consumers,err error) {
	_,err = dial.DB.Where("email = ?",email).Get(&consumer)
	return
}
// QueryConsumerByPhone looks up a user by phone number. When no row
// matches, consumer is left as the zero value.
func (cd *consumerDao) QueryConsumerByPhone(phone string) (consumer web_model.Consumers,err error) {
	_,err = dial.DB.Where("phone = ?",phone).Get(&consumer)
	return
}
// InsertConsumer stores a new user and returns the number of affected
// rows as reported by the ORM.
func (cd *consumerDao) InsertConsumer(consumer web_model.Consumers) (id int64,err error) {
	return dial.DB.InsertOne(&consumer)
}
// UpdateConsumerIcon sets the avatar file path of the user with the
// given ID.
// NOTE(review): updating via a struct makes xorm write only non-zero
// fields, so clearing an icon with an empty filePath will not persist —
// confirm whether that matters to callers.
func (cd *consumerDao) UpdateConsumerIcon(id int64,filePath string) (err error) {
	newConsumer := new(web_model.Consumers)
	newConsumer.Icon = filePath
	_,err = dial.DB.Where("id = ?",id).Update(newConsumer)
	return
}
// UpdataConsumerInfoById updates the user's profile, keyed on
// consumer.Id. (The "Updata" typo is fixed by the ConsumerDao
// interface, so the method name must stay as-is.)
func (cd *consumerDao) UpdataConsumerInfoById(consumer web_model.Consumers) (err error) {
	_,err = dial.DB.Where("id = ?",consumer.Id).Update(consumer)
	return
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//331. Verify Preorder Serialization of a Binary Tree
//One way to serialize a binary tree is to use pre-order traversal. When we encounter a non-null node, we record the node's value. If it is a null node, we record using a sentinel value such as #.
// _9_
// / \
// 3 2
// / \ / \
// 4 1 # 6
// / \ / \ / \
//# # # # # #
//For example, the above binary tree can be serialized to the string "9,3,4,#,#,1,#,#,2,#,6,#,#", where # represents a null node.
//Given a string of comma separated values, verify whether it is a correct preorder traversal serialization of a binary tree. Find an algorithm without reconstructing the tree.
//Each comma separated value in the string must be either an integer or a character '#' representing null pointer.
//You may assume that the input format is always valid, for example it could never contain two consecutive commas such as "1,,3".
//Example 1:
//"9,3,4,#,#,1,#,#,2,#,6,#,#"
//Return true
//Example 2:
//"1,#"
//Return false
//Example 3:
//"9,#,#,1"
//Return false
//Credits:
//Special thanks to @dietpepsi for adding this problem and creating all test cases.
//func isValidSerialization(preorder string) bool {
//}
// Time Is Money
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.