text stringlengths 11 4.05M |
|---|
package usecase
import (
models "github.com/OopsMouse/arbitgo/models"
)
// Exchange abstracts the exchange operations used by the arbitrage use
// case: fee and balance queries, market metadata, order-book depth
// (polled and pushed), and order management.
type Exchange interface {
	// GetFee returns the exchange's trading fee rate.
	GetFee() float64
	// GetBalances returns the account balance for every asset.
	GetBalances() ([]*models.Balance, error)
	// GetQuotes returns the quote-currency codes supported by the exchange.
	GetQuotes() []string
	// GetSymbols returns all tradable symbols.
	GetSymbols() []models.Symbol
	// GetDepth fetches the current order-book depth for symbol.
	GetDepth(symbol models.Symbol) (*models.Depth, error)
	// GetDepthOnUpdate returns a channel delivering depth updates as they
	// arrive from the exchange.
	GetDepthOnUpdate() chan *models.Depth
	// SendOrder submits order to the exchange.
	SendOrder(order *models.Order) error
	// ConfirmOrder reports on a previously sent order; the float64 is
	// presumably the executed quantity — confirm with an implementation.
	ConfirmOrder(order *models.Order) (float64, error)
	// CancelOrder cancels a previously sent order.
	CancelOrder(order *models.Order) error
}
|
package three
import (
"fmt"
"os"
"regexp"
"strconv"
"strings"
)
// RunDay loads the puzzle input "three/<filename>.txt" and dispatches to
// the solver registered for part ("one" or "two").
func RunDay(filename string, part string) {
	file, err := os.ReadFile("three/" + filename + ".txt")
	if err != nil {
		// BUG FIX: the original discarded this error and silently solved an
		// empty input; fail loudly instead.
		panic(err)
	}
	lines := strings.Split(string(file), "\n")
	parts := make(map[string]func(lines []string))
	// NOTE(review): both keys map to partTwo; no partOne exists in this
	// file, so "one" intentionally(?) reuses the part-two solver — confirm.
	parts["one"] = partTwo
	parts["two"] = partTwo
	solver, ok := parts[part]
	if !ok {
		// Previously an unknown part called a nil func and panicked with an
		// opaque message; give a descriptive one.
		panic("unknown part: " + part)
	}
	solver(lines)
}
// claimPattern matches claim lines like "#123 @ 3,2: 5x4".
// PERF FIX: compiled once at package scope; the original recompiled the
// regexp on every parseLine call.
var claimPattern = regexp.MustCompile(`^#(\d+) @ (\d+),(\d+): (\d+)x(\d+)$`)

// parseLine extracts (id, left, top, width, height) from one claim line.
// Malformed input panics via the nil submatch slice, matching the
// original best-effort behavior; Atoi errors remain ignored because the
// regexp guarantees digit-only groups.
func parseLine(line string) (int, int, int, int, int) {
	matches := claimPattern.FindStringSubmatch(line)
	id, _ := strconv.Atoi(matches[1])
	left, _ := strconv.Atoi(matches[2])
	top, _ := strconv.Atoi(matches[3])
	width, _ := strconv.Atoi(matches[4])
	height, _ := strconv.Atoi(matches[5])
	return id, left, top, width, height
}
// partTwo solves both halves of the day-3 puzzle in one pass: it counts
// square inches claimed by two or more claims ("collisions") and prints
// the id of the one claim that overlaps no other (the "winner").
func partTwo(lines []string) {
	// grid maps an "x,y" coordinate to the id of the claim that occupies
	// it, or -1 once a second claim has touched the square.
	var grid = make(map[string]int)
	// intact[id] remains true only while claim id has overlapped nothing.
	// NOTE(review): assumes all claim ids are < 1500 — confirm against input.
	var intact = make([]bool, 1500)
	for _, line := range lines {
		var id, left, top, width, height = parseLine(line)
		intact[id] = true
		for x := left; x < left+width; x++ {
			for y := top; y < top+height; y++ {
				var key = fmt.Sprintf("%d,%d", x, y)
				var val, hasVal = grid[key]
				if hasVal {
					// Square already claimed: the current claim and (if still
					// recorded) the previous owner both lose intact status.
					intact[id] = false
					if val != -1 {
						intact[val] = false
						grid[key] = -1
					}
				} else {
					grid[key] = id
				}
			}
		}
	}
	// Squares marked -1 were claimed at least twice.
	var count = 0
	for _, value := range grid {
		if value < 0 {
			count++
		}
	}
	for index, value := range intact {
		if value {
			fmt.Printf("winner: %d\n", index)
		}
	}
	fmt.Printf("total collisions: %d\n", count)
}
|
package cbs
import (
"bytes"
"encoding/binary"
)
// uint16ToBytes serializes v (expected to be a uint16) into big-endian
// bytes by delegating to anyToBytes.
// There is better implementation! (e.g. binary.BigEndian.PutUint16,
// which avoids binary.Write's reflection.)
func uint16ToBytes(v interface{}) []byte {
	return anyToBytes(v)
}
// uint32ToBytes serializes v (expected to be a uint32) into big-endian
// bytes by delegating to anyToBytes.
func uint32ToBytes(v interface{}) []byte {
	return anyToBytes(v)
}
// anyToBytes encodes v into its big-endian binary representation.
// It panics when v is not a fixed-size value accepted by binary.Write;
// callers in this package only pass fixed-width unsigned integers.
func anyToBytes(v interface{}) []byte {
	var out bytes.Buffer
	if err := binary.Write(&out, binary.BigEndian, v); err != nil {
		panic(err)
	}
	return out.Bytes()
}
|
package main
import (
"fmt"
"time"
)
// main demonstrates a one-shot timer: it records the current time, blocks
// for two seconds on the timer's channel, and prints both instants.
func main() {
	timer1 := time.NewTimer(2 * time.Second)
	t1 := time.Now()
	fmt.Printf("t1: %v\n", t1)
	// Block until the timer fires; the received value is the fire time.
	t2 := <-timer1.C
	fmt.Printf("t2: %v\n", t2)
	// BUG FIX: the original ended with `time.After()` — time.After requires
	// a time.Duration argument, so the file did not compile. The call had
	// no observable purpose and was removed.
}
|
package util
import "testing"
// TestFileDoesNotExist verifies that FileExists reports false for a path
// that is effectively guaranteed not to exist.
func TestFileDoesNotExist(t *testing.T) {
	t.Parallel()
	if FileExists("i would be surprised to discover that this file exists") {
		t.Fail()
	}
}
|
package main
// Point is a two-dimensional integer coordinate. Its fields are
// unexported; construct instances with NewPoint.
type Point struct {
	x, y int
}
// Add returns a fresh Point holding the component-wise sum of point and
// another; neither operand is modified.
func (point *Point) Add(another *Point) *Point {
	sumX := point.x + another.x
	sumY := point.y + another.y
	return NewPoint(sumX, sumY)
}
// NewPoint constructs a Point at (x, y).
func NewPoint(x, y int) *Point {
	return &Point{x, y}
}
|
package document
import (
"fmt"
"io"
"sigs.k8s.io/kustomize/v3/k8sdeps/kunstruct"
"sigs.k8s.io/kustomize/v3/k8sdeps/transformer"
"sigs.k8s.io/kustomize/v3/k8sdeps/validator"
"sigs.k8s.io/kustomize/v3/pkg/fs"
"sigs.k8s.io/kustomize/v3/pkg/gvk"
"sigs.k8s.io/kustomize/v3/pkg/loader"
"sigs.k8s.io/kustomize/v3/pkg/plugins"
"sigs.k8s.io/kustomize/v3/pkg/resmap"
"sigs.k8s.io/kustomize/v3/pkg/resource"
"sigs.k8s.io/kustomize/v3/pkg/target"
"sigs.k8s.io/kustomize/v3/pkg/types"
utilyaml "opendev.org/airship/airshipctl/pkg/util/yaml"
)
// KustomizeBuildOptions contain the options for running a Kustomize build on a bundle
type KustomizeBuildOptions struct {
	// KustomizationPath is the root directory of the kustomization to build.
	KustomizationPath string
	// OutputPath is the destination for rendered output.
	OutputPath string
	// LoadRestrictor restricts which files the kustomize loader may read.
	LoadRestrictor loader.LoadRestrictorFunc
	// OutOrder presumably selects an output ordering; it is only ever set
	// to 0 in this file — confirm its semantics with the kustomize target.
	OutOrder int
}
// BundleFactory contains the objects within a bundle. It embeds the
// build options, the rendered kustomize resource map, and the filesystem
// used for loading, and implements the Bundle interface.
type BundleFactory struct {
	KustomizeBuildOptions
	resmap.ResMap
	fs.FileSystem
}
// Bundle interface provides the specification for a bundle implementation:
// accessors for the resource map, build options and filesystem, plus
// document lookup by selector, GVK, name, annotation, or label.
type Bundle interface {
	// Write renders every document in the bundle to out.
	Write(out io.Writer) error
	GetKustomizeResourceMap() resmap.ResMap
	SetKustomizeResourceMap(resmap.ResMap) error
	GetKustomizeBuildOptions() KustomizeBuildOptions
	SetKustomizeBuildOptions(KustomizeBuildOptions) error
	SetFileSystem(fs.FileSystem) error
	GetFileSystem() fs.FileSystem
	// Select returns the documents matching a kustomize selector.
	Select(selector types.Selector) ([]Document, error)
	// GetByGvk filters by group, version, kind (in that argument order).
	GetByGvk(string, string, string) ([]Document, error)
	// GetByName returns exactly one document; it is an error if zero or
	// more than one match.
	GetByName(string) (Document, error)
	GetByAnnotation(string) ([]Document, error)
	GetByLabel(string) ([]Document, error)
	GetAllDocuments() ([]Document, error)
}
// NewBundle is a convenience function to create a new bundle
// Over time, it will evolve to support allowing more control
// for kustomize plugins
func NewBundle(fSys fs.FileSystem, kustomizePath string, outputPath string) (Bundle, error) {
	options := KustomizeBuildOptions{
		KustomizationPath: kustomizePath,
		OutputPath:        outputPath,
		LoadRestrictor:    loader.RestrictionRootOnly,
		OutOrder:          0,
	}
	// init an empty bundle factory
	var bundle Bundle = &BundleFactory{}
	// set the fs and build options we will use; errors were previously
	// ignored here — propagate them so failures are not masked.
	if err := bundle.SetFileSystem(fSys); err != nil {
		return bundle, err
	}
	if err := bundle.SetKustomizeBuildOptions(options); err != nil {
		return bundle, err
	}
	// boiler plate to allow us to run Kustomize build
	uf := kunstruct.NewKunstructuredFactoryImpl()
	pf := transformer.NewFactoryImpl()
	rf := resmap.NewFactory(resource.NewFactory(uf), pf)
	v := validator.NewKustValidator()
	pluginConfig := plugins.DefaultPluginConfig()
	pl := plugins.NewLoader(pluginConfig, rf)
	ldr, err := loader.NewLoader(
		bundle.GetKustomizeBuildOptions().LoadRestrictor, v, bundle.GetKustomizeBuildOptions().KustomizationPath, fSys)
	if err != nil {
		return bundle, err
	}
	defer ldr.Cleanup()
	kt, err := target.NewKustTarget(ldr, rf, pf, pl)
	if err != nil {
		return bundle, err
	}
	// build a resource map of kustomize rendered objects
	m, err := kt.MakeCustomizedResMap()
	// BUG FIX: check the build error before storing the (possibly nil)
	// resource map; the original stored it unconditionally.
	if err != nil {
		return bundle, err
	}
	if err := bundle.SetKustomizeResourceMap(m); err != nil {
		return bundle, err
	}
	return bundle, nil
}
// GetKustomizeResourceMap returns a Kustomize Resource Map for this bundle
func (b *BundleFactory) GetKustomizeResourceMap() resmap.ResMap {
	return b.ResMap
}
// SetKustomizeResourceMap allows us to set the populated resource map for this bundle. In
// the future, it may modify it before saving it.
// It currently always returns nil.
func (b *BundleFactory) SetKustomizeResourceMap(r resmap.ResMap) error {
	b.ResMap = r
	return nil
}
// GetKustomizeBuildOptions returns the build options object used to generate the resource map
// for this bundle
func (b *BundleFactory) GetKustomizeBuildOptions() KustomizeBuildOptions {
	return b.KustomizeBuildOptions
}
// SetKustomizeBuildOptions sets the build options to be used for this bundle. In
// the future, it may perform some basic validations.
// It currently always returns nil.
func (b *BundleFactory) SetKustomizeBuildOptions(k KustomizeBuildOptions) error {
	b.KustomizeBuildOptions = k
	return nil
}
// SetFileSystem sets the filesystem that will be used by this bundle.
// It currently always returns nil.
func (b *BundleFactory) SetFileSystem(fSys fs.FileSystem) error {
	b.FileSystem = fSys
	return nil
}
// GetFileSystem gets the filesystem that will be used by this bundle
func (b *BundleFactory) GetFileSystem() fs.FileSystem {
	return b.FileSystem
}
// GetAllDocuments returns every document in this bundle, wrapping each
// resource in the resource map as a Document. It stops at the first
// wrapping failure, returning the documents built so far and the error.
func (b *BundleFactory) GetAllDocuments() ([]Document, error) {
	resources := b.ResMap.Resources()
	docSet := make([]Document, 0, len(resources))
	for _, res := range resources {
		doc, err := NewDocument(res)
		if err != nil {
			return docSet, err
		}
		docSet = append(docSet, doc)
	}
	return docSet, nil
}
// GetByName finds a document by name, error if more than one document found
// or if no documents found
func (b *BundleFactory) GetByName(name string) (Document, error) {
	resSet := []*resource.Resource{}
	for _, res := range b.ResMap.Resources() {
		if res.GetName() == name {
			resSet = append(resSet, res)
		}
	}
	// alanmeadows(TODO): improve this and other error potentials by
	// by adding strongly typed errors
	switch found := len(resSet); {
	case found == 0:
		// IDIOM FIX: Go error strings are lowercase and unpunctuated.
		return &DocumentFactory{}, fmt.Errorf("no documents found with name %s", name)
	case found > 1:
		return &DocumentFactory{}, fmt.Errorf("more than one document found with name %s", name)
	default:
		return NewDocument(resSet[0])
	}
}
// Select offers a direct interface to pass a Kustomize Selector to the
// bundle, returning the Documents that match the criteria.
func (b *BundleFactory) Select(selector types.Selector) ([]Document, error) {
	// Delegate the matching to kustomize's own selector logic.
	resources, err := b.ResMap.Select(selector)
	if err != nil {
		return []Document{}, err
	}
	// Wrap every matched resource in a bundle Document.
	docSet := make([]Document, 0, len(resources))
	for _, res := range resources {
		doc, err := NewDocument(res)
		if err != nil {
			return docSet, err
		}
		docSet = append(docSet, doc)
	}
	return docSet, nil
}
// GetByAnnotation is a convenience method to get documents for a particular annotation
func (b *BundleFactory) GetByAnnotation(annotation string) ([]Document, error) {
	// Construct kustomize annotation selector
	selector := types.Selector{AnnotationSelector: annotation}
	// pass it to the selector
	return b.Select(selector)
}
// GetByLabel is a convenience method to get documents for a particular label
func (b *BundleFactory) GetByLabel(label string) ([]Document, error) {
	// Construct kustomize label selector (the original comment said
	// "annotation", a copy-paste slip).
	selector := types.Selector{LabelSelector: label}
	// pass it to the selector
	return b.Select(selector)
}
// GetByGvk is a convenience method to get documents for a particular Gvk tuple
func (b *BundleFactory) GetByGvk(group, version, kind string) ([]Document, error) {
	// Construct kustomize gvk object
	g := gvk.Gvk{Group: group, Version: version, Kind: kind}
	// pass it to the selector
	selector := types.Selector{Gvk: g}
	return b.Select(selector)
}
// Write will write out the entire bundle resource map to out, one
// resource at a time, stopping at the first serialization failure.
func (b *BundleFactory) Write(out io.Writer) error {
	for _, res := range b.ResMap.Resources() {
		if err := utilyaml.WriteOut(out, res); err != nil {
			return err
		}
	}
	return nil
}
|
package bitbucket
import (
"testing"
)
// Test_Emails exercises the Emails service end-to-end against the shared
// package-level client: it ensures a dummy address exists (creating it on
// a failed Find), then verifies Find and List both succeed and List is
// non-empty. NOTE(review): this appears to be an integration test hitting
// a live API via `client`/`testUser` defined elsewhere in the package.
func Test_Emails(t *testing.T) {
	const dummyEmail = "dummy@localhost.com"
	// CREATE an email entry (only if Find says it does not already exist)
	if _, err := client.Emails.Find(testUser, dummyEmail); err != nil {
		_, cerr := client.Emails.Create(testUser, dummyEmail)
		if cerr != nil {
			t.Error(cerr)
			return
		}
	}
	// FIND the email
	_, err := client.Emails.Find(testUser, dummyEmail)
	if err != nil {
		t.Error(err)
	}
	// LIST the email addresses
	emails, err := client.Emails.List(testUser)
	if err != nil {
		t.Error(err)
	}
	if len(emails) == 0 {
		t.Errorf("List of emails returned empty set")
	}
}
|
package dbsrv
import (
"time"
"gopkg.in/doug-martin/goqu.v3"
"github.com/chanxuehong/wechat.v2/mch/core"
"github.com/empirefox/esecend/cerr"
"github.com/empirefox/esecend/front"
"github.com/empirefox/esecend/models"
"github.com/empirefox/esecend/wx"
"github.com/empirefox/reform"
"github.com/golang/glog"
)
// UserSetInfo persists the user-profile fields carried by payload via a
// straight database update.
func (dbs *DbService) UserSetInfo(payload *front.SetUserInfoPayload) error {
	return dbs.GetDB().Update(payload)
}
// FindUserByPhone looks up the single user whose Phone column matches
// phone, returning the underlying database error (including a not-found
// error) unchanged.
func (dbs *DbService) FindUserByPhone(phone string) (*models.User, error) {
	usr, err := dbs.GetDB().FindOneFrom(models.UserTable, "$Phone", phone)
	if err != nil {
		return nil, err
	}
	return usr.(*models.User), nil
}
// UserSavePhone stores phone on the user identified by id and returns
// the updated user record. Only the Phone column is written.
func (dbs *DbService) UserSavePhone(id uint, phone string) (*models.User, error) {
	db := dbs.GetDB()
	var user models.User
	if err := db.FindByPrimaryKeyTo(&user, id); err != nil {
		return nil, err
	}
	user.Phone = phone
	if err := db.UpdateColumns(&user, "Phone"); err != nil {
		return nil, err
	}
	return &user, nil
}
// UserSetPaykey replaces the payment key of the user identified by id,
// updating only the Paykey column.
func (dbs *DbService) UserSetPaykey(id uint, paykey []byte) error {
	data := models.User{
		ID:     id,
		Paykey: &paykey,
	}
	return dbs.GetDB().UpdateColumns(&data, "Paykey")
}
// UserWithdraw debits payload.Amount from tokUsr's ledger and transfers
// it to the user's WeChat wallet. It records a negative UserCash entry
// before calling out to WeChat; a transfer failure after the insert will
// leave the ledger entry in place — NOTE(review): confirm whether a
// surrounding transaction compensates for that.
func (dbs *DbService) UserWithdraw(tokUsr *models.User, payload *front.WithdrawPayload) (*front.UserCash, error) {
	// Minimum withdrawal of 100 (presumably the smallest currency unit).
	if payload.Amount < 100 {
		return nil, cerr.AmountLimit
	}
	db := dbs.GetDB()
	// The newest UserCash row carries the running balance; no rows at all
	// simply means a zero balance, so ErrNoRows is tolerated.
	var top front.UserCash
	ds := dbs.DS.Where(goqu.I("$UserID").Eq(tokUsr.ID)).Order(goqu.I("$CreatedAt").Desc())
	if err := db.DsSelectOneTo(&top, ds); err != nil && err != reform.ErrNoRows {
		return nil, err
	}
	if top.Balance < int(payload.Amount) {
		return nil, cerr.NotEnoughMoney
	}
	now := time.Now().Unix()
	// Ledger entry: withdrawals are stored as negative amounts.
	cash := &front.UserCash{
		UserID:    tokUsr.ID,
		CreatedAt: now,
		Type:      front.TUserCashWithdraw,
		Amount:    -int(payload.Amount),
		Balance:   top.Balance - int(payload.Amount),
	}
	err := db.Insert(cash)
	if err != nil {
		return nil, err
	}
	data := &wx.TransfersArgs{
		TradeNo: cash.TrackingNumber(),
		OpenID:  tokUsr.OpenId,
		Amount:  payload.Amount,
		Desc:    dbs.config.Money.WithdrawDesc,
		Ip:      payload.Ip,
	}
	result, err := dbs.wc.Transfers(data)
	// A missing response signature is deliberately ignored.
	if err == core.ErrNotFoundSign {
		err = nil
	}
	if err != nil {
		return nil, err
	}
	if result["result_code"] != "SUCCESS" {
		glog.Errorln(result["err_code"])
		return nil, cerr.WithdrawFailed
	}
	return cash, nil
}
|
// +build !test
package testws
import (
"path/filepath"
"bldy.build/build/label"
)
// TestWS is a minimal test workspace rooted at WD; only the path-building
// methods are implemented, the rest panic.
type TestWS struct {
	// WD is the workspace's working directory (its root on disk).
	WD string
}
// AbsPath is unimplemented for the test workspace and always panics.
func (t *TestWS) AbsPath() string {
	panic("not implemented")
}
// Buildfile is unimplemented for the test workspace and always panics.
func (t *TestWS) Buildfile(label.Label) string {
	panic("not implemented")
}
// File resolves lbl to a path under the workspace root:
// <WD>/<package>/<name>. It panics if the label fails validation.
func (t *TestWS) File(lbl label.Label) string {
	if err := lbl.Valid(); err != nil {
		panic(err)
	}
	return filepath.Join(t.WD, lbl.Package(), lbl.Name())
}
// PackageDir returns the directory of lbl's package under the workspace
// root. Unlike File, it performs no validation of the label.
func (t *TestWS) PackageDir(lbl label.Label) string {
	return filepath.Join(t.WD, lbl.Package())
}
// LoadBuildfile is unimplemented for the test workspace and always panics.
func (t *TestWS) LoadBuildfile(label.Label) ([]byte, error) {
	panic("not implemented")
}
|
package migrations
import (
"database/sql"
"github.com/DemoHn/obsidian-panel/pkg/dbmigrate"
)
// init registers this migration (create_account) with the dbmigrate
// registry, pairing its up and down scripts under a timestamped key.
func init() {
	dbmigrate.AddMigration("20190209212436_create_account", UpT20190209212436, DownT20190209212436)
}
// UpT20190209212436 - migration up script
// It creates the accounts table holding credentials and permission data.
func UpT20190209212436(db *sql.DB) error {
	const createTableStmt = `create table accounts (
id integer primary key autoincrement,
name text not null unique,
credential blob not null,
permission_level varchar(10) not null,
created_at datetime not null,
updated_at datetime not null
)`
	_, err := db.Exec(createTableStmt)
	return err
}
// DownT20190209212436 - migration down script
// It drops the accounts table created by the matching up migration.
func DownT20190209212436(db *sql.DB) error {
	_, err := db.Exec(`drop table accounts`)
	return err
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcekeeper
import (
"context"
"testing"
"github.com/crossplane/crossplane-runtime/pkg/test"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client"
apicommon "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam"
)
// Test_gcHandler_GarbageCollectApplicationRevision drives
// gcHandler.GarbageCollectApplicationRevision with two mock clients: one
// where every call succeeds and one where every call fails, asserting
// error propagation matches wantErr.
func Test_gcHandler_GarbageCollectApplicationRevision(t *testing.T) {
	type fields struct {
		resourceKeeper *resourceKeeper
		cfg            *gcConfig
	}
	tests := []struct {
		name    string
		fields  fields
		wantErr bool
	}{
		{
			name: "cleanUpApplicationRevision and cleanUpWorkflowComponentRevision success",
			fields: fields{
				resourceKeeper: &resourceKeeper{
					Client: test.NewMockClient(),
					app:    &v1beta1.Application{},
				},
				cfg: &gcConfig{
					disableApplicationRevisionGC: false,
					disableComponentRevisionGC:   false,
				},
			},
		},
		{
			name: "failed",
			fields: fields{
				resourceKeeper: &resourceKeeper{
					// Every client operation errors so the handler's error
					// surface can be asserted.
					Client: &test.MockClient{
						MockGet:         test.NewMockGetFn(errors.New("mock")),
						MockList:        test.NewMockListFn(errors.New("mock")),
						MockCreate:      test.NewMockCreateFn(errors.New("mock")),
						MockDelete:      test.NewMockDeleteFn(errors.New("mock")),
						MockDeleteAllOf: test.NewMockDeleteAllOfFn(errors.New("mock")),
						MockUpdate:      test.NewMockUpdateFn(errors.New("mock")),
						MockPatch:       test.NewMockPatchFn(errors.New("mock")),
					},
					app: &v1beta1.Application{},
				},
				cfg: &gcConfig{
					disableApplicationRevisionGC: false,
					disableComponentRevisionGC:   false,
				},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			h := &gcHandler{
				resourceKeeper: tt.fields.resourceKeeper,
				cfg:            tt.fields.cfg,
			}
			if err := h.GarbageCollectApplicationRevision(context.Background()); (err != nil) != tt.wantErr {
				t.Errorf("gcHandler.GarbageCollectApplicationRevision() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// Test_cleanUpApplicationRevision covers cleanUpApplicationRevision with
// table-driven cases: trimming surplus revisions past the limit, the
// disabled-GC switch, and list/delete failures.
func Test_cleanUpApplicationRevision(t *testing.T) {
	type args struct {
		h *gcHandler
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			// Three revisions, limit 1, latest is app-v1: the handler should
			// delete the surplus revisions without error.
			name: "clean up app-v2",
			args: args{
				h: &gcHandler{
					resourceKeeper: &resourceKeeper{
						Client: &test.MockClient{
							MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
								l, _ := list.(*v1beta1.ApplicationRevisionList)
								l.Items = []v1beta1.ApplicationRevision{
									{
										ObjectMeta: metav1.ObjectMeta{
											Name: "app-v1",
										},
									},
									{
										ObjectMeta: metav1.ObjectMeta{
											Name: "app-v2",
										},
									},
									{
										ObjectMeta: metav1.ObjectMeta{
											Name: "app-v3",
										},
									},
								}
								return nil
							},
							MockDelete: test.NewMockDeleteFn(nil),
						},
						app: &v1beta1.Application{
							Status: apicommon.AppStatus{
								LatestRevision: &apicommon.Revision{
									Name: "app-v1",
								},
							},
						},
					},
					cfg: &gcConfig{
						disableApplicationRevisionGC: false,
						appRevisionLimit:             1,
					},
				},
			},
		},
		{
			// GC disabled: nothing should happen and no error is expected.
			name: "disabled",
			args: args{
				h: &gcHandler{
					cfg: &gcConfig{
						disableApplicationRevisionGC: true,
					},
				},
			},
		},
		{
			name: "list failed",
			args: args{
				h: &gcHandler{
					resourceKeeper: &resourceKeeper{
						Client: &test.MockClient{
							MockList: test.NewMockListFn(errors.New("mock")),
						},
						app: &v1beta1.Application{},
					},
					cfg: &gcConfig{
						disableApplicationRevisionGC: false,
						appRevisionLimit:             1,
					},
				},
			},
			wantErr: true,
		},
		{
			// Listing succeeds but deletion fails; the error must surface.
			name: "delete failed",
			args: args{
				h: &gcHandler{
					resourceKeeper: &resourceKeeper{
						Client: &test.MockClient{
							MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
								l, _ := list.(*v1beta1.ApplicationRevisionList)
								l.Items = []v1beta1.ApplicationRevision{
									{
										ObjectMeta: metav1.ObjectMeta{
											Name: "app-v1",
										},
									},
									{
										ObjectMeta: metav1.ObjectMeta{
											Name: "app-v2",
										},
									},
									{
										ObjectMeta: metav1.ObjectMeta{
											Name: "app-v3",
										},
									},
								}
								return nil
							},
							MockDelete: test.NewMockDeleteFn(errors.New("mock")),
						},
						app: &v1beta1.Application{
							Status: apicommon.AppStatus{
								LatestRevision: &apicommon.Revision{
									Name: "app-v1",
								},
							},
						},
					},
					cfg: &gcConfig{
						disableApplicationRevisionGC: false,
						appRevisionLimit:             1,
					},
				},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := cleanUpApplicationRevision(context.Background(), tt.args.h); (err != nil) != tt.wantErr {
				t.Errorf("cleanUpApplicationRevision() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// Test_cleanUpWorkflowComponentRevision covers cleanUpComponentRevision:
// deleting surplus ControllerRevisions past the limit, the no-op path,
// the disabled switch, and get/list/delete failures.
func Test_cleanUpWorkflowComponentRevision(t *testing.T) {
	type args struct {
		h *gcHandler
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			// Three revisions with limit 1; revision3's owner resource is
			// reported NotFound so its branch exercises the not-found path.
			name: "clean up found revisions",
			args: args{
				h: &gcHandler{
					resourceKeeper: &resourceKeeper{
						_crRT: &v1beta1.ResourceTracker{},
						Client: &test.MockClient{
							MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error {
								if key.Name == "revision3" {
									return kerrors.NewNotFound(schema.GroupResource{}, "")
								}
								o, _ := obj.(*unstructured.Unstructured)
								o.SetLabels(map[string]string{
									oam.LabelAppComponentRevision: "revision1",
								})
								return nil
							},
							MockDelete: test.NewMockDeleteFn(nil),
							MockUpdate: test.NewMockUpdateFn(nil),
							MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
								l, _ := list.(*appsv1.ControllerRevisionList)
								l.Items = []appsv1.ControllerRevision{
									{
										ObjectMeta: metav1.ObjectMeta{Name: "revision1", Namespace: "default"},
										Revision:   1,
									},
									{
										ObjectMeta: metav1.ObjectMeta{Name: "revision2", Namespace: "default"},
										Revision:   2,
									},
									{
										ObjectMeta: metav1.ObjectMeta{Name: "revision3", Namespace: "default"},
										Revision:   3,
									},
								}
								return nil
							},
						},
						app: &v1beta1.Application{
							Status: apicommon.AppStatus{
								AppliedResources: []apicommon.ClusterObjectReference{
									{
										ObjectReference: corev1.ObjectReference{
											Namespace:  "default",
											Name:       "revision1",
											APIVersion: appsv1.SchemeGroupVersion.String(),
											Kind:       "Deployment",
										},
									},
									{
										ObjectReference: corev1.ObjectReference{
											Namespace:  "default",
											Name:       "revision3",
											APIVersion: appsv1.SchemeGroupVersion.String(),
											Kind:       "Deployment",
										},
									},
								},
							},
							ObjectMeta: metav1.ObjectMeta{}}},
					cfg: &gcConfig{
						disableComponentRevisionGC: false,
						appRevisionLimit:           1,
					},
				},
			},
		},
		{
			// Only one revision exists, which is within the limit: no deletes.
			name: "no need clean up",
			args: args{
				h: &gcHandler{
					resourceKeeper: &resourceKeeper{
						_crRT: &v1beta1.ResourceTracker{},
						Client: &test.MockClient{
							MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error {
								o, _ := obj.(*unstructured.Unstructured)
								o.SetLabels(map[string]string{
									oam.LabelAppComponentRevision: "revision1",
								})
								return nil
							},
							MockDelete: test.NewMockDeleteFn(nil),
							MockUpdate: test.NewMockUpdateFn(nil),
							MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
								l, _ := list.(*appsv1.ControllerRevisionList)
								l.Items = []appsv1.ControllerRevision{
									{
										ObjectMeta: metav1.ObjectMeta{Name: "revision1", Namespace: "default"},
										Revision:   1,
									},
								}
								return nil
							},
						},
						app: &v1beta1.Application{
							Status: apicommon.AppStatus{
								AppliedResources: []apicommon.ClusterObjectReference{
									{},
								},
							},
							ObjectMeta: metav1.ObjectMeta{}}},
					cfg: &gcConfig{
						disableComponentRevisionGC: false,
						appRevisionLimit:           1,
					},
				},
			},
		},
		{
			name: "disabled",
			args: args{
				h: &gcHandler{
					cfg: &gcConfig{
						disableComponentRevisionGC: true,
					},
				},
			},
		},
		{
			name: "get failed",
			args: args{
				h: &gcHandler{
					resourceKeeper: &resourceKeeper{
						Client: &test.MockClient{
							MockGet: test.NewMockGetFn(errors.New("mock")),
						},
						app: &v1beta1.Application{
							Status: apicommon.AppStatus{
								AppliedResources: []apicommon.ClusterObjectReference{
									{},
								},
							},
							ObjectMeta: metav1.ObjectMeta{}}},
					cfg: &gcConfig{
						disableComponentRevisionGC: false,
						appRevisionLimit:           1,
					},
				},
			},
			wantErr: true,
		},
		{
			name: "list failed",
			args: args{
				h: &gcHandler{
					resourceKeeper: &resourceKeeper{
						Client: &test.MockClient{
							MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error {
								o, _ := obj.(*unstructured.Unstructured)
								o.SetLabels(map[string]string{
									oam.LabelAppComponentRevision: "revision1",
								})
								return nil
							},
							MockList: test.NewMockListFn(errors.New("mock")),
						},
						app: &v1beta1.Application{
							Status: apicommon.AppStatus{
								AppliedResources: []apicommon.ClusterObjectReference{
									{},
								},
							},
							ObjectMeta: metav1.ObjectMeta{}}},
					cfg: &gcConfig{
						disableComponentRevisionGC: false,
						appRevisionLimit:           1,
					},
				},
			},
			wantErr: true,
		},
		{
			// Deletion of a surplus revision fails; the error must surface.
			// NOTE(review): the third fixture name "revisio3" (sic) differs
			// from the other cases' "revision3" — likely a typo but harmless
			// to this case; confirm before normalizing.
			name: "deleteComponentRevision failed",
			args: args{
				h: &gcHandler{
					resourceKeeper: &resourceKeeper{
						_crRT: &v1beta1.ResourceTracker{},
						Client: &test.MockClient{
							MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error {
								o, _ := obj.(*unstructured.Unstructured)
								o.SetLabels(map[string]string{
									oam.LabelAppComponentRevision: "revision1",
								})
								return nil
							},
							MockDelete: test.NewMockDeleteFn(errors.New("mock")),
							MockUpdate: test.NewMockUpdateFn(nil),
							MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
								l, _ := list.(*appsv1.ControllerRevisionList)
								l.Items = []appsv1.ControllerRevision{
									{
										ObjectMeta: metav1.ObjectMeta{Name: "revision1", Namespace: "default"},
										Revision:   1,
									},
									{
										ObjectMeta: metav1.ObjectMeta{Name: "revision2", Namespace: "default"},
										Revision:   2,
									},
									{
										ObjectMeta: metav1.ObjectMeta{Name: "revisio3", Namespace: "default"},
										Revision:   3,
									},
								}
								return nil
							},
						},
						app: &v1beta1.Application{
							Status: apicommon.AppStatus{
								AppliedResources: []apicommon.ClusterObjectReference{
									{},
								},
							},
							ObjectMeta: metav1.ObjectMeta{}}},
					cfg: &gcConfig{
						disableComponentRevisionGC: false,
						appRevisionLimit:           1,
					},
				},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := cleanUpComponentRevision(context.Background(), tt.args.h); (err != nil) != tt.wantErr {
				t.Errorf("cleanUpWorkflowComponentRevision() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
|
package schema
import (
"themis/models"
)
// createLinkTypeRelated assembles the three "Related Link" link types
// (story→task, bug→task, story→bug) used by the default schema.
func createLinkTypeRelated() []models.LinkType {
	return []models.LinkType{
		createLinkTypeRelatedStoryToTask(),
		createLinkTypeRelatedBugToTask(),
		createLinkTypeRelatedStoryToBug(),
	}
}
// createLinkTypeRelatedStoryToTask builds the "Related Link" type
// connecting a story (source) to a task (target) with graph topology.
func createLinkTypeRelatedStoryToTask() models.LinkType {
	lt := models.NewLinkType()
	lt.Name = "Related Link"
	lt.Description = "The related relationship."
	lt.Version = 0
	lt.ForwardName = "is related to"
	lt.ReverseName = "is related to"
	lt.Topology = "graph"
	lt.CategoryRef = "default"
	lt.SourceWorkItemTypeRef = "story"
	lt.TargetWorkItemTypeRef = "task"
	return *lt
}
// createLinkTypeRelatedBugToTask builds the "Related Link" type
// connecting a bug (source) to a task (target) with graph topology.
func createLinkTypeRelatedBugToTask() models.LinkType {
	lt := models.NewLinkType()
	lt.Name = "Related Link"
	lt.Description = "The related relationship."
	lt.Version = 0
	lt.ForwardName = "is related to"
	lt.ReverseName = "is related to"
	lt.Topology = "graph"
	lt.CategoryRef = "default"
	lt.SourceWorkItemTypeRef = "bug"
	lt.TargetWorkItemTypeRef = "task"
	return *lt
}
// createLinkTypeRelatedStoryToBug builds the "Related Link" type
// connecting a story (source) to a bug (target) with graph topology.
func createLinkTypeRelatedStoryToBug() models.LinkType {
	lt := models.NewLinkType()
	lt.Name = "Related Link"
	lt.Description = "The related relationship."
	lt.Version = 0
	lt.ForwardName = "is related to"
	lt.ReverseName = "is related to"
	lt.Topology = "graph"
	lt.CategoryRef = "default"
	lt.SourceWorkItemTypeRef = "story"
	lt.TargetWorkItemTypeRef = "bug"
	return *lt
}
|
package model
// ViewClubMember is a read-model row describing a club membership.
type ViewClubMember struct {
	Id       int64  // membership record id
	Uid      int64  // member's user id
	Nickname string // member's display name
	Status   int    // membership status code — semantics defined elsewhere; confirm
	CreatedAt int64 // creation time, presumably a unix timestamp — confirm
}
|
package oauthstore
import (
"context"
"golang.org/x/oauth2"
)
// storageTokenSource wraps an oauth2.TokenSource with a persistent token
// cache provided by Config.Storage.
type storageTokenSource struct {
	*Config
	oauth2.TokenSource
}
// Token satisfies the TokenSource interface
// It serves a still-valid token from storage when possible; otherwise it
// pulls a fresh token from the wrapped source and persists it before
// returning.
func (s *storageTokenSource) Token() (*oauth2.Token, error) {
	// Fast path: a cached token that has not expired (err is nil here).
	if token, err := s.Config.Storage.GetToken(); err == nil && token.Valid() {
		return token, err
	}
	// Cache miss or expired: defer to the underlying source.
	token, err := s.TokenSource.Token()
	if err != nil {
		return token, err
	}
	// Persist the fresh token so subsequent calls hit the fast path.
	if err := s.Config.Storage.SetToken(token); err != nil {
		return nil, err
	}
	return token, nil
}
// StorageTokenSource will be used by our config.TokenSource method
// When t is absent or invalid it first tries to recover a token from
// storage, then wraps the standard oauth2 source in a storageTokenSource
// so refreshed tokens are persisted.
func StorageTokenSource(ctx context.Context, c *Config, t *oauth2.Token) oauth2.TokenSource {
	if t == nil || !t.Valid() {
		if tok, err := c.Storage.GetToken(); err == nil {
			t = tok
		}
	}
	ts := c.Config.TokenSource(ctx, t)
	return &storageTokenSource{c, ts}
}
|
/*
Copyright 2020 Cornelius Weig.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"context"
"encoding/json"
"net/http"
"time"
"github.com/pkg/errors"
)
// Homebrew is a client for Homebrew's public analytics API, bound to a
// single formula endpoint URL.
type Homebrew struct {
	url string
}
// HomebrewAPITarget holds the install count for the "krew" formula within
// one aggregation window of the analytics response.
type HomebrewAPITarget struct {
	Count int `json:"krew"`
}
// HomebrewAPIInstalls groups counts by aggregation window (30/90/365 days)
// as returned by the Homebrew analytics API.
type HomebrewAPIInstalls struct {
	Aggregate30d  HomebrewAPITarget `json:"30d,omitempty"`
	Aggregate90d  HomebrewAPITarget `json:"90d,omitempty"`
	Aggregate365d HomebrewAPITarget `json:"365d,omitempty"`
}
// HomebrewAPIAnalytics mirrors the "analytics" object of the API payload:
// raw installs, installs-on-request, and build errors.
type HomebrewAPIAnalytics struct {
	Installs          HomebrewAPIInstalls `json:"install"`
	InstallsOnRequest HomebrewAPIInstalls `json:"install_on_request"`
	InstallErrors     HomebrewAPIInstalls `json:"build_error"`
}
// HomebrewAPIResponse is the top-level shape of the analytics API payload.
type HomebrewAPIResponse struct {
	Analytics HomebrewAPIAnalytics `json:"analytics,omitempty"`
}
// HomebrewStats is the flattened row written to BigQuery: one snapshot of
// the analytics counters, stamped with its collection time.
type HomebrewStats struct {
	CreatedAt             time.Time
	Installs30d           int `bigquery:"installs_30d"`
	Installs90d           int `bigquery:"installs_90d"`
	Installs365d          int `bigquery:"installs_365d"`
	InstallsOnRequest30d  int `bigquery:"installs_on_request_30d"`
	InstallsOnRequest90d  int `bigquery:"installs_on_request_90d"`
	InstallsOnRequest365d int `bigquery:"installs_on_request_365d"`
	BuildErrors30d        int `bigquery:"build_errors_30d"`
}
// NewHomebrew constructs a Homebrew client for the given analytics URL.
func NewHomebrew(url string) *Homebrew {
	return &Homebrew{url: url}
}
// FetchAnalytics downloads the Homebrew analytics JSON from h.url and
// flattens it into a HomebrewStats snapshot. The request honors ctx for
// cancellation/timeout.
func (h *Homebrew) FetchAnalytics(ctx context.Context) (HomebrewStats, error) {
	req, err := http.NewRequestWithContext(ctx, "GET", h.url, nil)
	if err != nil {
		return HomebrewStats{}, errors.Wrapf(err, "error creating GET request for homebrew")
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return HomebrewStats{}, errors.Wrapf(err, "error requesting analytics data from homebrew")
	}
	defer res.Body.Close()
	// BUG FIX: the original condition was `StatusCode < 200 && 300 <=
	// StatusCode`, which is always false, so non-2xx responses were
	// silently accepted. Reject anything outside the 2xx range.
	if res.StatusCode < 200 || res.StatusCode >= 300 {
		return HomebrewStats{}, errors.New("fetching homebrew analytics failed")
	}
	var body HomebrewAPIResponse
	if err = json.NewDecoder(res.Body).Decode(&body); err != nil {
		return HomebrewStats{}, errors.Wrapf(err, "error parsing response from homebrew")
	}
	return stats(&body), nil
}
// stats flattens an API response into a HomebrewStats row, stamping it
// with the current time as CreatedAt.
func stats(r *HomebrewAPIResponse) HomebrewStats {
	return HomebrewStats{
		CreatedAt:             time.Now(),
		Installs30d:           r.Analytics.Installs.Aggregate30d.Count,
		Installs90d:           r.Analytics.Installs.Aggregate90d.Count,
		Installs365d:          r.Analytics.Installs.Aggregate365d.Count,
		InstallsOnRequest30d:  r.Analytics.InstallsOnRequest.Aggregate30d.Count,
		InstallsOnRequest90d:  r.Analytics.InstallsOnRequest.Aggregate90d.Count,
		InstallsOnRequest365d: r.Analytics.InstallsOnRequest.Aggregate365d.Count,
		BuildErrors30d:        r.Analytics.InstallErrors.Aggregate30d.Count,
	}
}
|
package solution02
import (
"adventofcode/inputs/input02"
"fmt"
"time"
)
// opcodes holds the Intcode program currently being executed; Run
// reloads it from the puzzle input before every noun/verb trial because
// process mutates it in place.
var opcodes []int

// init logs the start time of this solution's execution.
func init() {
	fmt.Println("Continue on:", time.Now())
}
// Run brute-forces Advent of Code 2019 day 2 part two: it tries every
// noun/verb pair in [0,99] until executing the Intcode program leaves
// 19690720 at address 0, then prints 100*noun + verb.
func Run() {
	// 100*noun + verb
	result := -1
	for noun := 0; noun <= 99; noun++ {
		// A hit in the inner loop only breaks that loop; this guard stops
		// the outer one.
		if result >= 0 {
			break
		}
		for verb := 0; verb <= 99; verb++ {
			// Fresh program copy each trial — process mutates opcodes.
			opcodes = input02.ReadInput()
			opcodes[1] = noun // noun
			opcodes[2] = verb // verb
			process(0)
			if opcodes[0] == 19690720 {
				result = (100 * noun) + verb
				break
			}
		}
	}
	if result >= 0 {
		fmt.Println("result:", result)
	} else {
		fmt.Println("Failed...")
	}
}
// process interprets the Intcode program in the package-level opcodes
// slice, starting at opcodeaddress. Opcode 1 adds, 2 multiplies (both
// use two position-mode operands and a destination address), 99 halts,
// anything else panics. The slice is mutated in place.
func process(opcodeaddress int) {
	// IMPROVEMENT: the original recursed for every instruction; since the
	// recursion was a tail call, it is rewritten as a loop to avoid
	// unbounded stack growth on long programs. Behavior is unchanged.
	for addr := opcodeaddress; ; addr += 4 {
		switch opcodes[addr] {
		case 1:
			vpos1 := opcodes[opcodes[addr+1]]
			vpos2 := opcodes[opcodes[addr+2]]
			spos := opcodes[addr+3]
			opcodes[spos] = vpos1 + vpos2
		case 2:
			vpos1 := opcodes[opcodes[addr+1]]
			vpos2 := opcodes[opcodes[addr+2]]
			spos := opcodes[addr+3]
			opcodes[spos] = vpos1 * vpos2
		case 99:
			return
		default:
			panic("Unknown operation code!")
		}
	}
}
|
package middlerware
import (
"fmt"
"github.com/lestrrat-go/file-rotatelogs"
"github.com/rifflock/lfshook"
"github.com/sirupsen/logrus"
"os"
"time"
"../config"
"../config/bean"
)
// logLevels maps the application's own log-level enum (bean.LogLevel)
// onto the corresponding logrus levels.
var logLevels = map[bean.LogLevel]logrus.Level{
	bean.Debug: logrus.DebugLevel,
	bean.Info:  logrus.InfoLevel,
	bean.Warn:  logrus.WarnLevel,
	bean.Error: logrus.ErrorLevel,
	bean.Fatal: logrus.FatalLevel,
	bean.Panic: logrus.PanicLevel,
}

// lg is the package-wide logrus logger configured in init.
var lg *logrus.Logger
// init configures the package logger: a JSON-formatted logrus instance
// writing to stdout at the configured level, plus optional per-level
// rotating file outputs (lfshook + rotatelogs) taken from the app config.
func init() {
	var lh *lfshook.LfsHook
	var wm lfshook.WriterMap = make(lfshook.WriterMap, 6)
	var rs = make(map[logrus.Level]*rotatelogs.RotateLogs, 6)
	lc := config.AppConfig.Logger
	fmt.Println(lc)
	lf := new(logrus.JSONFormatter)
	lf.TimestampFormat = `2006-01-02 15:04:05`
	lf.DisableTimestamp = false
	lg = logrus.New()
	lg.SetFormatter(lf)
	lg.SetOutput(os.Stdout)
	// Unknown configured level falls back to Info.
	if l, ok := logLevels[lc.StandLevel]; ok {
		lg.SetLevel(l)
	} else {
		lg.SetLevel(logrus.InfoLevel)
	}
	if len(lc.Files) > 0 {
		for _, f := range lc.Files {
			var wr *rotatelogs.RotateLogs
			var err error
			// A configured link name selects the rotatelogs variant that also
			// maintains a symlink to the current log file.
			if f.LinkName != "" {
				wr, err = rotatelogs.New(
					f.FileNameFormat,
					rotatelogs.WithLinkName(f.LinkName),
					rotatelogs.WithRotationTime(lc.RotationTime*time.Second),
					rotatelogs.WithRotationCount(lc.RotationCount),
				)
			} else {
				wr, err = rotatelogs.New(
					f.FileNameFormat,
					rotatelogs.WithRotationTime(lc.RotationTime*time.Second),
					rotatelogs.WithRotationCount(lc.RotationCount),
				)
			}
			// A bad file config only skips that output; stdout logging stays.
			if err != nil {
				logrus.Errorf("[middleware]-`logger初始化异常`, error:`%v`", err)
				continue
			}
			// One rotating writer can serve several configured levels.
			for _, lv := range f.Level {
				if l, ok := logLevels[lv]; ok {
					rs[l] = wr
				}
			}
		}
	}
	if len(rs) > 0 {
		for l, w := range rs {
			wm[l] = w
		}
	}
	lh = lfshook.NewHook(wm, &logrus.JSONFormatter{})
	lg.AddHook(lh)
	// NOTE(review): Cont is not defined in this file — presumably a
	// package-level service container; confirm.
	Cont.Register("log", lg)
}
|
/*
* Copyright 2019-present Open Networking Foundation
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mocks
import (
"context"
"fmt"
"strconv"
"strings"
"sync"
"github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
ic "github.com/opencord/voltha-protos/v4/go/inter_container"
of "github.com/opencord/voltha-protos/v4/go/openflow_13"
"github.com/opencord/voltha-protos/v4/go/voltha"
)
// macAddressToUint32Array parses a colon-separated MAC address into one
// uint32 per hex octet. If any octet fails to parse it returns the fixed
// fallback {1, 2, 3, 4, 5, 6}, matching the original mock behavior.
func macAddressToUint32Array(mac string) []uint32 {
	octets := strings.Split(mac, ":")
	parsed := make([]uint32, len(octets))
	for i, octet := range octets {
		value, err := strconv.ParseInt(octet, 16, 32)
		if err != nil {
			return []uint32{1, 2, 3, 4, 5, 6}
		}
		parsed[i] = uint32(value)
	}
	return parsed
}
// Adapter is an in-memory mock VOLTHA adapter for tests. It records flows
// and devices in maps and can be told to fail specific operations (flow
// add/delete, device delete) via the fail* switches.
type Adapter struct {
coreProxy adapterif.CoreProxy
flows map[uint64]*voltha.OfpFlowStats // flows keyed by flow ID; guarded by flowLock
flowLock sync.RWMutex
devices map[string]*voltha.Device // devices keyed by device ID; guarded by deviceLock
deviceLock sync.RWMutex
failFlowAdd bool // when true, Update_flows_incrementally rejects additions
failFlowDelete bool // when true, Update_flows_incrementally rejects removals
failDeleteDevice bool // when true, Delete_device returns an error
}

// NewAdapter creates a mock adapter backed by the given core proxy, with
// empty flow and device tables.
func NewAdapter(cp adapterif.CoreProxy) *Adapter {
return &Adapter{
flows: map[uint64]*voltha.OfpFlowStats{},
devices: map[string]*voltha.Device{},
coreProxy: cp,
}
}
// storeDevice records d in the device table (nil devices are ignored),
// replacing any existing entry with the same ID.
func (ta *Adapter) storeDevice(d *voltha.Device) {
ta.deviceLock.Lock()
defer ta.deviceLock.Unlock()
if d != nil {
ta.devices[d.Id] = d
}
}

// getDevice returns the stored device with the given ID, or nil if unknown.
func (ta *Adapter) getDevice(id string) *voltha.Device {
ta.deviceLock.RLock()
defer ta.deviceLock.RUnlock()
return ta.devices[id]
}

// updateDevice overwrites the stored copy of d (delegates to storeDevice).
func (ta *Adapter) updateDevice(d *voltha.Device) {
ta.storeDevice(d)
}
// Adapter_descriptor is a no-op mock that always succeeds.
func (ta *Adapter) Adapter_descriptor(ctx context.Context) error { // nolint
return nil
}

// Device_types is a no-op mock that returns no device types.
func (ta *Adapter) Device_types(ctx context.Context) (*voltha.DeviceTypes, error) { // nolint
return nil, nil
}

// Health is a no-op mock that reports no health status.
func (ta *Adapter) Health(ctx context.Context) (*voltha.HealthStatus, error) {
return nil, nil
}

// Adopt_device is a no-op mock that always succeeds.
func (ta *Adapter) Adopt_device(ctx context.Context, device *voltha.Device) error { // nolint
return nil
}

// Reconcile_device is a no-op mock that always succeeds.
func (ta *Adapter) Reconcile_device(ctx context.Context, device *voltha.Device) error { // nolint
return nil
}

// Abandon_device is a no-op mock that always succeeds.
func (ta *Adapter) Abandon_device(ctx context.Context, device *voltha.Device) error { // nolint
return nil
}

// Disable_device is a no-op mock that always succeeds.
func (ta *Adapter) Disable_device(ctx context.Context, device *voltha.Device) error { // nolint
return nil
}

// Reenable_device is a no-op mock that always succeeds.
func (ta *Adapter) Reenable_device(ctx context.Context, device *voltha.Device) error { // nolint
return nil
}

// Reboot_device is a no-op mock that always succeeds.
func (ta *Adapter) Reboot_device(ctx context.Context, device *voltha.Device) error { // nolint
return nil
}

// Self_test_device is a no-op mock that always succeeds.
func (ta *Adapter) Self_test_device(ctx context.Context, device *voltha.Device) error { // nolint
return nil
}

// Delete_device succeeds unless the adapter was configured (via
// SetDeleteAction) to simulate a delete failure.
func (ta *Adapter) Delete_device(ctx context.Context, device *voltha.Device) error { // nolint
if ta.failDeleteDevice {
return fmt.Errorf("delete-device-failure")
}
return nil
}

// Get_device_details is a no-op mock that always succeeds.
func (ta *Adapter) Get_device_details(ctx context.Context, device *voltha.Device) error { // nolint
return nil
}
// Update_flows_bulk is a no-op mock that always succeeds.
func (ta *Adapter) Update_flows_bulk(ctx context.Context, device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error { // nolint
return nil
}

// Update_flows_incrementally mocks the incremental flow update: additions are
// stored in the in-memory flow table and removals are deleted from it, keyed
// by flow ID. When the corresponding fail switch (SetFlowAction) is set, the
// matching phase returns an error before applying any change — note a failed
// add aborts before removals are processed.
func (ta *Adapter) Update_flows_incrementally(ctx context.Context, device *voltha.Device, flows *of.FlowChanges, groups *of.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error { // nolint
ta.flowLock.Lock()
defer ta.flowLock.Unlock()
if flows.ToAdd != nil && len(flows.ToAdd.Items) > 0 {
if ta.failFlowAdd {
return fmt.Errorf("flow-add-error")
}
for _, f := range flows.ToAdd.Items {
ta.flows[f.Id] = f
}
}
if flows.ToRemove != nil && len(flows.ToRemove.Items) > 0 {
if ta.failFlowDelete {
return fmt.Errorf("flow-delete-error")
}
for _, f := range flows.ToRemove.Items {
delete(ta.flows, f.Id)
}
}
return nil
}
// Update_pm_config is a no-op mock that always succeeds.
func (ta *Adapter) Update_pm_config(ctx context.Context, device *voltha.Device, pmConfigs *voltha.PmConfigs) error { // nolint
return nil
}

// Receive_packet_out is a no-op mock that always succeeds.
func (ta *Adapter) Receive_packet_out(ctx context.Context, deviceID string, egressPortNo int, msg *of.OfpPacketOut) error { // nolint
return nil
}

// Suppress_event is a no-op mock that always succeeds.
func (ta *Adapter) Suppress_event(ctx context.Context, filter *voltha.EventFilter) error { // nolint
return nil
}

// Unsuppress_event is a no-op mock that always succeeds.
func (ta *Adapter) Unsuppress_event(ctx context.Context, filter *voltha.EventFilter) error { // nolint
return nil
}

// Get_ofp_device_info returns a fixed switch capability (256 buffers, 2
// tables, flow/table/port/group stats) identifying itself as "adapter_mock".
func (ta *Adapter) Get_ofp_device_info(ctx context.Context, device *voltha.Device) (*ic.SwitchCapability, error) { // nolint
return &ic.SwitchCapability{
Desc: &of.OfpDesc{
HwDesc: "adapter_mock",
SwDesc: "adapter_mock",
SerialNum: "000000000",
},
SwitchFeatures: &of.OfpSwitchFeatures{
NBuffers: 256,
NTables: 2,
Capabilities: uint32(of.OfpCapabilities_OFPC_FLOW_STATS |
of.OfpCapabilities_OFPC_TABLE_STATS |
of.OfpCapabilities_OFPC_PORT_STATS |
of.OfpCapabilities_OFPC_GROUP_STATS),
},
}, nil
}

// Process_inter_adapter_message is a no-op mock that always succeeds.
func (ta *Adapter) Process_inter_adapter_message(ctx context.Context, msg *ic.InterAdapterMessage) error { // nolint
return nil
}

// Download_image is a no-op mock that returns no image download.
func (ta *Adapter) Download_image(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) { // nolint
return nil, nil
}

// Get_image_download_status is a no-op mock that returns no status.
func (ta *Adapter) Get_image_download_status(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) { // nolint
return nil, nil
}

// Cancel_image_download is a no-op mock that returns no image download.
func (ta *Adapter) Cancel_image_download(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) { // nolint
return nil, nil
}

// Activate_image_update is a no-op mock that returns no image download.
func (ta *Adapter) Activate_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) { // nolint
return nil, nil
}

// Revert_image_update is a no-op mock that returns no image download.
func (ta *Adapter) Revert_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) { // nolint
return nil, nil
}

// Enable_port is a no-op mock that always succeeds.
func (ta *Adapter) Enable_port(ctx context.Context, deviceId string, port *voltha.Port) error { //nolint
return nil
}

// Disable_port is a no-op mock that always succeeds.
func (ta *Adapter) Disable_port(ctx context.Context, deviceId string, port *voltha.Port) error { //nolint
return nil
}

// Child_device_lost is a no-op mock that always succeeds.
func (ta *Adapter) Child_device_lost(ctx context.Context, pDeviceID string, pPortNo uint32, onuID uint32) error { //nolint
return nil
}

// Start_omci_test is a no-op mock that returns no test response.
func (ta *Adapter) Start_omci_test(ctx context.Context, device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error) { //nolint
return nil, nil
}

// Get_ext_value is a no-op mock that returns no values.
func (ta *Adapter) Get_ext_value(ctx context.Context, deviceId string, device *voltha.Device, valueflag voltha.ValueType_Type) (*voltha.ReturnValues, error) { //nolint
return nil, nil
}
// GetFlowCount returns the total number of flows presently stored in this
// mock adapter's flow table.
func (ta *Adapter) GetFlowCount() int {
ta.flowLock.RLock()
defer ta.flowLock.RUnlock()
return len(ta.flows)
}

// ClearFlows removes all flows from this adapter's flow table.
func (ta *Adapter) ClearFlows() {
ta.flowLock.Lock()
defer ta.flowLock.Unlock()
ta.flows = map[uint64]*voltha.OfpFlowStats{}
}

// SetFlowAction configures whether Update_flows_incrementally should fail
// flow additions and/or flow deletions.
func (ta *Adapter) SetFlowAction(failFlowAdd, failFlowDelete bool) {
ta.failFlowAdd = failFlowAdd
ta.failFlowDelete = failFlowDelete
}

// SetDeleteAction configures whether Delete_device should fail.
func (ta *Adapter) SetDeleteAction(failDeleteDevice bool) {
ta.failDeleteDevice = failDeleteDevice
}
|
// DON'T EDIT *** generated by scaneo *** DON'T EDIT //
package model
import "database/sql"
// ScanCategory scans a single row into a Category (columns: ID, Name,
// ImageUrl, in that order). On scan failure the zero Category is returned
// with the error.
func ScanCategory(r *sql.Row) (Category, error) {
var s Category
if err := r.Scan(
&s.ID,
&s.Name,
&s.ImageUrl,
); err != nil {
return Category{}, err
}
return s, nil
}

// ScanCategorys scans every row of rs into a slice of Category. It returns
// the first scan error or the rows' iteration error, if any.
func ScanCategorys(rs *sql.Rows) ([]Category, error) {
structs := make([]Category, 0, 16)
var err error
for rs.Next() {
var s Category
if err = rs.Scan(
&s.ID,
&s.Name,
&s.ImageUrl,
); err != nil {
return nil, err
}
structs = append(structs, s)
}
// surface errors encountered during iteration itself
if err = rs.Err(); err != nil {
return nil, err
}
return structs, nil
}

// ScanRecipe scans a single row into a Recipe (columns: ID, Name, ImageUrl,
// in that order). On scan failure the zero Recipe is returned with the error.
func ScanRecipe(r *sql.Row) (Recipe, error) {
var s Recipe
if err := r.Scan(
&s.ID,
&s.Name,
&s.ImageUrl,
); err != nil {
return Recipe{}, err
}
return s, nil
}

// ScanRecipes scans every row of rs into a slice of Recipe. It returns the
// first scan error or the rows' iteration error, if any.
func ScanRecipes(rs *sql.Rows) ([]Recipe, error) {
structs := make([]Recipe, 0, 16)
var err error
for rs.Next() {
var s Recipe
if err = rs.Scan(
&s.ID,
&s.Name,
&s.ImageUrl,
); err != nil {
return nil, err
}
structs = append(structs, s)
}
// surface errors encountered during iteration itself
if err = rs.Err(); err != nil {
return nil, err
}
return structs, nil
}
|
package controllers
import (
"net/http"
"strings"
"time"
"user/common"
"user/models"
"user/util"
"github.com/astaxie/beego"
)
// ThirdloginController handles third-party (WeChat) login flows.
type ThirdloginController struct {
beego.Controller
}

const (
//H5_APP_ID = "wx87f81569b7e4b5f6"
//H5_APP_SECRET = "8421fd4781b1c29077c2e82e71ce3d2a"
// WEIXIN_URL is the base URL of the WeChat sns API.
WEIXIN_URL = "https://api.weixin.qq.com/sns/"
)

// WeChat H5 app credentials, loaded from the beego app configuration.
var (
H5_APP_ID string = beego.AppConfig.String("h5.app.id")
H5_APP_SECRET string = beego.AppConfig.String("h5.app.secret")
)
//test url : https://open.weixin.qq.com/connect/oauth2/authorize?appid=wx87f81569b7e4b5f6&redirect_uri=http%3a%2f%2ftest-user.lezhuale.com&response_type=code&scope=snsapi_userinfo&state=STATE#wechat_redirect
// Weixin_h5 handles the WeChat H5 OAuth callback: it exchanges the "code"
// query parameter for an access token and openid, fetches the user profile,
// creates a local user on first login, and finally either redirects to the
// caller-supplied "url" (with the session token attached) or returns the
// user as JSON.
// NOTE(review): the resp["..."].(string)/(float64) assertions below are
// unchecked and will panic on an unexpected WeChat payload; GetJSON errors
// are logged but execution continues. Confirm this is acceptable.
// @router /weixin_h5 [get]
func (u *ThirdloginController) Weixin_h5() {
code := u.Input().Get("code")
access_token := u.Input().Get("access_token")
redirect_url := u.Input().Get("url")
// exchange the OAuth code for the user's access token and openid
token_url := WEIXIN_URL + "oauth2/access_token?grant_type=authorization_code&appid=" + H5_APP_ID + "&secret=" + H5_APP_SECRET + "&code=" + code
resp, err := util.GetJSON(token_url, 5*time.Second)
if err != nil {
beego.Error("weixin_h5 error :", err)
}
// WeChat reports errors by omitting access_token from the payload
if _, exists := resp["access_token"]; !exists {
u.Data["json"] = common.ResponseResult(common.ErrCodeInvalidParams, nil)
u.ServeJSON()
return
}
access_token = resp["access_token"].(string)
openid := resp["openid"].(string)
beego.Debug("token", access_token, "openid:", openid)
// fetch the user's profile (unionid, nickname, avatar, ...)
userInfo_url := WEIXIN_URL + "userinfo?access_token=" + access_token + "&openid=" + openid
resp, err = util.GetJSON(userInfo_url, 5*time.Second)
if err != nil {
beego.Error("weixin_h5 get userInfo error :", err)
}
beego.Debug("weixin_h5 userInfo :", resp)
unionid := resp["unionid"].(string)
nickname := resp["nickname"].(string)
pic := resp["headimgurl"].(string)
sex := resp["sex"].(float64) // 1: male, 0: female (per the original comment)
// does the user already exist?
// NOTE(review): "weixin_unionid1" looks like a typo for "weixin_unionid" —
// confirm against the user lookup field names before changing.
user := models.GetUser("weixin_unionid1", unionid)
// first login: create a new local user
if user.Id_ == 0 {
newUser := &models.User{
Weixin_openid: openid,
Weixin_access_token: access_token,
Weixin_unionid: unionid,
Pic: pic,
Nickname: nickname,
Via: "weixin",
Sex: sex,
}
// NOTE(review): this logs the (empty) looked-up user, not newUser — verify intent.
beego.Debug("weixin_h5 user :", &user)
user, err = models.BuildUser(newUser)
if err != nil {
u.Data["json"] = common.ResponseResult(common.ErrCodeDuplicated, "")
u.ServeJSON()
return
}
}
beego.Debug("redirect_url:", redirect_url)
// redirect back to the caller with the session token attached
if redirect_url != "" {
redirect_url = getRedirectUrlWithToken(redirect_url, user.Token)
u.Redirect(redirect_url, http.StatusFound)
return
}
u.Data["json"] = common.ResponseResult(common.ErrCodeOk, user)
u.ServeJSON()
}
// getRedirectUrlWithToken returns originUrl with the login token attached.
// If the URL contains an "{access_token}" placeholder, the token is
// substituted in place; otherwise "access_token=<token>" is appended using
// "&" or "?" depending on whether the URL already has a query string.
//
// Fix: the original appended the "?"/"&" separator before checking for the
// placeholder, leaving a dangling trailing separator on placeholder URLs.
func getRedirectUrlWithToken(originUrl, token string) string {
	if strings.Contains(originUrl, "{access_token}") {
		originUrl = strings.Replace(originUrl, "{access_token}", token, 1)
	} else {
		sep := "?"
		if strings.Contains(originUrl, "?") {
			sep = "&"
		}
		originUrl = originUrl + sep + "access_token=" + token
	}
	beego.Debug("replace url : ", originUrl)
	return originUrl
}
|
package main
// letterCasePermutation returns every string obtainable from s by toggling
// the case of its ASCII letters; non-letter characters are left untouched.
// Results are emitted lowercase-first per position (same order as before).
func letterCasePermutation(s string) []string {
	bs := []byte(s)
	result := []string{}
	loopLetterCase(0, len(bs), bs, &result)
	return result
}

// loopLetterCase recursively enumerates case variants of bs from position l
// onward, appending each complete variant to r.
//
// Improvements: bs is now passed as a plain slice (the backing array is
// shared, so mutations remain visible without *[]byte indirection), and the
// explicit copy before append is gone — string(bs) already copies the bytes.
func loopLetterCase(l, n int, bs []byte, r *[]string) {
	if l == n {
		*r = append(*r, string(bs)) // string() snapshots the bytes
		return
	}
	// first keep the current character as-is
	loopLetterCase(l+1, n, bs, r)
	// then, if it is a letter, recurse with its case flipped and restore
	const caseBit = 'a' - 'A'
	switch {
	case bs[l] >= 'a' && bs[l] <= 'z':
		bs[l] -= caseBit
		loopLetterCase(l+1, n, bs, r)
		bs[l] += caseBit
	case bs[l] >= 'A' && bs[l] <= 'Z':
		bs[l] += caseBit
		loopLetterCase(l+1, n, bs, r)
		bs[l] -= caseBit
	}
}
|
package easypost_test
import (
"reflect"
"strings"
"github.com/EasyPost/easypost-go/v3"
)
// TestParcelCreate verifies that creating a parcel from the basic fixture
// returns a *Parcel with a "prcl_"-prefixed ID and the fixture weight.
func (c *ClientTests) TestParcelCreate() {
client := c.TestClient()
assert, require := c.Assert(), c.Require()

parcel, err := client.CreateParcel(c.fixture.BasicParcel())
require.NoError(err)

assert.Equal(reflect.TypeOf(&easypost.Parcel{}), reflect.TypeOf(parcel))
assert.True(strings.HasPrefix(parcel.ID, "prcl_"))
assert.Equal(15.4, parcel.Weight) // weight from the basic parcel fixture
}

// TestParcelRetrieve verifies that a created parcel can be fetched back by ID
// and that the retrieved parcel equals the created one.
func (c *ClientTests) TestParcelRetrieve() {
client := c.TestClient()
assert, require := c.Assert(), c.Require()

parcel, err := client.CreateParcel(c.fixture.BasicParcel())
require.NoError(err)

retrievedParcel, err := client.GetParcel(parcel.ID)
require.NoError(err)

assert.Equal(reflect.TypeOf(&easypost.Parcel{}), reflect.TypeOf(retrievedParcel))
assert.Equal(parcel, retrievedParcel)
}
|
package demo
import (
"fmt"
"sync"
)
// num is the shared counter incremented by the add goroutines.
var num int

// wq waits for both goroutines to finish.
var wq sync.WaitGroup

// lock serializes access to num.
var lock sync.Mutex

// add increments the shared counter 5000 times under the mutex and signals
// completion on the wait group.
func add() {
	defer wq.Done()
	for count := 0; count < 5000; count++ {
		lock.Lock()
		num++
		lock.Unlock()
	}
}

// Test runs two add goroutines concurrently, waits for both, and prints the
// resulting counter (10000 when synchronization is correct).
func Test() {
	wq.Add(2)
	for i := 0; i < 2; i++ {
		go add()
	}
	wq.Wait()
	fmt.Println("num=", num)
}
|
package logger
import (
"context"
"fmt"
"github.com/mingo-chen/wheel-minirpc/logger"
"gopkg.in/yaml.v3"
)
// LoggerPlugin installs the framework's built-in logger implementation.
type LoggerPlugin struct {
}

// Startup decodes this plugin's YAML config node into a logConfig and
// installs a miniLog built from it as the global logger.Log. It panics if
// the node cannot be decoded.
func (l LoggerPlugin) Startup(cfg yaml.Node) {
var log miniLog
if err := cfg.Decode(&log.config); err != nil {
panic(fmt.Errorf("decode plugin config err:%+v", err))
}
// inject the configured logger into the framework's global logger.Log
logger.Log = log
}
// logConfig is the logger plugin's YAML configuration.
type logConfig struct {
Level string `yaml:"level"` // log level
Path string `yaml:"path"` // output destination
RollType string `yaml:"roll_type"` // log rotation strategy: by size or by time
MaxSize int `yaml:"max_size"` // size limit when rotating by size
Compress bool `yaml:"compress"` // whether rotated logs are compressed
Format string `yaml:"format"` // log line format
}

// miniLog is the framework's built-in logger implementation.
// NOTE(review): the config is stored but the level methods below write
// unconditionally to stdout — the config fields are not consulted here.
type miniLog struct {
config logConfig
}
// emit writes one log line to stdout prefixed with the given level tag.
// Every level method below delegates here.
func (d miniLog) emit(level, format string, args ...interface{}) {
	fmt.Printf("["+level+"] "+format+"\n", args...)
}

// Debug logs a DEBUG-level message.
func (d miniLog) Debug(format string, args ...interface{}) {
	d.emit("DEBUG", format, args...)
}

// DebugContext logs a DEBUG-level message; the context is ignored.
func (d miniLog) DebugContext(ctx context.Context, format string, args ...interface{}) {
	d.emit("DEBUG", format, args...)
}

// Info logs an INFO-level message.
func (d miniLog) Info(format string, args ...interface{}) {
	d.emit("INFO", format, args...)
}

// InfoContext logs an INFO-level message; the context is ignored.
func (d miniLog) InfoContext(ctx context.Context, format string, args ...interface{}) {
	d.emit("INFO", format, args...)
}

// Warn logs a WARN-level message.
func (d miniLog) Warn(format string, args ...interface{}) {
	d.emit("WARN", format, args...)
}

// WarnContext logs a WARN-level message; the context is ignored.
func (d miniLog) WarnContext(ctx context.Context, format string, args ...interface{}) {
	d.emit("WARN", format, args...)
}

// Error logs an ERROR-level message.
func (d miniLog) Error(format string, args ...interface{}) {
	d.emit("ERROR", format, args...)
}

// ErrorContext logs an ERROR-level message; the context is ignored.
func (d miniLog) ErrorContext(ctx context.Context, format string, args ...interface{}) {
	d.emit("ERROR", format, args...)
}
|
//go:build js && wasm
// +build js,wasm

package gobridge
import (
"syscall/js"
"github.com/pkg/errors"
)
// bridgeRoot is the JavaScript object (window.__gobridge__) on which all Go
// callbacks and values are registered.
var bridgeRoot js.Value

const (
// bridgeJavaScriptName is the global JS property holding the bridge object.
bridgeJavaScriptName = "__gobridge__"
)

// registrationWrapper adapts an error-returning Go function to the js.FuncOf
// signature using a Node-style callback convention: the last JS argument is a
// callback invoked as cb(errMessage, null) on failure or cb(null, result) on
// success. The remaining arguments are forwarded to fn.
func registrationWrapper(fn func(this js.Value, args []js.Value) (interface{}, error)) func(this js.Value, args []js.Value) interface{} {
return func(this js.Value, args []js.Value) interface{} {
cb := args[len(args)-1]

ret, err := fn(this, args[:len(args)-1])

if err != nil {
cb.Invoke(err.Error(), js.Null())
} else {
cb.Invoke(js.Null(), ret)
}

return ret
}
}
// Supported Go callback shapes that can be registered on the bridge.
type intCallback = func(string) (int64, error)
type stringCallback = func(string) (string, error)
type boolCallback = func(string) (bool, error)
type txCallback = func(string, int64) (string, error)

// RegisterCallback registers a Go function under name on the bridge object
// so it can be called from JavaScript. The first JS argument is passed as a
// string; an optional second numeric argument is forwarded to txCallback
// implementations. inputFunc must match one of the callback aliases above,
// otherwise the invocation reports an error through the JS callback.
func RegisterCallback(name string, inputFunc interface{}) {
mycb := func(_ js.Value, jsInputs []js.Value) (interface{}, error){
if len(jsInputs)<1{
return nil, errors.Errorf("Invalid number of parameters. Expected at least 1")
}
args := jsInputs[0].String()
// optional second argument: only used by txCallback
var num int64 = 0
if len(jsInputs)>=2 && jsInputs[1].Type()==js.TypeNumber{
num = int64(jsInputs[1].Int())
}
// dispatch on the registered function's concrete signature
switch callback := inputFunc.(type) {
case intCallback:
return callback(args)
case stringCallback:
return callback(args)
case boolCallback:
return callback(args)
case txCallback:
return callback(args, num)
default:
return "", errors.Errorf("Unexpected error when executing callback")
}
}
bridgeRoot.Set(name, js.FuncOf(registrationWrapper(mycb)))
}
// RegisterValue exposes a static Go value under name on the bridge object
// for access from JavaScript.
func RegisterValue(name string, value interface{}) {
bridgeRoot.Set(name, value)
}

// init captures the global __gobridge__ object; the hosting JavaScript must
// create it before this module is instantiated.
func init() {
global := js.Global()
bridgeRoot = global.Get(bridgeJavaScriptName)
}
|
package gen
// GoBaseType classifies the Go types the generator can emit.
type GoBaseType int

const (
	GoUnknown GoBaseType = iota
	GoBool
	GoInt64
	GoFloat64
	GoString
	GoEmpty
	GoSlice
	GoArray
	GoMap
	GoStruct
)

// ReferenceType reports whether g is a reference kind (slice or map).
// Arrays are value types in Go and are deliberately excluded.
func (g GoBaseType) ReferenceType() bool {
	switch g {
	case GoSlice, GoMap:
		return true
	default:
		return false
	}
}

// ScalarType reports whether g is one of the scalar kinds: bool, int64,
// float64 or string.
func (g GoBaseType) ScalarType() bool {
	switch g {
	case GoBool, GoInt64, GoFloat64, GoString:
		return true
	default:
		return false
	}
}
package promise
import (
"bytes"
"encoding/json"
"errors"
"testing"
"time"
)
// TestPromiseExecutorExecResolve verifies that a promise resolving with a
// value delivers exactly that value through Result().
func TestPromiseExecutorExecResolve(t *testing.T) {
e := StartExecutor(4, 100)
defer e.Stop()

expectedValue := "hello world"
p := e.Exec(func() (interface{}, error) {
return expectedValue, nil
})

v, err := p.Result()
if err != nil {
t.Fatalf("unexpected error %[1]v (%[1]T)", err)
}
if v.(string) != expectedValue {
t.Logf("exp: %s", expectedValue)
t.Logf("got: %s", v.(string))
t.Error("unexpected promise value")
}
}

// TestPromiseExecutorExecReject verifies that a promise rejecting with an
// error delivers exactly that error through Result().
func TestPromiseExecutorExecReject(t *testing.T) {
e := StartExecutor(4, 100)
defer e.Stop()

expectedError := errors.New("hello world")
p := e.Exec(func() (interface{}, error) {
return nil, expectedError
})

_, err := p.Result()
if err == nil {
t.Fatalf("expected error, got nil")
}
if err != expectedError {
t.Logf("exp: %v", expectedError)
t.Logf("got: %v", err)
t.Error("unexpected promise error")
}
}
// TestPromiseExecutorExecOnStopedExecutor verifies stop semantics on a
// single-worker executor with a one-slot buffer: the promise already running
// when Stop is called still resolves normally, while buffered, pending and
// newly submitted promises all reject with ErrExecutorStopped.
func TestPromiseExecutorExecOnStopedExecutor(t *testing.T) {
e := StartExecutor(1, 1)

expectedValue := "hello world"
running := e.Exec(func() (interface{}, error) {
time.Sleep(200 * time.Millisecond)
return expectedValue, nil
})
buffered := e.Exec(func() (interface{}, error) {
return "", nil
})

var pending *Promise
pendingPlanned := make(chan struct{})
go func() {
close(pendingPlanned)
pending = e.Exec(func() (interface{}, error) {
return "", errors.New("unexpected error")
})
_, err := pending.Result()
if err != ErrExecutorStopped {
t.Logf("exp: %[1]T %[1]v", ErrExecutorStopped)
t.Logf("got: %[1]T %[1]v", err)
t.Error("unexpected error of pending promise")
}
}()

// The running promise function will start and block the executor because it
// has concurrency == 1. Then the pending promise function will be placed
// into the executor and wait for execution. And then, before the
// long-running promise is done, we stop the executor.
<-pendingPlanned
e.Stop()
rejected := e.Exec(func() (interface{}, error) {
return "", nil
})

_, err := buffered.Result()
if err != ErrExecutorStopped {
t.Logf("exp: %[1]T %[1]v", ErrExecutorStopped)
t.Logf("got: %[1]T %[1]v", err)
t.Error("unexpected error of buffered promise")
}

_, err = rejected.Result()
if err != ErrExecutorStopped {
t.Logf("exp: %[1]T %[1]v", ErrExecutorStopped)
t.Logf("got: %[1]T %[1]v", err)
t.Error("unexpected error of rejected promise")
}

v, err := running.Result()
if err != nil {
t.Fatalf("unexpected error %[1]v (%[1]T)", err)
}
if v.(string) != expectedValue {
t.Logf("exp: %s", expectedValue)
t.Logf("got: %v", v)
t.Error("unexpected value of long running promise")
}
}
// TestWhenAllOk verifies that WhenAll resolves with the promises' values in
// submission order once every promise has resolved.
// (Also fixes the misspelled local variable "expexted".)
func TestWhenAllOk(t *testing.T) {
	e := StartExecutor(4, 100)
	defer e.Stop()

	expected := []interface{}{
		"hello",
		"world",
	}
	p1 := e.Exec(func() (interface{}, error) {
		time.Sleep(100 * time.Millisecond)
		return "hello", nil
	})
	p2 := e.Exec(func() (interface{}, error) {
		time.Sleep(100 * time.Millisecond)
		return "world", nil
	})

	res, err := WhenAll(e, p1, p2).Result()
	if err != nil {
		t.Fatalf("unexpected error: %[1]v (%[1]T)", err)
	}
	// compare via JSON to sidestep interface{} slice comparison
	s1, _ := json.Marshal(expected)
	s2, _ := json.Marshal(res)
	if !bytes.Equal(s1, s2) {
		t.Logf("exp: %s", s1)
		t.Logf("got: %s", s2)
		t.Errorf("unexpected result")
	}
}
// TestWhenAllErr verifies that WhenAll rejects with the failing promise's
// error when any of its promises fails.
func TestWhenAllErr(t *testing.T) {
e := StartExecutor(4, 100)
defer e.Stop()

expectedError := errors.New("hello world")
p1 := e.Exec(func() (interface{}, error) {
time.Sleep(100 * time.Millisecond)
return "", expectedError
})
p2 := e.Exec(func() (interface{}, error) {
time.Sleep(100 * time.Millisecond)
return "world", nil
})

_, err := WhenAll(e, p1, p2).Result()
if err == nil {
t.Fatalf("expected error, got nil")
}
if expectedError != err {
t.Logf("exp: %v", expectedError)
t.Logf("got: %v", err)
t.Errorf("unexpected error")
}
}

// TestWhenAnyOk verifies that WhenAny resolves with the value of the first
// promise that succeeds, even when another promise fails.
func TestWhenAnyOk(t *testing.T) {
e := StartExecutor(4, 100)
defer e.Stop()

expectedError := errors.New("hello world")
p1 := e.Exec(func() (interface{}, error) {
time.Sleep(100 * time.Millisecond)
return "", expectedError
})
expectedValue := "hello world"
p2 := e.Exec(func() (interface{}, error) {
time.Sleep(100 * time.Millisecond)
return expectedValue, nil
})

res, err := WhenAny(e, p1, p2).Result()
if err != nil {
t.Fatalf("unexpected error: %[1]v (%[1]T)", err)
}
// compare via JSON to sidestep interface{} comparison
s1, _ := json.Marshal(expectedValue)
s2, _ := json.Marshal(res)
if !bytes.Equal(s1, s2) {
t.Logf("exp: %s", s1)
t.Logf("got: %s", s2)
t.Errorf("unexpected when any result")
}
}

// TestWhenAnyTakesLastError verifies that when every promise fails, WhenAny
// rejects with the error of the last promise to fail.
func TestWhenAnyTakesLastError(t *testing.T) {
e := StartExecutor(4, 100)
defer e.Stop()

p1 := e.Exec(func() (interface{}, error) {
time.Sleep(100 * time.Millisecond)
return "", errors.New("unexpected error")
})
expectedError := errors.New("hello world")
p2 := e.Exec(func() (interface{}, error) {
time.Sleep(100 * time.Millisecond)
return "", expectedError
})

_, err := WhenAny(e, p1, p2).Result()
if err == nil {
t.Fatalf("expected error, got nil")
}
if err != expectedError {
t.Logf("exp: %v", expectedError)
t.Logf("got: %v", err)
t.Errorf("unexpected error")
}
}
|
package storage
import "fmt"
// NotFoundError reports that the given storage key has no entry.
type NotFoundError struct {
	Key string
}

// Error implements the error interface.
func (nf *NotFoundError) Error() string {
	return fmt.Sprintf("%s does not exist", nf.Key)
}
|
package main
import "fmt"
// main reads one integer from stdin and prints its high and low nibbles.
func main() {
	var n byte
	fmt.Printf("Бүхэл тоо оруулна уу? ")
	fmt.Scanf("%d", &n)
	hi := (n & 0xF0) >> 4 // upper 4 bits
	lo := n & 0x0F        // lower 4 bits
	fmt.Printf("Дээд 4 бит = %d\n", hi)
	fmt.Printf("Доод 4 бит = %d\n", lo)
}
|
package inc
import (
"encoding/json"
"testing"
)
// Test_NewConfig_1 loads ../conf/config.json via NewConfig, fails the test on
// any load error, and logs the JSON-serialized configuration for inspection.
func Test_NewConfig_1(t *testing.T) {
config, err := NewConfig("../conf/config.json")
if err != nil {
t.Error(err)
return
}
// round-trip through JSON so the loaded config is visible in the test log
json, err := json.Marshal(config)
if err != nil {
t.Error(err)
return
}
t.Log(string(json))
}
|
// Copyright 2020 astaxie
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"github.com/beego/beego/v2/server/web"
apmbeego "github.com/opentracing-contrib/beego"
)
// main wires MainController to "/" and starts the beego web server on
// localhost:8080 with the opentracing middleware installed.
func main() {
web.Router("/", &MainController{})
// Actually, I just use the opentracing-contrib/web as example but I do not check
// whether it is a good middleware
web.RunWithMiddleWares("localhost:8080", apmbeego.Middleware("bee-go-demo"))
// start the server and then request GET http://localhost:8080/
}
// MainController serves the demo root endpoint.
type MainController struct {
web.Controller
}

// Get responds to GET / with a plain "hello world".
// NOTE(review): fmt.Sprintf on a constant string is redundant, but removing
// it would leave the file's fmt import unused — clean up both together.
func (m *MainController) Get() {
m.Ctx.WriteString(fmt.Sprintf("hello world"))
}
|
package main
import (
"os"
"bufio"
"fmt"
"strconv"
"strings"
"github.com/Amertz08/EECS560-go/Lab01/LinkedList"
"io"
"path/filepath"
)
// main loads whitespace-separated integers from the file named on the
// command line into a linked list, then runs an interactive menu loop
// (insert / delete / find / print / exit).
func main() {
	if len(os.Args) == 1 {
		fmt.Println("Please provide an input file")
		os.Exit(1)
	}
	fileName := os.Args[1]
	fullPath, err := filepath.Abs(fileName)
	check(err)

	list := LinkedList.NewLinkedList()

	f, err := os.Open(fullPath)
	check(err)
	reader := bufio.NewReader(f)
	data, err := reader.ReadString('\n')
	// Bug fix: the original tested `err != io.EOF`, which panicked (with a
	// nil value) whenever the line ended with '\n', i.e. on well-formed
	// input. Both nil (delimiter found) and io.EOF (no trailing newline)
	// are success cases here.
	if err != nil && err != io.EOF {
		panic(err)
	}
	f.Close()

	vals := strings.Fields(data)
	for _, val := range vals {
		ival, err := strconv.Atoi(val)
		check(err)
		list.Insert(ival)
	}

	choice := 0
	for choice != 5 {
		choice = usage()
		switch choice {
		case 1:
			value := getIntInput("Insert value: ")
			list.Insert(value)
		case 2:
			value := getIntInput("Delete value: ")
			list.Erase(value)
		case 3:
			value := getIntInput("Find value: ")
			if list.Find(value) {
				fmt.Println("Value found")
			} else {
				// fixed user-facing typo: "Vale not found"
				fmt.Println("Value not found")
			}
		case 4:
			list.Print()
		case 5:
			fmt.Println("Exiting...")
		default:
			fmt.Println("Invalid input")
		}
	}
}
// getInput prints prompt and returns one line read from stdin with all
// newline characters stripped.
func getInput(prompt string) string {
	fmt.Print(prompt)
	line, _ := bufio.NewReader(os.Stdin).ReadString('\n')
	return strings.Replace(line, "\n", "", -1)
}
// getIntInput prompts the user and converts the reply to an int; invalid
// input silently yields 0 (the caller treats 0 as an invalid menu choice).
func getIntInput(prompt string) int {
	n, _ := strconv.Atoi(getInput(prompt))
	return n
}
// usage prints the menu and returns the user's numeric choice.
func usage() int {
	menu := []string{
		"1 - Insert",
		"2 - Delete",
		"3 - Find",
		"4 - Print",
		"5 - Exit",
	}
	for _, line := range menu {
		fmt.Println(line)
	}
	return getIntInput("Select a choice: ")
}
// check panics if e is a non-nil error; otherwise it does nothing.
func check(e error) {
	if e == nil {
		return
	}
	panic(e)
}
|
package lccu_strings
import (
"fmt"
"github.com/satori/go.uuid"
"strings"
)
// RandomUUIDString returns a freshly generated v4 UUID in its canonical
// hyphenated form.
// NOTE(review): fmt.Sprintf("%s", ...) could be uuid.NewV4().String(), but
// that would orphan the fmt import — clean up both together. Also confirm
// which satori/go.uuid version is pinned: newer versions return (UUID, error)
// from NewV4.
func RandomUUIDString() string {
return fmt.Sprintf("%s", uuid.NewV4())
}

// RandomUUIDStringNoLine returns a freshly generated v4 UUID with the
// hyphens removed (32 hex characters).
func RandomUUIDStringNoLine() string {
return strings.ReplaceAll(fmt.Sprintf("%s", uuid.NewV4()), "-", "")
}
|
package main
import (
"fmt"
"math"
)
// Vertex is a point in the 2-D plane.
type Vertex struct {
	X, Y float64
}

// Abs returns the Euclidean distance from the origin to v.
// (Remember: a method is just a function with a receiver argument.)
func (v Vertex) Abs() float64 {
	sum := v.X*v.X + v.Y*v.Y
	return math.Sqrt(sum)
}
func main() {
j := Vertex{3, 4}
fmt.Println(j.Abs())
} |
/*
* Copyright 2019 Dgraph Labs, Inc. and Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"flag"
"fmt"
"log"
"os"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
)
var (
// flagPath is the user-defined location for writing the stats.csv file.
flagPath = flag.String(
"path",
"stats.csv",
"Filepath for benchmark CSV data.",
)
// flagCache determines what libraries to include in the benchmarks.
flagCache = flag.String(
"cache",
"ristretto",
`Libraries to include in the benchmark: either "all" or "ristretto".`,
)
// flagSuite determines what collection of benchmarks to run.
flagSuite = flag.String(
"suite",
"full",
`You can chose from the following options:
"all" - hit ratio and speed performance
"hits" - hit ratio
"speed" - throughput
`,
)
// flagParallel is the goroutine multiplier to use for benchmarking
// performance using a variable number of goroutines.
flagParallel = flag.Int(
"parallel",
1,
"The goroutine multiplier (see runtime.GOMAXPROCS()).",
)
)
// Benchmark describes one benchmark to run: which cache, which workload, and
// how to construct the cache and the bench function.
type Benchmark struct {
// Name is the cache implementation identifier.
Name string
// Label is for denoting variations within implementations.
Label string
// HitsBencher builds a hit-ratio run; SpeedBencher builds a throughput
// run. Exactly one is set per benchmark (see NewBenchmarks).
HitsBencher func(*Benchmark, *LogCollection) func()
SpeedBencher func(*Benchmark, *LogCollection) func(*testing.B)
// Para is the multiple of runtime.GOMAXPROCS(0) to use for this benchmark.
Para int
// Create is the lazily evaluated function for creating new instances of the
// underlying cache.
Create func(hits bool) Cache
}

// Log prints a progress line identifying the benchmark about to run.
func (b *Benchmark) Log() {
log.Printf("running: %s (%s) * %d", b.Name, b.Label, b.Para)
}

// benchSuite pairs a workload label with either a hit-ratio bencher or a
// speed bencher (the unused one is nil).
type benchSuite struct {
label string
benchHits func(*Benchmark, *LogCollection) func()
benchSpeed func(*Benchmark, *LogCollection) func(*testing.B)
}
// NewBenchmarks expands one cache implementation into the list of Benchmark
// runs selected by kind ("hits", "speed", or "all"), each sized to capacity
// capa and parallelism para.
func NewBenchmarks(kind string, para, capa int, cache *benchCache) []*Benchmark {
suite := make([]*benchSuite, 0)
// create the bench suite from the suite param (SUITE flag)
if kind == "hits" || kind == "all" {
suite = append(suite, []*benchSuite{
{"hits-zipf ", HitsZipf, nil},
{"hits-arc-p3 ", HitsARC("p3"), nil},
{"hits-arc-p8 ", HitsARC("p8"), nil},
{"hits-arc-s3 ", HitsARC("s3"), nil},
{"hits-arc-ds1 ", HitsARC("ds1"), nil},
{"hits-arc-oltp ", HitsARC("oltp"), nil},
{"hits-lirs-loop", HitsLIRS("loop"), nil},
}...)
}
if kind == "speed" || kind == "all" {
suite = append(suite, []*benchSuite{
{"get-same ", nil, GetSame},
{"get-zipf ", nil, GetZipf},
{"set-get ", nil, SetGet},
{"set-same ", nil, SetSame},
{"set-zipf ", nil, SetZipf},
{"set-get-zipf ", nil, SetGetZipf},
}...)
}
// create benchmarks from bench suite; each gets a lazy cache constructor
benchmarks := make([]*Benchmark, len(suite))
for i := range benchmarks {
benchmarks[i] = &Benchmark{
Name: cache.name,
Label: suite[i].label,
Para: para,
Create: func(hits bool) Cache { return cache.create(capa, hits) },
}
// exactly one bencher is attached, matching the suite entry
if suite[i].benchHits != nil {
benchmarks[i].HitsBencher = suite[i].benchHits
} else if suite[i].benchSpeed != nil {
benchmarks[i].SpeedBencher = suite[i].benchSpeed
}
}
return benchmarks
}
// benchCache pairs a display name with a constructor for one cache
// implementation (capacity, track-hits) -> Cache.
type benchCache struct {
name string
create func(int, bool) Cache
}

// getBenchCaches returns the cache implementations to benchmark depending on
// the include param (the cache flag) and suite (the suite flag). Ristretto is
// always included; "all" adds the competitor caches, plus the clairvoyant
// "optimal" policy when only hit ratios are measured.
func getBenchCaches(include, suite string) []*benchCache {
caches := []*benchCache{
{"ristretto ", NewBenchRistretto},
}
if include == "ristretto" {
return caches
}
if include == "all" {
if suite == "hits" {
// BenchOptimal is not safe for concurrent access, so it's only
// included if the hit ratio suite is being ran.
caches = append(caches, []*benchCache{
{"optimal ", NewBenchOptimal},
}...)
}
caches = append(caches, []*benchCache{
{"base-mutex ", NewBenchBaseMutex},
{"goburrow ", NewBenchGoburrow},
{"bigcache ", NewBenchBigCache},
{"fastcache ", NewBenchFastCache},
{"freecache ", NewBenchFreeCache},
}...)
}
return caches
}
// init parses command-line flags before main runs.
// NOTE(review): calling flag.Parse in init is generally discouraged (it runs
// before other packages can register flags); moving it to the top of main
// would be safer.
func init() {
flag.Parse()
}
// main generates the benchmark list from the cache/suite flags, runs each
// benchmark (hit-ratio runs directly, speed runs via testing.Benchmark),
// collects the results, and writes them to the CSV file named by -path.
func main() {
var (
caches = getBenchCaches(*flagCache, *flagSuite)
logs = make([]*Log, 0)
benchmarks = make([]*Benchmark, 0)
)
// create benchmark generators for each cache
for _, cache := range caches {
benchmarks = append(benchmarks,
NewBenchmarks(*flagSuite, *flagParallel, capacity, cache)...,
)
}
for _, benchmark := range benchmarks {
// log the current benchmark to keep user updated
benchmark.Log()
// collection of policy logs for hit ratio analysis
coll := NewLogCollection()
var result testing.BenchmarkResult
if benchmark.HitsBencher != nil {
benchmark.HitsBencher(benchmark, coll)()
} else if benchmark.SpeedBencher != nil {
result = testing.Benchmark(benchmark.SpeedBencher(benchmark, coll))
}
// append benchmark result to logs
logs = append(logs, &Log{benchmark, NewResult(result, coll)})
// clear GC after each benchmark to reduce random effects on the data
runtime.GC()
}
// save logs CSV to disk
if err := save(logs); err != nil {
log.Panic(err)
}
}
// save writes all logs to the PATH file in CSV format, the first row being
// the column labels. Any existing file contents are replaced. The emitted
// bytes are identical to the previous hand-rolled segment logic: each record
// is joined with ", " and terminated by "\n".
func save(logs []*Log) error {
	// will hold all log records with the first row being column labels
	records := make([][]string, 0, len(logs)+1)
	records = append(records, Labels())
	for _, l := range logs {
		records = append(records, l.Record())
	}
	// fix: O_TRUNC clears previous contents as part of the open, replacing
	// the separate Truncate call that could fail and leak the open file
	// (Close was deferred only after the Truncate error check).
	file, err := os.OpenFile(*flagPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		return err
	}
	defer file.Close()
	for _, record := range records {
		if _, err := file.WriteString(strings.Join(record, ", ") + "\n"); err != nil {
			return err
		}
	}
	return nil
}
// Labels returns the CSV column headers. The order is significant: it must
// stay in lock-step with the fields produced by Log.Record().
func Labels() []string {
	headers := []string{
		"name ",
		"label ",
		"go",
		" mop/s",
		" ns/op",
		"ac",
		"byt",
		"hits ",
		"misses ",
		" ratio ",
	}
	return headers
}
// Log is the primary unit of the CSV output files: one benchmark definition
// paired with its measured result.
type Log struct {
	Benchmark *Benchmark
	Result    *Result
}
// Record generates a CSV record for this entry. Throughput columns are dashed
// out for hit-ratio benchmarks (labels prefixed "hits") and the hit/miss
// columns are dashed out for speed benchmarks.
func (l *Log) Record() []string {
	goroutines := fmt.Sprintf("%2d", l.Benchmark.Para*l.Result.Procs)
	mOpsPerSec := fmt.Sprintf("%6.2f", l.Result.Ops)
	allocsPerOp := fmt.Sprintf("%02d", l.Result.Allocs)
	bytesPerOp := fmt.Sprintf("%03d", l.Result.Bytes)
	nsPerOp := fmt.Sprintf("%6d", l.Result.NsOp)
	totalHits := fmt.Sprintf("%08d", l.Result.Hits)
	totalMisses := fmt.Sprintf("%08d", l.Result.Misses)
	hitRatio := fmt.Sprintf("%6.2f%%",
		100*(float64(l.Result.Hits)/float64(l.Result.Hits+l.Result.Misses)),
	)
	if l.Benchmark.Label[:4] == "hits" {
		// hit-ratio run: throughput numbers are meaningless
		mOpsPerSec, nsPerOp = "------", "------"
		allocsPerOp, bytesPerOp = "--", "---"
	} else {
		// speed run: hit/miss counters are meaningless
		totalHits, totalMisses = "--------", "--------"
		hitRatio = "-------"
	}
	return []string{
		l.Benchmark.Name,
		l.Benchmark.Label,
		// throughput stats
		goroutines,
		mOpsPerSec,
		nsPerOp,
		allocsPerOp,
		bytesPerOp,
		// hit ratio stats
		totalHits,
		totalMisses,
		hitRatio,
	}
}
// Result is a wrapper for testing.BenchmarkResult that adds fields needed for
// our CSV data.
type Result struct {
	// Ops represents millions of operations per second.
	Ops float64
	// Allocs is the number of allocations per iteration.
	Allocs uint64
	// Bytes is the number of bytes allocated per iteration.
	Bytes uint64
	// Procs is the value of runtime.GOMAXPROCS(0) at the time result was
	// recorded.
	Procs int
	// Hits and Misses are totals summed from the policy logs (only populated
	// by the hit-ratio suite).
	Hits   int64
	Misses int64
	// NsOp is the average wall time per operation in nanoseconds.
	NsOp int64
}
// NewResult extracts the data we're interested in from a BenchmarkResult.
// A zero res (N == 0, the hit-ratio path) yields a Result carrying only the
// hit/miss totals from coll.
func NewResult(res testing.BenchmarkResult, coll *LogCollection) *Result {
	result := &Result{}
	if res.N == 0 {
		// hit-ratio benchmarks never run testing.Benchmark, so res is the
		// zero value; only the policy-log counters matter here.
		result.Hits = coll.Hits()
		result.Misses = coll.Misses()
		return result
	}
	// Pull the MB/s column out of the formatted benchmark string.
	// NOTE(review): this depends on BenchmarkResult.String()'s tab layout and
	// on SetBytes having been called (index [2] / the parse can fail
	// otherwise) — fragile across Go versions; verify.
	memops := strings.Trim(strings.Split(res.String(), "\t")[2], " MB/s")
	opsraw, err := strconv.ParseFloat(memops, 64)
	if err != nil {
		log.Panic(err)
	}
	// tolerate a missing collection for speed-only runs
	if coll == nil {
		coll = &LogCollection{}
	}
	return &Result{
		Ops:    opsraw,
		Allocs: uint64(res.AllocsPerOp()),
		Bytes:  uint64(res.AllocedBytesPerOp()),
		Procs:  runtime.GOMAXPROCS(0),
		Hits:   coll.Hits(),
		Misses: coll.Misses(),
		NsOp:   res.NsPerOp(),
	}
}
// LogCollection aggregates the per-policy hit/miss logs of one benchmark run;
// the embedded mutex makes Append/Hits/Misses safe for concurrent use.
type LogCollection struct {
	sync.Mutex
	Logs []*policyLog
}
// NewLogCollection creates an empty, ready-to-use LogCollection.
func NewLogCollection() *LogCollection {
	coll := &LogCollection{}
	coll.Logs = make([]*policyLog, 0)
	return coll
}
// Append adds plog to the collection; safe for concurrent use.
func (c *LogCollection) Append(plog *policyLog) {
	c.Lock()
	c.Logs = append(c.Logs, plog)
	c.Unlock()
}
// Hits sums the hit counters across all collected policy logs.
func (c *LogCollection) Hits() int64 {
	c.Lock()
	defer c.Unlock()
	total := int64(0)
	for _, plog := range c.Logs {
		total += plog.GetHits()
	}
	return total
}
// Misses sums the miss counters across all collected policy logs.
func (c *LogCollection) Misses() int64 {
	c.Lock()
	defer c.Unlock()
	total := int64(0)
	for _, plog := range c.Logs {
		total += plog.GetMisses()
	}
	return total
}
// policyLog tracks hit/miss/eviction counters for one cache policy run; all
// fields are read and written atomically.
type policyLog struct {
	hits      int64
	misses    int64
	evictions int64
}
// Hit atomically increments the hit counter.
func (p *policyLog) Hit() {
	atomic.AddInt64(&p.hits, 1)
}
// Miss atomically increments the miss counter.
func (p *policyLog) Miss() {
	atomic.AddInt64(&p.misses, 1)
}
// Evict atomically increments the eviction counter.
func (p *policyLog) Evict() {
	atomic.AddInt64(&p.evictions, 1)
}
// GetMisses atomically reads the miss counter.
func (p *policyLog) GetMisses() int64 {
	return atomic.LoadInt64(&p.misses)
}
// GetHits atomically reads the hit counter.
func (p *policyLog) GetHits() int64 {
	return atomic.LoadInt64(&p.hits)
}
// GetEvictions atomically reads the eviction counter.
func (p *policyLog) GetEvictions() int64 {
	return atomic.LoadInt64(&p.evictions)
}
// Ratio returns hits / (hits + misses).
// NOTE(review): yields NaN when no events have been recorded (0/0) — callers
// should guard if that matters.
func (p *policyLog) Ratio() float64 {
	hits := atomic.LoadInt64(&p.hits)
	misses := atomic.LoadInt64(&p.misses)
	return float64(hits) / float64(hits+misses)
}
|
package cmd
import (
"fmt"
"net/http"
"net/http/httputil"
"net/url"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// newServerCmd builds the cobra "serve" command, which constructs the gin
// router from cfg and starts listening.
func newServerCmd(cfg *Config) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "serve",
		Short: "",
		Long:  ``,
	}
	cmd.RunE = func(_ *cobra.Command, _ []string) error {
		return newRouter(cfg).Run()
	}
	return cmd
}
// newRouter builds the gin engine. POST /transformers/:transformer runs the
// named transformer over the incoming JSON request and reverse-proxies the
// transformed request to the URL supplied in the "trumpet_to" query param.
func newRouter(cfg *Config) *gin.Engine {
	const realWebHookQuery = "trumpet_to"
	proxy := httputil.ReverseProxy{
		Director: func(request *http.Request) {
			// already checked (parsed without error in the handler below)
			trumpetTo, _ := url.Parse(request.URL.Query().Get(realWebHookQuery))
			request.Host = trumpetTo.Host
			request.URL = trumpetTo
			request.RequestURI = ""
			request.Header["X-Forwarded-For"] = nil
			// the transformer may have changed the body size; drop the stale
			// length so the transport recomputes it
			request.ContentLength = -1
			delete(request.Header, "Content-Length")
			if logrus.GetLevel() >= logrus.DebugLevel {
				req, err := httputil.DumpRequest(request, true)
				fmt.Printf(
					"\n-------------------- Request --------------------\n%s\nDumpRequestError:%s\n",
					req, err,
				)
			}
		},
		ModifyResponse: func(response *http.Response) error {
			if logrus.GetLevel() >= logrus.DebugLevel {
				resp, err := httputil.DumpResponse(response, true)
				// fix: banner previously said "Request" for the response dump
				fmt.Printf(
					"\n-------------------- Response --------------------\n%s\nDumpResponseError:%s\n",
					resp, err,
				)
			}
			return nil
		},
	}
	r := gin.Default()
	r.POST("/transformers/:transformer", func(c *gin.Context) {
		if c.ContentType() != binding.MIMEJSON {
			c.String(http.StatusBadRequest, "currently we only accept `%s` content", binding.MIMEJSON)
			return
		}
		transformerSlug := c.Param("transformer")
		_, err := url.Parse(c.Query(realWebHookQuery))
		if err != nil {
			c.String(http.StatusBadRequest, err.Error())
			return
		}
		transformer, ok := cfg.GetTransformer(transformerSlug)
		if !ok {
			// fix: report the requested slug; previously this printed the
			// zero-value transformer instead of the name the caller sent
			c.String(http.StatusNotFound, "no such transformer `%s`", transformerSlug)
			return
		}
		req, err := transformer.Exec(c.Request)
		if err != nil {
			c.String(http.StatusInternalServerError, "error when transform data: %s", err)
			return
		}
		proxy.ServeHTTP(c.Writer, req)
	})
	return r
}
|
package server
import (
"context"
"net/http"
"github.com/danielkvist/botio/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/pkg/errors"
"google.golang.org/grpc"
)
// jsonGateway starts an HTTP/JSON gateway on s.httpPort that translates REST
// calls into gRPC requests against the Botio service at s.listener's address.
// It blocks until the HTTP server stops and returns its error.
func (s *server) jsonGateway() error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	mux := runtime.NewServeMux()
	options := []grpc.DialOption{
		grpc.WithInsecure(),
	}
	// fix: Wrap instead of Wrapf — the message contains no format verbs
	if err := proto.RegisterBotioHandlerFromEndpoint(ctx, mux, s.listener.Addr().String(), options); err != nil {
		return errors.Wrap(err, "while registering Botio HTTP handler")
	}
	return http.ListenAndServe(s.httpPort, mux)
}
|
package haymakerengines
import (
"errors"
"fmt"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ecr"
)
// Package-level ECR state, populated once by InitECREngine.
var ecrInstance *ecr.ECR
// repoName is the target repository name (ecr config "repo_name").
var repoName string
// imageTAG is the docker image tag to operate on (docker config "tag").
var imageTAG string
// createRepositoryStub creates an ECR repository with scan-on-push enabled
// and immutable image tags.
func createRepositoryStub(repositoryName *string) (*ecr.CreateRepositoryOutput, error) {
	mutability := ecr.ImageTagMutabilityImmutable
	input := &ecr.CreateRepositoryInput{
		ImageScanningConfiguration: &ecr.ImageScanningConfiguration{
			ScanOnPush: aws.Bool(true),
		},
		ImageTagMutability: &mutability,
		RepositoryName:     repositoryName,
	}
	return ecrInstance.CreateRepository(input)
}
// deleteRepositoryStub force-deletes the named ECR repository, including any
// images it still contains.
func deleteRepositoryStub(repositoryName *string) (*ecr.DeleteRepositoryOutput, error) {
	input := &ecr.DeleteRepositoryInput{
		Force:          aws.Bool(true),
		RepositoryName: repositoryName,
	}
	return ecrInstance.DeleteRepository(input)
}
// describeRepositoriesStub fetches metadata for the named repositories.
func describeRepositoriesStub(repositoryNames []*string) (*ecr.DescribeRepositoriesOutput, error) {
	input := &ecr.DescribeRepositoriesInput{
		RepositoryNames: repositoryNames,
	}
	return ecrInstance.DescribeRepositories(input)
}
// getAuthorizationTokenStub fetches a docker-login authorization token for
// the account's default registry.
func getAuthorizationTokenStub() (*ecr.GetAuthorizationTokenOutput, error) {
	input := &ecr.GetAuthorizationTokenInput{}
	return ecrInstance.GetAuthorizationToken(input)
}
// batchDeleteImageStub deletes the given image identifiers from the named
// repository in a single batch call.
func batchDeleteImageStub(repositoryName *string, imageIDs []*ecr.ImageIdentifier) (*ecr.BatchDeleteImageOutput, error) {
	input := &ecr.BatchDeleteImageInput{
		RepositoryName: repositoryName,
		ImageIds:       imageIDs,
	}
	return ecrInstance.BatchDeleteImage(input)
}
// SpinupContainerRepository creates the configured ECR repository and prints
// its URI on success.
func SpinupContainerRepository() error {
	fmt.Println("Spining Up ECR Repository")
	out, err := createRepositoryStub(&repoName)
	if err != nil {
		return errors.New("|" + "HayMaker->haymakerengines->ecr_engine->SpinupContainerRepository->createRepositoryStub:" + err.Error() + "|")
	}
	fmt.Println(*out.Repository.RepositoryUri)
	return nil
}
// DestroyECRRepository force-deletes the configured ECR repository.
// A "repository not found" error counts as success; any other failure is
// reported.
func DestroyECRRepository() error {
	fmt.Println("Destroying ECR Repository")
	_, deleteRepositoryError := deleteRepositoryStub(&repoName)
	if deleteRepositoryError == nil {
		return nil
	}
	if aerr, ok := deleteRepositoryError.(awserr.Error); ok && aerr.Code() == ecr.ErrCodeRepositoryNotFoundException {
		// already gone — nothing to destroy
		return nil
	}
	// fix: previously a non-awserr error was silently swallowed (the type
	// assertion failed and the function fell through to `return nil`)
	return errors.New("|" + "HayMaker->haymakerengines->ecr_engine->DeleteContainerRepository->deleteRepositoryStub:" + deleteRepositoryError.Error() + "|")
}
// DestroyDockerImageOnECR deletes the configured image tag from the
// configured ECR repository.
func DestroyDockerImageOnECR() error {
	fmt.Println("Destroying Docker Image On ECR")
	ids := []*ecr.ImageIdentifier{{ImageTag: &imageTAG}}
	if _, err := batchDeleteImageStub(&repoName, ids); err != nil {
		return errors.New("|" + "HayMaker->haymakerengines->ecr_engine->DeleteImageFromECR->batchDeleteImageStub:" + err.Error() + "|")
	}
	return nil
}
// DescribeRepositories prints the name and URI of the configured repository.
func DescribeRepositories() error {
	fmt.Println("Getting Information for ECR Repository")
	repoNames := []*string{&repoName}
	result, err := describeRepositoriesStub(repoNames)
	if err != nil {
		return errors.New("|" + "HayMaker->haymakerengines->ecr_engine->DescribeRepositories->describeRepositoriesStub:" + err.Error() + "|")
	}
	fmt.Println("Repositories:")
	for _, repository := range result.Repositories {
		// fix: the fields are *string — %s on a pointer prints its address,
		// not the value; aws.StringValue dereferences (and is nil-safe)
		fmt.Printf("Name:%s URI:%s\n", aws.StringValue(repository.RepositoryName), aws.StringValue(repository.RepositoryUri))
	}
	return nil
}
// GetAuthorizationToken returns a map with the ECR docker "token" and the
// registry "endpoint" (proxy endpoint with the https:// scheme stripped).
func GetAuthorizationToken() (map[string]*string, error) {
	fmt.Println("Obtaining ECR Authorization Token")
	result, err := getAuthorizationTokenStub()
	if err != nil {
		return nil, errors.New("|" + "HayMaker->haymakerengines->ecr_engine->GetAuthorizationToken->getAuthorizationTokenStub:" + err.Error() + "|")
	}
	if len(result.AuthorizationData) == 0 {
		return nil, errors.New("|" + "HayMaker->haymakerengines->ecr_engine->GetAuthorizationToken: no repository found." + "|")
	}
	auth := make(map[string]*string, 2)
	auth["token"] = result.AuthorizationData[0].AuthorizationToken
	// fix: TrimLeft treats "https://" as a character SET and would also strip
	// leading 'h','t','p','s',':','/' characters from the host itself;
	// TrimPrefix removes exactly the scheme. (Also removed the unreachable
	// trailing `return nil, nil`.)
	endpoint := strings.TrimPrefix(*result.AuthorizationData[0].ProxyEndpoint, "https://")
	auth["endpoint"] = &endpoint
	return auth, nil
}
// GetRepositoryURI looks up and returns the URI of the configured repository.
func GetRepositoryURI() (*string, error) {
	repoNames := []*string{&repoName}
	result, err := describeRepositoriesStub(repoNames)
	if err != nil {
		return nil, errors.New("|" + "HayMaker->haymakerengines->ecr_engine->GetRepositoryURI->describeRepositoriesStub:" + err.Error() + "|")
	}
	// fix: early-return style; the original else branch made the trailing
	// `return nil, nil` unreachable dead code
	if len(result.Repositories) == 0 {
		return nil, errors.New("|" + "HayMaker->haymakerengines->ecr_engine->GetRepositoryURI: no repository found.")
	}
	return result.Repositories[0].RepositoryUri, nil
}
// InitECREngine wires up the package state: the shared ECR client plus the
// repo_name / tag values pulled out of the generic config maps.
//
// NOTE(review): the type assertions panic if the configs are not
// map[string]interface{} with string values — consider validating instead.
func InitECREngine(ecrInst *ecr.ECR, ecrConfig interface{}, dockerConfig interface{}) {
	ecrInstance = ecrInst
	repoName = ecrConfig.(map[string]interface{})["repo_name"].(string)
	imageTAG = dockerConfig.(map[string]interface{})["tag"].(string)
}
|
package day10
import (
"sort"
"strconv"
"../utils"
)
// input holds the puzzle input lines, loaded once at package init.
// NOTE(review): the read error is discarded — a missing file silently yields
// empty input.
var input, _ = utils.ReadFile("day10/input.txt")
// ParseLines parses the file input: each line is converted to an int and the
// result is returned sorted ascending. Lines that do not parse (e.g. a
// trailing blank line) are skipped — previously their Atoi error was ignored
// and a bogus 0 was appended, which after sorting became the first element.
func ParseLines(input []string) []int {
	data := make([]int, 0, len(input))
	for _, line := range input {
		v, err := strconv.Atoi(line)
		if err != nil {
			// fix: skip non-numeric lines instead of inserting 0
			continue
		}
		data = append(data, v)
	}
	sort.Ints(data)
	return data
}
// Solve1 returns answer to first problem: the number of 1-jolt gaps times
// the number of 3-jolt gaps across the full adapter chain (wall outlet at 0,
// device 3 jolts above the highest adapter).
func Solve1() int {
	adapters := ParseLines(input)
	ones, threes := 0, 0
	// gap from the outlet (0 jolts) to the first adapter
	switch adapters[0] {
	case 1:
		ones++
	case 3:
		threes++
	}
	// the device sits 3 jolts above the highest adapter
	adapters = append(adapters, adapters[len(adapters)-1]+3)
	for i := 1; i < len(adapters); i++ {
		switch adapters[i] - adapters[i-1] {
		case 1:
			ones++
		case 3:
			threes++
		}
	}
	return ones * threes
}
// numsets counts the distinct adapter arrangements for a sorted slice of
// joltages. Short inputs map to precomputed counts; longer inputs are split
// at the end of the leading run of consecutive values and the counts of the
// two halves are multiplied.
func numsets(data []int) int {
	switch len(data) {
	case 0, 1, 2:
		return 1
	case 3:
		return 2
	case 4:
		return 4
	case 5:
		return 7
	}
	// advance split to the last index of the leading consecutive run
	split := 0
	for split < len(data)-1 && data[split+1] == data[split]+1 {
		split++
	}
	if split == 0 {
		return numsets(data[1:])
	}
	return numsets(data[:split+1]) * numsets(data[split+1:])
}
// Solve2 returns answer to second problem: the total number of valid adapter
// arrangements, with the outlet (0) prepended and the device (max+3)
// appended to the chain.
func Solve2() int {
	chain := append([]int{0}, ParseLines(input)...)
	chain = append(chain, chain[len(chain)-1]+3)
	return numsets(chain)
}
|
package main
import (
"fmt"
"strconv"
)
// Conditional branching / error handling demo.
// main shows basic strconv.Atoi error handling: parsing a non-numeric string
// reports the error and leaves i at its zero value.
func main() {
	s := "A"
	//s := "1000" // numeric alternative: parses cleanly
	i, err := strconv.Atoi(s)
	if err != nil {
		fmt.Println(err)
	}
	fmt.Printf("i = %T\n", i)
}
package model
import "gorm.io/gorm"
// User is the GORM-persisted account model.
type User struct {
	gorm.Model
	Username string `json:"username" validate:"required,min=1,max=30"`
	// NOTE(review): "unique" is not a standard validator tag for scalar
	// strings — uniqueness is usually enforced at the DB layer; verify.
	Email    string `json:"email" validate:"required,email,unique"`
	Password string `json:"password" validate:"required"`
	// fix: GORM tag options use a colon ("default:USER"); the previous
	// "default=USER" is not parsed, so the default was never applied.
	Role string `json:"role" gorm:"default:USER" validate:"oneof=USER ADMIN"`
}
|
package reading
import (
"time"
"github.com/kapmahc/axe/plugins/nut"
)
// Book is a reading-list book record; the tableName tag maps it to the
// reading_books table.
type Book struct {
	tableName struct{} `sql:"reading_books"`
	ID uint
	Author string
	Publisher string
	Title string
	Type string
	Lang string
	// File — presumably the stored file path/name; TODO confirm semantics.
	File string
	Subject string
	Description string
	PublishedAt time.Time
	Cover string
	UpdatedAt time.Time
	CreatedAt time.Time
}
// Note is a user's note attached to a book; the tableName tag maps it to the
// reading_notes table. Type presumably distinguishes note formats — TODO
// confirm against callers.
type Note struct {
	tableName struct{} `sql:"reading_notes"`
	ID uint
	Type string
	Body string
	User nut.User
	Book Book
}
|
package backend_controller
import (
"2021/yunsongcailu/yunsong_server/common"
"github.com/gin-gonic/gin"
"github.com/shirou/gopsutil/v3/cpu"
"github.com/shirou/gopsutil/v3/disk"
"github.com/shirou/gopsutil/v3/host"
"github.com/shirou/gopsutil/v3/mem"
"net"
"os"
"path/filepath"
"time"
)
// ServerInfo is the JSON payload returned by PostServerInfo, describing the
// host: CPU model/cores, memory figures (bytes), hostname/platform, disk
// usage of the server's run directory, its primary IPv4 address, and boot
// time ("2006-01-02 15:04:05" local time).
type ServerInfo struct {
	CpuMode string `json:"cpu_mode"`
	CpuCores int32 `json:"cpu_cores"`
	MemTotal uint64 `json:"mem_total"`
	MemUsed uint64 `json:"mem_used"`
	MemFree uint64 `json:"mem_free"`
	HostName string `json:"host_name"`
	HostPlatform string `json:"host_platform"`
	DiskPath string `json:"disk_path"`
	DiskTotal uint64 `json:"disk_total"`
	DiskFree uint64 `json:"disk_free"`
	DiskUsed uint64 `json:"disk_used"`
	DiskUsedPercent float64 `json:"disk_used_percent"`
	HostIp net.IP `json:"host_ip"`
	StartTime string `json:"start_time"`
}
// PostServerInfo (服务器信息) reports host hardware/OS statistics — CPU model,
// memory, disk usage of the executable's directory, the last non-loopback
// global-unicast IPv4 address, and boot time — via common.Success as JSON.
func PostServerInfo(ctx *gin.Context) {
	var serverInfo ServerInfo
	cpuInfo, _ := cpu.Info()
	memInfo, _ := mem.VirtualMemory()
	hostInfo, _ := host.Info()
	exePath, _ := os.Executable()
	runPath, _ := filepath.EvalSymlinks(filepath.Dir(exePath))
	diskInfo, _ := disk.Usage(runPath)
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return
	}
	for _, addr := range addrs {
		ipAddr, ok := addr.(*net.IPNet)
		if !ok {
			continue
		}
		if ipAddr.IP.IsLoopback() || !ipAddr.IP.IsGlobalUnicast() {
			continue
		}
		serverInfo.HostIp = ipAddr.IP.To4()
	}
	startTimestamp, _ := host.BootTime()
	startTime := time.Unix(int64(startTimestamp), 0)
	serverInfo.StartTime = startTime.Local().Format("2006-01-02 15:04:05")
	// fix: guard the index — cpu.Info can return an empty slice (its error
	// was discarded), which previously panicked on cpuInfo[0]
	if len(cpuInfo) > 0 {
		serverInfo.CpuMode = cpuInfo[0].ModelName
		serverInfo.CpuCores = cpuInfo[0].Cores
	}
	// the gopsutil results may be nil when their (discarded) errors fired
	if memInfo != nil {
		serverInfo.MemTotal = memInfo.Total
		serverInfo.MemFree = memInfo.Free
		serverInfo.MemUsed = memInfo.Used
	}
	if hostInfo != nil {
		serverInfo.HostName = hostInfo.Hostname
		serverInfo.HostPlatform = hostInfo.Platform
	}
	serverInfo.DiskPath = runPath
	if diskInfo != nil {
		serverInfo.DiskTotal = diskInfo.Total
		serverInfo.DiskFree = diskInfo.Free
		serverInfo.DiskUsed = diskInfo.Used
		serverInfo.DiskUsedPercent = diskInfo.UsedPercent
	}
	common.Success(ctx, serverInfo)
}
|
// addBinary adds two binary-digit strings and returns their sum as a binary
// string (LeetCode 67). It walks both inputs right-to-left with a carry and
// appends digits to a byte slice, replacing the original's per-digit string
// prepends (quadratic) and strconv round-trips with O(n) byte arithmetic.
// Behavior matches the original, including addBinary("", "") == "".
func addBinary(a string, b string) string {
	i, j := len(a)-1, len(b)-1
	n := len(a)
	if len(b) > n {
		n = len(b)
	}
	out := make([]byte, 0, n+1) // +1 for a possible final carry digit
	carry := byte(0)
	for i >= 0 || j >= 0 || carry != 0 {
		sum := carry
		if i >= 0 {
			sum += a[i] - '0'
			i--
		}
		if j >= 0 {
			sum += b[j] - '0'
			j--
		}
		out = append(out, '0'+sum%2)
		carry = sum / 2
	}
	// digits were produced least-significant first; reverse in place
	for l, r := 0, len(out)-1; l < r; l, r = l+1, r-1 {
		out[l], out[r] = out[r], out[l]
	}
	return string(out)
}
|
package manager
import (
"encoding/json"
"github.com/gin-gonic/gin"
"github.com/golang/glog"
"io/ioutil"
"net/http"
"regexp"
"strings"
"sub_account_service/order_server_zhengdao/client"
"sub_account_service/order_server_zhengdao/db"
"sub_account_service/order_server_zhengdao/lib"
"sub_account_service/order_server_zhengdao/models"
)
// QueryPofile queries order detail records (查询详情) with paging and optional
// order-id / date-range filters, joining order_saves with bills and returning
// a respQP page as JSON.
// NOTE(review): the name looks like a typo for "QueryProfile"; cli is unused.
func QueryPofile(c *gin.Context, cli *client.Client) {
	var qp struct {
		CurrentPage int `json:"currentPage"`
		PageSize int `json:"pageSize"`
		OrderID int `json:"orderID"`
		StartDate string `json:"startDate"`
		EndDate string `json:"endDate"`
	}
	if err := parseReq(c, "query_profile", &qp); err != nil {
		c.JSON(http.StatusOK, lib.Result.Fail(-1, "查询失败"))
		return
	}
	//glog.Infoln(qp)
	var resp respQP
	var querystr string
	var params []interface{}
	// -1 means "no order-id filter"
	if qp.OrderID != -1 {
		querystr += " AND order_saves.order_id = ?"
		params = append(params, qp.OrderID)
	}
	// date filters cover whole days (start-of-day / end-of-day bounds)
	if qp.StartDate != "" {
		qp.StartDate += " 00:00:00"
		querystr += " AND order_saves.create_time > ?"
		params = append(params, qp.StartDate)
	}
	if qp.EndDate != "" {
		qp.EndDate += " 23:59:59"
		querystr += " AND order_saves.create_time < ?"
		params = append(params, qp.EndDate)
	}
	if db.DbClient.Client == nil {
		c.JSON(http.StatusOK, lib.Result.Fail(-1, "db.DbClient.Client==nil"))
		glog.Errorln("QueryPofile*******************************db == nil")
		return
	}
	// NOTE(review): shadows the imported db package from here on
	db := db.DbClient.Client
	resp.Pagination.PageSize = qp.PageSize
	resp.Pagination.CurrentPage = qp.CurrentPage
	// strip the leading "AND" so the assembled WHERE clause is well-formed
	re := regexp.MustCompile(`(?i:^and)`)
	querystr = re.ReplaceAllString(strings.Trim(querystr, " "), "")
	var Models []ordersvrBill //todo: replace with the proper model
	//mydb := db.Where(querystr, params...).Order("create_time desc").Limit(qp.PageSize).Offset((qp.CurrentPage-1)*qp.PageSize).Find(&Models)
	mydb := db.Table("order_saves").Joins("join bills on order_saves.order_id = bills.order_id").
		Select("order_saves.order_id, order_saves.id, order_saves.create_time, order_saves.company,"+
			"order_saves.branch_shop, order_saves.order_content, order_saves.price, bills.trade_status, bills.transfer_info").
		Where(querystr, params...).Order("create_time desc").Limit(qp.PageSize).Offset((qp.CurrentPage - 1) * qp.PageSize).Find(&Models)
	if mydb.Error != nil {
		glog.Errorln("QueryPofile**********************", mydb.Error)
		c.JSON(http.StatusOK, lib.Result.Fail(-1, "查询失败"))
		return
	}
	// second query: total row count for the pagination block
	var total int
	mydb = db.Table("order_saves").Joins("join bills on order_saves.order_id = bills.order_id").
		Select("order_saves.order_id, order_saves.id, order_saves.create_time, order_saves.company,"+
			"order_saves.branch_shop, order_saves.order_content, order_saves.price, bills.trade_status, bills.transfer_info").
		Where(querystr, params...).Count(&total)
	if mydb.Error != nil {
		c.JSON(http.StatusOK, lib.Result.Fail(-1, "查询失败"))
		return
	}
	resp.Pagination.Total = total
	var Orders []order
	for _, one := range Models {
		var order order
		order.OrderId = one.OrderId
		order.CreateTime = one.CreateTime
		order.Company = one.Company
		order.BranchShop = one.BranchShop
		// order_content is stored as a JSON array of dishes
		oc := make([]models.Dish, 0)
		if err := json.Unmarshal([]byte(strings.Trim(one.OrderContent, " ")), &oc); err != nil {
			// NOTE(review): returns without writing any HTTP response,
			// leaving the client without a reply — consider sending an error
			glog.Errorln("json*************************解码错误", err)
			return
		}
		order.OrderContent = oc
		order.Price = one.Price
		order.ID = one.ID
		order.TradeStatus = one.TradeStatus
		order.TransferInfo = one.TransferInfo
		Orders = append(Orders, order)
	}
	resp.List = Orders
	glog.Infoln("QueryPofile**********************", resp)
	c.JSON(http.StatusOK, resp)
}
// ordersvrBill is the row model for the order_saves x bills join: an
// OrderSave plus the bill's trade status and transfer info.
type ordersvrBill struct {
	models.OrderSave
	TradeStatus int64
	TransferInfo string
}
// order is one response row assembled from an ordersvrBill, with the JSON
// order content decoded into dishes.
type order struct {
	ID uint
	OrderId int64 // unique order id
	CreateTime string // creation time
	Company string // company name
	BranchShop string // branch shop name
	OrderContent []models.Dish
	Price float64 // order price
	TradeStatus int64
	TransferInfo string
}
// respQP is the QueryPofile response: the current page of orders plus paging
// metadata.
type respQP struct {
	List interface{} `json:"list"`
	Pagination pagination `json:"pagination"`
}
// pagination carries the paging state echoed back to the client along with
// the total row count.
type pagination struct {
	CurrentPage int `json:"currentPage"`
	PageSize int `json:"pageSize"`
	Total int `json:"total"`
}
// parseReq reads the request body of c and JSON-decodes it into qb, logging
// progress and errors under the funcName prefix.
func parseReq(c *gin.Context, funcName string, qb interface{}) error {
	glog.Infoln(funcName + "**************************start")
	buf, err := ioutil.ReadAll(c.Request.Body)
	if err != nil {
		glog.Errorln(funcName+"*************************readll 错误", err)
		return err
	}
	if err := json.Unmarshal(buf, qb); err != nil {
		glog.Errorln(funcName+"**************************json 解码错误", err)
		return err
	}
	return nil
}
|
package beatmanage
import (
"github.com/beatwatcher/conf"
"bufio"
"bytes"
"encoding/json"
"fmt"
"github.com/bitly/go-simplejson"
"github.com/ghodss/yaml"
"io/ioutil"
"log"
"net"
"os"
"os/exec"
"strconv"
"strings"
"time"
)
// collectionStatus describes one beat collector process launched by this
// agent: the agent's uuid, the config it was started from, its PID, and an
// "on"/"off" status.
type collectionStatus struct {
	Agentuuid string `json:"agentuuid"`
	Configname string `json:"configname"`
	Pid int `json:"pid"`
	Status string `json:"status"`
	Other string `json:"other"`
}
// CollectionStatusSlice tracks every beat collector this agent has launched
// (collection status). NOTE(review): read and appended without locking from
// the connection handler — not safe if connections are served concurrently.
var CollectionStatusSlice [] collectionStatus
// init is a placeholder for agent bootstrap. The commented-out block below
// documents the intended flow: inspect `rpm -aq` and download/install
// filebeat/metricbeat 6.3.2 when they are missing.
func init() {
	fmt.Println("get in here --------------------------")
	//CollectionStatusSlice = make([]collectionStatus,0)
	//curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.3.2-x86_64.rpm
	//sudo rpm -vi filebeat-6.3.2-x86_64.rpm
	//curl -L -O https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-6.3.2-x86_64.rpm
	//sudo rpm -vi metricbeat-6.3.2-x86_64.rpm
	/**
	var metricbeat = "metricbeat"
	var metricbeatT = "metricbeat-6.3.2"
	var metricbeatV = "metricbeat-6.3.2-x86_64"
	var filebeat = "filebeat"
	var filebeatT = "filebeat-6.3.2"
	var filebeatV = "filebeat-6.3.2-x86_64"
	// check whether download+install is needed; if so:
	InstalledList := rpmInstalledList()
	if strings.Contains(strings.ToLower(InstalledList),strings.ToLower(metricbeatT))==false {
		preDownloadandInstall(metricbeat,metricbeatV);
	}
	if strings.Contains(strings.ToLower(InstalledList),strings.ToLower(filebeatT))==false {
		preDownloadandInstall(filebeat,filebeatV);
	}
	*/
}
// rpmInstalledList returns the output of `rpm -aq` (one installed package
// per line), or "" if the command could not be run.
func rpmInstalledList() string {
	// fix: cmd.Output replaces the manual Start/Wait pair and the unused
	// stderr buffer; behavior (stdout string or "" on failure) is unchanged
	out, err := exec.Command("rpm", "-aq").Output()
	if err != nil {
		fmt.Printf("Error: The command can not be startup: %s\n", err)
		return ""
	}
	return string(out)
}
// getYcmdReader starts `echo y y y ...` and returns a buffered reader over
// its stdout — used to pipe "y" confirmations into interactive commands.
func getYcmdReader() (*bufio.Reader,error) {
	yescmd := exec.Command("echo", "y","y","y","y","y","y","y","y","y","y")
	stdout1, err := yescmd.StdoutPipe() // stdout pipe of yescmd (an io.Reader)
	if err != nil {
		fmt.Printf("Error: Can not obtain the stdout pipe for command: %s", err)
		return nil,err
	}
	if err := yescmd.Start(); err != nil {
		fmt.Printf("Error: The command can not running: %s\n", err)
		return nil,err
	}
	outputBuf1 := bufio.NewReader(stdout1) // buffered to smooth over large output
	return outputBuf1,nil
}
// preDownloadandInstall downloads the versioned beat rpm (beatV, e.g.
// "metricbeat-6.3.2-x86_64") from artifacts.elastic.co and installs it with
// `sudo rpm -vi`, feeding "y" confirmations from getYcmdReader into both
// commands.
func preDownloadandInstall(beat,beatV string){
	beatPKG := beatV+".rpm"
	url := "https://artifacts.elastic.co/downloads/beats/"+beat+"/"+beatPKG
	downloadcmd := exec.Command("curl", "-L","-O",url)
	rpmcmd := exec.Command("sudo","rpm", "-vi",beatPKG)
	fmt.Println("down: "+"curl", "-L","-O",url)
	fmt.Println("rpmcmd: "+"sudo","rpm", "-vi",beatPKG)
	outputBuf1,err := getYcmdReader()
	if err != nil {
		fmt.Printf("Error: getYcmdReader Error: %s", err)
		return
	}
	executeCmd(downloadcmd,outputBuf1)
	executeCmd(rpmcmd,outputBuf1)
}
// executeCmd feeds the buffered "y" stream into cmd's stdin, runs cmd to
// completion, and prints its captured stdout.
func executeCmd(cmd *exec.Cmd,outputBuf1 *bufio.Reader) {
	stdin2, err := cmd.StdinPipe() // stdin pipe on cmd
	if err != nil {
		fmt.Printf("Error: Can not obtain the stdin pipe for command: %s\n", err)
		return ;
	}
	outputBuf1.WriteTo(stdin2) // write the buffered reader's data into cmd's stdin
	var out, stderr bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		fmt.Printf("Error: The command can not be startup: %s\n", err)
		return
	}
	err = stdin2.Close() // NOTE(review): this Close error is silently discarded
	if err := cmd.Wait(); err != nil { // Wait blocks until the command finishes so all output is captured
		fmt.Printf("Error: Can not wait for the command: %s\n", err)
		return
	}
	fmt.Printf("%s\n", out.Bytes()) // print the command's output
}
// Operate is the JSON command envelope exchanged on the control connection.
// Operate selects the action ("start", "stop", "metricbeat", "filebeat",
// "metricbeat_stop", "filebeat_stop", "readfile_test"); Param carries a PID
// or a line count depending on the action; File holds beat config JSON.
type Operate struct {
	Operate string `json:"operate"`
	Param int `json:"param"`
	Id string `json:"id"`
	Timestamp int64 `json:"timestamp"`
	File simplejson.Json `json:"file"`
	Other string `json:"other"`
}
// BeatJson names a beat together with the JSON files used to generate its
// YAML configuration (main config plus modules config).
type BeatJson struct {
	Name string `json:"name"`
	JsonFile string `json:"jsonFile"`
	ModulesJsonFile string `json:"modulesJsonFile"`
}
func DoServerStuff(conn net.Conn) {
remote := conn.RemoteAddr().String()
fmt.Println(remote, " connected!")
//for {
// 1024 是数组的长度并且也是切片的初始长度,可增加。
buf := make([]byte, 5120)
//一定要等到有error或EOF的时候才会返回结果,因此只能等到客户端退出时才会返回结果。 因此不用此方法
//buf,err :=ioutil.ReadAll(conn)
size, err := conn.Read(buf)
if err != nil {
fmt.Println("Read Error:", err.Error());
return
}
fmt.Println("data from client:",string(buf),"size:",size)
var operate Operate
err = json.Unmarshal(buf[:size], &operate)
if err != nil {
fmt.Println("Unmarshal Error:", err.Error());
return
}
fmt.Println("Operate after Unmarshal:", operate)
var operateReturn Operate;
operateReturn.Timestamp =time.Now().Unix()
operateReturn.Operate = "success"
fmt.Println(operate.Operate)
if(operate.Operate=="start"){
fmt.Println("get in")
cmd := exec.Command("ls", "-l")
err = cmd.Run()
if err != nil {
fmt.Printf("Error %v executing command!", err)
os.Exit(1)
}
fmt.Printf("The command is %v", cmd)
}else if(operate.Operate=="stop"){
// 并且停止所有收集器 并停止运行beatwatcher
fmt.Println("get the operate stop")
operateReturn.Timestamp =time.Now().Unix()
buf, err = json.Marshal(operateReturn)
if err != nil {
fmt.Println("Marshal Error:", err.Error());
return
}
for i :=range CollectionStatusSlice{
stopPid := CollectionStatusSlice[i].Pid
if CollectionStatusSlice[i].Status=="on" {
cmd := exec.Command("kill","-9",strconv.Itoa(stopPid))
err:=cmd.Run();
if err != nil {
fmt.Println("kill Pid failed Error:", err.Error());
return
}
}
}
conn.Write(buf)
conn.Close()
os.Exit(1);
}else if(operate.Operate=="metricbeat"){
configName,err:=operate.File.Get("name").String();
if err != nil {
fmt.Println("MarshalJSON Error:", err.Error());
return
}
metricbeatYml:=operate.Operate+"_"+configName+".yml"
metricbeatModulesYml:=operate.Operate+"_"+configName+"Modules.yml"
operate.File.Get("jsonFile").Get("metricbeat.config.modules").Set("path","${path.config}/modules.d"+"/"+metricbeatModulesYml)
jsonFilebuf,err :=operate.File.Get("jsonFile").MarshalJSON()
if err != nil {
fmt.Println("MarshalJSON Error:\n", err.Error());
return
}
modulesJsonFilebuf,err :=operate.File.Get("modulesJsonFile").MarshalJSON()
if err != nil {
fmt.Println("MarshalJSON Error:\n", err.Error());
return
}
ymlfile, err :=yaml.JSONToYAML(jsonFilebuf)
if err != nil {
fmt.Println("JSONToYAML Error:", err.Error());
return
}
ModulesYmlFile, err :=yaml.JSONToYAML(modulesJsonFilebuf)
if err != nil {
fmt.Println("JSONToYAML Error:", err.Error());
return
}
// WriteFile 向文件 filename 中写入数据 data
// 如果文件不存在,则以 perm 权限创建该文件
// 如果文件存在,则先清空文件,然后再写入
ioutil.WriteFile(conf.Config.MetricbeatFolder+"/"+metricbeatYml,ymlfile,os.ModeAppend)
ioutil.WriteFile(conf.Config.MetricbeatFolder+"/modules.d"+"/"+metricbeatModulesYml,ModulesYmlFile,os.ModeAppend)
if err != nil {
fmt.Println("WriteFile Error:", err.Error());
return
}
// 此处启动 待续..
// ./metricbeat-6.5.4-linux-x86_64/metricbeat -e -c ./metricbeat-6.5.4-linux-x86_64/metricbeat_new_Collection.yml
launchcmd := exec.Command("./"+conf.Config.MetricbeatFolder+"/"+conf.Config.Metricbeat, "-c","./"+conf.Config.MetricbeatFolder+"/"+metricbeatYml)
fmt.Println("launch metricbeat cmd ")
fmt.Println( "./"+conf.Config.MetricbeatFolder+"/"+conf.Config.Metricbeat, "-c","./"+conf.Config.MetricbeatFolder+"/"+metricbeatYml)
err =launchcmd.Start()
go func() {
err = launchcmd.Wait()
fmt.Println(err)
}()
if err != nil {
fmt.Println("launchcmd start Error:", err.Error());
return
}
var cStatus collectionStatus;
cStatus.Pid = launchcmd.Process.Pid
cStatus.Status = "on"
cStatus.Configname = configName
cStatus.Agentuuid = conf.Uuid
CollectionStatusSlice = append(CollectionStatusSlice,cStatus)
}else if(operate.Operate=="filebeat"){
//启动
configName,err:=operate.File.Get("name").String();
if err != nil {
fmt.Println("MarshalJSON Error:", err.Error());
return
}
filebeatYml:=operate.Operate+"_"+configName+".yml"
filebeatModulesYml:=operate.Operate+"_"+configName+"Modules.yml"
operate.File.Get("jsonFile").Get("filebeat.config.modules").Set("path","${path.config}/modules.d"+"/"+filebeatModulesYml)
jsonFilebuf,err :=operate.File.Get("jsonFile").MarshalJSON()
if err != nil {
fmt.Println("MarshalJSON Error:\n", err.Error());
return
}
modulesJsonFilebuf,err :=operate.File.Get("modulesJsonFile").MarshalJSON()
if err != nil {
fmt.Println("MarshalJSON Error:\n", err.Error());
return
}
ymlfile, err :=yaml.JSONToYAML(jsonFilebuf)
if err != nil {
fmt.Println("JSONToYAML Error:", err.Error());
return
}
ModulesYmlFile, err :=yaml.JSONToYAML(modulesJsonFilebuf)
if err != nil {
fmt.Println("JSONToYAML Error:", err.Error());
return
}
// WriteFile 向文件 filename 中写入数据 data
// 如果文件不存在,则以 perm 权限创建该文件
// 如果文件存在,则先清空文件,然后再写入
ioutil.WriteFile(conf.Config.FilebeatFolder+"/"+filebeatYml,ymlfile,os.ModeAppend)
ioutil.WriteFile(conf.Config.FilebeatFolder+"/modules.d"+"/"+filebeatModulesYml,ModulesYmlFile,os.ModeAppend)
if err != nil {
fmt.Println("WriteFile Error:", err.Error());
return
}
// 此处启动 待续..
// ./filebeat-6.5.4-linux-x86_64/filebeat -e -c ./filebeat-6.5.4-linux-x86_64/filebeat_new_Collection.yml
launchcmd := exec.Command("./"+conf.Config.FilebeatFolder+"/"+conf.Config.Filebeat, "-c","./"+conf.Config.FilebeatFolder+"/"+filebeatYml)
fmt.Println("launch filebeat cmd ")
fmt.Println( "./"+conf.Config.FilebeatFolder+"/"+conf.Config.Filebeat, "-c","./"+conf.Config.FilebeatFolder+"/"+filebeatYml)
err =launchcmd.Start()
go func() {
err = launchcmd.Wait()
fmt.Println(err)
}()
if err != nil {
fmt.Println("launchcmd start Error:", err.Error());
return
}
var cStatus collectionStatus;
cStatus.Pid = launchcmd.Process.Pid
cStatus.Status = "on"
cStatus.Configname = configName
cStatus.Agentuuid = conf.Uuid
CollectionStatusSlice = append(CollectionStatusSlice,cStatus)
}else if(operate.Operate=="metricbeat_stop"){
//停止
stopPid := operate.Param
for i :=range CollectionStatusSlice{
if CollectionStatusSlice[i].Pid==stopPid {
if CollectionStatusSlice[i].Status=="on" {
cmd := exec.Command("kill","-9",strconv.Itoa(stopPid))
err:=cmd.Run();
if err != nil {
fmt.Println("kill Pid failed Error:", err.Error());
return
}
}
}
}
}else if(operate.Operate=="filebeat_stop"){
//停止
stopPid := operate.Param
for i :=range CollectionStatusSlice{
if CollectionStatusSlice[i].Pid==stopPid {
if CollectionStatusSlice[i].Status=="on" {
cmd := exec.Command("kill","-9",strconv.Itoa(stopPid))
err:=cmd.Run();
if err != nil {
fmt.Println("kill Pid failed Error:", err.Error());
return
}
}
}
}
}else if(operate.Operate=="readfile_test"){
// 读取文件
// operate.Param 为读取行数,operate.Other为文件路径
lines := operate.Param
file, err := os.Open(operate.Other)
if err != nil {
log.Fatal(err)
}
defer file.Close()
reader := bufio.NewReader(file)
var buffer bytes.Buffer
i := 0;
for {
l, isPrefix, err := reader.ReadLine()
buffer.Write(l)
// If we've reached the end of the line, add "\n"
if !isPrefix {
buffer.Write([]byte("\n"))
}
if i>=lines{
break
}
i = i+1
if buffer.Len()> 32*1024{
break
}
// If we're just at the EOF, break
if err != nil {
break
}
}
operateReturn.Other = buffer.String()
}
operateReturn.Timestamp =time.Now().Unix()
buf, err = json.Marshal(operateReturn)
if err != nil {
fmt.Println("Marshal Error:", err.Error());
return
}
conn.Write(buf)
conn.Close()
//break
//}
}
/**
* 判断文件是否存在 存在返回 true 不存在返回false
*/
func checkFileIsExist(filename string) (bool) {
var exist = true;
if _, err := os.Stat(filename); os.IsNotExist(err) {
exist = false;
}
return exist;
}
func updateCollectionStatus(status *collectionStatus){
cmd := exec.Command("cat","/proc/"+strconv.Itoa(status.Pid)+"/cmdline")
var out bytes.Buffer
cmd.Stdout = &out
err:=cmd.Run()
if err != nil {
fmt.Printf("Error: execute cmd "+"cat","/proc/"+strconv.Itoa(status.Pid)+"/cmdline "+":\n %s", err)
status.Status = "off"
return
}
if strings.Contains(out.String(),status.Configname){
status.Status = "on"
}else {
// 说明该pid虽然存在,但是不是之前启动的pid
status.Status = "off"
}
} |
package main
import (
"io"
"log"
"net"
"fmt"
"os"
)
func connectTwoConn(conn1 net.Conn,conn2 net.Conn){
if conn1==nil || conn2==nil{return }
go func() {
io.Copy(conn1,conn2)
}()
go func() {
io.Copy(conn2,conn1)
}()
}
// server opens a TCP listener on 0.0.0.0:<portStr>.
// Failure to bind is fatal.
func server(portStr string) net.Listener {
	addr := fmt.Sprintf("0.0.0.0:%s", portStr)
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatalf("Listen failed: %v", err)
	}
	return listener
}
// listen accepts connections on the listener forever and forwards each
// accepted connection on connChan. An Accept error terminates the process.
func listen(listener net.Listener, connChan chan net.Conn) {
	for {
		conn, err := listener.Accept()
		if err != nil {
			log.Fatalf("ERROR: failed to accept listener: %v", err)
		}
		log.Printf("Accepted connection %v\n", conn)
		// Hand the connection to the coordinator; blocks until it is consumed.
		connChan <- conn
	}
}
// main runs a simple TCP rendezvous proxy: it listens on two ports and
// cross-connects the most recent connection accepted on each. A newer
// connection on a port replaces (and closes) the previous one.
func main() {
	if len(os.Args) != 3 {
		log.Fatalf("Usage %s server1:port server2:port\n", os.Args[0])
		return
	}

	s1ConnChan := make(chan net.Conn)
	s2ConnChan := make(chan net.Conn)
	var s1Conn, s2Conn net.Conn

	listener1 := server(os.Args[1])
	listener2 := server(os.Args[2])
	go listen(listener1, s1ConnChan)
	go listen(listener2, s2ConnChan)

	for {
		select {
		case conn := <-s1ConnChan:
			// Replace the previous side-1 connection, if any.
			if s1Conn != nil {
				s1Conn.Close()
			}
			s1Conn = conn
			connectTwoConn(s1Conn, s2Conn)
		case conn := <-s2ConnChan:
			// Replace the previous side-2 connection, if any.
			if s2Conn != nil {
				s2Conn.Close()
			}
			s2Conn = conn
			connectTwoConn(s2Conn, s1Conn)
		}
	}
}
|
////////////////////////////////////////////////////////////////////////////////
// //
// Copyright 2019 Dell, Inc. //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// //
////////////////////////////////////////////////////////////////////////////////
package transformer
import (
"encoding/json"
"github.com/Azure/sonic-mgmt-common/translib/tlerr"
"github.com/Azure/sonic-mgmt-common/translib/db"
"github.com/golang/glog"
"regexp"
)
// init registers the show-techsupport RPC callback with the transformer
// infrastructure under the name "rpc_showtech_cb".
func init() {
	XlateFuncBind("rpc_showtech_cb", rpc_showtech_cb)
}
// rpc_showtech_cb handles the sonic-show-techsupport RPC: it validates the
// optional "date" cutoff, runs the host "showtech.info" query, and returns a
// JSON payload with a status and the generated dump filename.
var rpc_showtech_cb RpcCallpoint = func(body []byte, dbs [db.MaxDB]*db.DB) ([]byte, error) {
	var err error
	var matched bool
	var output string

	var operand struct {
		Input struct {
			Date string `json:"date"`
		} `json:"sonic-show-techsupport:input"`
	}

	err = json.Unmarshal(body, &operand)
	if err != nil {
		// Fix: "%Error:" is an invalid printf verb; use a plain prefix.
		glog.Errorf("Error: Failed to parse rpc input; err=%v", err)
		return nil, tlerr.InvalidArgs("Invalid rpc input")
	}

	if operand.Input.Date == "" {
		// No cutoff supplied; nothing to validate.
		matched = true
	} else {
		// Validate an RFC3339-style datetime, e.g. 2019-01-02T03:04:05Z.
		// Fix: the original discarded this error and then tested a stale err.
		matched, err = regexp.MatchString(`\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?`+
			`(Z|[\+\-]\d{2}:\d{2})`, operand.Input.Date)
		if err != nil {
			glog.Errorf("Error: Failed to match regex pattern for parsed rpc input; err=%v", err)
		}
	}

	var showtech struct {
		Output struct {
			Status   string `json:"output-status"`
			Filename string `json:"output-filename"`
		} `json:"sonic-show-techsupport:output"`
	}

	if !matched {
		showtech.Output.Status = "Invalid input: Incorrect DateTime format"
		showtech.Output.Filename = ""
		result, _ := json.Marshal(&showtech)
		return result, nil
	}

	host_output := HostQuery("showtech.info", operand.Input.Date)
	if host_output.Err != nil {
		glog.Errorf("Error: Showtech host Query failed: err=%v", host_output.Err)
		glog.Flush()
		showtech.Output.Status = host_output.Err.Error()
		showtech.Output.Filename = ""
		result, _ := json.Marshal(&showtech)
		return result, nil
	}

	output, _ = host_output.Body[1].(string)
	// The host reply must contain the generated dump path (/var/...dump...gz).
	// Fix: the original tested a stale err here instead of the match result,
	// so a missing filename was silently reported as success.
	matched, err = regexp.MatchString(`\/var\/.*dump.*\.gz`, output)
	if err != nil || !matched {
		glog.Errorf("Error: Failed to find a filename in rpc output: %v", output)
		showtech.Output.Status = output
		showtech.Output.Filename = ""
		result, _ := json.Marshal(&showtech)
		return result, nil
	}

	showtech.Output.Status = "Success"
	showtech.Output.Filename = output
	result, _ := json.Marshal(&showtech)
	return result, nil
}
|
package integration_test
import (
"github.com/cloudfoundry/libbuildpack/cutlass"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Integration coverage for the staticfile buildpack's SSI (server-side
// includes) support: the fixture named by app_name is pushed before each
// spec and destroyed afterwards.
var _ = Describe("deploy a staticfile app", func() {
	var app *cutlass.App
	var app_name string

	// Tear down the pushed app after every spec.
	AfterEach(func() {
		if app != nil {
			app.Destroy()
		}
		app = nil
		app_name = ""
	})

	// Push the fixture chosen by the spec's BeforeEach.
	JustBeforeEach(func() {
		Expect(app_name).ToNot(BeEmpty())
		app = cutlass.New(Fixtures(app_name))
		PushAppAndConfirm(app)
	})

	Context("ssi is toggled on", func() {
		BeforeEach(func() { app_name = "ssi_enabled" })
		It("", func() {
			// The include directive must be resolved server-side.
			body, err := app.GetBody("/")
			Expect(err).To(BeNil())
			Expect(body).To(ContainSubstring("I feel included!"))
			Expect(body).ToNot(ContainSubstring("<!--# include file=\"ssi_body.html\" -->"))
		})
	})

	Context("ssi is toggled off", func() {
		BeforeEach(func() { app_name = "ssi_disabled" })
		It("", func() {
			// The raw include directive must be served untouched.
			body, err := app.GetBody("/")
			Expect(err).To(BeNil())
			Expect(body).ToNot(ContainSubstring("I feel included!"))
			Expect(body).To(ContainSubstring("<!--# include file=\"ssi_body.html\" -->"))
		})
	})
})
|
package main
import (
"log"
"net/http"
"github.com/doniacld/outdoorsight/internal/routers"
)
const (
	// asciiOutdoorsight is the ASCII-art banner printed at startup.
	asciiOutdoorsight = " ____ __ __ _____ __ __ \n / __ \\__ __/ /____/ /__ ___ ____/ __(_)__ _/ / / /_\n/ /_/ / // / __/ _ / _ \\/ _ \\/ __/\\ \\/ / _ `/ _ \\/ __/\n\\____/\\_,_/\\__/\\_,_/\\___/\\___/_/ /___/_/\\_, /_//_/\\__/ \n /___/ "
)
// main prints the startup banner and serves the outdoorsight HTTP API on
// port 8080; ListenAndServe blocks and any server error is fatal.
func main() {
	log.Print(asciiOutdoorsight)
	router := routers.NewRouter()
	log.Fatal(http.ListenAndServe(":8080", router))
}
|
package tumblr
// Post is a tumblr post as returned by the tumblr API. Common metadata is
// always populated; the trailing sections are only filled for the matching
// post Type ("text", "photo", "quote", ...).
type Post struct {
	BlogName  string   `json:"blog_name"`
	ID        int64    `json:"id"`
	PostURL   string   `json:"post_url"`
	Slug      string   `json:"slug"`
	Type      string   `json:"type"`
	Timestamp int64    `json:"timestamp"`
	Date      string   `json:"date"`
	Format    string   `json:"format"`
	ReblogKey string   `json:"reblog_key"`
	Tags      []string `json:"tags"`
	//Bookmarklet bool `json:"bookmarklet"`
	//Mobile bool `json:""`
	SourceURL      string `json:"source_url"`
	SourceTitle    string `json:"source_title"`
	Liked          bool   `json:"liked"`
	Followed       bool   `json:"followed"`
	NoteCount      int    `json:"note_count"`
	Caption        string `json:"caption"`
	ImagePermalink string `json:"image_permalink"`
	LinkUrl        string `json:"link_url"`
	// Fields specific to text posts.
	Title string `json:"title"`
	Body  string `json:"body"`
	// Fields specific to photo posts.
	Photos []Photo `json:"photos"`
	// Fields specific to quote posts.
	Text   string `json:"text"`
	Source string `json:"source"`
}
// Photo is a single photo of a photo post, with its caption and the
// alternative pre-scaled sizes offered by tumblr.
type Photo struct {
	Caption  string    `json:"caption"`
	AltSizes []AltSize `json:"alt_sizes"`
}
// AltSize is photo's alt_sizez struct
type AltSize struct {
Width int `json:""`
Height int `json:""`
URL string `json:""`
}
|
package polymorphism
// process makes the supplied duck quack. Any Iduck implementation may be
// passed; the concrete Quack behavior is chosen at runtime through the
// interface (polymorphism demo).
func process(iduck Iduck) {
	iduck.Quack()
}
|
package main
import (
"context"
kafka "github.com/segmentio/kafka-go"
)
/*
topic and broker addresses
assuming the below brokers are already configured and the topic is created
if you need more info on how to run these check this blog
https://www.sohamkamani.com/blog/2017/11/22/how-to-install-and-run-kafka/
*/
const (
	// topic is the Kafka topic the test messages are published to.
	topic = "my-kafka-topic"
	// broker1..broker3 are the addresses of the local three-node cluster.
	broker1 = "localhost:9092"
	broker2 = "localhost:9093"
	broker3 = "localhost:9094"
)
// getMessages returns the fixed set of test messages to publish to kafka.
func getMessages() []string {
	messages := []string{
		"First test message!",
		"Second test message!",
		"Third test message!",
		"Fourth test message!",
	}
	return messages
}
// produce messages to kafka
func produce(ctx context.Context) error{
// creating a new writer
// the topic can be defined on this level as well
k := &kafka.Writer{
Addr: kafka.TCP(broker1, broker2, broker3),
Balancer: &kafka.LeastBytes{}, // Other types implementing Balancer interface could be used
}
var kms []kafka.Message
for _, m := range getMessages(){
km := kafka.Message{
Topic: topic, // This can be defined on writer level instead
Value: []byte(m),
}
kms = append(kms, km)
}
return k.WriteMessages(ctx, kms...)
} |
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
)
// This test sets globalMaxConn to 1 and starts 6 connections in
// parallel on a server with the rate limit handler configured. This
// should allow one request to execute at a time, and at most 4 to
// wait to execute and the 6th request should get a 429 status code
// error.
func TestRateLimitHandler(t *testing.T) {
	// save the global Max connections
	saveGlobalMaxConn := globalMaxConn
	globalMaxConn = 1
	// Each request holds its slot for 100ms so the others queue up.
	testHandler := func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(100 * time.Millisecond)
		fmt.Fprintln(w, "Hello client!")
	}
	rlh := setRateLimitHandler(http.HandlerFunc(testHandler))
	ts := httptest.NewServer(rlh)
	respCh := make(chan int)
	startTime := time.Now()
	// Fire 6 concurrent requests; each reports its status code on respCh.
	for i := 0; i < 6; i++ {
		go func(ch chan<- int) {
			resp, err := http.Get(ts.URL)
			if err != nil {
				t.Errorf(
					"Got error requesting test server - %v\n",
					err,
				)
			}
			respCh <- resp.StatusCode
		}(respCh)
	}
	// Collect the 6 results: exactly one 429, the rest must be 200.
	tooManyReqErrCount := 0
	for i := 0; i < 6; i++ {
		code := <-respCh
		if code == 429 {
			tooManyReqErrCount++
		} else if code != 200 {
			t.Errorf("Got non-200 resp code - %d\n", code)
		}
	}
	duration := time.Since(startTime)
	if duration < time.Duration(500*time.Millisecond) {
		// as globalMaxConn is 1, only 1 request will execute
		// at a time, and the five allowed requested will take
		// at least 500 ms.
		t.Errorf("Expected all requests to take at least 500ms, but it was done in %v\n",
			duration)
	}
	if tooManyReqErrCount != 1 {
		t.Errorf("Expected to get 1 error, but got %d",
			tooManyReqErrCount)
	}
	ts.Close()
	// restore the global Max connections
	globalMaxConn = saveGlobalMaxConn
}
|
package models
import (
"github.com/astaxie/beego/logs"
"math"
"strconv"
"strings"
)
//TemplateGet returns the templates whose name contains the templates
//substring (all templates when it is empty), paginated by page and limit.
//It returns the requested page slice and the total count of matches.
func TemplateGet(page, limit, templates string) ([]Template, int64, error) {
	par := []string{"host", "name", "templateid"}
	hostspar := []string{"host", "name", "hostid"}
	rep, err := API.Call("template.get", Params{"output": par,
		"selectApplications": "count", "selectItems": "count",
		"selectTriggers": "count", "selectGraphs": "count",
		"selectDiscoveries": "count", "selectScreens": "count",
		"selectHosts": hostspar})
	if err != nil {
		logs.Error(err)
		return []Template{}, 0, err
	}
	hba, err := json.Marshal(rep.Result)
	if err != nil {
		logs.Error(err)
		return []Template{}, 0, err
	}
	var hb []Template
	err = json.Unmarshal(hba, &hb)
	if err != nil {
		logs.Error(err)
		return []Template{}, 0, err
	}
	// Filter by name substring; an empty filter matches everything.
	// (Fix: the original duplicated this copy loop in both branches.)
	var dt []Template
	for _, v := range hb {
		if templates != "" && !strings.Contains(v.Name, templates) {
			continue
		}
		dt = append(dt, Template{
			Host:         v.Host,
			Templateid:   v.Templateid,
			Name:         v.Name,
			Hosts:        v.Hosts,
			Applications: v.Applications,
			Triggers:     v.Triggers,
			Items:        v.Items,
			Graphs:       v.Graphs,
			Screens:      v.Screens,
			Discoveries:  v.Discoveries,
		})
	}
	// Parse pagination parameters, defaulting to page 1 / 10 per page.
	// (Fix: a non-positive limit previously caused a divide-by-zero Ceil.)
	IntPage, perr := strconv.Atoi(page)
	if perr != nil {
		IntPage = 1
	}
	IntLimit, lerr := strconv.Atoi(limit)
	if lerr != nil || IntLimit <= 0 {
		IntLimit = 10
	}
	//如果dt为空直接返回 (nothing matched: return the empty result)
	// (Fix: success paths now return a nil error instead of a stale
	// strconv.Atoi error.)
	if len(dt) == 0 {
		return dt, 0, nil
	}
	//分页 pagination
	nums := len(dt)
	totalpages := int(math.Ceil(float64(nums) / float64(IntLimit)))
	// Clamp the requested page into [1, totalpages].
	if IntPage >= totalpages {
		IntPage = totalpages
	}
	if IntPage <= 0 {
		IntPage = 1
	}
	// Half-open slice bounds for the requested page.
	begin := (IntPage - 1) * IntLimit
	end := IntPage * IntLimit
	if end > nums {
		end = nums
	}
	return dt[begin:end], int64(nums), nil
}
//TemplateAllGet returns every template with per-template entity counts
//(applications, items, triggers, graphs, discoveries, screens) and hosts,
//together with the total number of templates.
func TemplateAllGet() ([]Template, int64, error) {
	output := []string{"host", "name", "templateid"}
	hostsOutput := []string{"host", "name", "hostid"}
	params := Params{
		"output":             output,
		"selectApplications": "count",
		"selectItems":        "count",
		"selectTriggers":     "count",
		"selectGraphs":       "count",
		"selectDiscoveries":  "count",
		"selectScreens":      "count",
		"selectHosts":        hostsOutput,
	}
	rep, err := API.Call("template.get", params)
	if err != nil {
		return []Template{}, 0, err
	}
	// Round-trip the generic result through JSON into typed templates.
	raw, err := json.Marshal(rep.Result)
	if err != nil {
		return []Template{}, 0, err
	}
	var templates []Template
	if err = json.Unmarshal(raw, &templates); err != nil {
		return []Template{}, 0, err
	}
	return templates, int64(len(templates)), nil
}
|
package datatype
import (
"database/sql"
"database/sql/driver"
"errors"
"server/libs/log"
)
var (
	// objects maps registered entity type names to their factory functions
	// (populated via Register, consumed by Create).
	objects = make(map[string]func() Entity)

	// Sentinel errors returned by record, entity and container operations.
	ErrRowError = errors.New("row index out of range")
	ErrColError = errors.New("col index out of range")
	ErrTypeMismatch = errors.New("val type mismatch")
	ErrColTypeError = errors.New("column type error")
	ErrPropertyNotFound = errors.New("property not found")
	ErrSqlRowError = errors.New("sql query not found")
	ErrSqlUpdateError = errors.New("update id not found")
	ErrContainerFull = errors.New("container is full")
	ErrContainerIndexHasChild = errors.New("container index not empty")
	ErrContainerIndexOutOfRange = errors.New("container index out of range")
	ErrContainerNotInit = errors.New("container not init")
	ErrContainerCapacity = errors.New("capacity illegal")
	ErrChildObjectNotFound = errors.New("child obj not found")
	ErrCopyObjError = errors.New("type not equal")
	ErrExtraDataError = errors.New("extra data not found")
)
// ExecQueryer abstracts a SQL execution backend: statement execution,
// querying, and access to the underlying *sql.DB.
type ExecQueryer interface {
	Exec(query string, args ...interface{}) (result driver.Result, err error)
	Query(query string, args ...interface{}) (row *sql.Rows, err error)
	GetDB() *sql.DB
}

// DBSaveLoader persists an entity to and restores it from the database,
// optionally carrying extra columns (extfields/extplacehold/extobjs).
type DBSaveLoader interface {
	Base() string
	Update(eq ExecQueryer, dbId uint64, extfields string, extplacehold string, extobjs ...interface{}) error
	Insert(eq ExecQueryer, dbId uint64, extfields string, extplacehold string, extobjs ...interface{}) error
	Load(eq ExecQueryer, dbId uint64, extfield string, extobjs ...interface{}) error
	Marshal() (map[string]interface{}, error)
	Unmarshal(data map[string]interface{}) error
}
// Record is a tabular data container attached to an entity: a bounded grid
// of typed columns whose mutations can be observed through a TableMonitor.
type Record interface {
	// table name
	Name() string
	// maximum number of rows
	Caps() int
	// number of columns
	ColCount() int
	// current number of rows
	RowCount() int
	// column types (type ids and type names)
	ColTypes() ([]int, []string)
	// dirty flag
	IsDirty() bool
	// clear the dirty flag
	ClearDirty()
	// whether the table is persisted
	IsSave() bool
	// whether the table is client-visible
	IsVisible() bool
	// set a single cell value
	Set(row, col int, val interface{}) error
	// get a single cell value
	Get(row, col int) (val interface{}, err error)
	// set a whole row from individual values
	SetRow(row int, args ...interface{}) error
	// set a whole row from serialized row data
	SetRowByBytes(row int, rowdata []byte) error
	// set a whole row from a typed row value
	SetRowInterface(row int, rowvalue interface{}) error
	// fetch a row as a typed row value
	FindRowInterface(row int) (rowvalue interface{}, err error)
	// insert a row of values at position row (-1 appends at the end)
	Add(row int, args ...interface{}) int
	// insert a serialized row at position row (-1 appends at the end)
	AddByBytes(row int, rowdata []byte) int
	// insert an empty row
	AddRow(row int) int
	// delete a row
	Del(row int)
	// remove all rows
	Clear()
	// attach / fetch the table monitor
	SetMonitor(s TableMonitor)
	Monitor() TableMonitor
	// serialize the whole table
	Serial() ([]byte, error)
	// serialize a single row
	SerialRow(row int) ([]byte, error)
}
// TableMonitor observes mutations of an entity's Record tables.
type TableMonitor interface {
	RecAppend(self Entity, rec Record, row int)
	RecDelete(self Entity, rec Record, row int)
	RecClear(self Entity, rec Record)
	RecModify(self Entity, rec Record, row, col int)
	RecSetRow(self Entity, rec Record, row int)
}

// PropUpdater is the property-update callback, mainly for client-visible
// properties; it is used to synchronize property values to the client.
type PropUpdater interface {
	Update(self Entity, index int16, value interface{})
}

// PropChanger is the property-change callback interface hooked in by the
// logic layer.
type PropChanger interface {
	OnPropChange(object Entity, prop string, value interface{})
}

// EntityInfo is the serialized form of an entity, including its children.
type EntityInfo struct {
	Type   string
	Caps   int32
	DbId   uint64
	ObjId  ObjectID
	Index  int
	Data   []byte
	Childs []*EntityInfo
}
// Entity is the core game-object abstraction: an identifiable, optionally
// persisted object with typed properties, child containment, record tables
// and client synchronization hooks.
type Entity interface {
	// unique identifier (the mailbox)
	UID() uint64
	SetUID(v uint64)
	// whether the entity lives in the base server
	SetInBase(v bool)
	IsInBase() bool
	// whether the entity is currently in a scene
	SetInScene(v bool)
	IsInScene() bool
	// property synchronization module
	SetPropUpdate(sync PropUpdater)
	PropUpdate() PropUpdater
	// property-change callback hook
	SetPropHook(hooker PropChanger)
	// property flags (internal use)
	PropFlag(idx int) bool
	SetPropFlag(idx int, flag bool)
	// critical properties (callback flags)
	IsCritical(idx int) bool
	SetCritical(prop string)
	ClearCritical(prop string)
	// loading flag
	SetLoading(loading bool)
	IsLoading() bool
	// quitting flag
	SetQuiting()
	IsQuiting() bool
	// configuration id
	Config() string
	SetConfig(config string)
	ConfigHash() int32
	// save flag
	SetSaveFlag()
	ClearSaveFlag()
	// whether the entity needs saving
	NeedSave() bool
	// root object
	Root() Entity
	// set the parent object
	SetParent(p Entity)
	// get the parent object
	Parent() Entity
	// deleted flag
	SetDeleted(d bool)
	IsDeleted() bool
	// set the object id
	SetObjId(id ObjectID)
	// get the object id
	ObjectId() ObjectID
	// set the name hash
	SetNameHash(v int32)
	// whether this entity's config id equals the given one
	ConfigIdEqual(id string) bool
	NameHash() int32
	// whether this entity's name equals the given one
	NameEqual(name string) bool
	// set capacity (-1 means unlimited)
	SetCapacity(capacity int32, initcap int32)
	// change capacity
	ChangeCapacity(capacity int32) error
	// capacity
	Caps() int32
	// actual capacity
	RealCaps() int32
	// number of children
	ChildCount() int
	// all children
	AllChilds() []Entity
	// index of this entity within its parent
	ChildIndex() int
	// set the index (set by the engine; do not set manually)
	SetIndex(idx int)
	// remove all children
	ClearChilds()
	// add a child at idx
	AddChild(idx int, e Entity) (index int, err error)
	// remove a child
	RemoveChild(e Entity) error
	// get a child by index
	GetChild(idx int) Entity
	// find a child by config id
	FindChildByConfigId(id string) Entity
	// find the first child with the given config id
	FindFirstChildByConfigId(id string) (int, Entity)
	// find the next child with the given config id, starting from start
	NextChildByConfigId(start int, id string) (int, Entity)
	// find a child by name
	FindChildByName(name string) Entity
	// find the first child with the given name
	FindFirstChildByName(name string) (int, Entity)
	// find the next child with the given name, starting from start
	NextChildByName(start int, name string) (int, Entity)
	// swap the positions of two children
	SwapChild(src int, dest int) error
	// set extra data
	SetExtraData(key string, value interface{})
	// get extra data
	FindExtraData(key string) interface{}
	// all extra data
	ExtraDatas() map[string]interface{}
	// remove extra data by key
	RemoveExtraData(key string)
	// remove all extra data
	ClearExtraData()
	// object type enum
	ObjType() int
	// object type name
	ObjTypeName() string
	// base type (currently unused)
	Base() Entity
	// database id
	DBId() uint64
	// set the database id (do NOT set manually)
	SetDBId(id uint64)
	// persistence flag
	IsSave() bool
	SetSave(s bool)
	// all property names
	Propertys() []string
	// all client-visible property names
	VisiblePropertys(typ int) []string
	// property type information
	PropertyType(p string) (int, string, error)
	// property index
	PropertyIndex(p string) (int, error)
	// increment a property
	Inc(p string, v interface{}) error
	// set a property (generic interface)
	Set(p string, v interface{}) error
	// set a property by index
	SetByIndex(index int16, v interface{}) error
	// get a property without error handling (use only when it surely exists)
	MustGet(p string) interface{}
	// get a property by name
	Get(p string) (val interface{}, err error)
	// whether the property is visible to others (synced to other clients)
	PropertyIsPublic(p string) bool
	// whether the property is visible to self (synced to own client)
	PropertyIsPrivate(p string) bool
	// whether the property is persisted
	PropertyIsSave(p string) bool
	// all dirty data (for saving)
	Dirtys() map[string]interface{}
	// clear dirty flags
	ClearDirty()
	// all modified properties (for syncing)
	Modifys() map[string]interface{}
	// clear all modified flags
	ClearModify()
	// find a record (table) by name
	FindRec(rec string) Record
	// names of all records
	RecordNames() []string
	// reset all data of the object
	Reset()
	// copy data from another entity
	Copy(other Entity) error
	// database synchronization
	SyncToDb()
	// config id of a saved object
	GetConfigFromDb(data interface{}) string
	// load from the database
	SyncFromDb(data interface{}) bool
	// database save/load interface
	SaveLoader() DBSaveLoader
	// serialize
	Serial() ([]byte, error)
	// serialize only modified data
	SerialModify() ([]byte, error)
	// whether the property is scene data (follows the player into a scene)
	IsSceneData(prop string) bool
	// sync from scene data
	SyncFromSceneData(val interface{}) error
	// get the scene data
	SceneData() interface{}
}
// Register records a factory function for the named entity type.
// It panics if the name has already been registered.
func Register(name string, createfunc func() Entity) {
	_, dup := objects[name]
	if dup {
		panic("entity: Register called twice for object " + name)
	}
	log.LogMessage("register entity:", name)
	objects[name] = createfunc
}
// Create builds a new entity of the given registered type.
// It returns nil when the type is unknown.
func Create(name string) Entity {
	create, exist := objects[name]
	if !exist {
		return nil
	}
	return create()
}
// GetAllTypes returns the names of all registered entity types
// (in map iteration order, i.e. unspecified).
func GetAllTypes() []string {
	typs := make([]string, 0, len(objects))
	// Idiom fix: "for k, _ := range" -> "for k := range" (gofmt -s / vet).
	for k := range objects {
		typs = append(typs, k)
	}
	return typs
}
|
package pg
import (
"github.com/kyleconroy/sqlc/internal/sql/ast"
)
// TableFunc is the AST node for a table function such as XMLTABLE,
// mirroring PostgreSQL's TableFunc parse node: namespace bindings, the
// document and row expressions, and per-output-column metadata.
type TableFunc struct {
	NsUris        *ast.List
	NsNames       *ast.List
	Docexpr       ast.Node
	Rowexpr       ast.Node
	Colnames      *ast.List
	Coltypes      *ast.List
	Coltypmods    *ast.List
	Colcollations *ast.List
	Colexprs      *ast.List
	Coldefexprs   *ast.List
	Notnulls      []uint32
	Ordinalitycol int
	Location      int
}

// Pos returns the node's byte offset in the source SQL.
func (n *TableFunc) Pos() int {
	return n.Location
}
|
// Copyright © 2020 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package immediate
import (
"context"
"encoding/json"
"time"
eth2client "github.com/attestantio/go-eth2-client"
api "github.com/attestantio/go-eth2-client/api/v1"
"github.com/attestantio/go-eth2-client/spec"
"github.com/attestantio/go-eth2-client/spec/altair"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/attestantio/vouch/services/metrics"
"github.com/pkg/errors"
"github.com/rs/zerolog"
zerologger "github.com/rs/zerolog/log"
)
// Service is the submitter for signed items. It forwards each signed duty
// immediately to the matching eth2 client submitter and reports every
// operation to the client monitor.
type Service struct {
	clientMonitor                         metrics.ClientMonitor
	attestationsSubmitter                 eth2client.AttestationsSubmitter
	beaconBlockSubmitter                  eth2client.BeaconBlockSubmitter
	beaconCommitteeSubscriptionsSubmitter eth2client.BeaconCommitteeSubscriptionsSubmitter
	aggregateAttestationsSubmitter        eth2client.AggregateAttestationsSubmitter
	syncCommitteeMessagesSubmitter        eth2client.SyncCommitteeMessagesSubmitter
	syncCommitteeSubscriptionsSubmitter   eth2client.SyncCommitteeSubscriptionsSubmitter
	syncCommitteeContributionsSubmitter   eth2client.SyncCommitteeContributionsSubmitter
}

// module-wide log.
var log zerolog.Logger
// New creates a new submitter from the supplied options.
func New(ctx context.Context, params ...Parameter) (*Service, error) {
	parameters, err := parseAndCheckParameters(params...)
	if err != nil {
		return nil, errors.Wrap(err, "problem with parameters")
	}

	// Set logging.
	log = zerologger.With().Str("service", "submitter").Str("impl", "immediate").Logger()
	if parameters.logLevel != log.GetLevel() {
		log = log.Level(parameters.logLevel)
	}

	service := &Service{
		clientMonitor:                         parameters.clientMonitor,
		attestationsSubmitter:                 parameters.attestationsSubmitter,
		beaconBlockSubmitter:                  parameters.beaconBlockSubmitter,
		beaconCommitteeSubscriptionsSubmitter: parameters.beaconCommitteeSubscriptionsSubmitter,
		aggregateAttestationsSubmitter:        parameters.aggregateAttestationsSubmitter,
		syncCommitteeMessagesSubmitter:        parameters.syncCommitteeMessagesSubmitter,
		syncCommitteeSubscriptionsSubmitter:   parameters.syncCommitteeSubscriptionsSubmitter,
		syncCommitteeContributionsSubmitter:   parameters.syncCommitteeContributionsSubmitter,
	}
	return service, nil
}
// SubmitBeaconBlock submits a block.
func (s *Service) SubmitBeaconBlock(ctx context.Context, block *spec.VersionedSignedBeaconBlock) error {
	if block == nil {
		return errors.New("no beacon block supplied")
	}

	// Time the submission and report it to the client monitor, using the
	// submitter's address when it exposes one.
	started := time.Now()
	err := s.beaconBlockSubmitter.SubmitBeaconBlock(ctx, block)
	address := "<unknown>"
	if service, isService := s.beaconBlockSubmitter.(eth2client.Service); isService {
		address = service.Address()
	}
	s.clientMonitor.ClientOperation(address, "submit beacon block", err == nil, time.Since(started))
	if err != nil {
		return errors.Wrap(err, "failed to submit beacon block")
	}

	// Trace-level dump of the submitted payload.
	if e := log.Trace(); e.Enabled() {
		if data, marshalErr := json.Marshal(block); marshalErr == nil {
			e.Str("block", string(data)).Msg("Submitted beacon block")
		}
	}
	return nil
}
// SubmitAttestations submits multiple attestations.
func (s *Service) SubmitAttestations(ctx context.Context, attestations []*phase0.Attestation) error {
	if len(attestations) == 0 {
		return errors.New("no attestations supplied")
	}

	// Time the submission and report it to the client monitor, using the
	// submitter's address when it exposes one.
	started := time.Now()
	err := s.attestationsSubmitter.SubmitAttestations(ctx, attestations)
	address := "<unknown>"
	if service, isService := s.attestationsSubmitter.(eth2client.Service); isService {
		address = service.Address()
	}
	s.clientMonitor.ClientOperation(address, "submit attestations", err == nil, time.Since(started))
	if err != nil {
		return errors.Wrap(err, "failed to submit attestations")
	}

	// Trace-level dump of the submitted payload.
	if e := log.Trace(); e.Enabled() {
		if data, marshalErr := json.Marshal(attestations); marshalErr == nil {
			e.Str("attestations", string(data)).Msg("Submitted attestations")
		}
	}
	return nil
}
// SubmitBeaconCommitteeSubscriptions submits a batch of beacon committee subscriptions.
func (s *Service) SubmitBeaconCommitteeSubscriptions(ctx context.Context, subscriptions []*api.BeaconCommitteeSubscription) error {
	if len(subscriptions) == 0 {
		return errors.New("no beacon committee subscriptions supplied")
	}

	// Copy the subscriptions so the caller's slice is not shared downstream.
	subs := make([]*api.BeaconCommitteeSubscription, len(subscriptions))
	for i, subscription := range subscriptions {
		subs[i] = &api.BeaconCommitteeSubscription{
			Slot:             subscription.Slot,
			CommitteeIndex:   subscription.CommitteeIndex,
			CommitteesAtSlot: subscription.CommitteesAtSlot,
			IsAggregator:     subscription.IsAggregator,
		}
	}

	// Time the submission and report it to the client monitor, using the
	// submitter's address when it exposes one.
	started := time.Now()
	err := s.beaconCommitteeSubscriptionsSubmitter.SubmitBeaconCommitteeSubscriptions(ctx, subs)
	address := "<unknown>"
	if service, isService := s.beaconCommitteeSubscriptionsSubmitter.(eth2client.Service); isService {
		address = service.Address()
	}
	s.clientMonitor.ClientOperation(address, "submit beacon committee subscription", err == nil, time.Since(started))
	if err != nil {
		return errors.Wrap(err, "failed to submit beacon committee subscriptions")
	}

	if e := log.Trace(); e.Enabled() {
		// Summary counts.
		aggregating := 0
		for i := range subscriptions {
			if subscriptions[i].IsAggregator {
				aggregating++
			}
		}
		if data, marshalErr := json.Marshal(subscriptions); marshalErr == nil {
			e.Str("subscriptions", string(data)).Int("subscribing", len(subscriptions)).Int("aggregating", aggregating).Msg("Submitted subscriptions")
		}
	}
	return nil
}
// SubmitAggregateAttestations submits aggregate attestations.
func (s *Service) SubmitAggregateAttestations(ctx context.Context, aggregates []*phase0.SignedAggregateAndProof) error {
	if len(aggregates) == 0 {
		return errors.New("no aggregate attestations supplied")
	}

	// Time the submission and report it to the client monitor, using the
	// submitter's address when it exposes one.
	started := time.Now()
	err := s.aggregateAttestationsSubmitter.SubmitAggregateAttestations(ctx, aggregates)
	address := "<unknown>"
	if service, isService := s.aggregateAttestationsSubmitter.(eth2client.Service); isService {
		address = service.Address()
	}
	s.clientMonitor.ClientOperation(address, "submit aggregate attestation", err == nil, time.Since(started))
	if err != nil {
		return errors.Wrap(err, "failed to submit aggregate attestation")
	}

	// Trace-level dump of the submitted payload.
	if e := log.Trace(); e.Enabled() {
		if data, marshalErr := json.Marshal(aggregates); marshalErr == nil {
			e.Str("attestation", string(data)).Msg("Submitted aggregate attestations")
		}
	}
	return nil
}
// SubmitSyncCommitteeMessages submits sync committee messages.
func (s *Service) SubmitSyncCommitteeMessages(ctx context.Context, messages []*altair.SyncCommitteeMessage) error {
	if len(messages) == 0 {
		return errors.New("no sync committee messages supplied")
	}
	started := time.Now()
	err := s.syncCommitteeMessagesSubmitter.SubmitSyncCommitteeMessages(ctx, messages)
	// Fix: report against the sync committee messages submitter, not the
	// aggregate attestations submitter (copy/paste error in the original
	// caused the wrong client address to be recorded in metrics).
	if service, isService := s.syncCommitteeMessagesSubmitter.(eth2client.Service); isService {
		s.clientMonitor.ClientOperation(service.Address(), "submit sync committee messages", err == nil, time.Since(started))
	} else {
		s.clientMonitor.ClientOperation("<unknown>", "submit sync committee messages", err == nil, time.Since(started))
	}
	if err != nil {
		return errors.Wrap(err, "failed to submit sync committee messages")
	}
	// Trace-level dump of the submitted payload.
	if e := log.Trace(); e.Enabled() {
		data, err := json.Marshal(messages)
		if err == nil {
			e.Str("messages", string(data)).Msg("Submitted sync committee messages")
		}
	}
	return nil
}
// SubmitSyncCommitteeSubscriptions submits a batch of beacon committee subscriptions.
func (s *Service) SubmitSyncCommitteeSubscriptions(ctx context.Context, subscriptions []*api.SyncCommitteeSubscription) error {
	if len(subscriptions) == 0 {
		return errors.New("no sync committee subscriptions supplied")
	}

	// Time the submission and report it to the client monitor, using the
	// submitter's address when it exposes one.
	started := time.Now()
	err := s.syncCommitteeSubscriptionsSubmitter.SubmitSyncCommitteeSubscriptions(ctx, subscriptions)
	address := "<unknown>"
	if service, isService := s.syncCommitteeSubscriptionsSubmitter.(eth2client.Service); isService {
		address = service.Address()
	}
	s.clientMonitor.ClientOperation(address, "submit sync committee subscription", err == nil, time.Since(started))
	if err != nil {
		return errors.Wrap(err, "failed to submit sync committee subscriptions")
	}

	// Trace-level dump of the submitted payload.
	if e := log.Trace(); e.Enabled() {
		if data, marshalErr := json.Marshal(subscriptions); marshalErr == nil {
			e.Str("subscriptions", string(data)).Int("subscribing", len(subscriptions)).Msg("Submitted subscriptions")
		}
	}
	return nil
}
// SubmitSyncCommitteeContributions submits sync committee contributions.
// It returns an error if no contributions are supplied or if the upstream
// submitter fails; every call is recorded with the client monitor.
func (s *Service) SubmitSyncCommitteeContributions(ctx context.Context, contributionAndProofs []*altair.SignedContributionAndProof) error {
	if len(contributionAndProofs) == 0 {
		return errors.New("no sync committee contribution and proofs supplied")
	}

	started := time.Now()
	err := s.syncCommitteeContributionsSubmitter.SubmitSyncCommitteeContributions(ctx, contributionAndProofs)

	// Attribute the operation to the submitter's address when it exposes one.
	submitterAddress := "<unknown>"
	if service, isService := s.syncCommitteeContributionsSubmitter.(eth2client.Service); isService {
		submitterAddress = service.Address()
	}
	s.clientMonitor.ClientOperation(submitterAddress, "submit sync committee contribution and proofs", err == nil, time.Since(started))

	if err != nil {
		return errors.Wrap(err, "failed to submit sync committee contribution and proofs")
	}

	// Trace-log the submitted payload only when trace logging is enabled.
	if e := log.Trace(); e.Enabled() {
		if data, marshalErr := json.Marshal(contributionAndProofs); marshalErr == nil {
			e.Str("contributionAndProofs", string(data)).Msg("Submitted contribution and proofs")
		}
	}
	return nil
}
|
package activity
import (
"errors"
"fmt"
"testing"
"github.com/aws/aws-sdk-go/service/swf"
. "github.com/sclasen/swfsm/sugar"
)
// TestInterceptors verifies that BeforeTask and AfterTaskComplete fire for a
// handled activity, and that an unknown activity type triggers
// AfterTaskFailed without invoking the complete or canceled hooks.
func TestInterceptors(t *testing.T) {
	calledFail := false
	calledBefore := false
	calledComplete := false
	calledCanceled := false
	task := &swf.PollForActivityTaskOutput{
		ActivityType:      &swf.ActivityType{Name: S("test"), Version: S("test")},
		ActivityId:        S("ID"),
		WorkflowExecution: &swf.WorkflowExecution{WorkflowId: S("ID"), RunId: S("run")},
	}
	// Each hook simply records that it ran.
	interceptor := &FuncInterceptor{
		BeforeTaskFn: func(decision *swf.PollForActivityTaskOutput) {
			calledBefore = true
		},
		AfterTaskCompleteFn: func(decision *swf.PollForActivityTaskOutput, result interface{}) {
			calledComplete = true
		},
		AfterTaskFailedFn: func(decision *swf.PollForActivityTaskOutput, err error) {
			calledFail = true
		},
		AfterTaskCanceledFn: func(decision *swf.PollForActivityTaskOutput, details string) {
			calledCanceled = true
		},
	}
	worker := &ActivityWorker{
		ActivityInterceptor: interceptor,
		SWF:                 &MockSWF{},
	}
	// Handler succeeds, so the complete hook should fire.
	handler := &ActivityHandler{
		Activity: "test",
		HandlerFunc: func(activityTask *swf.PollForActivityTaskOutput, input interface{}) (interface{}, error) {
			return nil, nil
		},
	}
	worker.AddHandler(handler)
	worker.HandleActivityTask(task)
	if !calledBefore {
		t.Fatal("no before")
	}
	if !calledComplete {
		t.Fatal("no after ok")
	}
	// Switch to an activity type with no registered handler: the task should
	// now fail rather than complete.
	task.ActivityType.Name = S("nottest")
	calledFail = false
	calledBefore = false
	calledComplete = false
	worker.HandleActivityTask(task)
	if !calledBefore {
		t.Fatal("no before")
	}
	if !calledFail {
		t.Fatal("no after fail")
	}
	if calledComplete {
		t.Fatal("complete should not be called")
	}
	if calledCanceled {
		t.Fatal("cancel should not be called")
	}
}
// TestFailedInterceptor verifies that a handler error routes through
// AfterTaskFailed (carrying the handler's error message) and that the
// complete and canceled hooks stay untouched.
func TestFailedInterceptor(t *testing.T) {
	var (
		calledFail     = false
		calledBefore   = false
		calledComplete = false
		calledCanceled = false
		failMessage    string
	)
	task := &swf.PollForActivityTaskOutput{
		ActivityType:      &swf.ActivityType{Name: S("test"), Version: S("test")},
		ActivityId:        S("ID"),
		WorkflowExecution: &swf.WorkflowExecution{WorkflowId: S("ID"), RunId: S("run")},
	}
	// The failed hook also captures the error text for assertion below.
	interceptor := &FuncInterceptor{
		BeforeTaskFn: func(decision *swf.PollForActivityTaskOutput) {
			calledBefore = true
		},
		AfterTaskCompleteFn: func(decision *swf.PollForActivityTaskOutput, result interface{}) {
			calledComplete = true
		},
		AfterTaskFailedFn: func(decision *swf.PollForActivityTaskOutput, err error) {
			calledFail = true
			failMessage = err.Error()
		},
		AfterTaskCanceledFn: func(decision *swf.PollForActivityTaskOutput, details string) {
			calledCanceled = true
		},
	}
	worker := &ActivityWorker{
		ActivityInterceptor: interceptor,
		SWF:                 &MockSWF{},
	}
	// Handler always fails with a known error.
	handler := &ActivityHandler{
		Activity: "test",
		HandlerFunc: func(activityTask *swf.PollForActivityTaskOutput, input interface{}) (interface{}, error) {
			return nil, errors.New("fail")
		},
	}
	worker.AddHandler(handler)
	worker.HandleActivityTask(task)
	if !calledBefore {
		t.Fatal("no before")
	}
	if !calledFail {
		t.Fatal("no after fail")
	}
	if failMessage != "fail" {
		t.Fatal("wrong error message")
	}
	if calledComplete {
		t.Fatal("complete should not be called")
	}
	if calledCanceled {
		t.Fatal("cancel should not be called")
	}
}
// TestCanceledInterceptor verifies that a handler returning
// ActivityTaskCanceledError routes through AfterTaskCanceled (carrying the
// cancellation details) and that the failed and complete hooks stay untouched.
func TestCanceledInterceptor(t *testing.T) {
	var (
		calledFail     = false
		calledBefore   = false
		calledComplete = false
		calledCanceled = false
		details        string
	)
	task := &swf.PollForActivityTaskOutput{
		ActivityType:      &swf.ActivityType{Name: S("test"), Version: S("test")},
		ActivityId:        S("ID"),
		WorkflowExecution: &swf.WorkflowExecution{WorkflowId: S("ID"), RunId: S("run")},
	}
	// The canceled hook also captures the details string for assertion below.
	interceptor := &FuncInterceptor{
		BeforeTaskFn: func(decision *swf.PollForActivityTaskOutput) {
			calledBefore = true
		},
		AfterTaskCompleteFn: func(decision *swf.PollForActivityTaskOutput, result interface{}) {
			calledComplete = true
		},
		AfterTaskFailedFn: func(decision *swf.PollForActivityTaskOutput, err error) {
			calledFail = true
		},
		AfterTaskCanceledFn: func(decision *swf.PollForActivityTaskOutput, det string) {
			calledCanceled = true
			details = det
		},
	}
	worker := &ActivityWorker{
		ActivityInterceptor: interceptor,
		SWF:                 &MockSWF{},
	}
	// Handler reports cancellation rather than success or failure.
	handler := &ActivityHandler{
		Activity: "test",
		HandlerFunc: func(activityTask *swf.PollForActivityTaskOutput, input interface{}) (interface{}, error) {
			return nil, ActivityTaskCanceledError{details: "details"}
		},
	}
	worker.AddHandler(handler)
	worker.HandleActivityTask(task)
	if !calledBefore {
		t.Fatal("no before")
	}
	if !calledCanceled {
		t.Fatal("no after canceled")
	}
	if details != "details" {
		// Fixed failure-message typo ("wong" -> "wrong").
		t.Fatalf("wrong task canceled details. Got: %q", details)
	}
	if calledFail {
		t.Fatal("fail should not be called")
	}
	if calledComplete {
		t.Fatal("complete should not be called")
	}
}
// TestComposedInterceptor verifies that a composed interceptor invokes every
// non-nil component, tolerates nil components and unimplemented hooks, and
// lets a component override the AfterTask result while passing the error
// through unchanged.
func TestComposedInterceptor(t *testing.T) {
	calledFirst := false
	calledThird := false
	c := NewComposedDecisionInterceptor(
		&FuncInterceptor{
			BeforeTaskFn: func(decision *swf.PollForActivityTaskOutput) {
				calledFirst = true
			},
			AfterTaskFn: func(t *swf.PollForActivityTaskOutput, result interface{}, passedthrough error) (interface{}, error) {
				return "overridden", passedthrough
			},
		},
		nil, // shouldn't blow up on nil second,
		&FuncInterceptor{
			BeforeTaskFn: func(decision *swf.PollForActivityTaskOutput) {
				calledThird = true
			},
		},
	)
	c.BeforeTask(nil)
	if !calledFirst {
		t.Fatalf("first not called")
	}
	if !calledThird {
		t.Fatalf("third not called")
	}
	c.AfterTaskComplete(nil, nil) // shouldn't blow up on non-implemented methods
	// The first component replaces the result but must preserve the error.
	passthrough := fmt.Errorf("passthrough")
	result, err := c.AfterTask(nil, nil, passthrough)
	if result != "overridden" {
		t.Fatalf("overridden value not returned")
	}
	if err != passthrough {
		t.Fatalf("passed through value not returned")
	}
}
|
package controllers
import "server/src/services/interfaces"
// ExerciseController handles exercise-related requests by delegating to the
// exercise service layer.
type ExerciseController struct {
	exerciseService interfaces.ExerciseServiceProvider
}

// NewExerciseController constructs an ExerciseController backed by the given
// exercise service.
func NewExerciseController(exerciseService interfaces.ExerciseServiceProvider) *ExerciseController {
	return &ExerciseController{exerciseService: exerciseService}
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//420. Strong Password Checker
//A password is considered strong if below conditions are all met:
//It has at least 6 characters and at most 20 characters.
//It must contain at least one lowercase letter, at least one uppercase letter, and at least one digit.
//It must NOT contain three repeating characters in a row ("...aaa..." is weak, but "...aa...a..." is strong, assuming other conditions are met).
//Write a function strongPasswordChecker(s), that takes a string s as input, and return the MINIMUM change required to make s a strong password. If s is already strong, return 0.
//Insertion, deletion or replace of any one character are all considered as one change.
//func strongPasswordChecker(s string) int {
//}
// Time Is Money |
package bitmap
import "testing"
// TestBitma exercises basic Bitmap operations: construction, setting a bit,
// and reading a bit back. NOTE(review): the name looks like a typo for
// TestBitmap — confirm before renaming, since `go test -run` patterns may
// reference it.
func TestBitma(t *testing.T) {
	bitmap := NewBitmap(19) // bitmap sized for 19 bits
	t.Logf("%+v", bitmap)
	bitmap.SetBit(13, 1) // set bit 13 to 1
	t.Logf("%b", bitmap.data)
	t.Log(bitmap.GetBit(1)) // read back a bit that was never set
}
|
package gin
import (
"context"
"github.com/game-explorer/animal-chess-server/internal/pkg/log"
"github.com/gin-gonic/gin"
"net/http"
"time"
)
// Engine wraps a gin.Engine together with the http.Server that serves it,
// so the server can be shut down gracefully.
type Engine struct {
	*gin.Engine
	httpServer *http.Server // server driving the embedded gin engine
}
// NewGin builds an Engine with an attached (not yet listening) HTTP server.
// debug enables gin's verbose debug mode; release mode is used otherwise.
// The previous version had the condition inverted, selecting release mode
// when debugging and debug mode in production.
func NewGin(debug bool) *Engine {
	if debug {
		gin.SetMode(gin.DebugMode)
	} else {
		gin.SetMode(gin.ReleaseMode)
	}
	engine := gin.New()
	httpServer := &http.Server{
		Addr:    "",
		Handler: engine,
	}
	return &Engine{
		Engine:     engine,
		httpServer: httpServer,
	}
}
// Listen serves HTTP on addr until ctx is cancelled, at which point the
// server is shut down gracefully. It blocks until the server stops.
func (g *Engine) Listen(ctx context.Context, addr string) {
	go func() {
		<-ctx.Done()
		g.GracefulStop()
	}()
	g.httpServer.Addr = addr
	// ErrServerClosed is the expected outcome of a graceful shutdown; anything
	// else is fatal.
	err := g.httpServer.ListenAndServe()
	if err != nil && err != http.ErrServerClosed {
		log.Panicf("http.ListenAndServe err:%+v", err)
	}
}
// GracefulStop shuts the HTTP server down, allowing up to 30 seconds for
// in-flight requests to complete.
func (g *Engine) GracefulStop() {
	// Keep and defer the cancel function: discarding it leaks the timeout's
	// resources and is flagged by go vet (lostcancel).
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	err := g.httpServer.Shutdown(ctx)
	if err != nil {
		log.Error(err)
	}
}
|
package main
import "fmt"
// Stack is a LIFO collection of ints backed by a slice.
type Stack []int

// pop removes and returns the top element, or -1 when the stack is empty.
func (s *Stack) pop() int {
	old := *s
	n := len(old)
	if n == 0 {
		return -1
	}
	top := old[n-1]
	*s = old[:n-1]
	return top
}

// push places data on top of the stack.
func (s *Stack) push(data int) {
	*s = append(*s, data)
}
// main demonstrates Stack: push three values, show the stack, pop once,
// and show the remaining contents.
func main() {
	var st Stack
	for _, v := range []int{1, 2, 3} {
		st.push(v)
	}
	fmt.Println(st)
	fmt.Println(st.pop())
	fmt.Println(st)
}
|
package processor
import (
"github.com/bitmaelum/bitmaelum-suite/internal/account"
"github.com/bitmaelum/bitmaelum-suite/internal/api"
"github.com/bitmaelum/bitmaelum-suite/internal/config"
"github.com/bitmaelum/bitmaelum-suite/internal/container"
"github.com/bitmaelum/bitmaelum-suite/internal/message"
"github.com/bitmaelum/bitmaelum-suite/internal/resolve"
"github.com/bitmaelum/bitmaelum-suite/pkg/address"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"io/ioutil"
"os"
)
// ProcessMessage will process a message found in the processing queue.
// * If it's a local address, it will be moved to the local mailbox
// * If it's a remote address, it will be send to the remote mail server
// * If things fail, it will be moved to the retry queue, where it will be moved to processed queue later
func ProcessMessage(msgID string) {
	logrus.Debugf("processing message %s", msgID)
	// Set the message in the scoreboard, so we know this message is being processed.
	AddToScoreboard(message.SectionProcessing, msgID)
	defer func() {
		RemoveFromScoreboard(message.SectionProcessing, msgID)
	}()
	// Check header and get recipient
	header, err := message.GetMessageHeader(message.SectionProcessing, msgID)
	if err != nil {
		// cannot read header.. Let's move to retry queue
		logrus.Warnf("cannot find or read header for message %s. Retrying.", msgID)
		MoveToRetryQueue(msgID)
		return
	}
	// Resolve the recipient address to find out where the message should go.
	rs := container.GetResolveService()
	res, err := rs.Resolve(header.To.Addr)
	if err != nil {
		logrus.Warnf("cannot resolve address %s for message %s. Retrying.", header.To.Addr, msgID)
		MoveToRetryQueue(msgID)
		return
	}
	// Local addresses don't need to be send. They are treated locally
	ar := container.GetAccountRepo()
	if ar.Exists(header.To.Addr) {
		// probably move the message to the incoming queue
		// Do stuff locally
		logrus.Debugf("Message %s can be transferred locally to %s", msgID, res.Hash)
		err := deliverLocal(res, msgID)
		if err != nil {
			logrus.Warnf("cannot deliver message %s locally to %s. Retrying.", msgID, header.To.Addr)
			MoveToRetryQueue(msgID)
		}
		return
	}
	// Otherwise, send to outgoing server
	logrus.Debugf("Message %s is remote, transferring to %s", msgID, res.Server)
	err = deliverRemote(header, res, msgID)
	if err != nil {
		logrus.Warnf("cannot deliver message %s remotely to %s. Retrying.", msgID, header.To.Addr)
		MoveToRetryQueue(msgID)
	}
}
// deliverLocal moves a message to a local mailbox. This is an easy process as it only needs to move
// the message to another directory.
//
// Errors are returned to the caller, which is responsible for moving the
// message to the retry queue.
func deliverLocal(info *resolve.Info, msgID string) error {
	// Deliver mail to local user's inbox
	ar := container.GetAccountRepo()
	err := ar.SendToBox(address.HashAddress(info.Hash), account.BoxInbox, msgID)
	if err != nil {
		// Surface the failure instead of swallowing it: the previous version
		// queued a retry here AND returned nil, so the caller (which also
		// retries on error) believed delivery had succeeded.
		logrus.Warnf("cannot deliver %s locally", msgID)
		return err
	}
	return nil
}
// deliverRemote uploads a message to a remote mail server. For this to work it first needs to fetch a
// ticket from that server. Either that ticket is supplied, or we need to do proof-of-work first before
// we get the ticket. Once we have the ticket, we can upload the message to the server in the same way
// we upload a message from a client to a server.
func deliverRemote(header *message.Header, info *resolve.Info, msgID string) error {
	client, err := api.NewAnonymous(api.ClientOpts{
		Host:          info.Server,
		AllowInsecure: config.Server.Server.AllowInsecure,
		Debug:         config.Client.Server.DebugHttp,
	})
	if err != nil {
		return err
	}
	// Get upload ticket
	logrus.Tracef("getting ticket for %s:%s:%s", header.From.Addr, address.HashAddress(info.Hash), "")
	t, err := client.GetAnonymousTicket(header.From.Addr, address.HashAddress(info.Hash), "")
	if err != nil {
		return err
	}
	if !t.Valid {
		logrus.Debugf("ticket %s not valid. Need to do proof of work", t.ID)
		// Do proof of work. We have to wait for it. THis is ok as this is just a separate thread.
		t.Pow.Work(0)
		logrus.Debugf("work for %s is completed", t.ID)
		// Exchange the completed proof for a (hopefully valid) ticket.
		t, err = client.GetAnonymousTicketByProof(t.ID, t.Pow.Proof)
		if err != nil || !t.Valid {
			logrus.Warnf("Ticket for message %s not valid after proof of work, moving to retry queue", msgID)
			MoveToRetryQueue(msgID)
			return err
		}
	}
	// parallelize uploads
	g := new(errgroup.Group)
	g.Go(func() error {
		logrus.Tracef("uploading header for ticket %s", t.ID)
		return client.UploadHeader(*t, header)
	})
	g.Go(func() error {
		catalogPath, err := message.GetPath(message.SectionProcessing, msgID, "catalog")
		if err != nil {
			return err
		}
		catalogData, err := ioutil.ReadFile(catalogPath)
		if err != nil {
			return err
		}
		logrus.Tracef("uploading catalog for ticket %s", t.ID)
		return client.UploadCatalog(*t, catalogData)
	})
	messageFiles, err := message.GetFiles(message.SectionProcessing, msgID)
	if err != nil {
		// Abort the partially-started upload on the server side.
		_ = client.DeleteMessage(*t)
		return err
	}
	for _, messageFile := range messageFiles {
		// Store locally, otherwise the anonymous go function doesn't know which "block"
		mf := messageFile
		g.Go(func() error {
			// Open reader
			f, err := os.Open(mf.Path)
			if err != nil {
				return err
			}
			defer func() {
				_ = f.Close()
			}()
			logrus.Tracef("uploading block %s for ticket %s", mf.ID, t.ID)
			return client.UploadBlock(*t, mf.ID, f)
		})
	}
	// Wait until all are completed
	if err := g.Wait(); err != nil {
		logrus.Debugf("Error while uploading message %s: %s", msgID, err)
		_ = client.DeleteMessage(*t)
		return err
	}
	// All done, mark upload as completed
	logrus.Tracef("message completed for ticket %s", t.ID)
	err = client.CompleteUpload(*t)
	if err != nil {
		return err
	}
	// Remove local message from processing queue
	return message.RemoveMessage(message.SectionProcessing, msgID)
}
|
package eea
import (
"math/rand"
"reflect"
"github.com/renproject/shamir/poly"
)
// Generate implements the quick.Generator interface.
// Each polynomial field is drawn from the same generator in a fixed order,
// keeping the random stream identical to the original implementation.
func (eea Stepper) Generate(rand *rand.Rand, size int) reflect.Value {
	size /= 8
	gen := func() poly.Poly {
		return poly.Poly{}.Generate(rand, size).Interface().(poly.Poly)
	}
	// Field initialisers evaluate left-to-right, preserving the original
	// generation order: rPrev, rNext, sPrev, sNext, tPrev, tNext, q, r.
	stepper := Stepper{
		rPrev: gen(), rNext: gen(),
		sPrev: gen(), sNext: gen(),
		tPrev: gen(), tNext: gen(),
		q: gen(), r: gen(),
	}
	return reflect.ValueOf(stepper)
}
// SizeHint implements the surge.SizeHinter interface.
// It is the sum of the size hints of all eight polynomial fields.
func (eea Stepper) SizeHint() int {
	total := 0
	for _, p := range []poly.Poly{
		eea.rNext, eea.rPrev,
		eea.sNext, eea.sPrev,
		eea.tNext, eea.tPrev,
		eea.q, eea.r,
	} {
		total += p.SizeHint()
	}
	return total
}
// Marshal implements the surge.Marshaler interface.
// The fields are serialised in the fixed order rNext, rPrev, sNext, sPrev,
// tNext, tPrev, q, r, matching Unmarshal.
func (eea Stepper) Marshal(buf []byte, rem int) ([]byte, int, error) {
	var err error
	for _, p := range []poly.Poly{
		eea.rNext, eea.rPrev,
		eea.sNext, eea.sPrev,
		eea.tNext, eea.tPrev,
		eea.q, eea.r,
	} {
		if buf, rem, err = p.Marshal(buf, rem); err != nil {
			return buf, rem, err
		}
	}
	return buf, rem, nil
}
// Unmarshal implements the surge.Unmarshaler interface.
// The fields are read in the fixed order rNext, rPrev, sNext, sPrev, tNext,
// tPrev, q, r, matching Marshal.
func (eea *Stepper) Unmarshal(buf []byte, rem int) ([]byte, int, error) {
	var err error
	for _, p := range []*poly.Poly{
		&eea.rNext, &eea.rPrev,
		&eea.sNext, &eea.sPrev,
		&eea.tNext, &eea.tPrev,
		&eea.q, &eea.r,
	} {
		if buf, rem, err = p.Unmarshal(buf, rem); err != nil {
			return buf, rem, err
		}
	}
	return buf, rem, nil
}
|
package main
import (
"fmt"
"strconv"
)
// main prints the translation counts for a couple of sample inputs.
func main() {
	for _, n := range []int{322, 444} {
		fmt.Println(translateNum(n))
	}
}
// translateNum counts the distinct ways to translate num's decimal digits
// into letters, where two-digit chunks "10".."25" may be taken as a single
// letter (LeetCode "translate a number to a string").
func translateNum(num int) int {
	digits := strconv.Itoa(num)
	// prev / curr hold the counts for the first i-2 and i-1 digits; rolling
	// variables replace the original dp slice.
	prev, curr := 1, 1
	for i := 2; i <= len(digits); i++ {
		next := curr
		// A two-digit chunk contributes only when it falls in 10..25.
		if pair := digits[i-2 : i]; pair >= "10" && pair <= "25" {
			next += prev
		}
		prev, curr = curr, next
	}
	return curr
}
|
package main
import (
"fmt"
)
// main prints whether each of 1..20 is even or odd.
func main() {
	for number := 1; number <= 20; number++ {
		parity := "is odd."
		if number%2 == 0 {
			parity = "is even."
		}
		fmt.Printf("%d %s\n", number, parity)
	}
}
|
package api
import (
"fmt"
"io"
"log"
workerpool "pn/pool"
"pn/reader"
"sync"
"time"
)
const (
goRoutines = 1000
)
// Search scans the file at filepath for id using a pool of goRoutines
// workers fed row batches over the jobs channel. It returns the match found
// ("" when none) and prints the elapsed wall-clock time.
func Search(id, filepath string) string {
	start := time.Now()
	// jobs carries row batches to the workers; results carries their findings.
	jobs := make(chan []string, 1000)
	results := make(chan string, 4000)
	var wg sync.WaitGroup
	pool := workerpool.New(id, goRoutines, &wg, jobs, results)
	pool.Start()
	// Stream the file in batches of 1000 rows until EOF.
	r := reader.New(filepath, 1000)
	for {
		rows, err := r.Read()
		if err != nil {
			if err != io.EOF {
				log.Fatalf("Error reading file: %v", err)
			}
			break
		}
		jobs <- rows
	}
	close(jobs)
	// Wait for the workers to drain and collect the final result.
	found := result(results, &wg)
	elapsed := time.Since(start)
	fmt.Printf("Elapsed: %s\n", elapsed)
	return found
}
func result(results chan string, wg *sync.WaitGroup) string {
done := make(chan bool)
found := ""
go func() {
for result := range results {
if result != "" {
found = result
}
}
done <- true
}()
wg.Wait()
close(results)
<-done
return found
}
|
package main
import (
"database/sql"
"fmt"
_ "github.com/lib/pq"
)
const (
host = "localhost"
port = 5432
user = "postgres"
password = "postgres"
dbname = "postgres"
)
// main connects to the local Postgres instance and inserts one row into the
// student table, panicking on any failure.
func main() {
	psqlInfo := fmt.Sprintf("host=%s port=%d user=%s "+
		"password=%s dbname=%s sslmode=disable",
		host, port, user, password, dbname)
	db, err := sql.Open("postgres", psqlInfo)
	if err != nil {
		panic(err)
	}
	defer db.Close()
	// sql.Open does not dial; Ping verifies the connection actually works.
	err = db.Ping()
	if err != nil {
		panic(err)
	}
	id := 99
	name := "newhector"
	fmt.Println("Successfully connected!")
	// Parameterized insert; $1/$2 are bound to id and name.
	sqlStatement := `
INSERT INTO student (id, name)
VALUES ($1, $2)`
	_, err = db.Exec(sqlStatement, id, name)
	if err != nil {
		panic(err)
	}
	fmt.Println("this is hector")
}
// function to insert into table
// function to send reminder via email
// function to fetch data from database
// function to check whether a row in the table is within a certain number of days away
|
package aoc2015
import (
"fmt"
"strconv"
"strings"
aoc "github.com/janreggie/aoc/internal"
"github.com/pkg/errors"
)
// racingReindeer is a reindeer that participates in the Reindeer Olympics
type racingReindeer struct {
	name        string // reindeer's name, e.g. "Vixen"
	flyingSpeed uint   // speed while flying, in km/s
	flyingTime  uint   // how long it can fly before resting, in seconds
	restingTime uint   // how long it must rest before flying again, in seconds
}
// reindeerOlympics represents the state of a Reindeer Olympics
// at some particular time
type reindeerOlympics struct {
	competitors []racingReindeer // the participating reindeer
	points      []uint           // points[i] is the score of competitors[i]
	travelled   []uint           // travelled[i] is the distance of competitors[i], in km
	time        uint             // elapsed race time, in seconds
}
// newRacingReindeer generates a racing reindeer by reading from a string that looks like:
// NAME can fly FLYINGSPEED km/s for FLYINGTIME seconds, but then must rest for RESTINGTIME seconds.
func newRacingReindeer(description string) (racingReindeer, error) {
	fields := strings.Fields(description)
	if len(fields) != 15 {
		return racingReindeer{}, fmt.Errorf("could not parse %v", description)
	}
	out := racingReindeer{name: fields[0]}
	// The numeric tokens sit at fixed positions 3, 6 and 13.
	fs, err := strconv.Atoi(fields[3])
	if err != nil {
		return out, errors.Wrapf(err, "could not parse FLYINGSPEED %v", fields[3])
	}
	out.flyingSpeed = uint(fs)
	ft, err := strconv.Atoi(fields[6])
	if err != nil {
		return out, errors.Wrapf(err, "could not parse FLYINGTIME: %v", fields[6])
	}
	out.flyingTime = uint(ft)
	rt, err := strconv.Atoi(fields[13])
	if err != nil {
		return out, errors.Wrapf(err, "could not parse RESTINGTIME: %v", fields[13])
	}
	out.restingTime = uint(rt)
	return out, nil
}
// distance returns the distance that the reindeer would have travelled
// after some amount of time. It is computed in closed form: whole
// fly-rest cycles contribute a full flying stretch each, and the remainder
// contributes flying time capped at flyingTime.
func (reindeer racingReindeer) distance(time uint) uint {
	cycle := reindeer.flyingTime + reindeer.restingTime
	perCycle := reindeer.flyingSpeed * reindeer.flyingTime
	rounds := time / cycle
	remaining := time % cycle
	if remaining > reindeer.flyingTime {
		remaining = reindeer.flyingTime
	}
	return rounds*perCycle + reindeer.flyingSpeed*remaining
}
// isFlying returns if the reindeer is flying at a specific instance in time.
// Within each fly-rest cycle the reindeer flies during seconds 1..flyingTime
// and rests for the remainder; second 0 counts as not flying.
func (reindeer racingReindeer) isFlying(time uint) bool {
	cycle := reindeer.flyingTime + reindeer.restingTime
	t := time
	if t > cycle {
		t %= cycle
	}
	return t > 0 && t <= reindeer.flyingTime
}
// newReindeerOlympics creates a reindeer olympics at time zero with a
// defensive copy of the competitors and zeroed scores and distances.
func newReindeerOlympics(reindeer []racingReindeer) reindeerOlympics {
	n := len(reindeer)
	return reindeerOlympics{
		competitors: append([]racingReindeer(nil), reindeer...),
		points:      make([]uint, n),
		travelled:   make([]uint, n),
		// time starts at its zero value
	}
}
// reset returns the Reindeer Olympics to time zero, clearing every
// competitor's points and distance travelled.
func (olympics *reindeerOlympics) reset() {
	olympics.time = 0
	for ii := range olympics.competitors {
		olympics.points[ii] = 0
		olympics.travelled[ii] = 0
	}
}
// iterate advances the Reindeer Olympics by one second: flying reindeer
// gain distance, then every reindeer currently in the lead earns one point.
func (olympics *reindeerOlympics) iterate() {
	olympics.time++
	var lead uint
	for ii := range olympics.competitors {
		if olympics.competitors[ii].isFlying(olympics.time) {
			olympics.travelled[ii] += olympics.competitors[ii].flyingSpeed
		}
		if olympics.travelled[ii] > lead {
			lead = olympics.travelled[ii]
		}
	}
	// All reindeer tied for the lead score a point this second.
	for ii, dist := range olympics.travelled {
		if dist == lead {
			olympics.points[ii]++
		}
	}
}
// iterateUntil iterates the Olympics until at some time.
// if olympics.time > time then nothing happens.
func (olympics *reindeerOlympics) iterateUntil(time uint) {
	for {
		if olympics.time >= time {
			return
		}
		olympics.iterate()
	}
}
// Day14 solves the fourteenth day puzzle "Reindeer Olympics".
//
// Input
//
// A file containing nine lines, each of which
// describes the flying speed, flying time, and resting time for
// a particular reindeer. For example:
//
//	Vixen can fly 19 km/s for 7 seconds, but then must rest for 124 seconds.
//	Rudolph can fly 3 km/s for 15 seconds, but then must rest for 28 seconds.
//	Donner can fly 19 km/s for 9 seconds, but then must rest for 164 seconds.
//
// It is guaranteed that the numbers are non-negative integers,
// each no more than 200.
func Day14(input string) (answer1, answer2 string, err error) {
	allReindeer := make([]racingReindeer, 0)
	for _, line := range aoc.SplitLines(input) {
		rd, e := newRacingReindeer(line)
		if e != nil {
			// Wrap the actual parse error e. The previous code wrapped the
			// still-nil err, and errors.Wrapf(nil, ...) returns nil, so parse
			// failures were silently swallowed.
			err = errors.Wrapf(e, "could not create reindeer %v", line)
			return
		}
		allReindeer = append(allReindeer, rd)
	}
	// Race for the canonical 2503 seconds.
	olympics := newReindeerOlympics(allReindeer)
	olympics.iterateUntil(2503)
	// Part 1 wants the furthest distance; part 2 the highest point total.
	var furthestDistance, mostPoints uint
	for ii := range olympics.competitors {
		if olympics.points[ii] > mostPoints {
			mostPoints = olympics.points[ii]
		}
		if olympics.travelled[ii] > furthestDistance {
			furthestDistance = olympics.travelled[ii]
		}
	}
	answer1 = strconv.FormatUint(uint64(furthestDistance), 10)
	answer2 = strconv.FormatUint(uint64(mostPoints), 10)
	return
}
|
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package internal
import (
"fmt"
"time"
"golang.org/x/net/context"
"golang.org/x/oauth2"
)
// userAuthTokenProvider implements TokenProvider using the interactive
// 3-legged OAuth flow.
type userAuthTokenProvider struct {
	oauthTokenProvider
	ctx    context.Context // context used for the token exchange requests
	config *oauth2.Config  // OAuth2 client configuration
}
// NewUserAuthTokenProvider returns TokenProvider that can perform 3-legged
// OAuth flow involving interaction with a user.
//
// The returned provider is interactive: MintToken prompts the user to visit
// a consent URL and paste back the authorization code.
func NewUserAuthTokenProvider(ctx context.Context, clientID, clientSecret string, scopes []string) (TokenProvider, error) {
	return &userAuthTokenProvider{
		oauthTokenProvider: oauthTokenProvider{
			interactive: true,
			tokenFlavor: "user",
		},
		ctx: ctx,
		config: &oauth2.Config{
			ClientID:     clientID,
			ClientSecret: clientSecret,
			Endpoint: oauth2.Endpoint{
				AuthURL:  "https://accounts.google.com/o/oauth2/auth",
				TokenURL: "https://accounts.google.com/o/oauth2/token",
			},
			// The "oob" redirect makes Google display the code for manual copy-paste.
			RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
			Scopes:      scopes,
		},
	}, nil
}
// MintToken obtains a fresh OAuth token by walking the user through the
// consent flow: print the consent URL, read the pasted authorization code
// from stdin, then exchange it for a token.
func (p *userAuthTokenProvider) MintToken() (Token, error) {
	// Grab the authorization code by redirecting a user to a consent screen.
	url := p.config.AuthCodeURL("", oauth2.AccessTypeOffline, oauth2.ApprovalForce)
	fmt.Printf("Visit the URL to get authorization code.\n\n%s\n\n", url)
	fmt.Printf("Authorization code: ")
	var code string
	if _, err := fmt.Scan(&code); err != nil {
		return nil, err
	}
	// Exchange it for a token.
	tok, err := p.config.Exchange(p.ctx, code)
	if err != nil {
		return nil, err
	}
	return makeToken(tok), nil
}
// RefreshToken exchanges tok's refresh token for a new access token via the
// oauth2 token source.
func (p *userAuthTokenProvider) RefreshToken(tok Token) (Token, error) {
	// Clear expiration time to force token refresh. Do not use 0 since it means
	// that token never expires.
	t := extractOAuthToken(tok)
	t.Expiry = time.Unix(1, 0)
	src := p.config.TokenSource(p.ctx, &t)
	newTok, err := src.Token()
	if err != nil {
		return nil, err
	}
	return makeToken(newTok), nil
}
|
package main

import "fmt"

// main demonstrates that Go strings are immutable: mutating a rune slice
// copy leaves the original string untouched.
func main() {
	name := "Gello"
	letters := []rune(name)
	letters[0] = 'H'
	fmt.Println(string(letters), name)
}
|
package main
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/satori/go.uuid"
"golang.org/x/net/websocket"
)
// domain is default jupyter kernel gateway listening address
// TODO: port should not be hardcoded
const domain = "localhost:8888"
var (
baseURI = fmt.Sprintf("http://%s", domain)
wsURI = fmt.Sprintf("ws://%s", domain)
currentKernel = kernel{Name: "python"}
kgPID int
)
// msg is jupyter message implementation
// https://jupyter-client.readthedocs.io/en/latest/messaging.html#general-message-format
type msg struct {
Header *header `json:"header"`
ParentHeader *header `json:"parent_header"`
Channel string `json:"channel"`
Content map[string]interface{} `json:"content"`
Metadata map[string]interface{} `json:"metadata"`
Buffers []interface{} `json:"buffers"`
}
// header is the jupyter message header block (and parent header), carrying
// message identity and typing information.
type header struct {
	Username string `json:"username"`
	Version  string `json:"version"`
	Session  string `json:"session"`
	MsgID    string `json:"msg_id"`
	MsgType  string `json:"msg_type"`
	Date     string `json:"date"`
}

// kernel represents jupyter kernel info
type kernel struct {
	Name string `json:"name"`
	ID   string `json:"id,omitempty"`
}
// isJupyterRunning reports whether a jupyter kernel gateway is answering
// on baseURI.
func isJupyterRunning() bool {
	resp, err := http.Get(fmt.Sprintf("%s/api", baseURI))
	if err != nil {
		return false
	}
	// Close the response body so the connection is not leaked (previously the
	// response was discarded unclosed).
	resp.Body.Close()
	return true
}
// RunKernelGateway runs jupyter kernel gateway
// https://github.com/jupyter/kernel_gateway
// It is a no-op when a gateway is already answering on baseURI.
func RunKernelGateway(stdout, stderr io.Writer, kernelName string) {
	if isJupyterRunning() {
		return
	}
	currentKernel.Name = kernelName
	cmd := exec.Command(
		"jupyter",
		"--NotebookApp.token=''",
		"kernelgateway",
		"--JupyterWebsocketPersonality.list_kernels=True",
	)
	// Wire the child's streams to the matching writers; the previous version
	// had them crossed (Stderr <- stdout, Stdout <- stderr).
	cmd.Stdout = stdout
	cmd.Stderr = stderr
	err := cmd.Start()
	if err != nil {
		log.Fatal(err)
	}
	kgPID = cmd.Process.Pid
	// Reap the child and surface its exit status when it terminates.
	go func() {
		log.Printf("Error starting kernel gateway: %s", cmd.Wait())
	}()
}
// Run sends code to jupyter kernel for processing
// It executes the script file's contents followed by the function expression,
// returning the textual result and the elapsed time in milliseconds.
func Run(ctx context.Context, script, function string) (string, time.Duration, error) {
	duration := time.Duration(0)
	ws := dialKernelWebSocket()
	respCh := make(chan string)
	errCh := make(chan string)
	go handleWebSocket(ws, respCh, errCh)
	scriptContent, err := scriptContent(script)
	if err != nil {
		return "", duration, err
	}
	// First execute the script body (defining its symbols), then the function call.
	err = websocket.JSON.Send(ws, createExecuteMsg(scriptContent))
	if err != nil {
		return "", duration, err
	}
	err = websocket.JSON.Send(ws, createExecuteMsg(function))
	if err != nil {
		return "", duration, err
	}
	var data string
	start := time.Now().UTC()
	// NOTE(review): on ctx cancellation data stays empty and err stays nil —
	// confirm callers treat that as a timeout.
	select {
	case data = <-respCh:
		data = strings.Trim(data, "'")
	case data = <-errCh:
		err = errors.New("Script error")
		break
	case <-ctx.Done():
	}
	duration = time.Now().UTC().Sub(start) / time.Millisecond
	return data, duration, err
}
// scriptContent reads the named script from the configured resource
// directory and returns its contents as a string.
func scriptContent(script string) (string, error) {
	// args is a package-level configuration struct defined elsewhere in this package.
	path := filepath.Join(args.ResourceDir, script)
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}
	return string(data), nil
}
// SetKernelName sets currentKernel name
// (the kernel itself is started lazily by GetKernel).
func SetKernelName(name string) {
	currentKernel.Name = name
}
func getKernelURI() string {
return fmt.Sprintf(`%s/api/kernels`, baseURI)
}
// isKernelRunning reports whether a kernel with the given name is already
// running on the gateway. Any request or decode failure is treated as "not
// running".
func isKernelRunning(name string) bool {
	resp, err := http.Get(getKernelURI())
	if err != nil {
		return false
	}
	// http.Get guarantees a non-nil response when err is nil, so the previous
	// nil-check was redundant.
	defer resp.Body.Close()
	var runningKernels []kernel
	if err := json.NewDecoder(resp.Body).Decode(&runningKernels); err != nil {
		return false
	}
	for _, k := range runningKernels {
		if k.Name == name {
			return true
		}
	}
	return false
}
func startKernel(k *kernel) {
var body bytes.Buffer
json.NewEncoder(&body).Encode(k)
uri := getKernelURI()
// TODO: this could be handled better
time.Sleep(2 * time.Second)
response, err := http.Post(uri, "application/json", &body)
if err != nil {
log.Println(err)
return
}
if response != nil {
defer response.Body.Close()
}
err = json.NewDecoder(response.Body).Decode(¤tKernel)
if err != nil {
log.Printf("Error decoding kernel: %s", err)
}
}
// GetKernel gets kernel id by name and starts kernel process
func GetKernel() {
if !isKernelRunning(currentKernel.Name) {
startKernel(¤tKernel)
}
}
// createMsg creates msg to be sent to kernel gateway
// with a fresh message/session ID pair and an RFC3339 timestamp.
func createMsg(msgType, channel string, content map[string]interface{}) *msg {
	return &msg{
		Header: &header{
			Version: "5.0",
			MsgID:   uuid.NewV4().String(),
			MsgType: msgType,
			Session: uuid.NewV4().String(),
			Date:    time.Now().Format(time.RFC3339),
		},
		Channel: channel,
		Content: content,
	}
}
// createExecuteMsg wraps code in a jupyter execute_request message for the
// shell channel.
func createExecuteMsg(code string) *msg {
	content := map[string]interface{}{
		"code":          code,
		"silent":        false,
		"store_history": false,
		"allow_stdin":   false,
	}
	return createMsg("execute_request", "shell", content)
}
// handleWebSocket handles jupyter gateway websocket connection
// It receives messages until EOF, dispatching each one to handleResponseMsg
// on its own goroutine.
func handleWebSocket(ws *websocket.Conn, respCh chan string, errCh chan string) {
	var err error
	defer ws.Close()
	for {
		var respMsg msg
		err = websocket.JSON.Receive(ws, &respMsg)
		if err != nil {
			// EOF means the peer closed the connection; stop the loop.
			if err == io.EOF {
				ws.Close()
				break
			}
			log.Printf("Error receiving message from websocket: %s", err)
		} else {
			go handleResponseMsg(&respMsg, respCh, errCh)
		}
	}
}
// handleResponseMsg routes one jupyter message to the response or error
// channel, or writes stream output directly to stdout/stderr. Message types
// not listed here are ignored.
func handleResponseMsg(respMsg *msg, resp chan string, errCh chan string) {
	switch respMsg.Header.MsgType {
	case "display_data", "execute_result":
		// Forward a single representation from the mime bundle.
		data := respMsg.Content["data"].(map[string]interface{})
		for _, v := range data {
			resp <- v.(string)
			break
		}
	case "stream":
		outMsg := respMsg.Content["text"].(string)
		var out io.Writer
		switch respMsg.Content["name"].(string) {
		case "stdout":
			out = os.Stdout
		case "stderr":
			out = os.Stderr
		default:
			// Unknown stream name: the original left `out` nil and
			// fmt.Fprint would panic. Drop the message instead.
			return
		}
		if _, err := fmt.Fprint(out, outMsg); err != nil {
			log.Println(err)
		}
	case "error":
		var buf bytes.Buffer
		for _, v := range respMsg.Content["traceback"].([]interface{}) {
			buf.WriteString(v.(string))
		}
		errCh <- buf.String()
	case "execute_reply":
		// Signals completion of an execute_request.
		resp <- ""
	}
	// NOTE: Go switch cases do not fall through; the original's explicit
	// `break` statements were redundant and have been removed.
}
// dialKernelWebSocket is a helper function for quick message sending: it
// lazily starts a kernel when no ID is known yet, then opens the gateway's
// channels websocket for that kernel.
// NOTE(review): log.Fatalf terminates the whole process on a dial failure;
// consider returning the error to the caller instead.
func dialKernelWebSocket() *websocket.Conn {
	if currentKernel.ID == "" {
		GetKernel()
	}
	uri := fmt.Sprintf("%s/api/kernels/%s/channels", wsURI, currentKernel.ID)
	ws, err := websocket.Dial(uri, "", baseURI)
	if err != nil {
		log.Fatalf("Error dialing websocket: %s", err)
	}
	return ws
}
|
package config
import (
"github.com/caarlos0/env/v6"
)
// Config holds the application settings, read from the environment variables
// named in the tags, falling back to the envDefault values.
type Config struct {
	Env        string `env:"TODO_ENV" envDefault:"dev"`
	Port       int    `env:"PORT" envDefault:"80"`
	DBHost     string `env:"TODO_DB_HOST" envDefault:"127.0.0.1"`
	DBPort     int    `env:"TODO_DB_PORT" envDefault:"3306"`
	DBUser     string `env:"TODO_DB_USER" envDefault:"todo"`
	DBPassword string `env:"TODO_DB_PASSWORD" envDefault:"todo"`
	DBName     string `env:"TODO_DB_NAME" envDefault:"todo"`
}
// New parses the process environment into a Config, returning any parse error.
func New() (*Config, error) {
	var cfg Config
	if err := env.Parse(&cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}
|
package backoff
import (
"math"
"time"
)
// Backoff keeps track of connection retry attempts and calculates the delay between each one.
type Backoff struct {
	// attempt counts calls to NextDuration; MaxAttempts bounds TimeoutExceeded.
	attempt, MaxAttempts float64
	// Increment factor for each time step.
	Factor float64
	// Min and max intervals allowed for backoff intervals.
	MinInterval, MaxInterval time.Duration
}
// Defaults used by DefaultBackoff and as fallbacks for invalid configuration.
const (
	defaultMaxAttempts = 5
	defaultFactor      = 2.0
	defaultMinInterval = 1000 * time.Millisecond
	defaultMaxInterval = 16 * time.Second
	// maxInt64 is the largest float64 that can be safely converted to a
	// time.Duration; the 512 slack absorbs float64 rounding near MaxInt64.
	maxInt64 = float64(math.MaxInt64 - 512)
)
// DefaultBackoff creates a default configuration for Backoff, starting at
// attempt zero with the package default factor and interval bounds.
func DefaultBackoff() *Backoff {
	b := &Backoff{
		MaxAttempts: defaultMaxAttempts,
		Factor:      defaultFactor,
		MinInterval: defaultMinInterval,
		MaxInterval: defaultMaxInterval,
	}
	return b
}
// NextDuration returns the delay for the current attempt and then advances
// the attempt counter.
func (b *Backoff) NextDuration() time.Duration {
	defer func() { b.attempt++ }()
	return b.ForAttempt(b.attempt)
}
// TimeoutExceeded reports whether the attempt counter has reached MaxAttempts
// (a negative MaxAttempts is treated as zero). Despite the name it tracks the
// number of attempts, not elapsed time.
func (b *Backoff) TimeoutExceeded() bool {
	return b.attempt >= math.Max(0, b.MaxAttempts)
}
// ForAttempt calculates the appropriate exponential duration for the given
// attempt count. Non-positive configuration values fall back to the package
// defaults and the result is clamped to [MinInterval, MaxInterval].
func (b *Backoff) ForAttempt(attempt float64) time.Duration {
	lo, hi := b.MinInterval, b.MaxInterval
	if lo <= 0 {
		lo = defaultMinInterval
	}
	if hi <= 0 {
		hi = defaultMaxInterval
	}
	if lo >= hi {
		return hi
	}
	factor := b.Factor
	if factor <= 0 {
		factor = defaultFactor
	}
	// Grow exponentially in float space, guarding against overflow before
	// converting back to a time.Duration.
	durf := float64(lo) * math.Pow(factor, attempt)
	if durf > maxInt64 {
		return hi
	}
	switch dur := time.Duration(durf); {
	case dur < lo:
		return lo
	case dur > hi:
		return hi
	default:
		return dur
	}
}
// Reset resets the attempt counter so the next NextDuration starts again
// from the minimum interval.
func (b *Backoff) Reset() {
	b.attempt = 0
}
|
package main
import (
"errors"
"io/ioutil"
"log"
"os"
"regexp"
)
var (
	// hasPkgPat matches any line beginning with "ii" (dpkg's "installed"
	// status column); compiled once at package scope.
	hasPkgPat = regexp.MustCompile("(?m)^ii")
)
// main reads ./dpkg.txt and exits non-zero unless at least one line starts
// with "ii" (an installed package entry).
func main() {
	file, err := os.Open("./dpkg.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	contents, err := ioutil.ReadAll(file)
	if err != nil {
		log.Fatal(err)
	}
	if !hasPkgPat.Match(contents) {
		log.Fatal(errors.New("not matched"))
	}
}
|
/*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package http
import (
dispatcher "github.com/sk8sio/function-sidecar/pkg/dispatcher"
"net/http"
"bytes"
"io/ioutil"
"log"
"time"
"net"
retry "github.com/giantswarm/retry-go"
)
const UseTimeout = 10000000 // "Infinite" number of retries to override default and use the Timeout approach instead
// ConnectionAttemptTimeout bounds how long NewHttpDispatcher waits for the
// function container to start listening.
const ConnectionAttemptTimeout = 1 * time.Minute
// ConnectionAttemptInterval is the pause between dial probes.
const ConnectionAttemptInterval = 100 * time.Millisecond
// httpDispatcher forwards each message to the function served on localhost:8080.
type httpDispatcher struct {
}
// Dispatch POSTs the incoming string payload to the function listening on
// localhost:8080 and returns the response body as a string.
func (httpDispatcher) Dispatch(in interface{}) (interface{}, error) {
	slice := ([]byte)(in.(string))
	client := http.Client{
		Timeout: time.Duration(60 * time.Second),
	}
	resp, err := client.Post("http://localhost:8080", "text/plain", bytes.NewReader(slice))
	if err != nil {
		log.Printf("Error invoking http://localhost:8080: %v", err)
		return nil, err
	}
	defer resp.Body.Close()
	out, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// The original log call had no format verb for err (a `go vet` error),
		// so the error value was never printed.
		log.Printf("Error reading response: %v", err)
		return nil, err
	}
	return string(out), nil
}
// NewHttpDispatcher blocks until the function accepts TCP connections on
// localhost:8080 (retrying up to ConnectionAttemptTimeout), then returns an
// httpDispatcher. It panics if the function never becomes reachable.
func NewHttpDispatcher() dispatcher.Dispatcher {
	attemptDial := func() error {
		log.Println("Waiting for function to accept connection on localhost:8080")
		conn, err := net.Dial("tcp", "localhost:8080")
		if err != nil {
			return err
		}
		// The probe connection is only used to test reachability; the
		// original leaked it on every successful dial.
		conn.Close()
		return nil
	}
	err := retry.Do(attemptDial,
		retry.Timeout(ConnectionAttemptTimeout),
		retry.Sleep(ConnectionAttemptInterval),
		retry.MaxTries(UseTimeout))
	if err != nil {
		panic(err)
	}
	return httpDispatcher{}
}
|
package models
import "encoding/json"
// PaymentCodec (de)serializes a single Payment as JSON.
type PaymentCodec struct{}

// Encode marshals a value to JSON bytes.
func (c *PaymentCodec) Encode(value interface{}) ([]byte, error) {
	return json.Marshal(value)
}

// Decode unmarshals JSON bytes into a *Payment.
func (c *PaymentCodec) Decode(data []byte) (interface{}, error) {
	p := new(Payment)
	err := json.Unmarshal(data, p)
	return p, err
}
// PaymentListCodec (de)serializes a slice of Payment as JSON.
type PaymentListCodec struct{}

// Encode marshals a value to JSON bytes.
func (c *PaymentListCodec) Encode(value interface{}) ([]byte, error) {
	return json.Marshal(value)
}

// Decode unmarshals JSON bytes into a []Payment (returned by value, unlike
// the pointer returned by the single-Payment codec).
func (c *PaymentListCodec) Decode(data []byte) (interface{}, error) {
	var payments []Payment
	if err := json.Unmarshal(data, &payments); err != nil {
		return payments, err
	}
	return payments, nil
}
// ProcessedPaymentCodec (de)serializes a ProcessedPayment as JSON.
type ProcessedPaymentCodec struct{}

// Encode marshals a value to JSON bytes.
func (c *ProcessedPaymentCodec) Encode(value interface{}) ([]byte, error) {
	return json.Marshal(value)
}

// Decode unmarshals JSON bytes into a *ProcessedPayment.
func (c *ProcessedPaymentCodec) Decode(data []byte) (interface{}, error) {
	p := new(ProcessedPayment)
	err := json.Unmarshal(data, p)
	return p, err
}
|
package web_controller
import (
"2021/yunsongcailu/yunsong_server/common"
"2021/yunsongcailu/yunsong_server/param/web_param"
"2021/yunsongcailu/yunsong_server/tools"
"2021/yunsongcailu/yunsong_server/web/web_model"
"2021/yunsongcailu/yunsong_server/web/web_service"
"errors"
"fmt"
"github.com/gin-gonic/gin"
"strings"
)
var as = web_service.NewArticleServer()
// PostArticleIndex returns the articles shown for the first top-level category.
func PostArticleIndex(ctx *gin.Context) {
	var param web_param.ArticleParam
	if err := ctx.ShouldBind(&param); err != nil {
		common.Failed(ctx, "获取文章参数失败")
		return
	}
	articles, err := as.FindIndexArticle(param.Count, param.Start)
	if err != nil {
		common.Failed(ctx, "获取文章数据失败")
		return
	}
	common.Success(ctx, articles)
}
// PostOtherArticleIndex returns the front-page articles for every top-level
// category other than the first.
func PostOtherArticleIndex(ctx *gin.Context) {
	var param web_param.ArticleParam
	if err := ctx.ShouldBind(&param); err != nil {
		common.Failed(ctx, "获取文章参数失败")
		return
	}
	articles, err := as.FindMenuIndexArticle(param.Count, param.Start)
	if err != nil {
		common.Failed(ctx, "获取文章数据失败")
		return
	}
	common.Success(ctx, articles)
}
// PostMenuIdCategoryIdArticle returns articles for a given top-level category
// (menu) and sub-category pair.
func PostMenuIdCategoryIdArticle(ctx *gin.Context) {
	var param web_param.ArticleParam
	if err := ctx.ShouldBind(&param); err != nil {
		common.Failed(ctx, "获取文章参数失败")
		return
	}
	articles, err := as.FindMenuCategoryArticle(param.MenuId, param.CategoryId, param.Count, param.Start)
	if err != nil {
		common.Failed(ctx, "获取文章数据失败")
		return
	}
	common.Success(ctx, articles)
}
// PostMenuArticle returns articles belonging to a top-level category (menu)
// that have no sub-category.
func PostMenuArticle(ctx *gin.Context) {
	var param web_param.ArticleParam
	if err := ctx.ShouldBind(&param); err != nil {
		common.Failed(ctx, "获取文章参数失败")
		return
	}
	articles, err := as.FindMenuArticle(param.MenuId, param.Count, param.Start)
	if err != nil {
		common.Failed(ctx, "获取文章数据失败")
		return
	}
	common.Success(ctx, articles)
}
// PostArticleById returns a single article looked up by its id.
func PostArticleById(ctx *gin.Context) {
	var param web_param.ArticleParam
	if err := ctx.ShouldBind(&param); err != nil {
		common.Failed(ctx, "获取文章参数失败")
		return
	}
	article, err := as.GetArticleById(param.ArticleId)
	if err != nil {
		common.Failed(ctx, "获取文章数据失败")
		return
	}
	common.Success(ctx, article)
}
// PostArticleGoodOrBad updates an article's like ("good") or dislike ("bad")
// counter by id, based on the JobTag field of the request.
// NOTE(review): the new counter is computed from the client-supplied Star/Tread
// value rather than read-modify-write on the stored row, so concurrent votes
// can be lost — confirm whether the service layer compensates.
func PostArticleGoodOrBad(ctx *gin.Context) {
	var param web_param.ArticleParam
	if err := ctx.ShouldBind(&param); err != nil {
		common.Failed(ctx, "获取文章参数失败")
		return
	}
	update := new(web_model.ArticleModel)
	var err error
	switch param.JobTag {
	case "good":
		update.Star = param.Star + 1
		err = as.EditArticleOnlyValueById(param.ArticleId, "star", update)
	case "bad":
		update.Tread = param.Tread + 1
		err = as.EditArticleOnlyValueById(param.ArticleId, "tread", update)
	default:
		err = errors.New("非正常指令")
	}
	if err != nil {
		common.Failed(ctx, err.Error())
		return
	}
	common.Success(ctx, "ok")
}
// 根据关键字搜索文章
func PostSearchArticle(ctx *gin.Context) {
var articleParam web_param.ArticleParam
err := ctx.ShouldBind(&articleParam)
if err != nil {
common.Failed(ctx,"获取搜索关键词失败")
return
}
var keywordArr []string
var articleList []web_model.ArticleModel
arr := strings.Fields(articleParam.SearchKeyword)
for _,item := range arr {
itemArr := strings.Split(item,",")
for _,keyword := range itemArr {
if keyword == "" {
continue
}
keywordArr = append(keywordArr, keyword)
}
}
keywordArr = tools.RemoveStringByMap(keywordArr)
for _,keyword := range keywordArr {
fmt.Println(keyword)
res,err := as.FindArticleByKeyword("title",keyword,articleParam.Count,articleParam.Start)
if err != nil {
continue
}
articleList = append(articleList, res...)
}
common.Success(ctx,articleList)
} |
package tournament
import (
"bufio"
"fmt"
"io"
"sort"
"strings"
)
const header string = "Team | MP | W | D | L | P"
type team struct {
name string
wins int
losses int
draws int
}
func (t *team) points() int {
return (t.wins * 3) + t.draws
}
func (t *team) matchesPlayed() int {
return t.wins + t.draws + t.losses
}
func (t team) String() string {
return fmt.Sprintf("%-31s|%3d |%3d |%3d |%3d |%3d", t.name, t.matchesPlayed(), t.wins, t.draws, t.losses, t.points())
}
// Tally parses text from the reader to compute the standings for all teams, then writes a scorecard to the writer in the format
func Tally(rd io.Reader, wr io.Writer) error {
scanner := bufio.NewScanner(rd)
standings := map[string]*team{}
var line, team1, team2, result string
var match = []string{}
var err error
// Read input and parse
for scanner.Scan() {
line = scanner.Text()
line = strings.Trim(line, "\n")
if strings.HasPrefix(line, "#") || strings.TrimSpace(line) == "" {
continue
}
match = strings.Split(line, ";")
if len(match) != 3 {
return fmt.Errorf("Malformed record - expecting three fields, parsed %d fields from %s", len(match), line)
}
team1 = strings.TrimSpace(match[0])
team2 = strings.TrimSpace(match[1])
result = strings.ToLower(strings.TrimSpace(match[2]))
t1, present := standings[strings.ToLower(team1)]
if !present {
t1 = &team{name: team1}
standings[strings.ToLower(team1)] = t1
}
t2, present := standings[strings.ToLower(team2)]
if !present {
t2 = &team{name: team2}
standings[strings.ToLower(team2)] = t2
}
switch result {
case "draw":
t1.draws++
t2.draws++
case "win":
t1.wins++
t2.losses++
case "loss":
t1.losses++
t2.wins++
default:
return fmt.Errorf("Invalid match result %s in line %s", result, line)
}
}
// Write output
_, err = fmt.Fprintln(wr, header)
if err != nil {
return err
}
standingsSorted := []*team{}
for _, v := range standings {
standingsSorted = append(standingsSorted, v)
}
// Less function to sort on points, and alphabetically if points are tied
less := func(i, j int) bool {
iPoints := standingsSorted[i].points()
jPoints := standingsSorted[j].points()
if iPoints == jPoints {
return standingsSorted[i].name < standingsSorted[j].name
}
return iPoints > jPoints // use greater than to give order high to low
}
sort.Slice(standingsSorted, less)
for _, v := range standingsSorted {
_, err = fmt.Fprintln(wr, v)
if err != nil {
return err
}
}
return nil
}
|
package ds
import (
"database/sql"
"time"
)
// File describes a stored file and its metadata as exposed over the JSON API;
// fields tagged "-" are internal only.
type File struct {
	Id                int          `json:"-"`
	Bin               string       `json:"-"`
	Filename          string       `json:"filename"`
	InStorage         bool         `json:"-"`
	Mime              string       `json:"content-type"`
	Category          string       `json:"-"`
	Bytes             uint64       `json:"bytes"`
	BytesReadable     string       `json:"bytes_readable"`
	MD5               string       `json:"md5"`
	SHA256            string       `json:"sha256"`
	Downloads         uint64       `json:"-"`
	Updates           uint64       `json:"-"`
	IP                string       `json:"-"`
	ClientId          string       `json:"-"`
	Headers           string       `json:"-"`
	UpdatedAt         time.Time    `json:"updated_at"`
	UpdatedAtRelative string       `json:"updated_at_relative"`
	CreatedAt         time.Time    `json:"created_at"`
	CreatedAtRelative string       `json:"created_at_relative"`
	DeletedAt         sql.NullTime `json:"-"`
	DeletedAtRelative string       `json:"-"`
	URL               string       `json:"-"`
}

// IsReadable reports whether the file can be served: it must not be deleted
// and its content must actually be present in storage.
func (f *File) IsReadable() bool {
	// (was `if f.InStorage == false` — replaced with idiomatic negation)
	return !f.IsDeleted() && f.InStorage
}

// IsDeleted reports whether the file carries a valid, non-zero deletion
// timestamp.
func (f *File) IsDeleted() bool {
	return f.DeletedAt.Valid && !f.DeletedAt.Time.IsZero()
}
|
package handler
import (
"HumoAcademy/models"
"fmt"
"github.com/gin-gonic/gin"
"log"
"net/http"
"net/smtp"
"strconv"
)
const (
	// UsersCVDirectory is the path template for uploaded CV files:
	// fmt.Sprintf(UsersCVDirectory, timestamp, originalFilename).
	UsersCVDirectory = `images/users_cv/%s_%s`
)
//func getNewUsersCV(c *gin.Context) (string, error) {
// cv, err := c.FormFile("cv")
// if err != nil {
// log.Println("Error while receiving multipart form. error is", err.Error())
// return "", err
// }
//
// timeSign := fmt.Sprintf("%d",time.Now().UnixNano())
//
// cvPath := fmt.Sprintf(UsersCVDirectory, timeSign, cv.Filename)
//
// file, err := os.Create(cvPath)
// if err != nil {
// log.Println("Error while creating file for cv.", err.Error())
// return "", err
// }
// defer file.Close()
//
// err = c.SaveUploadedFile(cv, file.Name())
// if err != nil {
// log.Println("Error while saving the cv.", err.Error())
// return "", err
// }
// return cvPath, nil
//}
//
//func getNewUserMainJson(c *gin.Context) (models.Users, error) {
// var User models.Users
//
// form, err := c.MultipartForm()
// if err != nil {
// log.Println("Error while receiving multipart form. error is", err.Error())
// return models.Users{}, err
// }
//
// mainJson := form.Value["main_json"]
//
// err = json.Unmarshal([]byte(mainJson[0]), &User)
// if err != nil {
// log.Println("json unmarshal error:", err.Error())
// return models.Users{}, err
// }
//
// return User, nil
//}
// getAllSubscribedUsers returns the email addresses of all subscribed users.
// Requires a valid admin id and admin level in the request context.
func (h *Handler) getAllSubscribedUsers(c *gin.Context) {
	if _, err := getAdminId(c); err != nil { //TODO: (adminId) check id
		NewErrorResponse(c, http.StatusUnauthorized, "bad", "invalid admins id param")
		return
	}
	if _, err := getAdminLevel(c); err != nil { //TODO: (adminLevel) check for admin level
		NewErrorResponse(c, http.StatusUnauthorized, "bad", "invalid admins level param")
		return
	}
	emails, err := h.services.User.GetAllSubscribedUsers()
	if err != nil {
		NewErrorResponse(c, http.StatusInternalServerError, "bad", err.Error())
		log.Println(err)
		return
	}
	if emails == nil {
		// Return an empty JSON array rather than null.
		emails = []string{}
	}
	c.JSON(http.StatusOK, emails)
}
// SendMail sends the supplied message to every subscribed user via
// smtp.mail.ru, authenticating with the sender credentials in the request.
func (h *Handler) SendMail(c *gin.Context) {
	var msg models.MSG
	if err := c.BindJSON(&msg); err != nil {
		NewErrorResponse(c, http.StatusBadRequest, "bad", err.Error())
		return
	}
	// (removed the original `fmt.Println(msg)`: it printed the struct —
	// including the sender's password — to stdout)
	// RFC 5322 message layout: a "Subject:" header (no space before the colon,
	// which the original had) followed by a blank line separating headers from
	// the body. The original omitted the blank line, so the body was parsed as
	// a malformed header.
	message := []byte(fmt.Sprintf("Subject: %s\r\n\r\n%s", msg.Subject, msg.Message))
	auth := smtp.PlainAuth("", msg.Email, msg.Password, "smtp.mail.ru")
	to, err := h.services.User.GetAllSubscribedUsers()
	if err != nil {
		NewErrorResponse(c, http.StatusInternalServerError, "bad", err.Error())
		log.Println(err)
		return
	}
	if err := smtp.SendMail("smtp.mail.ru:25", auth, msg.Email, to, message); err != nil {
		NewErrorResponse(c, http.StatusInternalServerError, "bad", err.Error())
		log.Println(err)
		return
	}
	c.JSON(http.StatusOK, map[string]interface{}{
		"status":  "ok",
		"message": "sms mailing was successfully completed",
	})
}
// createUser registers a new user from the JSON request body and returns the
// created user's id.
func (h *Handler) createUser(c *gin.Context) {
	var user models.Users
	if err := c.BindJSON(&user); err != nil {
		NewErrorResponse(c, http.StatusBadRequest, "bad", err.Error())
		return
	}
	id, err := h.services.User.CreateUser(user)
	if err != nil {
		NewErrorResponse(c, http.StatusUnprocessableEntity, "bad", err.Error())
		return
	}
	c.JSON(http.StatusOK, map[string]interface{}{
		"id":     id,
		"status": "ok",
	})
}
// getAllCourseUsers lists the users enrolled in the course given by the
// :course_id path parameter. Requires valid admin credentials in the context.
func (h *Handler) getAllCourseUsers(c *gin.Context) {
	if _, err := getAdminId(c); err != nil { //TODO: (adminId) check id
		NewErrorResponse(c, http.StatusUnauthorized, "bad", "invalid admins id param")
		return
	}
	if _, err := getAdminLevel(c); err != nil { //TODO: (adminLevel) check for admin level
		NewErrorResponse(c, http.StatusUnauthorized, "bad", "invalid admins level param")
		return
	}
	courseId, err := strconv.Atoi(c.Param("course_id"))
	if err != nil {
		NewErrorResponse(c, http.StatusBadRequest, "bad", err.Error())
		return
	}
	usersList, err := h.services.User.GetAllCourseUsers(courseId)
	if err != nil {
		NewErrorResponse(c, http.StatusInternalServerError, "bad", err.Error())
		return
	}
	if usersList.UsersList == nil {
		// Return an empty JSON array rather than null.
		usersList.UsersList = []models.Users{}
	}
	c.JSON(http.StatusOK, usersList)
}
// getUserById returns a single user looked up by the :id path parameter.
// Requires valid admin credentials in the context.
func (h *Handler) getUserById(c *gin.Context) {
	if _, err := getAdminId(c); err != nil { //TODO: (adminId) check id
		NewErrorResponse(c, http.StatusUnauthorized, "bad", "invalid admins id param")
		return
	}
	if _, err := getAdminLevel(c); err != nil { //TODO: (adminLevel) check for admin level
		NewErrorResponse(c, http.StatusUnauthorized, "bad", "invalid admins level param")
		return
	}
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		NewErrorResponse(c, http.StatusBadRequest, "bad", "invalid id param")
		return
	}
	user, err := h.services.User.GetUserById(id)
	if err != nil {
		NewErrorResponse(c, http.StatusNotFound, "bad", err.Error())
		return
	}
	c.JSON(http.StatusOK, user)
}
func (h *Handler) deleteUserById (c *gin.Context) {
_ , err := getAdminId(c) //TODO: (adminId) check id
if err != nil {
NewErrorResponse(c, http.StatusUnauthorized, "bad","invalid admins id param")
return
}
_ , err = getAdminLevel(c) //TODO: (adminLevel) check for admin level
if err != nil {
NewErrorResponse(c, http.StatusUnauthorized, "bad","invalid admins level param")
return
}
id, err := strconv.Atoi(c.Param("id"))
if err != nil {
NewErrorResponse(c, http.StatusBadRequest, "bad","invalid id param")
return
}
err = h.services.User.DeleteUserByID(id)
if err != nil {
NewErrorResponse(c, http.StatusInternalServerError, "bad", err.Error())
return
}
c.JSON(http.StatusOK, map[string]interface{}{
"status": "ok",
"message": "user was successfully deleted",
})
} |
package v1
import "github.com/julienschmidt/httprouter"
var (
	// getDataByCategory wires the category data endpoints onto the router.
	getDataByCategory GetDataByCategoryRouter
)

// Router registers all v1 API routes.
type Router struct {
}

// RegisterRouter attaches every v1 handler to the given httprouter mux.
func (r Router) RegisterRouter(mux *httprouter.Router) {
	getDataByCategory.RegisterHandler(mux)
}
// Copyright 2017 Jeff Foley. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package sources
import (
"fmt"
"github.com/OWASP/Amass/amass/core"
"github.com/OWASP/Amass/amass/utils"
)
// Censys is data source object type that implements the DataSource interface.
type Censys struct {
	BaseDataSource
}

// NewCensys returns an initialized Censys as a DataSource.
func NewCensys(srv core.AmassService) DataSource {
	censys := &Censys{
		BaseDataSource: *NewBaseDataSource(srv, core.CERT, "Censys"),
	}
	return censys
}
// Query returns the subdomain names discovered when querying this data source.
// Only root-domain queries are performed (it returns nothing unless sub equals
// domain); names are scraped from the Censys certificate table page.
func (c *Censys) Query(domain, sub string) []string {
	var unique []string
	if domain != sub {
		return unique
	}
	url := c.getURL(domain)
	page, err := utils.GetWebPage(url, nil)
	if err != nil {
		// Fetch failures are logged and yield an empty result.
		c.Service.Config().Log.Printf("%s: %v", url, err)
		return unique
	}
	// Mark the service active so the scheduler knows work is happening.
	c.Service.SetActive()
	// Collect every string matching the subdomain pattern, de-duplicating.
	re := utils.SubdomainRegex(domain)
	for _, sd := range re.FindAllString(page, -1) {
		if u := utils.NewUniqueElements(unique, sd); len(u) > 0 {
			unique = append(unique, u...)
		}
	}
	return unique
}
// getURL builds the Censys certificate-table page URL for a domain.
func (c *Censys) getURL(domain string) string {
	return fmt.Sprintf("https://www.censys.io/domain/%s/table", domain)
}
|
package hydrator
import (
"context"
"net/http"
"net/url"
httpclient "github.com/asecurityteam/component-httpclient"
)
// NexposeConfig holds configuration to connect to Nexpose
// and make a call to the fetch assets API
type NexposeConfig struct {
	HTTPClient *httpclient.Config `description:"The HTTP client config from github.com/asecurityteam/component-httpclient."`
	Host       string             `description:"The scheme and host of a Nexpose instance."`
	PageSize   int                `description:"The number of assets that should be returned from the Nexpose API at one time."`
}

// Name is used by the settings library and will add a "NEXPOSE_"
// prefix to NexposeConfig environment variables
func (c *NexposeConfig) Name() string {
	return "Nexpose"
}
// NexposeComponent satisfies the settings library Component
// API, and may be used by the settings.NewComponent function.
type NexposeComponent struct {
	HTTP *httpclient.Component
}

// NewNexposeComponent generates a NexposeComponent.
func NewNexposeComponent() *NexposeComponent {
	return &NexposeComponent{
		HTTP: httpclient.NewComponent(),
	}
}

// Settings can be used to populate default values if there are any.
// The default page size is 100; Host has no default and must be configured.
func (c *NexposeComponent) Settings() *NexposeConfig {
	return &NexposeConfig{
		HTTPClient: c.HTTP.Settings(),
		PageSize:   100,
	}
}
// New constructs a NexposeClient from a config: it builds the HTTP transport
// from the client config and parses the configured host URL.
func (c *NexposeComponent) New(ctx context.Context, config *NexposeConfig) (*NexposeClient, error) {
	// Use a single, consistently named error variable (the original mixed
	// `e` and `err`).
	rt, err := c.HTTP.New(ctx, config.HTTPClient)
	if err != nil {
		return nil, err
	}
	host, err := url.Parse(config.Host)
	if err != nil {
		return nil, err
	}
	return &NexposeClient{
		HTTPClient: &http.Client{Transport: rt},
		Host:       host,
		PageSize:   config.PageSize,
	}, nil
}
|
package main
import (
"errors"
"image"
"image/draw"
"image/png"
"os"
"github.com/go-gl/gl/v4.1-core/gl"
)
// textureFromData uploads img into a new OpenGL 2D texture bound on the given
// texture unit and returns the generated texture name.
func textureFromData(img image.Image, id uint32) (uint32, error) {
	// Convert to RGBA so the pixel data has the layout glTexImage2D expects.
	rgba := image.NewRGBA(img.Bounds())
	draw.Draw(rgba, rgba.Bounds(), img, image.Pt(0, 0), draw.Src)
	if rgba.Stride != rgba.Rect.Size().X*4 {
		return 0, errors.New("Incorrect stride")
	}
	var textureID uint32
	gl.GenTextures(1, &textureID)
	gl.ActiveTexture(id)
	gl.BindTexture(gl.TEXTURE_2D, textureID)
	gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST)
	gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST)
	width := int32(rgba.Rect.Size().X)
	height := int32(rgba.Rect.Size().Y)
	gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGBA, width, height, 0, gl.RGBA, gl.UNSIGNED_BYTE, gl.Ptr(rgba.Pix))
	// BUG FIX: glGenerateMipmap takes a texture *target*, not a texture name.
	// The original passed textureID, which compiled (both are uint32) but did
	// not generate mipmaps for the bound TEXTURE_2D target.
	gl.GenerateMipmap(gl.TEXTURE_2D)
	return textureID, nil
}
// NewTextureFromFile decodes the PNG at the given path and uploads it as an
// OpenGL texture on the given texture unit, returning the texture name.
func NewTextureFromFile(file string, id uint32) (uint32, error) {
	imgFile, err := os.Open(file)
	if err != nil {
		return 0, err
	}
	defer imgFile.Close()
	decoded, err := png.Decode(imgFile)
	if err != nil {
		return 0, err
	}
	return textureFromData(decoded, id)
}
|
package domain
import (
"context"
"github.com/go-kit/log"
"github.com/google/uuid"
)
// ServiceInterface defines the domain's Service interface: CRUD operations
// over courses and subscriptions, keyed by UUID.
type ServiceInterface interface {
	Course(ctx context.Context, id uuid.UUID) (Course, error)
	Courses(ctx context.Context) ([]Course, error)
	CreateCourse(ctx context.Context, c *Course) error
	UpdateCourse(ctx context.Context, c *Course) error
	DeleteCourse(ctx context.Context, courseID uuid.UUID) error
	Subscription(ctx context.Context, id uuid.UUID) (Subscription, error)
	Subscriptions(ctx context.Context) ([]Subscription, error)
	CreateSubscription(ctx context.Context, cs *Subscription) error
	UpdateSubscription(ctx context.Context, cs *Subscription) error
	DeleteSubscription(ctx context.Context, id uuid.UUID) error
}
// serviceConfiguration is a functional option applied by NewService.
type serviceConfiguration func(svc *Service) error

// Service implements the domain service over injected repositories.
type Service struct {
	courses       CourseRepository
	subscriptions SubscriptionRepository
	logger        log.Logger
}
// NewService creates a new domain Service instance, applying each
// configuration option in order and failing on the first error.
func NewService(cfgs ...serviceConfiguration) (*Service, error) {
	svc := new(Service)
	for _, apply := range cfgs {
		if err := apply(svc); err != nil {
			return nil, err
		}
	}
	return svc, nil
}
// WithCourseRepository injects the course repository to the domain Service
func WithCourseRepository(cr CourseRepository) serviceConfiguration {
	return func(svc *Service) error {
		svc.courses = cr
		return nil
	}
}

// WithSubscriptionRepository injects the subscription repository to the domain Service
func WithSubscriptionRepository(sr SubscriptionRepository) serviceConfiguration {
	return func(svc *Service) error {
		svc.subscriptions = sr
		return nil
	}
}

// WithLogger injects the logger to the domain Service
func WithLogger(l log.Logger) serviceConfiguration {
	return func(svc *Service) error {
		svc.logger = l
		return nil
	}
}
|
/*
Description
A 3x3 magic square is a 3x3 grid of the numbers 1-9 such that each row, column, and major diagonal adds up to 15. Here's an example:
8 1 6
3 5 7
4 9 2
The major diagonals in this example are 8 + 5 + 2 and 6 + 5 + 4. (Magic squares have appeared here on r/dailyprogrammer before, in #65 [Difficult] in 2012.)
Write a function that, given a grid containing the numbers 1-9, determines whether it's a magic square. Use whatever format you want for the grid, such as a 2-dimensional array, or a 1-dimensional array of length 9, or a function that takes 9 arguments. You do not need to parse the grid from the program's input, but you can if you want to. You don't need to check that each of the 9 numbers appears in the grid: assume this to be true.
Example inputs/outputs
[8, 1, 6, 3, 5, 7, 4, 9, 2] => true
[2, 7, 6, 9, 5, 1, 4, 3, 8] => true
[3, 5, 7, 8, 1, 6, 4, 9, 2] => false
[8, 1, 6, 7, 5, 3, 4, 9, 2] => false
Optional bonus 1
Verify magic squares of any size, not just 3x3.
Optional bonus 2
Write another function that takes a grid whose bottom row is missing, so it only has the first 2 rows (6 values). This function should return true if it's possible to fill in the bottom row to make a magic square. You may assume that the numbers given are all within the range 1-9 and no number is repeated. Examples:
[8, 1, 6, 3, 5, 7] => true
[3, 5, 7, 8, 1, 6] => false
Hint: it's okay for this function to call your function from the main challenge.
This bonus can also be combined with optional bonus 1. (i.e. verify larger magic squares that are missing their bottom row.)
*/
package main
import (
"fmt"
"image"
)
// main exercises verify on complete grids and gen on grids whose bottom row
// is unknown (zeroed), printing each grid with its result.
func main() {
	complete := [][][]uint{
		{{8, 1, 6}, {3, 5, 7}, {4, 9, 2}},
		{{2, 7, 6}, {9, 5, 1}, {4, 3, 8}},
		{{3, 5, 7}, {8, 1, 6}, {4, 9, 2}},
		{{8, 1, 6}, {7, 5, 3}, {4, 9, 2}},
	}
	for _, m := range complete {
		check(m)
	}
	partial := [][][]uint{
		{{8, 1, 6}, {3, 5, 7}, {0, 0, 0}},
		{{3, 5, 7}, {8, 1, 6}, {0, 0, 0}},
	}
	for _, m := range partial {
		checkgen(m)
	}
}
// check prints the grid followed by whether verify accepts it as a magic square.
func check(m [][]uint) {
	fmt.Printf("%v %v\n", m, verify(m))
}
// checkgen prints whether a grid with unknown (zero) cells can be completed
// into a magic square: the completed grid on success, the original otherwise.
func checkgen(m [][]uint) {
	backup := dup(m)
	if gen(m) {
		fmt.Printf("%v %v\n", m, true)
	} else {
		fmt.Printf("%v %v\n", backup, false)
	}
}
// gen tries to complete a grid whose zero entries are unknowns so that it
// becomes a magic square. It fills m in place and reports success.
func gen(m [][]uint) bool {
	if !issquare(m) {
		return false
	}
	var (
		holes  []image.Point // coordinates of the zero (unknown) cells
		unused []uint        // values 1..n*n not present in the grid
	)
	seen := make([]bool, len(m)*len(m))
	for i := range m {
		for j, v := range m[i] {
			switch {
			case v > uint(len(m)*len(m)):
				return false // value out of range
			case v == 0:
				holes = append(holes, image.Pt(i, j))
			case seen[v-1]:
				return false // duplicate value
			default:
				seen[v-1] = true
			}
		}
	}
	for i := range seen {
		if !seen[i] {
			unused = append(unused, uint(i+1))
		}
	}
	if len(unused) != len(holes) {
		return false
	}
	// Try every assignment of the unused values to the holes.
	// BUG FIX: the original advanced the permutation before testing, so the
	// identity permutation (unused values in ascending order) was never tried.
	d := newperm(len(unused))
	for {
		for i := range d {
			pt := holes[i]
			m[pt.X][pt.Y] = unused[d[i].index]
		}
		if verify(m) {
			return true
		}
		if !nextperm(d) {
			return false
		}
	}
}
// dup returns a deep copy of the grid: mutating the copy (or its rows) leaves
// the original untouched.
func dup(m [][]uint) [][]uint {
	clone := make([][]uint, len(m))
	for i, row := range m {
		clone[i] = make([]uint, len(row))
		copy(clone[i], row)
	}
	return clone
}
// issquare reports whether m is a non-empty n x n grid.
func issquare(m [][]uint) bool {
	if len(m) == 0 {
		return false
	}
	for i := range m {
		if len(m[i]) != len(m) {
			return false
		}
	}
	return true
}

// verify reports whether m is a magic square: an n x n grid holding each of
// 1..n*n exactly once where every row, column, the main diagonal and the
// anti-diagonal all sum to the same value.
func verify(m [][]uint) bool {
	if !issquare(m) {
		return false
	}
	// Each entry must be in 1..n*n and unique.
	u := make([]bool, len(m)*len(m))
	for i := range m {
		for _, v := range m[i] {
			if !(0 < v && v <= uint(len(m)*len(m))) || u[v-1] {
				return false
			}
			u[v-1] = true
		}
	}
	for i := range u {
		if !u[i] {
			return false
		}
	}
	// Rows: establish the magic constant M from the first row.
	M := uint(0)
	for i := range m {
		N := uint(0)
		for j := range m[i] {
			N += m[i][j]
		}
		if M == 0 {
			M = N
		}
		if M != N {
			return false
		}
	}
	// Columns.
	for i := range m {
		N := uint(0)
		for j := range m {
			N += m[j][i]
		}
		if M != N {
			return false
		}
	}
	// Diagonal and anti-diagonal.
	// BUG FIX: the original summed m[len(m)-1-i][len(m)-1-i], which is the
	// main diagonal traversed backwards, so the anti-diagonal was never
	// checked at all.
	N := uint(0)
	O := uint(0)
	for i := range m {
		N += m[i][i]
		O += m[i][len(m)-1-i]
	}
	if M != N || M != O {
		return false
	}
	return true
}
// steinhaus-johnson-trotter permutation algorithm.
// dint is a directed integer: a value plus the direction it currently points.
type dint struct {
	index int
	dir   bool
}

// newperm returns the identity permutation of n directed integers, all with
// the initial (false) direction.
func newperm(n int) []dint {
	perm := make([]dint, n)
	for i := 0; i < n; i++ {
		perm[i] = dint{index: i}
	}
	return perm
}
// nextperm advances p to its successor in Steinhaus-Johnson-Trotter order:
// the largest mobile element is swapped with the neighbor it points at, then
// every larger element has its direction flipped. It returns false when no
// element is mobile, i.e. all permutations have been produced.
func nextperm(p []dint) bool {
	x, y, v := largest(p)
	if x == -1 {
		return false
	}
	p[x], p[y] = p[y], p[x]
	reverse(p, v)
	return true
}
// largest locates the largest "mobile" element: one whose direction points at
// an adjacent neighbor with a smaller value (dir == false points left,
// dir == true points right). It returns the element's position x, the
// neighbor's position y it should swap with, and its value v; x == -1 means
// no element is mobile.
func largest(p []dint) (x, y, v int) {
	x, y, v = -1, -1, -1
	for i := range p {
		switch {
		case i > 0 && !p[i].dir && p[i].index > p[i-1].index && p[i].index > v:
			x, y, v = i, i-1, p[i].index
		case i < len(p)-1 && p[i].dir && p[i].index > p[i+1].index && p[i].index > v:
			x, y, v = i, i+1, p[i].index
		}
	}
	return
}
// reverse flips the direction of every element whose value exceeds v.
func reverse(p []dint, v int) {
	for i, d := range p {
		if d.index > v {
			p[i].dir = !d.dir
		}
	}
}
|
package command
import "errors"
// ErrNotJSONFile represents not JSON file error
var ErrNotJSONFile = errors.New("Not JSON file")
// GsonNvim is GsonNvim base struct
type GsonNvim struct{}
|
package strategy
import (
"sync"
)
/*
 * Doubly linked list, safe for concurrent use via an internal RWMutex.
 */

// Node is one element of the list.
type Node struct {
	data interface{}
	prev *Node
	next *Node
}

// ListObj is a thread-safe doubly linked list.
type ListObj struct {
	head   *Node
	tail   *Node
	length uint
	mutex  *sync.RWMutex
}

// ListInstance returns a new, empty list.
func ListInstance() *ListObj {
	return &ListObj{mutex: new(sync.RWMutex)}
}

// Append adds data at the tail. It reports false for nil data.
func (l *ListObj) Append(data interface{}) bool {
	if data == nil {
		return false
	}
	l.mutex.Lock()
	defer l.mutex.Unlock()
	node := &Node{data: data}
	if l.length == 0 {
		l.head = node
		l.tail = node
	} else {
		node.prev = l.tail
		l.tail.next = node
		l.tail = node
	}
	l.length++
	return true
}

// Insert places data before position index (0 inserts at the head;
// index == length appends at the tail). It reports false for nil data or an
// out-of-range index.
func (l *ListObj) Insert(index uint, data interface{}) bool {
	if data == nil || index > l.length {
		return false
	}
	l.mutex.Lock()
	defer l.mutex.Unlock()
	node := &Node{data: data}
	switch {
	case l.length == 0:
		// BUG FIX: the original dereferenced the nil head when inserting
		// into an empty list.
		l.head = node
		l.tail = node
	case index == 0:
		node.next = l.head
		l.head.prev = node
		l.head = node
	case index == l.length:
		// BUG FIX: the original dereferenced a nil next pointer when
		// inserting at position length (append).
		node.prev = l.tail
		l.tail.next = node
		l.tail = node
	default:
		ptr := l.head
		for i := uint(1); i < index; i++ {
			ptr = ptr.next
		}
		next := ptr.next
		ptr.next = node
		node.prev = ptr
		node.next = next
		next.prev = node
	}
	l.length++
	return true
}

// Delete removes the element at index, reporting whether anything was removed.
func (l *ListObj) Delete(index uint) bool {
	if l == nil {
		return false
	}
	l.mutex.Lock()
	defer l.mutex.Unlock()
	// BUG FIX: `index > l.length-1` underflowed for an empty list (uint),
	// letting the walk below dereference nil pointers.
	if index >= l.length {
		return false
	}
	if index == 0 {
		l.head = l.head.next
		if l.head != nil {
			l.head.prev = nil
		} else {
			// BUG FIX: removing the sole element left head nil and the
			// original then dereferenced it; also clear the tail.
			l.tail = nil
		}
		l.length--
		return true
	}
	ptr := l.head
	for i := uint(1); i < index; i++ {
		ptr = ptr.next
	}
	victim := ptr.next
	ptr.next = victim.next
	if victim.next != nil {
		victim.next.prev = ptr
	}
	if index == l.length-1 {
		l.tail = ptr
	}
	l.length--
	return true
}

// Get returns the node at index, or nil when index is out of range.
func (l *ListObj) Get(index uint) *Node {
	if l == nil {
		return nil
	}
	l.mutex.RLock()
	defer l.mutex.RUnlock()
	// BUG FIX: bounds check no longer underflows on an empty list.
	if index >= l.length {
		return nil
	}
	node := l.head
	for i := uint(0); i < index; i++ {
		node = node.next
	}
	return node
}

/*
 * Find searches the list for an element whose data equals the argument,
 * walking inward from both ends simultaneously.
 */
// BUG FIX: the original loop terminated when the two cursors met, skipping
// the middle element of odd-length lists, double-scanned even-length lists,
// and underflowed its uint cursors on an empty list.
func (l *ListObj) Find(data interface{}) *Node {
	if l == nil {
		return nil
	}
	l.mutex.RLock()
	defer l.mutex.RUnlock()
	head, tail := l.head, l.tail
	for head != nil {
		if head.data == data {
			return head
		}
		if tail.data == data {
			return tail
		}
		if head == tail || head.next == tail {
			break // cursors met or crossed: every element checked
		}
		head = head.next
		tail = tail.prev
	}
	return nil
}
|
package model
import (
"github.com/smartystreets/assertions"
"testing"
)
// TestUtils verifies that GetSexTypeFromString maps the known spellings
// ("man", "female", "other") onto the corresponding sex-type constants.
//
// BUG FIX: assertions.ShouldEqual / ShouldBeNil only *return* a failure
// message string — they do not fail the test by themselves. The original
// discarded those return values, so this test could never fail. The
// results are now funnelled through t.Error.
func TestUtils(t *testing.T) {
	check := func(msg string) {
		if msg != "" {
			t.Error(msg)
		}
	}
	fromString, err := GetSexTypeFromString("man")
	check(assertions.ShouldEqual(fromString, Male))
	check(assertions.ShouldBeNil(err))
	female, err2 := GetSexTypeFromString("female")
	check(assertions.ShouldEqual(female, FEMALE))
	check(assertions.ShouldBeNil(err2))
	other, err3 := GetSexTypeFromString("other")
	check(assertions.ShouldBeNil(err3))
	check(assertions.ShouldEqual(other, Other))
}
|
package main
/**
*本题题意:给出一个int,反轉整形數
*/
func reverse(x int) int {
var(
y int
re int = 0
maxint int = 2147483647
)
if x < 0{
y = -x
} else {
y = x
}
for y > 0{
if re != 0 && maxint / re < 10{
return 0
}
re *= 10
re += y%10
y /= 10
}
if x < 0{
return -re
}
return re
}
|
package storage
// SQL statement templates used by the storage layer; each %s placeholder
// is filled in with fmt.Sprintf before execution.
const (
	// Renames a table using the standard / PostgreSQL RENAME TO syntax.
	queryFmtRenameTable = `
ALTER TABLE %s
RENAME TO %s;`
	// Renames a table using MySQL's RENAME clause (no TO keyword).
	queryFmtMySQLRenameTable = `
ALTER TABLE %s
RENAME %s;`
	// Locks a PostgreSQL table in the given lock mode.
	queryFmtPostgreSQLLockTable = `LOCK TABLE %s IN %s MODE;`
	// Counts the rows of a table by its id column.
	queryFmtSelectRowCount = `
SELECT COUNT(id)
FROM %s;`
)
|
package main
// Dimension is a pair of 64-bit coordinates/extents.
type Dimension struct {
	x int64 // horizontal component
	y int64 // vertical component
}
|
package models
import (
"github.com/astaxie/beego/orm"
_ "github.com/go-sql-driver/mysql"
"time"
)
// User is a registered account. Users and Articles form a many-to-many
// relation; this is the reverse side (the forward side is Article.Users).
type User struct {
	Id       int
	UserName string `orm:"unique"`
	Pwd      string
	Articles []*Article `orm:"reverse(many)"` // reverse side of the m2m relation (sides are interchangeable)
}
// Available beego-orm relation tags: rel(fk), reverse(many), rel(m2m), rel, reverse.
// Article is a published post.
type Article struct {
	Id      int       `orm:"pk;auto"`
	Title   string    `orm:"size(100)"`
	Time    time.Time `orm:"type(datetime);auto_now"`
	Count   int       `orm:"default(0)"`
	Img     string    `orm:"null"`
	Content string
	ArticleType *ArticleType `orm:"rel(fk);set_null;null"` // forward one-to-many: the "many" side carries the rel(fk) foreign key
	Users       []*User      `orm:"rel(m2m)"`              // forward side of the many-to-many relation with User
	// NOTE(review): price is unexported, so beego orm will not map or
	// persist it — presumably this should be an exported `Price`; confirm
	// against the rest of the package before changing.
	price float64 `orm:"digits(10);decimals(2)"`
}
// ArticleType is a category an Article can belong to (one type, many
// articles).
type ArticleType struct {
	Id       int
	TypeName string     `orm:"size(20);unique"`
	Articles []*Article `orm:"reverse(many)"` // reverse side of the one-to-many relation
}
// The sync below also creates the one-to-many type table and the
// many-to-many user/article join table.
func init(){
	// Schema setup happens in three steps:
	// 1. Register the database; "default" is the mandatory alias of the
	//    primary connection.
	// NOTE(review): credentials are hard-coded here — move them to
	// configuration before shipping.
	orm.RegisterDataBase("default","mysql","root:123456@tcp(127.0.0.1:3306)/newsWeb?charset=utf8")
	// 2. Register the model structs with the ORM.
	orm.RegisterModel(new(User),new(Article),new(ArticleType))
	// 3. Sync the schema (force=false, verbose=true); error return is
	//    ignored here.
	orm.RunSyncdb("default",false,true)
}
|
package mykafka
import (
"context"
"fmt"
"github.com/Shopify/sarama"
"github.com/spaolacci/murmur3"
"log"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"testing"
"time"
)
// TestComsumerGroup runs a sarama consumer group end-to-end: it builds a
// config (sticky partition assignment, start from oldest offsets), runs
// the consume loop in a goroutine, waits until the consumer signals
// readiness, then blocks until the context ends or SIGINT/SIGTERM
// arrives before shutting down.
// NOTE(review): this is an integration test — it requires a reachable
// Kafka broker at 192.168.182.132:9092 and never finishes on its own.
func TestComsumerGroup(t *testing.T) {
	assignor := "sticky"
	brokers := "192.168.182.132:9092"
	group := "group1"
	topics := "sun"
	oldest := true
	log.Println("start new sarama consumer!")
	// Route sarama's internal logging to stdout for debugging.
	sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
	version, err := sarama.ParseKafkaVersion("2.1.1")
	if err != nil {
		log.Panicf("error parsing kafka version:%v", err)
	}
	config := sarama.NewConfig()
	config.Version = version
	if oldest {
		// With no committed offset for the group, begin at the earliest
		// available message.
		config.Consumer.Offsets.Initial = sarama.OffsetOldest
	}
	// Select the partition-assignment strategy by name.
	switch assignor {
	case "sticky":
		config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky
	case "roundrobin":
		config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
	case "range":
		config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange
	default:
		log.Panicf("Unrecognized consumer group partition assignor: %s", assignor)
	}
	consumer := Consumer{
		ready: make(chan bool),
	}
	ctx, cancel := context.WithCancel(context.Background())
	client, err := sarama.NewConsumerGroup(strings.Split(brokers, ","), group, config)
	if err != nil {
		log.Panicf("Error creating consumer group client: %v", err)
	}
	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			// `Consume` should be called inside an infinite loop, when a
			// server-side rebalance happens, the consumer session will need to be
			// recreated to get the new claims
			if err := client.Consume(ctx, strings.Split(topics, ","), &consumer); err != nil {
				log.Panicf("Error from consumer: %v", err)
			}
			// check if context was cancelled, signaling that the consumer should stop
			if ctx.Err() != nil {
				return
			}
			// Re-arm the ready channel for the next session (Setup closes it).
			consumer.ready = make(chan bool)
		}
	}()
	<-consumer.ready // Await till the consumer has been set up
	log.Println("Sarama consumer up and running!...")
	sigterm := make(chan os.Signal, 1)
	signal.Notify(sigterm, syscall.SIGINT, syscall.SIGTERM)
	// Block until either the context ends or the process is signalled.
	select {
	case <-ctx.Done():
		log.Println("terminating: context cancelled")
	case <-sigterm:
		log.Println("terminating: via signal")
	}
	cancel()
	wg.Wait()
	if err = client.Close(); err != nil {
		log.Panicf("Error closing client: %v", err)
	}
}
// TestComsumerGroup2 duplicates TestComsumerGroup (same broker, group,
// topic and assignment-strategy selection) minus some comments.
// NOTE(review): consider deleting one copy or extracting the shared
// setup into a helper.
func TestComsumerGroup2(t *testing.T) {
	assignor := "sticky"
	brokers := "192.168.182.132:9092"
	group := "group1"
	topics := "sun"
	oldest := true
	log.Println("start new sarama consumer!")
	// Route sarama's internal logging to stdout for debugging.
	sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
	version, err := sarama.ParseKafkaVersion("2.1.1")
	if err != nil {
		log.Panicf("error parsing kafka version:%v", err)
	}
	config := sarama.NewConfig()
	config.Version = version
	if oldest {
		// With no committed offset for the group, begin at the earliest
		// available message.
		config.Consumer.Offsets.Initial = sarama.OffsetOldest
	}
	// Select the partition-assignment strategy by name.
	switch assignor {
	case "sticky":
		config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky
	case "roundrobin":
		config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
	case "range":
		config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange
	default:
		log.Panicf("Unrecognized consumer group partition assignor: %s", assignor)
	}
	consumer := Consumer{
		ready: make(chan bool),
	}
	ctx, cancel := context.WithCancel(context.Background())
	client, err := sarama.NewConsumerGroup(strings.Split(brokers, ","), group, config)
	if err != nil {
		log.Panicf("Error creating consumer group client: %v", err)
	}
	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			// Consume must be re-invoked after every server-side rebalance.
			if err := client.Consume(ctx, strings.Split(topics, ","), &consumer); err != nil {
				log.Panicf("Error from consumer: %v", err)
			}
			// Context cancelled: stop consuming.
			if ctx.Err() != nil {
				return
			}
			// Re-arm the ready channel for the next session (Setup closes it).
			consumer.ready = make(chan bool)
		}
	}()
	<-consumer.ready // Await till the consumer has been set up
	log.Println("Sarama consumer up and running!...")
	sigterm := make(chan os.Signal, 1)
	signal.Notify(sigterm, syscall.SIGINT, syscall.SIGTERM)
	// Block until either the context ends or the process is signalled.
	select {
	case <-ctx.Done():
		log.Println("terminating: context cancelled")
	case <-sigterm:
		log.Println("terminating: via signal")
	}
	cancel()
	wg.Wait()
	if err = client.Close(); err != nil {
		log.Panicf("Error closing client: %v", err)
	}
}
// Consumer represents a Sarama consumer group consumer
type Consumer struct {
	ready chan bool // closed by Setup once a session is established; re-made between sessions
}
// Setup runs at the start of a new session, before ConsumeClaim; it
// announces readiness by closing the ready channel.
func (c *Consumer) Setup(_ sarama.ConsumerGroupSession) error {
	close(c.ready)
	return nil
}
// Cleanup runs at the end of a session, after all ConsumeClaim
// goroutines have exited; nothing needs releasing here.
func (c *Consumer) Cleanup(_ sarama.ConsumerGroupSession) error {
	return nil
}
// ConsumeClaim drains the claim's Messages() channel, logging each
// message and marking it consumed, until the channel is closed by a
// rebalance or shutdown.
func (c *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		log.Printf("Message claimed: value = %s, timestamp = %v, topic = %s", string(msg.Value), msg.Timestamp, msg.Topic)
		session.MarkMessage(msg, "")
	}
	return nil
}
// TestProducer publishes 20 numbered messages to topic "sun" through a
// synchronous producer.
// NOTE(review): requires a reachable Kafka broker at 127.0.0.1:9092.
func TestProducer(t *testing.T) {
	// Producer configuration: wait for all in-sync replicas to ack,
	// pick partitions at random, and report successes (required by the
	// SyncProducer).
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	config.Producer.Return.Successes = true
	// Build the synchronous producer.
	client, err := sarama.NewSyncProducer([]string{"127.0.0.1:9092"}, config)
	if err != nil {
		fmt.Println("producer close,err:", err)
		return
	}
	defer client.Close()
	for n := 1; n <= 20; n++ {
		// Build and send one numbered message.
		msg := &sarama.ProducerMessage{
			Topic: "sun",
			Value: sarama.StringEncoder(fmt.Sprintf("this is a good test,hello chaoge!! %d", n)),
		}
		pid, offset, err := client.SendMessage(msg)
		if err != nil {
			fmt.Println("send message failed,", err)
			return
		}
		fmt.Printf("pid:%v offset:%v\n,", pid, offset)
		time.Sleep(10 * time.Millisecond)
	}
}
func TestSum32(t *testing.T) {
fmt.Printf("sum32:%d", murmur3.Sum32([]byte("3")))
} |
/*
Matryoshka dolls are traditionally wooden dolls that can be nested by fitting smaller dolls into larger ones.
Suppose arrays can be nested similarly, placing smaller arrays into larger ones, in the following sense:
Array A can be nested inside Array B if:
min(array A) > min(array B)
max(array A) < max(array B)
For example, if A = [2, 3, 9, 5] and B = [10, 2, 1], then A can be nested inside B, since:
min(A) = 2 > 1 = min(B) and
max(A) = 9 < 10 = max(B)
Create a function that returns true if every single sub-array inside an array can be nested Matroyshka style, and false otherwise.
Examples
matryoshka([[2, 2, 7], [3, 4, 5, 6], [4, 5]]) ➞ true
// [4, 5] nested inside [3, 4, 5, 6], [3, 4, 5, 6] nested inside [2, 2, 7], etc.
// Dolls nested from largest to smallest.
matryoshka([[4, 5], [6, 3], [7, 6, 5, 4, 3, 2], [8, 1]]) ➞ true
// Dolls nested from smallest to largest.
matryoshka([[7, 1], [7, 6, 5, 4, 3, 2], [6, 3], [4, 5]]) ➞ false
// [7, 1] and [7, 6, 5, 4, 3, 2] share the same max.
// Second doll cannot be nested properly inside first doll.
matryoshka([[1, 5], [2, 6], [3, 7]]) ➞ false
// Elements are overlapping, cannot be nested.
Notes
Subarrays can be nested from smallest to largest or largest to smallest.
Elements must be strictly nested - e.g. no two arrays can share either the same MAX or the same MIN (see example #3).
Subarrays may not necessarily have unique elements (see example #1).
Subarrays can be in any order (see example #2).
*/
package main
import (
"math"
"sort"
)
// main exercises matryoshka against a table of fixtures; assert panics
// on the first case that disagrees with its expected result.
func main() {
	cases := []struct {
		in   [][]int
		want bool
	}{
		{[][]int{{2, 3, 9, 5}, {10, 2, 1}}, true},
		{[][]int{{2, 2, 7}, {3, 4, 5, 6}, {4, 5}}, true},
		{[][]int{{4, 5}, {6, 3}, {7, 6, 5, 4, 3, 2}, {8, 1}}, true},
		{[][]int{{7, 1}, {7, 6, 5, 4, 3, 2}, {6, 3}, {4, 5}}, false},
		{[][]int{{1, 5}, {2, 6}, {3, 7}}, false},
		{[][]int{{1, 2, 3, 4, 5, 6, 7, 8}, {2, 3, 4, 5, 6, 7}, {3, 4, 5, 6}, {4, 5}}, true},
		{[][]int{{4, 5}, {2, 6}, {1, 9}, {-5, 10, 11}}, true},
		{[][]int{{3, 3}, {4, 4}, {5, 5, 5}}, false},
		{[][]int{{1, 1, 1, 1, 2}, {3, 2, 5, 0}, {-1, 500}}, true},
		{[][]int{{1, 8}, {2, 3, 4, 5, 6, 7}, {3, 6}, {4, 5}}, true},
		{[][]int{{6, 2}, {9, 4}}, false},
		{[][]int{{7, 1}, {7, 6, 5, 4, 3, 2}, {6, 3}, {4, 5}}, false},
		{[][]int{{1, 5}, {2, 6}, {3, 7}}, false},
		{[][]int{{5, 1}, {4, 2}, {4, 2}}, false},
		{[][]int{{5, 1}, {4, 2}, {3, 3}}, true},
		{[][]int{{5, 1}, {4, 2}, {3, 3, 3, 3, 3, 3, 3}}, true},
		{[][]int{{1, 2, 3, 4}, {2, 3, 4}}, false},
		{[][]int{{1, 2, 3, 4}, {2, 3}}, true},
		{[][]int{{1, 50, 100}, {3, 25, 75}, {10, 40, 50}, {25, 45}}, true},
		{[][]int{{1, 50, 100}, {3, 25, 75}, {10, 40, 50}, {25, 55}}, false},
	}
	for _, c := range cases {
		assert(matryoshka(c.in) == c.want)
	}
}
// assert panics when the supplied condition is false.
func assert(ok bool) {
	if ok {
		return
	}
	panic("assertion failed")
}
// matryoshka reports whether all sub-arrays of a can be strictly nested
// inside one another: after sorting the (min, max) envelopes by max,
// each envelope must have a strictly larger max and strictly smaller
// min than its predecessor.
func matryoshka(a [][]int) bool {
	env := make([][2]int, 0, len(a))
	for _, arr := range a {
		// Compute the envelope (min, max) of this sub-array.
		lo, hi := math.MaxInt32, math.MinInt32
		for _, v := range arr {
			if v < lo {
				lo = v
			}
			if v > hi {
				hi = v
			}
		}
		env = append(env, [2]int{lo, hi})
	}
	sort.Slice(env, func(i, j int) bool { return env[i][1] < env[j][1] })
	for i := 1; i < len(env); i++ {
		outerIsWider := env[i][0] < env[i-1][0] && env[i][1] > env[i-1][1]
		if !outerIsWider {
			return false
		}
	}
	return true
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
/*
Copyright © 2020 The PES Open Source Team pesos@pes.edu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package process
import (
"context"
"time"
proc "github.com/shirou/gopsutil/process"
)
// Serve serves data on a per process basis: it refreshes process and
// pushes it onto dataChannel every refreshRate milliseconds until ctx is
// cancelled, returning the context's error.
func Serve(process *Process, dataChannel chan *Process, ctx context.Context, refreshRate int32) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err() // Stop execution if end signal received
		default:
			process.UpdateProcInfo()
			// BUG FIX: the original plain send blocked forever if the
			// receiver went away, leaking this goroutine even after ctx
			// was cancelled; honour cancellation while waiting to send.
			select {
			case dataChannel <- process:
			case <-ctx.Done():
				return ctx.Err()
			}
			time.Sleep(time.Duration(refreshRate) * time.Millisecond)
		}
	}
}
// ServeProcs polls the full process list and pushes it onto dataChannel
// every refreshRate milliseconds until ctx is cancelled, returning the
// context's error. Listing errors are skipped (best-effort).
func ServeProcs(dataChannel chan []*proc.Process, ctx context.Context, refreshRate int32) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err() // Stop execution if end signal received
		default:
			procs, err := proc.Processes()
			if err == nil {
				// BUG FIX: honour cancellation while waiting to send so a
				// vanished receiver cannot block this goroutine forever.
				select {
				case dataChannel <- procs:
				case <-ctx.Done():
					return ctx.Err()
				}
			}
			// BUG FIX: sleep on the error path too — the original only
			// slept on success, so a persistent listing failure busy-spun
			// this loop at 100% CPU.
			time.Sleep(time.Duration(refreshRate) * time.Millisecond)
		}
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.