text stringlengths 11 4.05M |
|---|
//go:build darwin
// +build darwin
package darwin
/*
#cgo CFLAGS: -x objective-c
#cgo LDFLAGS: -framework Foundation -framework Cocoa -framework WebKit -framework AppKit
#import <Foundation/Foundation.h>
#include <AppKit/AppKit.h>
#include <stdlib.h>
#import "Application.h"
#import "WailsContext.h"
/* Screen is the plain-C snapshot of one display that crosses the cgo boundary. */
typedef struct Screen {
int isCurrent; /* 1 when this is the screen hosting the window, else 0 */
int isPrimary; /* 1 when this is index 0 of [NSScreen screens] */
int height;    /* frame height in points */
int width;     /* frame width in points */
} Screen;
/* GetNumScreens returns the number of attached displays. */
int GetNumScreens(){
return [[NSScreen screens] count];
}
/* screenUniqueID returns the CGDirectDisplayID for a screen, usable as a
   stable identity for comparing screens. */
int screenUniqueID(NSScreen *screen){
// adapted from https://stackoverflow.com/a/1237490/4188138
NSDictionary* screenDictionary = [screen deviceDescription];
NSNumber* screenID = [screenDictionary objectForKey:@"NSScreenNumber"];
CGDirectDisplayID aID = [screenID unsignedIntValue];
return aID;
}
/* GetNthScreen describes screen number `nth` of [NSScreen screens].
   inctx must be a WailsContext pointer; it supplies the screen the window is
   currently on so isCurrent can be computed. */
Screen GetNthScreen(int nth, void *inctx){
WailsContext *ctx = (__bridge WailsContext*) inctx;
NSArray<NSScreen *> *screens = [NSScreen screens];
NSScreen* nthScreen = [screens objectAtIndex:nth];
NSScreen* currentScreen = [ctx getCurrentScreen];
Screen returnScreen;
returnScreen.isCurrent = (int)(screenUniqueID(currentScreen)==screenUniqueID(nthScreen));
// TODO properly handle screen mirroring
// from apple documentation:
// https://developer.apple.com/documentation/appkit/nsscreen/1388393-screens?language=objc
// The screen at index 0 in the returned array corresponds to the primary screen of the user’s system. This is the screen that contains the menu bar and whose origin is at the point (0, 0). In the case of mirroring, the first screen is the largest drawable display; if all screens are the same size, it is the screen with the highest pixel depth. This primary screen may not be the same as the one returned by the mainScreen method, which returns the screen with the active window.
returnScreen.isPrimary = nth==0;
returnScreen.height = (int) nthScreen.frame.size.height;
returnScreen.width = (int) nthScreen.frame.size.width;
return returnScreen;
}
*/
import "C"
import (
"github.com/wailsapp/wails/v2/internal/frontend"
"unsafe"
)
// GetAllScreens returns one frontend.Screen per attached display, in the
// order reported by [NSScreen screens] (index 0 is the primary screen).
// wailsContext must point at the native WailsContext; it is used on the C
// side to decide which screen currently hosts the window.
func GetAllScreens(wailsContext unsafe.Pointer) ([]frontend.Screen, error) {
	numScreens := int(C.GetNumScreens())
	// Preallocate: the count is known up front.
	screens := make([]frontend.Screen, 0, numScreens)
	for screenNum := 0; screenNum < numScreens; screenNum++ {
		cScreen := C.GetNthScreen(C.int(screenNum), wailsContext)
		screens = append(screens, frontend.Screen{
			Height:    int(cScreen.height),
			Width:     int(cScreen.width),
			IsCurrent: cScreen.isCurrent == C.int(1),
			IsPrimary: cScreen.isPrimary == C.int(1),
		})
	}
	// No failure mode today (the previous err variable was always nil); the
	// error stays in the signature for callers.
	return screens, nil
}
|
package gui
import (
"fmt"
"github.com/jesseduffield/gocui"
"github.com/jesseduffield/lazynpm/pkg/commands"
"github.com/jesseduffield/lazynpm/pkg/gui/presentation"
)
// list panel functions
// getSelectedTarball returns the tarball currently highlighted in the
// tarballs panel, or nil when there are no tarballs.
func (gui *Gui) getSelectedTarball() *commands.Tarball {
	if len(gui.State.Tarballs) == 0 {
		return nil
	}
	return gui.State.Tarballs[gui.State.Panels.Tarballs.SelectedLine]
}
// handleTarballSelect renders the selected tarball's summary in the
// secondary view and activates its context view. When the tarballs view is
// hidden (no tarballs) focus is handed to the scripts view first.
func (gui *Gui) handleTarballSelect(g *gocui.Gui, v *gocui.View) error {
	if !gui.showTarballsView() {
		// the tarball view is hidden whenever there are no tarballs
		if err := gui.switchFocus(nil, gui.getScriptsView()); err != nil {
			return err
		}
	}
	selected := gui.getSelectedTarball()
	if selected == nil {
		return nil
	}
	gui.renderString("secondary", presentation.TarballSummary(selected))
	gui.activateContextView(selected.ID())
	return nil
}
// selectedTarballID returns the ID of the selected tarball, or the empty
// string when no tarball is selected.
func (gui *Gui) selectedTarballID() string {
	if selected := gui.getSelectedTarball(); selected != nil {
		return selected.ID()
	}
	return ""
}
// wrappedTarballHandler adapts a tarball-specific handler into a gocui
// keybinding handler. It is a no-op when nothing is selected, and routes the
// handler's result through gui.finalStep.
func (gui *Gui) wrappedTarballHandler(f func(*commands.Tarball) error) func(*gocui.Gui, *gocui.View) error {
	return gui.wrappedHandler(func() error {
		selected := gui.getSelectedTarball()
		if selected == nil {
			return nil
		}
		return gui.finalStep(f(selected))
	})
}
// handleDeleteTarball asks the user for confirmation and, if confirmed,
// removes the tarball file from disk.
func (gui *Gui) handleDeleteTarball(tarball *commands.Tarball) error {
	opts := createConfirmationPanelOpts{
		returnToView:       gui.getTarballsView(),
		returnFocusOnClose: true,
		title:              "Remove tarball",
		prompt:             fmt.Sprintf("are you sure you want to delete `%s`?", tarball.Name),
		handleConfirm: func() error {
			return gui.finalStep(gui.OSCommand.Remove(tarball.Path))
		},
	}
	return gui.createConfirmationPanel(opts)
}
// handleInstallTarball runs `npm install <name>` for the given tarball in
// the main command view.
func (gui *Gui) handleInstallTarball(tarball *commands.Tarball) error {
	cmdStr := "npm install " + tarball.Name
	return gui.newMainCommand(cmdStr, tarball.ID(), newMainCommandOptions{})
}
// handlePublishTarball publishes the given tarball.
// scoped is passed as true to force an explicit public/restricted choice:
// the tarball name has its `@` stripped, so we cannot tell from the name
// alone whether the package is scoped.
func (gui *Gui) handlePublishTarball(tarball *commands.Tarball) error {
	return gui.handlePublish(tarball.Name, true, tarball.ID())
}
// showTarballsView reports whether the tarballs panel should be visible,
// i.e. whether any tarballs exist.
func (gui *Gui) showTarballsView() bool {
	return len(gui.State.Tarballs) != 0
}
|
//
// 插入排序
// 空间复杂度: O(1)
// 时间复杂度: O(n^2)
// https://zh.wikipedia.org/wiki/%E6%8F%92%E5%85%A5%E6%8E%92%E5%BA%8F
//
// cloud@txthinking.com
//
package main
import (
"fmt"
)
// a is the demo input slice; the sort happens in place, so main's final
// print shows it sorted.
var a = []int{8, 3, 4, 2, 8, 5, 10}

// insertionSort sorts s in place using insertion sort.
// Space: O(1), time: O(n^2).
// It prints s after each outer-loop pass, preserving the original demo trace.
func insertionSort(s []int) {
	for i := 1; i < len(s); i++ {
		key := s[i]
		j := i - 1
		// shift larger elements right until key's slot is found
		for ; j >= 0 && s[j] > key; j-- {
			s[j+1] = s[j]
		}
		s[j+1] = key
		fmt.Println(s)
	}
}

func main() {
	fmt.Println(a)
	insertionSort(a)
	fmt.Println(a)
}
|
package main
import (
	"bytes"
	"flag"
	"fmt"
	"sort"
	"strings"

	res "github.com/antlr/antlr4/doc/resources"
	"github.com/antlr/antlr4/runtime/Go/antlr"
	gen "github.com/er1c-zh/sql-to-gorm/antlr4_gen"
)
var (
path string
_package string
)
// Init registers and parses the command-line flags:
//
//	-file:    path to the input sql file
//	-package: package name for the generated go file (default "models")
func Init() {
	flag.StringVar(&path, "file", "", "path to sql file")
	flag.StringVar(&_package, "package", "models", "go file package")
	flag.Parse()
}
// main parses the sql file named by -file with the generated MySQL parser
// and prints the resulting gorm model source to stdout.
func main() {
	Init()
	input, err := antlr.NewFileStream(path)
	if err != nil {
		fmt.Printf("NewFileStream fail: %s", err.Error())
		flag.Usage()
		return
	}
	// Wrap the stream so the lexer sees a single casing of the
	// case-insensitive MySQL keywords (presumably upper case given the
	// `true` flag — TODO confirm against NewCaseChangingStream).
	lexer := gen.NewMySqlLexer(res.NewCaseChangingStream(input, true))
	stream := antlr.NewCommonTokenStream(lexer, 0)
	p := gen.NewMySqlParser(stream)
	// p.AddErrorListener(antlr.NewDiagnosticErrorListener(true))
	p.BuildParseTrees = true
	option := DefaultOption()
	option.Package = _package
	ln := NewListener(option)
	// Walk the whole parse tree; the listener accumulates tables/columns.
	antlr.ParseTreeWalkerDefault.Walk(ln, p.Root())
	fmt.Printf("%s", ln.ToGorm())
}
// GoModelFile model file: everything needed to render one generated .go file.
type GoModelFile struct {
	TableList []*Table               // tables to render, in source order
	Import    map[string]interface{} // set of import paths required by the column types
	Package   string                 // target package name; empty falls back to "models"
}
// ToGorm renders the whole generated file: package clause, import block (if
// any imports were collected) and one struct per table.
func (f GoModelFile) ToGorm() string {
	buf := new(bytes.Buffer)
	pkg := f.Package
	if pkg == "" {
		pkg = "models"
	}
	buf.WriteString(fmt.Sprintf("package %s\n", pkg))
	if len(f.Import) > 0 {
		// Sort the import paths: ranging over the map directly made the
		// generated output non-deterministic between runs.
		imports := make([]string, 0, len(f.Import))
		for _import := range f.Import {
			imports = append(imports, _import)
		}
		sort.Strings(imports)
		buf.WriteString("import (\n")
		for _, _import := range imports {
			buf.WriteString(fmt.Sprintf(" \"%s\"\n", _import))
		}
		buf.WriteString(")\n")
	}
	for _, t := range f.TableList {
		buf.WriteString(t.ToGorm())
		buf.WriteString("\n")
	}
	return buf.String()
}
// Table is one CREATE TABLE statement: its (unqualified, unquoted) name and
// its columns in declaration order.
type Table struct {
	Name string
	Cols []*Col
}
// ToGorm renders the table as a gorm struct type definition.
func (t Table) ToGorm() string {
	var b bytes.Buffer
	b.WriteByte('\n')
	b.WriteString(fmt.Sprintf("type %s struct {\n", snakeToCamel(t.Name)))
	fieldLines := make([]string, 0, len(t.Cols))
	for _, c := range t.Cols {
		fieldLines = append(fieldLines, c.ToGorm())
	}
	b.WriteString(strings.Join(fieldLines, "\n"))
	b.WriteString("\n}\n")
	return b.String()
}
// Col is one column declaration.
type Col struct {
	Name     string // column name, unquoted
	DataType string // resolved Go type (set via SetDataType)
	NotNull  bool   // NOT NULL constraint (currently never set by the listener)
	Default  string // raw DEFAULT expression text, "" when absent
	Comment  string // COMMENT text, "" when absent
}
// ToGorm renders one struct field line with a gorm tag and a trailing
// comment (the sql COMMENT, or the column name when there is none).
func (c Col) ToGorm() string {
	tags := []string{fmt.Sprintf("column:%s", c.Name)}
	if c.Default != "" {
		tags = append(tags, fmt.Sprintf("default:%s", c.Default))
	}
	comment := c.Comment
	if comment == "" {
		comment = c.Name
	}
	return fmt.Sprintf(" %s %s `gorm:\"%s\"` //%s",
		snakeToCamel(c.Name), c.DataType, strings.Join(tags, ";"), comment)
}
// Listener walks the MySQL parse tree and accumulates the generated file.
// CurrentTable/CurrentCol hold in-progress state between Enter/Exit pairs.
type Listener struct {
	*gen.BaseMySqlParserListener
	CurrentTable *Table // table being collected, nil outside CREATE TABLE
	CurrentCol   *Col   // column being collected, nil outside a declaration
	GoModelFile         // accumulated output (tables, imports, package)
}
// Option configures code generation.
type Option struct {
	Package string // package name for the generated file
}

// DefaultOption returns the default configuration ("models" package).
func DefaultOption() Option {
	var o Option
	o.Package = "models"
	return o
}
// NewListener builds a Listener configured by option, with an empty import
// set ready to be filled while walking the tree.
func NewListener(option Option) *Listener {
	l := &Listener{}
	l.Import = map[string]interface{}{}
	l.Package = option.Package
	return l
}
// EnterColumnCreateTable starts collecting a new table. The table name is
// unquoted and any schema qualifier ("db.table") is dropped.
func (l *Listener) EnterColumnCreateTable(ctx *gen.ColumnCreateTableContext) {
	if l.CurrentTable != nil {
		panic("last table not done")
	}
	name := strings.Trim(ctx.TableName().GetText(), "`")
	parts := strings.Split(name, ".")
	l.CurrentTable = &Table{Name: parts[len(parts)-1]}
}
// ExitColumnCreateTable finalizes the current table and appends it to the
// output list.
func (l *Listener) ExitColumnCreateTable(ctx *gen.ColumnCreateTableContext) {
	done := l.CurrentTable
	l.CurrentTable = nil
	l.TableList = append(l.TableList, done)
}
// EnterColumnDeclaration starts collecting a new column; its type, default
// and comment are filled in by the other Enter* handlers.
func (l *Listener) EnterColumnDeclaration(ctx *gen.ColumnDeclarationContext) {
	if l.CurrentCol != nil {
		panic("last col not done")
	}
	col := &Col{}
	col.Name = strings.Trim(ctx.Uid().GetText(), "`")
	l.CurrentCol = col
}
// ExitColumnDeclaration attaches the finished column to the current table.
func (l *Listener) ExitColumnDeclaration(ctx *gen.ColumnDeclarationContext) {
	if l.CurrentTable == nil {
		panic("col done but no table")
	}
	done := l.CurrentCol
	l.CurrentCol = nil
	l.CurrentTable.Cols = append(l.CurrentTable.Cols, done)
}
/////////////////////////////////////////////
// string ///////////////////////////////////
/////////////////////////////////////////////
// EnterStringDataType maps string-typed columns to Go string.
func (l *Listener) EnterStringDataType(c *gen.StringDataTypeContext) {
	rules := []Rule{{_type: "string"}}
	l.ParseDataType(c.GetTypeName().GetText(), rules)
}
// EnterNationalStringDataType maps national string columns to Go string.
func (l *Listener) EnterNationalStringDataType(c *gen.NationalStringDataTypeContext) {
	rules := []Rule{{_type: "string"}}
	l.ParseDataType(c.GetTypeName().GetText(), rules)
}
// EnterNationalVaryingStringDataType maps national varying string columns to
// Go string.
func (l *Listener) EnterNationalVaryingStringDataType(c *gen.NationalVaryingStringDataTypeContext) {
	rules := []Rule{{_type: "string"}}
	l.ParseDataType(c.GetTypeName().GetText(), rules)
}
/////////////////////////////////////////////
// Dimension ////////////////////////////////
/////////////////////////////////////////////
// EnterDimensionDataType maps numeric/temporal types that carry a dimension.
// Rule order matters: the first matching rule wins, with float64 as the
// catch-all fallback.
func (l *Listener) EnterDimensionDataType(c *gen.DimensionDataTypeContext) {
	l.ParseDataType(c.GetTypeName().GetText(), []Rule{
		{contain: "int", _type: "int64"},
		{contain: "timestamp", _type: "int64"},
		// "time.Time", not "time.Time{}": the value is emitted verbatim as
		// the struct field's type, where a composite literal would not
		// compile.
		{contain: "datetime", _type: "time.Time", repo: []string{"time"}},
		{contain: "year", _type: "time.Time", repo: []string{"time"}},
		{contain: "", _type: "float64"},
	})
}
/////////////////////////////////////////////
// simple data type//////////////////////////
/////////////////////////////////////////////
// EnterSimpleDataType maps bare keyword types (DATE, BOOL, SERIAL, ...),
// falling back to string.
func (l *Listener) EnterSimpleDataType(c *gen.SimpleDataTypeContext) {
	l.ParseDataType(c.GetTypeName().GetText(), []Rule{
		// "time.Time", not "time.Time{}": the value is emitted verbatim as
		// the struct field's type, where a composite literal would not
		// compile.
		{contain: "date", _type: "time.Time", repo: []string{"time"}},
		{contain: "bool", _type: "bool"},
		{contain: "serial", _type: "int64"},
		{contain: "", _type: "string"},
	})
}
/////////////////////////////////////////////
// collection data type//////////////////////
/////////////////////////////////////////////
// EnterCollectionDataType maps ENUM/SET-style columns to Go string.
func (l *Listener) EnterCollectionDataType(c *gen.CollectionDataTypeContext) {
	rules := []Rule{{_type: "string"}}
	l.ParseDataType(c.GetTypeName().GetText(), rules)
}
/////////////////////////////////////////////
// spatial data type/////////////////////////
/////////////////////////////////////////////
// EnterSpatialDataType maps spatial columns to Go string.
func (l *Listener) EnterSpatialDataType(c *gen.SpatialDataTypeContext) {
	rules := []Rule{{_type: "string"}}
	l.ParseDataType(c.GetTypeName().GetText(), rules)
}
/////////////////////////////////////////////
// long varchar data type////////////////////
/////////////////////////////////////////////
// EnterLongVarcharDataType maps LONG VARCHAR columns to Go string.
func (l *Listener) EnterLongVarcharDataType(c *gen.LongVarcharDataTypeContext) {
	rules := []Rule{{_type: "string"}}
	l.ParseDataType(c.GetTypeName().GetText(), rules)
}
/////////////////////////////////////////////
// long varbinary data type//////////////////
/////////////////////////////////////////////
// EnterLongVarbinaryDataType maps LONG VARBINARY columns to Go string.
// NOTE(review): unlike the sibling handlers this passes c.GetText() instead
// of c.GetTypeName().GetText(); the result is the same here only because the
// single catch-all rule matches any text — confirm before adding rules.
func (l *Listener) EnterLongVarbinaryDataType(c *gen.LongVarbinaryDataTypeContext) {
	l.ParseDataType(c.GetText(), []Rule{
		{contain: "", _type: "string"},
	})
}
/////////////////////////////////////////////
// comment //////////////////////////////////
/////////////////////////////////////////////
// EnterCommentColumnConstraint records a COMMENT constraint on the current
// column; ignored when no column is being collected.
// NOTE(review): GetText() on a STRING_LITERAL presumably keeps the quotes,
// which would then appear in the generated field comment — verify.
func (l *Listener) EnterCommentColumnConstraint(c *gen.CommentColumnConstraintContext) {
	if l.CurrentCol == nil {
		return
	}
	l.CurrentCol.Comment = c.STRING_LITERAL().GetText()
}
/////////////////////////////////////////////
// default //////////////////////////////////
/////////////////////////////////////////////
// EnterDefaultColumnConstraint records the raw DEFAULT expression on the
// current column; ignored when no column is being collected.
func (l *Listener) EnterDefaultColumnConstraint(c *gen.DefaultColumnConstraintContext) {
	if l.CurrentCol != nil {
		l.CurrentCol.Default = c.DefaultValue().GetText()
	}
}
/////////////////////////////////////////////
/////////////////////////////////////////////
/////////////////////////////////////////////
// SetDataType assigns the resolved Go type to the column being collected,
// warning (and dropping the value) when no column is active.
func (l *Listener) SetDataType(_t string) {
	if l.CurrentCol != nil {
		l.CurrentCol.DataType = _t
		return
	}
	fmt.Printf("[WARN] get data type but no col: %s", _t)
}
// Rule maps a sql type-name fragment to a Go type.
// An empty contain matches every name (strings.Contains with ""), so a rule
// with contain "" acts as the fallback and must come last.
type Rule struct {
	contain string   // substring looked for in the lowered sql type name
	_type   string   // Go type emitted when the rule matches
	repo    []string // import paths required by _type
}
// ParseDataType resolves the sql type name _t against the ordered rule list:
// the first rule whose contain substring appears in the lowered name sets
// the current column's Go type and registers any imports that type needs.
func (l *Listener) ParseDataType(_t string, rule []Rule) {
	lowered := strings.ToLower(_t)
	for _, r := range rule {
		if !strings.Contains(lowered, r.contain) {
			continue
		}
		l.SetDataType(r._type)
		for _, pkg := range r.repo {
			l.Import[pkg] = struct{}{}
		}
		break
	}
}
// snakeToCamel converts snake_case to CamelCase, e.g. "user_id" -> "UserId".
// Empty segments (doubled, leading or trailing underscores) contribute
// nothing; case conversion is byte-wise, matching ASCII identifiers.
func snakeToCamel(src string) string {
	var b strings.Builder
	for _, part := range strings.Split(src, "_") {
		if part == "" {
			continue
		}
		b.WriteString(strings.ToUpper(part[:1]))
		b.WriteString(part[1:])
	}
	return b.String()
}
|
package cmd
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"github.com/brainicorn/skelp/skelputil"
)
// TestSkelpCmdError verifies that an unknown subcommand exits non-zero.
func TestSkelpCmdError(t *testing.T) {
	code := Execute([]string{"badcommand"}, nil)
	if code == 0 {
		t.Errorf("execute should have errored ")
	}
}
// TestSkelpCmdUserError verifies that "apply" without its required arguments
// exits non-zero.
func TestSkelpCmdUserError(t *testing.T) {
	code := Execute([]string{"apply"}, nil)
	if code == 0 {
		t.Errorf("execute should have errored ")
	}
}
// TestSkelpCmdBadHomedir verifies that a non-existent --homedir is rejected
// and that the first output line carries the expected error message.
func TestSkelpCmdBadHomedir(t *testing.T) {
	out := new(bytes.Buffer)
	code := Execute([]string{"alias", "list", "--no-color", "--homedir", "/does.not.exist"}, out)
	if code == 0 {
		t.Errorf("execute should have errored ")
	}
	lines := strings.Split(out.String(), "\n")
	if !strings.HasSuffix(lines[0], "is not a valid path for --homedir flag") {
		// print the mismatching line to aid debugging before failing
		fmt.Println(lines[0])
		t.Errorf("alias error does not match")
	}
}
// TestSkelpCmdSkelpdirOVerride verifies that --skelpdir places the skelp
// directory under the overridden home directory.
func TestSkelpCmdSkelpdirOVerride(t *testing.T) {
	out := new(bytes.Buffer)
	// The TempDir error was previously ignored, which would surface as a
	// confusing downstream failure; fail fast instead.
	tmpDir, err := ioutil.TempDir("", "custom-home")
	if err != nil {
		t.Fatalf("could not create temp dir: %s", err)
	}
	defer os.RemoveAll(tmpDir)
	code := Execute([]string{"alias", "list", "--no-color", "--homedir", tmpDir, "--skelpdir", "customdir"}, out)
	if code != 0 {
		t.Errorf("execute should not have errored ")
	}
	customDir := filepath.Join(tmpDir, "customdir")
	if !skelputil.PathExists(customDir) {
		t.Errorf("custom skepdir should exist: %s", customDir)
	}
}
|
package objectstorage
import (
"sync/atomic"
)
// StorableObjectFlags tracks the lifecycle state of a storable object with
// three independent atomic flags: persist, delete and modified.
type StorableObjectFlags struct {
	persist  atomic.Bool
	delete   atomic.Bool
	modified atomic.Bool
}

// SetModified sets the modified flag (default true; pass false to clear) and
// returns the flag's previous value.
func (of *StorableObjectFlags) SetModified(modified ...bool) (wasSet bool) {
	value := true
	if len(modified) > 0 {
		value = modified[0]
	}
	return of.modified.Swap(value)
}

// IsModified reports whether the modified flag is set.
func (of *StorableObjectFlags) IsModified() bool {
	return of.modified.Load()
}

// Delete sets the delete flag (default true; pass false to clear), marks the
// object modified, and returns the delete flag's previous value.
//nolint:predeclared // lets keep this for now
func (of *StorableObjectFlags) Delete(delete ...bool) (wasSet bool) {
	value := true
	if len(delete) > 0 {
		value = delete[0]
	}
	wasSet = of.delete.Swap(value)
	of.modified.Store(true)
	return wasSet
}

// IsDeleted reports whether the delete flag is set.
func (of *StorableObjectFlags) IsDeleted() bool {
	return of.delete.Load()
}

// Persist sets the persist flag (default true; pass false to clear); setting
// it also clears the delete flag. The persist flag's previous value is
// returned.
func (of *StorableObjectFlags) Persist(persist ...bool) (wasSet bool) {
	value := len(persist) == 0 || persist[0]
	wasSet = of.persist.Swap(value)
	if value {
		of.delete.Store(false)
	}
	return wasSet
}

// ShouldPersist returns "true" if this object is going to be persisted.
func (of *StorableObjectFlags) ShouldPersist() bool {
	return of.persist.Load()
}
|
package service
import (
"time"
"errors"
"github.com/gpmgo/gopm/modules/log"
)
// Manager owns the unbuffered message queues connecting the subsystems:
// matching, clearing, sequencing and the market-data source.
type Manager struct {
	Match_que       chan interface{}
	Clear_que       chan interface{}
	Sequence_que    chan interface{}
	Source_data_que chan interface{}
}

// manager is the package-wide singleton instance.
var manager *Manager

// Eagerly-initialized singleton: built at package init time, so GetInstance
// never returns nil.
func init() {
	manager = &Manager{
		Match_que:       make(chan interface{}),
		Clear_que:       make(chan interface{}),
		Sequence_que:    make(chan interface{}),
		Source_data_que: make(chan interface{}),
	}
}

// GetInstance returns the process-wide Manager singleton.
func GetInstance() *Manager {
	return manager
}
// 发送一个msg到定序系统
// msg : 消息
// tmout : 超时时间
func Send2Senquence(msg interface{}, tmout int) error{
ch := manager.Sequence_que
log.Debug("Send2Senquence msg: %#v", msg)
select {
case ch <- msg:
return nil
case <- time.After(time.Duration(tmout) * time.Second ):
return errors.New("time out.")
}
}
// 发送一个消息到 撮合模塊
// msg : 消息
// tmout : 超时时间
func Send2Match(msg interface{}, tmout int) error{
ch := manager.Match_que
log.Debug("Send2Match msg: %#v", msg)
select {
case ch <- msg:
return nil
case <- time.After(time.Duration(tmout) * time.Second ):
return errors.New("time out.")
}
}
// 发送一个消息到 清算系统
// msg : 消息
// tmout : 超时时间
func Send2Clearing(msg interface{}, tmout int) error{
ch := manager.Clear_que
log.Debug("Send2Clearing msg: %#v", msg)
select {
case ch <- msg:
return nil
case <- time.After(time.Duration(tmout) * time.Second ):
return errors.New("time out.")
}
}
// 发送一个消息到 数据源
// msg : 消息
// tmout : 超时时间
func Send2Source(msg interface{}, tmout int) error{
ch := manager.Source_data_que
log.Debug("Send2Source msg: %#v", msg)
select {
case ch <- msg:
return nil
case <- time.After(time.Duration(tmout) * time.Second ):
return errors.New("time out.")
}
}
|
package api
import (
"log"
"net/http"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
"github.com/tuyentv96/go-cloudfunction/handler"
)
// e is the shared Echo instance, configured once at cold start.
var e *echo.Echo

func init() {
	// Echo instance
	e = echo.New()
	// Middleware
	e.Use(middleware.Logger())
	e.Use(middleware.Recover())
	// Register routes once here: registering them inside Handler re-added
	// the same routes on every request (duplicate-route risk and wasted
	// work), since the cloud-function runtime reuses the process.
	e.GET("/", func(c echo.Context) error {
		return c.JSON(200, "hello world")
	})
	handler.NewArticleHandler(e)
	log.Println("initial echo")
}

// Handler is the cloud-function entry point; it delegates each request to
// Echo's router.
func Handler(w http.ResponseWriter, r *http.Request) {
	e.ServeHTTP(w, r)
}
//func main() {
// r := mux.NewRouter()
// r.HandleFunc("/", Handler)
//
// log.Fatal(http.ListenAndServe("localhost:8081", r))
//}
|
// Copyright (c) 2018 The MATRIX Authors
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php
package reelection
import (
"github.com/MatrixAINetwork/go-matrix/common"
"github.com/MatrixAINetwork/go-matrix/core/matrixstate"
"github.com/MatrixAINetwork/go-matrix/election/support"
"github.com/MatrixAINetwork/go-matrix/log"
"github.com/MatrixAINetwork/go-matrix/mc"
"github.com/pkg/errors"
)
// GetAllNativeDataForUpdate collects the election-native data needed for a
// topology update: the current elect list split by validator role, plus the
// per-role queues of accounts that are online but not yet in the topology.
func GetAllNativeDataForUpdate(electstate mc.ElectGraph, electonline mc.ElectOnlineStatus, top *mc.TopologyGraph) support.AllNative {
	// role of every account currently in the topology graph
	mapTopStatus := make(map[common.Address]common.RoleType, 0)
	for _, v := range top.NodeList {
		mapTopStatus[v.Account] = v.Type
	}
	native := support.AllNative{}
	// role of every account in the elect list, also bucketed into the
	// master/backup/candidate slices
	mapELectStatus := make(map[common.Address]common.RoleType, 0)
	for _, v := range electstate.ElectList {
		mapELectStatus[v.Account] = v.Type
		switch v.Type {
		case common.RoleValidator:
			native.Master = append(native.Master, v)
		case common.RoleBackupValidator:
			native.BackUp = append(native.BackUp, v)
		case common.RoleCandidateValidator:
			native.Candidate = append(native.Candidate, v)
		}
	}
	for _, v := range electonline.ElectOnline {
		if v.Position != common.PosOnline { // skip accounts that are not online
			continue
		}
		if _, ok := mapTopStatus[v.Account]; ok == true { // skip accounts already in the topology graph
			continue
		}
		if _, ok := mapELectStatus[v.Account]; ok == true { // only accounts present in the elect list
			switch mapELectStatus[v.Account] {
			case common.RoleValidator:
				native.MasterQ = append(native.MasterQ, v.Account)
			case common.RoleBackupValidator:
				native.BackUpQ = append(native.BackUpQ, v.Account)
			case common.RoleCandidateValidator:
				native.CandidateQ = append(native.CandidateQ, v.Account)
			}
		}
	}
	return native
}
// GetOnlineAlter builds the alternative (position-change) records for the
// given offline and online account lists. Accounts that are missing from the
// elect-online list, or that are already in the requested position, are
// skipped with an error log.
func GetOnlineAlter(offline []common.Address, online []common.Address, electonline mc.ElectOnlineStatus) []mc.Alternative {
	ans := []mc.Alternative{}
	// current position of every account in the elect-online list
	mappOnlineStatus := make(map[common.Address]uint16)
	for _, v := range electonline.ElectOnline {
		mappOnlineStatus[v.Account] = v.Position
	}
	for _, v := range offline {
		if _, ok := mappOnlineStatus[v]; ok == false {
			log.Error(Module, "计算下线节点的alter时 下线节点不在初选列表中 账户", v.String())
			continue
		}
		if mappOnlineStatus[v] == common.PosOffline {
			log.Error(Module, "该节点已处于下线阶段 不需要上块 账户", v.String())
			continue
		}
		temp := mc.Alternative{
			A:        v,
			Position: common.PosOffline,
		}
		ans = append(ans, temp)
	}
	for _, v := range online {
		if _, ok := mappOnlineStatus[v]; ok == false {
			log.Error(Module, "计算上线节点的alter时 上线节点不在初选列表中 账户", v.String())
			continue
		}
		if mappOnlineStatus[v] == common.PosOnline {
			log.Error(Module, "该节点已处于上线阶段,不需要上块 账户", v.String())
			continue
		}
		temp := mc.Alternative{
			A:        v,
			Position: common.PosOnline,
		}
		ans = append(ans, temp)
	}
	log.Info(Module, "计算上下线节点结果 online", online, "offline", offline, "ans", ans)
	return ans
}
// TopoUpdate computes topology alternatives from the native election data:
// it loads the election plugin and elect config for the given block hash and
// delegates to the plugin's ToPoUpdate.
func (self *ReElection) TopoUpdate(allNative support.AllNative, top *mc.TopologyGraph, hash common.Hash) ([]mc.Alternative, error) {
	elect, err := self.GetElectPlug(hash)
	if err != nil {
		// Include err in the log (it was previously dropped) and return nil
		// like the other error paths instead of an empty slice.
		log.Error(Module, "获取选举插件 err", err)
		return nil, err
	}
	st, err := self.bc.StateAtBlockHash(hash)
	if err != nil {
		log.Error(Module, "get state by height err", err, "hash", hash)
		return nil, err
	}
	electInfo, err := matrixstate.GetElectConfigInfo(st)
	if err != nil || electInfo == nil {
		log.Error("GetElectInfo", "获取选举基础信息失败 err", err)
		// Previously a nil electInfo with a nil err returned (nil, nil),
		// leaving the caller with no result and no error.
		if err == nil {
			err = errors.New("elect config info is nil")
		}
		return nil, err
	}
	allNative.ElectInfo = electInfo
	return elect.ToPoUpdate(allNative, top), nil
}
// LastMinerGenTimeStamp returns the block number at which the given role's
// network change happens: the next re-election number for height minus the
// configured offset (MinerNetChange for miners, ValidatorNetChange for
// everything else), both read from the config stored for hash.
// NOTE(review): assumes GetNextReElectionNumber(height) >= the configured
// offset; otherwise the uint64 subtraction would wrap — confirm the config
// guarantees this.
func (self *ReElection) LastMinerGenTimeStamp(height uint64, types common.RoleType, hash common.Hash) (uint64, error) {
	data, err := self.GetElectGenTimes(hash)
	if err != nil {
		log.Error(Module, "获取配置文件失败 err", err)
		return 0, err
	}
	minerGenTime := uint64(data.MinerNetChange)
	validatorGenTime := uint64(data.ValidatorNetChange)
	bcInterval, err := self.GetBroadcastIntervalByHash(hash)
	if err != nil {
		log.Error(Module, "根据hash获取广播周期信息 err", err)
		return 0, err
	}
	switch types {
	case common.RoleMiner:
		return bcInterval.GetNextReElectionNumber(height) - minerGenTime, nil
	default:
		return bcInterval.GetNextReElectionNumber(height) - validatorGenTime, nil
	}
}
// GetNextElectNodeInfo splits electGraph's next-election node lists for the
// requested role. Miners populate only master (a copy of NextMinerElect);
// validators are split into master/backup/candidate by their elected type.
// Unknown roles yield three empty slices and no error.
func (self *ReElection) GetNextElectNodeInfo(electGraph *mc.ElectGraph, types common.RoleType) (master []mc.ElectNodeInfo, backup []mc.ElectNodeInfo, cand []mc.ElectNodeInfo, err error) {
	if electGraph == nil {
		err = errors.New("param elect graph is nil")
		return
	}
	master = []mc.ElectNodeInfo{}
	backup = []mc.ElectNodeInfo{}
	cand = []mc.ElectNodeInfo{}
	switch types {
	case common.RoleMiner:
		// append copies the source slice; the previous make+copy dance (and
		// its unreachable copy-length error branch) was equivalent but
		// noisier.
		master = append(master, electGraph.NextMinerElect...)
	case common.RoleValidator:
		for _, v := range electGraph.NextValidatorElect {
			switch v.Type {
			case common.RoleValidator:
				master = append(master, v)
			case common.RoleBackupValidator:
				backup = append(backup, v)
			case common.RoleCandidateValidator:
				cand = append(cand, v)
			}
		}
	}
	return master, backup, cand, nil
}
|
// Package block implements efficient storage and transfer of arbitrary blocks of data.
package block
/*
TODO(refactor): package name `block` is easily confused with github.com/ipfs/go-block-format.
*/
import (
"context"
"io"
"go.uber.org/fx"
"github.com/ipfs/go-bitswap"
"github.com/ipfs/go-bitswap/network"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-datastore"
blockstore "github.com/ipfs/go-ipfs-blockstore"
exchange "github.com/ipfs/go-ipfs-exchange-interface"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/routing"
)
// Config for block services. Populated by the fx dependency-injection
// container (fx.In).
type Config struct {
	fx.In

	Host host.Host       // libp2p host used by the bitswap network
	DHT  routing.Routing // content routing for bitswap
	/*
		TODO(performance): pass in some sort of persistent implementation (badgerdb?)
		Currently there is a single memory-backed datastore that gets passed in here.
		Investigate the possibility of maintaining two separate datastores:
		- MapDatastore for DHT & other volatile data
		- Persistent datastore for block data
		Resist the urge to optimize this prematurely.  It's unclear whether this will
		work without heavy modification to IPFS.  (e.g.:  what happens after a restart?
		Will the DHT be automatically populated from the bitswap exchange?)
	*/
	Store datastore.Batching // backing datastore for the blockstore
}
// Module contains primitives for working with blocks of data. Provided to
// the fx dependency-injection container (fx.Out).
type Module struct {
	fx.Out

	Service  blockservice.BlockService // local+exchange block access
	GCLocker blockstore.GCLocker       // lock coordinating GC with block writes
}
// New builds the block Module: a blockstore over cfg.Store wrapped with a GC
// locker, exchanged over bitswap (with providing enabled), and exposed as a
// blockservice. The bitswap exchange is closed via the fx lifecycle on stop.
func New(ctx context.Context, cfg Config, lx fx.Lifecycle) (mod Module, err error) {
	bs := blockstore.NewBlockstore(cfg.Store)
	/*
		TODO(performance):  investigate persistent blockstore with caching (see below)
	*/
	// if bs, err = blockstore.CachedBlockstore(ctx, bs, blockstore.DefaultCacheOpts()); err != nil {
	// 	return
	// }
	mod.GCLocker = blockstore.NewGCLocker()
	exc := bitswap.New(ctx,
		network.NewFromIpfsHost(cfg.Host, cfg.DHT),
		blockstore.NewGCBlockstore(bs, mod.GCLocker),
		bitswap.ProvideEnabled(true),
	).(exchange.SessionExchange)
	// ensure the exchange's goroutines are shut down with the app
	lx.Append(closehook(exc))
	mod.Service = blockservice.New(bs, exc)
	return
}
// closehook wraps an io.Closer in an fx.Hook that closes it on shutdown.
func closehook(c io.Closer) fx.Hook {
	var h fx.Hook
	h.OnStop = func(context.Context) error {
		return c.Close()
	}
	return h
}
|
package odoo
import (
"fmt"
)
// MailTestSimple represents mail.test.simple model.
// Tags use "omitempty" so unset (nil) fields are left out of the xmlrpc
// payload; the previous "omptempty" spelling is not a recognized option and
// presumably had no effect.
type MailTestSimple struct {
	LastUpdate               *Time     `xmlrpc:"__last_update,omitempty"`
	CreateDate               *Time     `xmlrpc:"create_date,omitempty"`
	CreateUid                *Many2One `xmlrpc:"create_uid,omitempty"`
	Description              *String   `xmlrpc:"description,omitempty"`
	DisplayName              *String   `xmlrpc:"display_name,omitempty"`
	EmailFrom                *String   `xmlrpc:"email_from,omitempty"`
	Id                       *Int      `xmlrpc:"id,omitempty"`
	MessageChannelIds        *Relation `xmlrpc:"message_channel_ids,omitempty"`
	MessageFollowerIds       *Relation `xmlrpc:"message_follower_ids,omitempty"`
	MessageIds               *Relation `xmlrpc:"message_ids,omitempty"`
	MessageIsFollower        *Bool     `xmlrpc:"message_is_follower,omitempty"`
	MessageLastPost          *Time     `xmlrpc:"message_last_post,omitempty"`
	MessageNeedaction        *Bool     `xmlrpc:"message_needaction,omitempty"`
	MessageNeedactionCounter *Int      `xmlrpc:"message_needaction_counter,omitempty"`
	MessagePartnerIds        *Relation `xmlrpc:"message_partner_ids,omitempty"`
	MessageUnread            *Bool     `xmlrpc:"message_unread,omitempty"`
	MessageUnreadCounter     *Int      `xmlrpc:"message_unread_counter,omitempty"`
	Name                     *String   `xmlrpc:"name,omitempty"`
	WebsiteMessageIds        *Relation `xmlrpc:"website_message_ids,omitempty"`
	WriteDate                *Time     `xmlrpc:"write_date,omitempty"`
	WriteUid                 *Many2One `xmlrpc:"write_uid,omitempty"`
}
// MailTestSimples represents array of mail.test.simple model.
type MailTestSimples []MailTestSimple

// MailTestSimpleModel is the odoo model name.
const MailTestSimpleModel = "mail.test.simple"
// Many2One convert MailTestSimple to *Many2One.
// NOTE(review): calls mts.Id.Get() unconditionally, so a nil Id would panic;
// presumably records fetched from the server always carry an id — confirm
// before passing locally-built values.
func (mts *MailTestSimple) Many2One() *Many2One {
	return NewMany2One(mts.Id.Get(), "")
}
// CreateMailTestSimple creates a new mail.test.simple model and returns its id.
// On error it returns (-1, err). When the server returns no ids it returns
// (-1, nil), so callers must treat a negative id as "not created".
func (c *Client) CreateMailTestSimple(mts *MailTestSimple) (int64, error) {
	ids, err := c.CreateMailTestSimples([]*MailTestSimple{mts})
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		return -1, nil
	}
	return ids[0], nil
}
// CreateMailTestSimples creates new mail.test.simple models and returns their ids.
func (c *Client) CreateMailTestSimples(mtss []*MailTestSimple) ([]int64, error) {
	var vv []interface{}
	for _, v := range mtss {
		vv = append(vv, v)
	}
	return c.Create(MailTestSimpleModel, vv)
}
// UpdateMailTestSimple updates an existing mail.test.simple record, matched
// by mts.Id.
func (c *Client) UpdateMailTestSimple(mts *MailTestSimple) error {
	return c.UpdateMailTestSimples([]int64{mts.Id.Get()}, mts)
}
// UpdateMailTestSimples updates existing mail.test.simple records.
// All records (represented by ids) will be updated by mts values.
func (c *Client) UpdateMailTestSimples(ids []int64, mts *MailTestSimple) error {
	return c.Update(MailTestSimpleModel, ids, mts)
}
// DeleteMailTestSimple deletes an existing mail.test.simple record.
func (c *Client) DeleteMailTestSimple(id int64) error {
	return c.DeleteMailTestSimples([]int64{id})
}
// DeleteMailTestSimples deletes existing mail.test.simple records.
func (c *Client) DeleteMailTestSimples(ids []int64) error {
	return c.Delete(MailTestSimpleModel, ids)
}
// GetMailTestSimple gets mail.test.simple existing record by id; it returns
// an error when the id does not exist.
func (c *Client) GetMailTestSimple(id int64) (*MailTestSimple, error) {
	records, err := c.GetMailTestSimples([]int64{id})
	if err != nil {
		return nil, err
	}
	if records == nil || len(*records) == 0 {
		return nil, fmt.Errorf("id %v of mail.test.simple not found", id)
	}
	return &((*records)[0]), nil
}
// GetMailTestSimples gets mail.test.simple existing records by ids.
func (c *Client) GetMailTestSimples(ids []int64) (*MailTestSimples, error) {
	records := &MailTestSimples{}
	err := c.Read(MailTestSimpleModel, ids, nil, records)
	if err != nil {
		return nil, err
	}
	return records, nil
}
// FindMailTestSimple finds the first mail.test.simple record matching the
// criteria; it returns an error when there is no match.
func (c *Client) FindMailTestSimple(criteria *Criteria) (*MailTestSimple, error) {
	records := &MailTestSimples{}
	if err := c.SearchRead(MailTestSimpleModel, criteria, NewOptions().Limit(1), records); err != nil {
		return nil, err
	}
	if records == nil || len(*records) == 0 {
		return nil, fmt.Errorf("mail.test.simple was not found with criteria %v", criteria)
	}
	return &((*records)[0]), nil
}
// FindMailTestSimples finds mail.test.simple records by querying it
// and filtering it with criteria and options.
func (c *Client) FindMailTestSimples(criteria *Criteria, options *Options) (*MailTestSimples, error) {
	mtss := &MailTestSimples{}
	if err := c.SearchRead(MailTestSimpleModel, criteria, options, mtss); err != nil {
		return nil, err
	}
	return mtss, nil
}
// FindMailTestSimpleIds finds record ids matching the criteria and options.
// On error an empty (non-nil) slice is returned alongside the error.
func (c *Client) FindMailTestSimpleIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(MailTestSimpleModel, criteria, options)
	if err == nil {
		return ids, nil
	}
	return []int64{}, err
}
// FindMailTestSimpleId finds the first record id matching the criteria and
// options; it returns an error when there is no match.
func (c *Client) FindMailTestSimpleId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(MailTestSimpleModel, criteria, options)
	switch {
	case err != nil:
		return -1, err
	case len(ids) > 0:
		return ids[0], nil
	}
	return -1, fmt.Errorf("mail.test.simple was not found with criteria %v and options %v", criteria, options)
}
|
package octo
/**
* Storing team logs and other team state.
*/
import (
"appengine"
"appengine/datastore"
// "log"
"time"
)
// Basic info about each team. There is one of these records per team.
type TeamRecord struct {
	ID        string
	Created   time.Time
	LastSeen  time.Time // refreshed on login and on most log writes (see TLog/TLogGuess)
	EmailList []string
	Password  string `datastore:",noindex"`
	Description string `datastore:",noindex"`
	Tags      string `datastore:",noindex"` // JSON-encoded map of "experience points"
	Badges    string `datastore:",noindex"` // JSON-encoded achievements unlocked
	AnnounceOK int
}
// How is this team doing with this Activity? There is one of these records
// for each team/activity pair (or no such record if team hasn't interacted
// w/this activity yet).
type TAStateRecord struct {
	TeamID  string
	ActID   string
	SolvedP bool // Did they solve it?
	Hints   int  // How many hints did they "buy"?
}
// What has this team done? A team will create many of these logs as it
// is created, logs in, makes guesses, sovlves puzzles, asks for hints...
// Only one of Guess/Hint/Notes is meaningful for a given Verb.
type TLogRecord struct {
	Created time.Time
	TeamID  string
	ActID   string
	Verb    string // "reg", "login", "guess", "hint", "resetpsswd", ...
	Guess   string `datastore:",noindex"`
	Hint    int    `datastore:",noindex"`
	Notes   string `datastore:",noindex"`
}
// TLog writes one team activity log record and returns the error (if any)
// from that write. For "login" events it also updates the team's LastSeen
// timestamp in a transaction, best-effort: a LastSeen failure is logged but
// does not fail the call (previously it was dropped entirely).
func TLog(context appengine.Context, teamID string, actID string, verb string, notes string) error {
	t := TLogRecord{
		Created: time.Now(),
		TeamID:  teamID,
		ActID:   actID,
		Verb:    verb,
		Notes:   notes,
	}
	_, err := datastore.Put(context, datastore.NewIncompleteKey(context, "TLog", nil), &t)
	// update team.LastSeen
	if err == nil && verb == "login" {
		txErr := datastore.RunInTransaction(context, func(c appengine.Context) error {
			key := datastore.NewKey(context, "Team", teamID, 0, nil)
			tr := TeamRecord{}
			err := datastore.Get(context, key, &tr)
			if err == nil {
				tr.LastSeen = time.Now()
				_, err = datastore.Put(context, key, &tr)
			}
			return err
		}, nil)
		if txErr != nil {
			context.Warningf("TLog: LastSeen update failed for team %s: %s", teamID, txErr.Error())
		}
	}
	if err != nil {
		context.Errorf("Error writing TLog T %s A %s V %s N %s ERR %s",
			teamID, actID, verb, notes, err.Error())
	}
	return err
}
// TLogGuess logs a guess-related event ("guess" or "solve") along with the
// guessed text. For non-solve events it also refreshes the team's LastSeen
// timestamp. Returns the error from writing the log record itself.
func TLogGuess(context appengine.Context, teamID string, actID string, verb string, guess string) error {
	t := TLogRecord{
		Created: time.Now(),
		TeamID:  teamID,
		ActID:   actID,
		Verb:    verb,
		Guess:   guess,
	}
	// TODO is this hack useful? I'm seeing "solves" that happen before the
	// relevant "guess". So let's add a moment to the solve time:
	if verb == "solve" {
		t.Created = time.Now().Add(time.Millisecond)
	}
	_, err := datastore.Put(context, datastore.NewIncompleteKey(context, "TLog", nil), &t)
	// update team.LastSeen. But (hack) not if team solved, because in
	// that case, we're about to spend a lot of time writing other things
	// to datastore. And team.LastSeen doesn't need to be super-accurate
	if err == nil && verb != "solve" {
		datastore.RunInTransaction(context, func(c appengine.Context) error {
			// BUGFIX: use the transaction context c (not the outer
			// context) so this read-modify-write actually runs inside
			// the transaction.
			key := datastore.NewKey(c, "Team", teamID, 0, nil)
			tr := TeamRecord{}
			err := datastore.Get(c, key, &tr)
			if err == nil {
				tr.LastSeen = time.Now()
				_, err = datastore.Put(c, key, &tr)
			}
			return err
		}, nil)
	}
	if err != nil {
		context.Errorf("Error writing TLog T %s A %s V %s G %s ERR %s",
			teamID, actID, verb, guess, err.Error())
	}
	return err
}
// TLogHint records that a team "bought" hint number `hint` for the given
// activity. Returns the datastore error, if any, after logging it.
func TLogHint(context appengine.Context, teamID string, actID string, hint int) error {
	rec := TLogRecord{
		Created: time.Now(),
		TeamID:  teamID,
		ActID:   actID,
		Verb:    "hint",
		Hint:    hint,
	}
	key := datastore.NewIncompleteKey(context, "TLog", nil)
	_, err := datastore.Put(context, key, &rec)
	if err != nil {
		context.Errorf("Error writing TLog T %s A %s V hint H %d ERR %s",
			teamID, actID, hint, err.Error())
	}
	return err
}
// Is this team guessing via a dictionary attack? Let's count their recent
// guesses (all TLog entries for the team in the last five minutes).
func TLogCountRecentGuesses(context appengine.Context, teamId string) int {
	cutoff := time.Now().Add(-5 * time.Minute)
	q := datastore.NewQuery("TLog").
		Order("-Created").
		Filter("TeamID=", teamId).
		Filter("Created >", cutoff).
		KeysOnly()
	count, err := q.Count(context)
	if err != nil {
		context.Warningf("CountGuesses GET whoops ERR=%s", err.Error())
	}
	return count
}
// CleanupTeamLogs is a placeholder for purging old TLog records.
// Currently a no-op.
func CleanupTeamLogs(context appengine.Context) {
	// TODO
}
// Given a team and a set of acts, determine which acts the team
// has not yet unlocked. Returns the subset of actIDs with no TAState
// record for this team.
func GetLockedActs(context appengine.Context, tid string, actIDs []string) []string {
	// One TAState key per act; key names are "<actID>:<teamID>".
	keys := make([]*datastore.Key, len(actIDs))
	tass := make([]TAStateRecord, len(actIDs))
	for ix, actID := range actIDs {
		keys[ix] = datastore.NewKey(context, "TAState", actID+":"+tid, 0, nil)
	}
	// Error deliberately ignored: missing entities leave zero-valued
	// records, and that zero value is exactly the "locked" signal below.
	datastore.GetMulti(context, keys, tass)
	var retval []string
	for ix, tas := range tass {
		if tas.TeamID == "" {
			// No stored TAState => the team never touched this act.
			retval = append(retval, actIDs[ix])
		}
	}
	return retval
}
// UnlockAct unlocks the given act for the team by creating a fresh
// TAState record — but only if the team has not already unlocked it.
// (Previously the Put ran unconditionally, rewriting existing state on
// every call, contrary to the stated intent.)
func UnlockAct(context appengine.Context, tid string, actID string) {
	tas := TAStateRecord{}
	key := datastore.NewKey(context, "TAState", actID+":"+tid, 0, nil)
	// Get error ignored on purpose: a missing entity leaves tas zeroed,
	// which is the "not yet unlocked" signal.
	datastore.Get(context, key, &tas)
	if tas.TeamID != "" {
		// Already unlocked; leave the existing record untouched.
		return
	}
	tas.TeamID = tid
	tas.ActID = actID
	tas.SolvedP = false
	tas.Hints = 0
	if _, err := datastore.Put(context, key, &tas); err != nil {
		context.Errorf("UnlockAct PUT T %s A %s ERR %s", tid, actID, err.Error())
	}
}
// Sometimes, GC wants a "spreadsheety" view of teams instead of a "log/diary"
// view. SummaryElement is one cell of that view: a team's progress on one act.
type SummaryElement struct {
	SolvedP   bool      // whether the team solved the act
	SolveTime time.Time // Created time of the "solve" log record
	Hints     int       // highest hint number taken
}
// SummarizeLogs folds recent TLog records into a two-level map:
// teamID -> actID -> summary of solves and hints taken.
// Only "hint" and "solve" verbs contribute; everything else is skipped.
func SummarizeLogs(context appengine.Context) (t map[string](map[string]*SummaryElement)) {
	t = map[string](map[string]*SummaryElement){}
	// query the logs: we are interested in solves and hint-takings.
	q := datastore.NewQuery("TLog").
		// would be nice to filter for Verb IN {"hint", "solve"}
		Filter("Created >", time.Now().Add(time.Hour*time.Duration(-999))).
		Order("-Created")
	for iter := q.Run(context); ; {
		var tlr TLogRecord
		_, err := iter.Next(&tlr)
		if err != nil {
			// datastore.Done or a real error: either way, stop summarizing.
			break
		}
		// Lazily create the per-team and per-act entries on first sight.
		if len(t[tlr.TeamID]) == 0 {
			t[tlr.TeamID] = map[string]*SummaryElement{}
		}
		_, ok := t[tlr.TeamID][tlr.ActID]
		if !ok {
			t[tlr.TeamID][tlr.ActID] = new(SummaryElement)
		}
		if tlr.Verb == "hint" {
			// Hints are numbered; record the highest one taken.
			if t[tlr.TeamID][tlr.ActID].Hints < tlr.Hint {
				t[tlr.TeamID][tlr.ActID].Hints = tlr.Hint
			}
		}
		if tlr.Verb == "solve" {
			t[tlr.TeamID][tlr.ActID].SolvedP = true
			t[tlr.TeamID][tlr.ActID].SolveTime = tlr.Created
		}
	}
	return
}
|
package kata
import (
"fmt"
"strconv"
"strings"
)
// StockList kata: https://www.codewars.com/kata/help-the-bookseller/train/go
// Each entry of listArt is "<CODE> <qty>" where the code's first letter is
// its category; listCat names the categories to report. Returns a string
// like "(A : 200) - (B : 1140)", or "" when either list is empty.
func StockList(listArt []string, listCat []string) string {
	if len(listCat) == 0 || len(listArt) == 0 {
		return ""
	}
	// Tally quantities per first-letter category in one pass over the
	// stocklist. (The old version re-scanned listArt once per category and
	// indexed a[1] without checking, panicking on malformed entries.)
	totals := make(map[string]int, len(listCat))
	for _, book := range listArt {
		fields := strings.Fields(book)
		if len(fields) < 2 {
			continue // malformed entry: no quantity field
		}
		qty, err := strconv.Atoi(fields[1])
		if err != nil {
			continue // malformed entry: non-numeric quantity
		}
		totals[fields[0][:1]] += qty
	}
	// Emit one "(cat : total)" term per requested category, joined with
	// " - " — no trailing-separator trimming needed.
	parts := make([]string, 0, len(listCat))
	for _, cat := range listCat {
		parts = append(parts, fmt.Sprintf("(%s : %d)", cat, totals[cat]))
	}
	return strings.Join(parts, " - ")
}
|
package main
import "fmt"
// Arrays
// A container that stores elements.
// The element type and capacity must be specified.
// The length of an array is part of its type.
// main demonstrates Go array basics: declaration, the three literal
// initialization styles, iteration, and multi-dimensional arrays.
func main() {
	var a1 [3]bool // array of length 3, e.g. [true false true]
	var a2 [4]bool // e.g. [true true false false]
	fmt.Printf("a1:%T a2:%T\n", a1, a2)
	// Array initialization.
	// Without explicit initialization, elements are zero values
	// (bool: false, ints/floats: 0, string: "").
	fmt.Println(a1, a2)
	// 1. Initialization style 1: full element list.
	a1 = [3]bool{true, true, true}
	fmt.Println(a1)
	// 2. Initialization style 2:
	// [...] lets the compiler infer the length from the literal.
	a10 := [...]int{0, 1, 2, 3, 4, 5}
	fmt.Println(a10)
	// 3. Initialization style 3: by index (unlisted indexes stay zero).
	a3 := [5]int{0: 1, 4: 2}
	fmt.Println(a3)
	// Iterating over an array.
	citys := [...]string{"北京", "上海", "南京"}
	// 1. Index-based loop.
	for i := 0; i < len(citys); i++ {
		fmt.Println(citys[i])
	}
	// Multi-dimensional arrays.
	// [[1,2] [3,4] [5,6]]
	var a11 [3][2]int // three elements, each itself a [2]int; all elements share one type
	a11 = [3][2]int{
		[2]int{1, 2},
		[2]int{3, 4},
		[2]int{5, 6},
	}
	fmt.Println(a11)
}
|
package Variables
import (
"fmt"
"github.com/sodhigagan/MyPractice/I/Constants"
)
// Old prints an age in years and months computed against a hard-coded
// reference date (year 2017, month 5) and the Constants package's
// Year/Month values.
// NOTE(review): the reference year/month are frozen at 2017-05, so the
// printed age goes stale; presumably this should track the current date —
// confirm intent before changing.
func Old() {
	fmt.Print(", so that makes me ", 2017-Constants.Year, " years and ", 5-Constants.Month, " month(s) old")
}
|
package stl
// Solid is a named collection of triangular facets, matching the structure
// of an STL model.
type Solid struct {
	Name   string
	Facets []Facet
}

// Facet is a single STL triangle: a normal vector plus three vertices.
type Facet struct {
	Normal   Vec3
	Vertices [3]Vec3
}

// Vec3 is a 3-component vector (or point) with float64 coordinates.
type Vec3 struct {
	X, Y, Z float64
}
|
package gopaxos
import (
"fmt"
"github.com/buptmiao/gopaxos/paxospb"
"io/ioutil"
"os"
"strings"
)
// checkpointReceiver accepts a checkpoint transferred file-by-file from a
// sender node and writes it into temporary "cp_tmp_<smID>" directories
// under the log storage path. A transfer is identified by
// (senderNodeID, uuid) and its chunks arrive ordered by sequence.
type checkpointReceiver struct {
	conf       *config
	logStorage LogStorage

	// Identity of the in-progress transfer.
	senderNodeID uint64
	uuid         uint64
	// Sequence number of the last chunk accepted.
	sequence uint64

	// Caches directories already created so each is only Mkdir'd once.
	hasInitDirMap map[string]bool
}
// newCheckpointReceiver builds a receiver bound to the given group
// configuration and log storage, starting with an empty
// created-directory cache.
func newCheckpointReceiver(conf *config, ls LogStorage) *checkpointReceiver {
	r := &checkpointReceiver{}
	r.conf = conf
	r.logStorage = ls
	r.hasInitDirMap = map[string]bool{}
	return r
}
// reset clears all per-transfer state so the receiver is ready for a new
// checkpoint transfer.
func (c *checkpointReceiver) reset() {
	c.senderNodeID = nullNode
	c.uuid, c.sequence = 0, 0
	c.hasInitDirMap = map[string]bool{}
}
// newReceiver begins accepting a fresh checkpoint transfer from
// senderNodeID/uuid: it wipes any leftover temp checkpoint directories
// and all local paxos log data, then resets the transfer bookkeeping.
func (c *checkpointReceiver) newReceiver(senderNodeID uint64, uuid uint64) error {
	if err := c.clearCheckpointTmp(); err != nil {
		return err
	}
	// The incoming checkpoint replaces local state entirely, so the
	// existing log must be cleared first.
	if err := c.logStorage.ClearAllLog(c.conf.groupIdx); err != nil {
		lPLGErr(c.conf.groupIdx, "ClearAllLog fail, groupidx %d, error: %v",
			c.conf.getMyGroupIdx(), err)
		return err
	}
	c.hasInitDirMap = make(map[string]bool)
	c.senderNodeID = senderNodeID
	c.uuid = uuid
	c.sequence = 0
	return nil
}
// clearCheckpointTmp deletes every temporary checkpoint directory
// ("cp_tmp_<smID>", as created by getTmpDirPath) left under the log
// storage path by a previous, unfinished transfer. It stops at the first
// deletion failure and returns that error.
func (c *checkpointReceiver) clearCheckpointTmp() error {
	logPath := c.logStorage.GetLogStorageDirPath(c.conf.groupIdx)
	files, err := ioutil.ReadDir(logPath)
	if err != nil {
		lPLGErr(c.conf.groupIdx, "read dir failed, error: %v", err)
		return err
	}
	for _, f := range files {
		// Match only this receiver's own temp dirs by prefix; the previous
		// strings.Contains would also delete unrelated entries that merely
		// embed "cp_tmp_" somewhere in their name.
		if strings.HasPrefix(f.Name(), "cp_tmp_") {
			childpath := fmt.Sprintf("%s/%s", logPath, f.Name())
			err = deleteDir(childpath)
			if err != nil {
				break
			}
			lPLGHead(c.conf.groupIdx, "rm dir %s done!", childpath)
		}
	}
	return err
}
// isReceiverFinish reports whether an end-of-checkpoint marker matches the
// transfer this receiver is tracking: same sender, same transfer UUID, and
// an end sequence exactly one past the last chunk accepted.
func (c *checkpointReceiver) isReceiverFinish(senderNodeID uint64, uuid uint64, endSequence uint64) bool {
	sameSender := senderNodeID == c.senderNodeID
	sameTransfer := uuid == c.uuid
	return sameSender && sameTransfer && endSequence == c.sequence+1
}
// getTmpDirPath returns the temp directory used to stage checkpoint files
// for one state machine: "<logStoragePath>/cp_tmp_<smID>".
func (c *checkpointReceiver) getTmpDirPath(smID int64) string {
	base := c.logStorage.GetLogStorageDirPath(c.conf.groupIdx)
	return fmt.Sprintf("%s/cp_tmp_%d", base, smID)
}
// initFilePath normalizes filePath into an absolute, "/"-separated path
// and creates every intermediate directory on the way (cached in
// hasInitDirMap so each is created at most once). The final path
// component is treated as a file and not created. Returns the normalized
// path.
func (c *checkpointReceiver) initFilePath(filePath string) (string, error) {
	lPLGHead(c.conf.groupIdx, "START filepath %s", filePath)
	// Wrap in slashes so splitting yields uniform, possibly-empty parts
	// regardless of whether filePath was absolute or had a trailing slash.
	newPath := "/" + filePath + "/"
	tmpList := strings.Split(newPath, "/")
	// Drop the empty parts produced by leading/trailing/double slashes.
	dirList := make([]string, 0, len(tmpList))
	for _, v := range tmpList {
		if v != "" {
			dirList = append(dirList, v)
		}
	}
	formatFilePath := "/"
	for i, v := range dirList {
		if i+1 == len(dirList) {
			// Last component: the file itself — append without creating.
			formatFilePath += v
		} else {
			formatFilePath += v + "/"
			// Create each intermediate directory once per receiver.
			if _, ok := c.hasInitDirMap[formatFilePath]; !ok {
				err := c.createDir(formatFilePath)
				if err != nil {
					return "", err
				}
				c.hasInitDirMap[formatFilePath] = true
			}
		}
	}
	lPLGImp(c.conf.groupIdx, "ok, format filepath %s", formatFilePath)
	return formatFilePath, nil
}
// createDir makes a single directory level at dirPath (mode 0775) if it
// does not already exist. Note: Stat errors other than "not exist" are
// silently treated as "already exists" and skipped.
func (c *checkpointReceiver) createDir(dirPath string) error {
	if _, err := os.Stat(dirPath); os.IsNotExist(err) {
		if err = os.Mkdir(dirPath, 0775); err != nil {
			lPLGErr(c.conf.groupIdx, "Create dir fail, path %s", dirPath)
			return err
		}
	}
	return nil
}
// receiveCheckpoint validates and applies one chunk of a checkpoint
// transfer. The chunk must belong to the current (sender, uuid) transfer
// and carry the next sequence number; its offset must equal the current
// size of the target temp file. On success the buffer is appended and the
// receiver's sequence advances. A chunk repeating the current sequence is
// skipped idempotently (returns nil).
func (c *checkpointReceiver) receiveCheckpoint(checkpointMsg *paxospb.CheckpointMsg) error {
	if checkpointMsg.GetNodeID() != c.senderNodeID || checkpointMsg.GetUUID() != c.uuid {
		// BUGFIX: the original used C's %lu, which is not a Go fmt verb
		// and rendered as "%!l(...)u" garbage in the log.
		lPLGErr(c.conf.groupIdx, "msg not valid, Msg.SenderNodeID %d Receiver.SenderNodeID %d Msg.UUID %d Receiver.UUID %d",
			checkpointMsg.GetNodeID(), c.senderNodeID, checkpointMsg.GetUUID(), c.uuid)
		return errMsgNotValid
	}
	if checkpointMsg.GetSequence() == c.sequence {
		// Duplicate delivery of the chunk just applied; skip quietly.
		lPLGErr(c.conf.groupIdx, "msg already receive, skip, Msg.Sequence %d Receiver.Sequence %d",
			checkpointMsg.GetSequence(), c.sequence)
		return nil
	}
	if checkpointMsg.GetSequence() != c.sequence+1 {
		lPLGErr(c.conf.groupIdx, "msg sequence wrong, Msg.Sequence %d Receiver.Sequence %d",
			checkpointMsg.GetSequence(), c.sequence)
		return errMsgSequenceWrong
	}
	filePath := c.getTmpDirPath(checkpointMsg.GetSMID()) + "/" + checkpointMsg.GetFilePath()
	formatFilePath, err := c.initFilePath(filePath)
	if err != nil {
		return err
	}
	fd, err := os.OpenFile(formatFilePath, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0600)
	if err != nil {
		lPLGErr(c.conf.groupIdx, "open file fail, filepath %s", formatFilePath)
		return err
	}
	defer fd.Close()
	fileOffset, err := fd.Seek(0, os.SEEK_END)
	if err != nil {
		// BUGFIX: previously unchecked — a failed Seek left fileOffset
		// meaningless and was misreported as an offset mismatch.
		lPLGErr(c.conf.groupIdx, "seek fail, filepath %s, error: %v", formatFilePath, err)
		return err
	}
	if fileOffset != int64(checkpointMsg.GetOffset()) {
		lPLGErr(c.conf.groupIdx, "file.offset %d not equal to msg.offset %d", fileOffset, checkpointMsg.GetOffset())
		return errFileOffsetMismatch
	}
	writeLen, err := fd.Write(checkpointMsg.GetBuffer())
	if err != nil {
		lPLGImp(c.conf.groupIdx, "write fail, writelen %d buffer size %d", writeLen, len(checkpointMsg.GetBuffer()))
		return err
	}
	c.sequence++
	lPLGImp(c.conf.groupIdx, "END ok, writelen %d", writeLen)
	return nil
}
|
package main
import (
"fmt"
"sort"
)
// Food is one menu item: its display name and tax-included price in yen.
type Food struct {
	Name  string
	Price int
}
// Foods implements sort.Interface to order menu items by price.
type Foods []Food

// Len returns the number of items (part of sort.Interface).
func (t Foods) Len() int {
	return len(t)
}
// Less orders foods by descending price (part of sort.Interface).
// BUGFIX: sort.Interface requires a strict weak ordering, so equal prices
// must compare "not less". The previous >= made Less(i, j) and Less(j, i)
// both true for equal prices, violating the contract.
func (t Foods) Less(i, j int) bool {
	return t[i].Price > t[j].Price
}
// Swap exchanges two items (part of sort.Interface).
func (t Foods) Swap(i, j int) {
	t[i], t[j] = t[j], t[i]
}
// main prints a menu, sorts it by descending price via the Foods
// sort.Interface implementation, and prints it again.
func main() {
	var foods = []Food{
		{"鰻丼並", 750},
		{"牛丼並", 380},
		{"牛丼頭", 450},
		{"鰻丼大", 890},
		{"牛丼大", 550},
		{"牛丼特", 550},
	}
	// Original (unsorted) listing.
	for _, food := range foods {
		fmt.Printf("%+v\n", food)
	}
	sort.Sort(Foods(foods))
	// Sorted listing: name and tax-included price.
	for _, food := range foods {
		fmt.Printf("商品名: %s, 価格(税込): %d\n", food.Name, food.Price)
	}
}
|
package requests
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/google/go-querystring/query"
"github.com/atomicjolt/canvasapi"
"github.com/atomicjolt/canvasapi/models"
"github.com/atomicjolt/string_utils"
)
// GetUsersMostRecentlyGradedSubmissions models the Canvas API request for a
// user's most recently graded submissions.
// https://canvas.instructure.com/doc/api/users.html
//
// Path Parameters:
// # Path.ID (Required) ID
//
// Query Parameters:
// # Query.Include (Optional) . Must be one of assignmentAssociations to include with the group
// # Query.OnlyCurrentEnrollments (Optional) Returns submissions for only currently active enrollments
// # Query.OnlyPublishedAssignments (Optional) Returns submissions for only published assignments
//
type GetUsersMostRecentlyGradedSubmissions struct {
	Path struct {
		ID string `json:"id" url:"id,omitempty"` // (Required)
	} `json:"path"`

	Query struct {
		Include                  []string `json:"include" url:"include,omitempty"`                                     // (Optional) . Must be one of assignment
		OnlyCurrentEnrollments   bool     `json:"only_current_enrollments" url:"only_current_enrollments,omitempty"`   // (Optional)
		OnlyPublishedAssignments bool     `json:"only_published_assignments" url:"only_published_assignments,omitempty"` // (Optional)
	} `json:"query"`
}
// GetMethod returns the HTTP method for this request.
func (t *GetUsersMostRecentlyGradedSubmissions) GetMethod() string {
	return "GET"
}
// GetURLPath returns the endpoint path with Path.ID substituted into the
// {id} placeholder.
func (t *GetUsersMostRecentlyGradedSubmissions) GetURLPath() string {
	id := fmt.Sprintf("%v", t.Path.ID)
	return strings.ReplaceAll("users/{id}/graded_submissions", "{id}", id)
}
// GetQuery URL-encodes the Query struct (via its `url` struct tags) into a
// query string.
func (t *GetUsersMostRecentlyGradedSubmissions) GetQuery() (string, error) {
	v, err := query.Values(t.Query)
	if err != nil {
		return "", err
	}
	return v.Encode(), nil
}
// GetBody returns no form body: this is a GET request.
func (t *GetUsersMostRecentlyGradedSubmissions) GetBody() (url.Values, error) {
	return nil, nil
}
// GetJSON returns no JSON body: this is a GET request.
func (t *GetUsersMostRecentlyGradedSubmissions) GetJSON() ([]byte, error) {
	return nil, nil
}
// HasErrors validates required path parameters and enumerated query values,
// returning a single error listing every problem, or nil when valid.
func (t *GetUsersMostRecentlyGradedSubmissions) HasErrors() error {
	errs := []string{}
	if t.Path.ID == "" {
		errs = append(errs, "'Path.ID' is required")
	}
	for _, v := range t.Query.Include {
		if v != "" && !string_utils.Include([]string{"assignment"}, v) {
			errs = append(errs, "Include must be one of assignment")
		}
	}
	if len(errs) > 0 {
		// BUGFIX: use an explicit %s verb. Passing the joined text as the
		// format string (fmt.Errorf(strings.Join(...))) mangles any literal
		// '%' characters and fails go vet's printf check.
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do executes the request against the given Canvas client. If next is
// non-nil it follows that pagination link instead of building a fresh
// request. Returns the decoded submissions plus pagination info.
func (t *GetUsersMostRecentlyGradedSubmissions) Do(c *canvasapi.Canvas, next *url.URL) ([]*models.Submission, *canvasapi.PagedResource, error) {
	var err error
	var response *http.Response
	if next != nil {
		// Follow a pagination link from a previous response.
		response, err = c.Send(next, t.GetMethod(), nil)
	} else {
		response, err = c.SendRequest(t)
	}
	// BUGFIX: the original checked err != nil twice back to back; the
	// second check was dead code. Also defer the body close so it runs on
	// every return path.
	if err != nil {
		return nil, nil, err
	}
	defer response.Body.Close()

	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return nil, nil, err
	}

	ret := []*models.Submission{}
	if err := json.Unmarshal(body, &ret); err != nil {
		return nil, nil, err
	}

	pagedResource, err := canvasapi.ExtractPagedResource(response.Header)
	if err != nil {
		return nil, nil, err
	}

	return ret, pagedResource, nil
}
|
package main
import (
"code.google.com/p/gcfg"
"flag"
"github.com/op/go-logging"
"os/user"
"path/filepath"
)
// Command line flags
var configFlag = flag.String("c", "", "Use alternative config file")
var verboseFlag = flag.Bool("v", false, "Show verbose debug information")

// Config receives the parsed gcfg configuration file (see main).
var Config struct {
	Database struct {
		// ConnectionString is the database connection string / DSN.
		ConnectionString string
	}
}

// Logger for this application.
var log = logging.MustGetLogger("furiousmustard")
// main parses flags, configures logging verbosity, locates the config
// file (the -c flag or ~/.furiousmustard.conf), and loads it into Config.
// Exits fatally if the user lookup or config read fails.
func main() {
	// Parse command line flags
	flag.Parse()

	// Set up logging
	var format = logging.MustStringFormatter(" %{level: -8s} %{message}")
	logging.SetFormatter(format)
	if *verboseFlag {
		logging.SetLevel(logging.DEBUG, "furiousmustard")
	} else {
		logging.SetLevel(logging.INFO, "furiousmustard")
	}

	log.Info("FuriousMustard starting")

	// Find config file: -c flag wins, otherwise default to the user's
	// home directory.
	var cfgFile string
	if len(*configFlag) > 0 {
		cfgFile = *configFlag
	} else {
		u, err := user.Current()
		if err != nil {
			log.Fatal(err)
		}
		cfgFile = filepath.Join(u.HomeDir, ".furiousmustard.conf")
	}

	// Read config file
	log.Debug("Reading config from %s", cfgFile)
	err := gcfg.ReadFileInto(&Config, cfgFile)
	if err != nil {
		log.Fatal(err)
	}
}
|
package main
import "fmt"
// main prints a two-day schedule, five lines per day.
func main() {
	day := 1
	for day <= 2 {
		for kiss := 1; kiss <= 5; kiss++ {
			fmt.Println("On day ", day, " kiss Pidhu ", kiss, " times")
		}
		day++
	}
}
|
package routes
import (
"os"
"fmt"
"strings"
"net/http"
"path/filepath"
"github.com/go-chi/chi"
"github.com/robert-hansen/goapp/controller"
)
// NewRouter wires up the application routes: static assets under /static
// (served from ./public), the home page at /, and the /hello greeting.
func NewRouter() *chi.Mux {
	router := chi.NewRouter()

	// Serve ./public relative to the working directory; Getwd error is
	// ignored, leaving filesDir relative on failure.
	workDir, _ := os.Getwd()
	filesDir := filepath.Join(workDir, "public")
	FileServer(router, "/static", http.Dir(filesDir))

	router.Get("/", Index)
	// NOTE(review): ":name" is chi's older URL-parameter syntax; newer chi
	// releases use "{name}" — confirm against the vendored chi version.
	router.Get("/hello/:name", Hello)
	return router
}
// Index renders the home page by delegating to the controller.
func Index(res http.ResponseWriter, req *http.Request) {
	controller.Home(res, req)
}
// Hello writes a plain-text greeting using the "name" value carried on the
// request context.
func Hello(res http.ResponseWriter, req *http.Request) {
	// BUGFIX: the original passed the name as an extra Fprintf argument
	// with no matching verb, producing "Hello %!(EXTRA ...)" output.
	fmt.Fprintf(res, "Hello %v", req.Context().Value("name"))
}
// FileServer conveniently sets up a http.FileServer handler to serve
// static files from a http.FileSystem. It mounts the file server under
// path, redirecting "path" to "path/" and registering a wildcard route
// for everything beneath it. Panics if path contains URL parameters.
func FileServer(r chi.Router, path string, root http.FileSystem) {
	if strings.ContainsAny(path, "{}*") {
		panic("FileServer does not permit URL parameters.")
	}

	// Strip the mount prefix so the FileSystem sees paths relative to root.
	fs := http.StripPrefix(path, http.FileServer(root))

	if path != "/" && path[len(path)-1] != '/' {
		// Canonicalize: permanently redirect "/static" to "/static/".
		r.Get(path, http.RedirectHandler(path+"/", 301).ServeHTTP)
		path += "/"
	}
	// Catch-all under the mount point.
	path += "*"

	r.Get(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fs.ServeHTTP(w, r)
	}))
}
package groupingobjects
import (
"context"
"fmt"
)
// NSGroup is a handle on a single NS Group resource, pairing its
// name/id with the API client (and context) used to operate on it.
type NSGroup struct {
	client groupingObjectsAPI
	ctx    context.Context
	id     string
	name   string
}
// NewNSGroup builds an NSGroup handle around the grouping-objects API
// client for the group with the given name and id.
// NOTE(review): storing ctx in the struct pins one context to all later
// calls; idiomatic Go passes ctx per call, but changing that would break
// the existing call sites.
func NewNSGroup(client groupingObjectsAPI, ctx context.Context, name, id string) NSGroup {
	return NSGroup{
		client: client,
		ctx:    ctx,
		name:   name,
		id:     id,
	}
}
// Delete removes this NS Group by id via the API client. The underlying
// error is wrapped with %w so callers can inspect it with errors.Is/As.
func (n NSGroup) Delete() error {
	if _, err := n.client.DeleteNSGroup(n.ctx, n.id, map[string]interface{}{}); err != nil {
		return fmt.Errorf("Delete: %w", err)
	}
	return nil
}
// Name returns the group's display name.
func (n NSGroup) Name() string {
	return n.name
}
// Type returns the human-readable resource type label.
func (n NSGroup) Type() string {
	return "NS Group"
}
|
package main
import (
"fmt"
"time"
)
// main demonstrates a non-blocking receive: a goroutine sends one value on
// an unbuffered channel, and after a short sleep the select either reads
// it or falls through to default.
func main() {
	number := make(chan int)

	go func() {
		number <- 42
	}()

	// Give the sender time to block on the unbuffered channel; without
	// this sleep the select would usually hit the default case instead.
	time.Sleep(time.Millisecond * 100)

	select {
	case n := <-number:
		fmt.Println(n)
	default:
		fmt.Println("ничего, пусто, completle nothing")
	}
}
|
// Copyright 2020 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package remt_v02
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestNestedTypes checks Validate() on the zero value of every nested
// message type in the remt_v02 schema: NotNil where the type has required
// fields (zero value is invalid), Nil where all fields are optional.
func TestNestedTypes(t *testing.T) {
	assert.NotNil(t, AddressType3Choice{}.Validate())
	assert.NotNil(t, Authorisation1Choice{}.Validate())
	assert.Nil(t, BranchAndFinancialInstitutionIdentification6{}.Validate())
	assert.Nil(t, BranchData3{}.Validate())
	assert.NotNil(t, ClearingSystemIdentification2Choice{}.Validate())
	assert.NotNil(t, ClearingSystemMemberIdentification2{}.Validate())
	assert.Nil(t, Contact4{}.Validate())
	assert.NotNil(t, DateAndPlaceOfBirth1{}.Validate())
	assert.NotNil(t, FinancialIdentificationSchemeName1Choice{}.Validate())
	assert.Nil(t, FinancialInstitutionIdentification18{}.Validate())
	assert.NotNil(t, GenericFinancialIdentification1{}.Validate())
	assert.NotNil(t, GenericIdentification30{}.Validate())
	assert.NotNil(t, GenericOrganisationIdentification1{}.Validate())
	assert.NotNil(t, GenericPersonIdentification1{}.Validate())
	assert.NotNil(t, GroupHeader79{}.Validate())
	assert.NotNil(t, NameAndAddress16{}.Validate())
	assert.Nil(t, OrganisationIdentification29{}.Validate())
	assert.NotNil(t, OrganisationIdentificationSchemeName1Choice{}.Validate())
	assert.NotNil(t, OtherContact1{}.Validate())
	assert.Nil(t, Party38Choice{}.Validate())
	assert.Nil(t, PartyIdentification135{}.Validate())
	assert.Nil(t, PersonIdentification13{}.Validate())
	assert.NotNil(t, PersonIdentificationSchemeName1Choice{}.Validate())
	assert.Nil(t, PostalAddress24{}.Validate())
	assert.NotNil(t, RemittanceLocation5{}.Validate())
	assert.NotNil(t, RemittanceLocationAdviceV02{}.Validate())
	assert.NotNil(t, RemittanceLocationData1{}.Validate())
	assert.Nil(t, SupplementaryData1{}.Validate())
	assert.Nil(t, SupplementaryDataEnvelope1{}.Validate())
	assert.NotNil(t, TransactionReferences5{}.Validate())
	assert.Nil(t, AccountIdentification4Choice{}.Validate())
	assert.NotNil(t, GenericAccountIdentification1{}.Validate())
	assert.NotNil(t, AccountSchemeName1Choice{}.Validate())
	assert.NotNil(t, TaxRecordDetails1{}.Validate())
	assert.Nil(t, TaxPeriod1{}.Validate())
	assert.Nil(t, TaxRecord1{}.Validate())
	assert.NotNil(t, TaxAmount1{}.Validate())
	assert.NotNil(t, ActiveOrHistoricCurrencyAndAmount{}.Validate())
	assert.NotNil(t, AmountType3Choice{}.Validate())
	assert.NotNil(t, EquivalentAmount2{}.Validate())
	assert.Nil(t, DatePeriodDetails{}.Validate())
	assert.Nil(t, BranchAndFinancialInstitutionIdentification5{}.Validate())
	assert.Nil(t, BranchData2{}.Validate())
	assert.Nil(t, FinancialInstitutionIdentification8{}.Validate())
	assert.Nil(t, CashAccount24{}.Validate())
	assert.NotNil(t, CashAccountType2Choice{}.Validate())
	assert.NotNil(t, CategoryPurpose1Choice{}.Validate())
	assert.Nil(t, ContactDetails2{}.Validate())
	assert.NotNil(t, CreditorReferenceInformation2{}.Validate())
	assert.NotNil(t, CreditorReferenceType1Choice{}.Validate())
	assert.NotNil(t, CreditorReferenceType2{}.Validate())
	assert.NotNil(t, DateAndPlaceOfBirth{}.Validate())
	assert.NotNil(t, DiscountAmountAndType1{}.Validate())
	assert.NotNil(t, DiscountAmountType1Choice{}.Validate())
	assert.NotNil(t, DocumentAdjustment1{}.Validate())
	assert.NotNil(t, DocumentLineIdentification1{}.Validate())
	assert.Nil(t, DocumentLineInformation1{}.Validate())
	assert.NotNil(t, DocumentLineType1{}.Validate())
	assert.NotNil(t, DocumentLineType1Choice{}.Validate())
	assert.NotNil(t, Garnishment1{}.Validate())
	assert.NotNil(t, GarnishmentType1{}.Validate())
	assert.NotNil(t, GarnishmentType1Choice{}.Validate())
	assert.Nil(t, GroupHeader62{}.Validate())
	assert.Nil(t, OrganisationIdentification8{}.Validate())
	assert.Nil(t, OriginalPaymentInformation6{}.Validate())
	assert.Nil(t, Party11Choice{}.Validate())
	assert.Nil(t, PartyIdentification43{}.Validate())
	assert.Nil(t, PaymentTypeInformation19{}.Validate())
	assert.Nil(t, PersonIdentification5{}.Validate())
	assert.Nil(t, PostalAddress6{}.Validate())
	assert.Nil(t, ReferredDocumentInformation7{}.Validate())
	assert.NotNil(t, ReferredDocumentType3Choice{}.Validate())
	assert.NotNil(t, ReferredDocumentType4{}.Validate())
	assert.Nil(t, RemittanceAdviceV02{}.Validate())
	assert.Nil(t, RemittanceAmount2{}.Validate())
	assert.Nil(t, RemittanceAmount3{}.Validate())
	assert.Nil(t, RemittanceInformation12{}.Validate())
	assert.NotNil(t, ServiceLevel8Choice{}.Validate())
	assert.Nil(t, StructuredRemittanceInformation13{}.Validate())
	assert.NotNil(t, TaxAmountAndType1{}.Validate())
	assert.NotNil(t, TaxAmountType1Choice{}.Validate())
	assert.Nil(t, TaxAuthorisation1{}.Validate())
	assert.Nil(t, TaxInformation4{}.Validate())
	assert.Nil(t, TaxParty1{}.Validate())
	assert.Nil(t, TaxParty2{}.Validate())
	assert.Nil(t, TransactionReferences4{}.Validate())
	assert.NotNil(t, LocalInstrument2Choice{}.Validate())
	assert.Nil(t, ExchangeRate1{}.Validate())
}
// TestTypes checks Validate() on the scalar code types: the zero value
// (empty string) is always invalid; pattern-restricted "external" codes
// accept any non-empty value like "test", while enumerated codes reject
// "test" and accept a known member ("CELL", "FAXI", ...).
func TestTypes(t *testing.T) {
	var type1 ExternalClearingSystemIdentification1Code
	assert.NotNil(t, type1.Validate())
	type1 = "test"
	assert.Nil(t, type1.Validate())

	var type2 ExternalFinancialInstitutionIdentification1Code
	assert.NotNil(t, type2.Validate())
	type2 = "test"
	assert.Nil(t, type2.Validate())

	var type3 ExternalOrganisationIdentification1Code
	assert.NotNil(t, type3.Validate())
	type3 = "test"
	assert.Nil(t, type3.Validate())

	var type4 ExternalPersonIdentification1Code
	assert.NotNil(t, type4.Validate())
	type4 = "test"
	assert.Nil(t, type4.Validate())

	// Enumerated code: "test" is not a member, "CELL" is.
	var type5 PreferredContactMethod1Code
	assert.NotNil(t, type5.Validate())
	type5 = "test"
	assert.NotNil(t, type5.Validate())
	type5 = "CELL"
	assert.Nil(t, type5.Validate())

	// Enumerated code: "test" is not a member, "FAXI" is.
	var type6 RemittanceLocationMethod2Code
	assert.NotNil(t, type6.Validate())
	type6 = "test"
	assert.NotNil(t, type6.Validate())
	type6 = "FAXI"
	assert.Nil(t, type6.Validate())

	var type7 ExternalAccountIdentification1Code
	assert.NotNil(t, type7.Validate())
	type7 = "test"
	assert.Nil(t, type7.Validate())

	var type8 TaxRecordPeriod1Code
	assert.NotNil(t, type8.Validate())
	type8 = "MM01"
	assert.Nil(t, type8.Validate())

	var type9 AddressType2Code
	assert.NotNil(t, type9.Validate())
	type9 = "ADDR"
	assert.Nil(t, type9.Validate())

	var type10 Authorisation1Code
	assert.NotNil(t, type10.Validate())
	type10 = "AUTH"
	assert.Nil(t, type10.Validate())

	var type11 ExternalCashAccountType1Code
	assert.NotNil(t, type11.Validate())
	type11 = "AUTH"
	assert.Nil(t, type11.Validate())

	var type12 DocumentType6Code
	assert.NotNil(t, type12.Validate())
	type12 = "MSIN"
	assert.Nil(t, type12.Validate())

	var type13 DocumentType3Code
	assert.NotNil(t, type13.Validate())
	type13 = "RADM"
	assert.Nil(t, type13.Validate())

	var type14 ExchangeRateType1Code
	assert.NotNil(t, type14.Validate())
	type14 = "SPOT"
	assert.Nil(t, type14.Validate())

	var type15 ExternalCategoryPurpose1Code
	assert.NotNil(t, type15.Validate())
	type15 = "SPOT"
	assert.Nil(t, type15.Validate())

	var type16 ExternalDiscountAmountType1Code
	assert.NotNil(t, type16.Validate())
	type16 = "SPOT"
	assert.Nil(t, type16.Validate())

	var type17 ExternalDocumentLineType1Code
	assert.NotNil(t, type17.Validate())
	type17 = "SPOT"
	assert.Nil(t, type17.Validate())

	var type18 ExternalGarnishmentType1Code
	assert.NotNil(t, type18.Validate())
	type18 = "SPOT"
	assert.Nil(t, type18.Validate())

	var type19 ExternalLocalInstrument1Code
	assert.NotNil(t, type19.Validate())
	type19 = "SPOT"
	assert.Nil(t, type19.Validate())

	var type20 ExternalServiceLevel1Code
	assert.NotNil(t, type20.Validate())
	type20 = "SPOT"
	assert.Nil(t, type20.Validate())

	var type21 ExternalTaxAmountType1Code
	assert.NotNil(t, type21.Validate())
	type21 = "SPOT"
	assert.Nil(t, type21.Validate())

	var type22 Priority2Code
	assert.NotNil(t, type22.Validate())
	type22 = "HIGH"
	assert.Nil(t, type22.Validate())
}
|
// Copyright © 2016 Benjamin Martensson <benjamin.martensson@nrk.no>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cmd
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"crypto/tls"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os/user"
"strconv"
"time"
"github.com/spf13/viper"
)
// PasswordList is the summary listing returned by the TPM API: one
// name/id pair per password entry.
type PasswordList []struct {
	Name string `json:"name,omitempty" yaml:"name,omitempty"`
	ID   int    `json:"id,omitempty" yaml:"id,omitempty"`
}
// Password is a full TPM password record as returned by the API,
// including audit metadata (created/updated by and when) and the owning
// project.
type Password struct {
	ID           int    `json:"id,omitempty" yaml:"id,omitempty"`
	Name         string `json:"name,omitempty" yaml:"name,omitempty"`
	Username     string `json:"username,omitempty" yaml:"username,omitempty"`
	Password     string `json:"password,omitempty" yaml:"password,omitempty"`
	AccessInfo   string `json:"access_info,omitempty" yaml:"access_info,omitempty"`
	Email        string `json:"email,omitempty" yaml:"email,omitempty"`
	Notes        string `json:"notes,omitempty" yaml:"notes,omitempty"`
	NotesSnippet string `json:"notes_snippet,omitempty" yaml:"notes_snippet,omitempty"`
	UpdatedOn    string `json:"updated_on,omitempty" yaml:"updated_on,omitempty"`
	UpdatedBy    struct {
		ID   int    `json:"id"`
		Name string `json:"name"`
	} `json:"updated_by"`
	CreatedOn string `json:"created_on,omitempty" yaml:"created_on,omitempty"`
	CreatedBy struct {
		ID   int    `json:"id"`
		Name string `json:"name"`
	} `json:"created_by"`
	Tags    string `json:"tags,omitempty" yaml:"tags,omitempty"`
	Project struct {
		Name string `json:"name,omitempty" yaml:"name,omitempty"`
		ID   int    `json:"id,omitempty" yaml:"id,omitempty"`
	} `json:"project,omitempty" yaml:"project,omitempty"`
}
// Newpassword is the JSON payload for creating (or updating) a password
// entry via the TPM API; empty fields are omitted from the request body.
type Newpassword struct {
	Name       string `json:"name,omitempty"`
	Username   string `json:"username,omitempty"`
	Password   string `json:"password,omitempty"`
	ProjectID  int    `json:"project_id,omitempty"`
	Email      string `json:"email,omitempty"`
	Tags       string `json:"tags,omitempty"`
	Notes      string `json:"notes,omitempty"`
	AccessInfo string `json:"access_info,omitempty"`
	ExpiryDate string `json:"expiry_date,omitempty"`
}
// newpassword accumulates flag values for password-creation commands.
var newpassword Newpassword

// outputFormat selects how command results are rendered.
var outputFormat string

// config holds the key/value settings persisted by writeConfig.
var config = make(map[string]string)
// hmac256 returns the lowercase hex encoding of the HMAC-SHA256 digest of
// message, keyed with secret.
func hmac256(message string, secret string) string {
	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write([]byte(message))
	digest := mac.Sum(nil)
	return hex.EncodeToString(digest)
}
// getTpm performs an authenticated GET against the TPM API. The request is
// signed with an HMAC-SHA256 of uri+timestamp using the configured private
// key. Fatal on any request-construction or transport error.
func getTpm(uri string) *http.Response {
	tr := &http.Transport{
		// NOTE(review): certificate verification is disabled here;
		// confirm this is intentional for the target deployment.
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	// Keep the timestamp in its own name instead of shadowing the time
	// package (the original declared a variable named `time`).
	timestamp := strconv.FormatInt(time.Now().Unix(), 10)
	hash := hmac256(uri+timestamp, viper.GetString("privkey"))
	req, err := http.NewRequest("GET", "https://"+viper.GetString("domain")+"/index.php/"+uri, nil)
	if err != nil {
		// BUGFIX: previously ignored — a malformed URL left req nil and
		// client.Do would panic.
		log.Fatal(err)
	}
	req.Header.Add("X-Public-Key", viper.GetString("pubkey"))
	req.Header.Add("X-Request-Hash", hash)
	req.Header.Add("X-Request-Timestamp", timestamp)
	req.Header.Add("Content-Type", "application/json; charset=utf-8")
	client := &http.Client{Transport: tr}
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	return resp
}
// postTpm performs an authenticated POST against the TPM API with the
// given JSON payload. The signature covers uri+timestamp+payload. Fatal on
// any request-construction or transport error.
func postTpm(uri string, payload []byte) *http.Response {
	tr := &http.Transport{
		// NOTE(review): certificate verification is disabled here;
		// confirm this is intentional for the target deployment.
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	// Keep the timestamp in its own name instead of shadowing the time
	// package (the original declared a variable named `time`).
	timestamp := strconv.FormatInt(time.Now().Unix(), 10)
	hash := hmac256(uri+timestamp+string(payload), viper.GetString("privkey"))
	req, err := http.NewRequest("POST", "https://"+viper.GetString("domain")+"/index.php/"+uri, bytes.NewBuffer(payload))
	if err != nil {
		// BUGFIX: previously ignored — a malformed URL left req nil and
		// client.Do would panic.
		log.Fatal(err)
	}
	req.Header.Add("X-Public-Key", viper.GetString("pubkey"))
	req.Header.Add("X-Request-Hash", hash)
	req.Header.Add("X-Request-Timestamp", timestamp)
	req.Header.Add("Content-Type", "application/json; charset=utf-8")
	client := &http.Client{Transport: tr}
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	return resp
}
// putTpm performs an authenticated PUT against the TPM API with the given
// JSON payload. The signature covers uri+timestamp+payload. Fatal on any
// request-construction or transport error.
func putTpm(uri string, payload []byte) *http.Response {
	tr := &http.Transport{
		// NOTE(review): certificate verification is disabled here;
		// confirm this is intentional for the target deployment.
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	// Keep the timestamp in its own name instead of shadowing the time
	// package (the original declared a variable named `time`).
	timestamp := strconv.FormatInt(time.Now().Unix(), 10)
	hash := hmac256(uri+timestamp+string(payload), viper.GetString("privkey"))
	req, err := http.NewRequest("PUT", "https://"+viper.GetString("domain")+"/index.php/"+uri, bytes.NewBuffer(payload))
	if err != nil {
		// BUGFIX: previously ignored — a malformed URL left req nil and
		// client.Do would panic.
		log.Fatal(err)
	}
	req.Header.Add("X-Public-Key", viper.GetString("pubkey"))
	req.Header.Add("X-Request-Hash", hash)
	req.Header.Add("X-Request-Timestamp", timestamp)
	req.Header.Add("Content-Type", "application/json; charset=utf-8")
	client := &http.Client{Transport: tr}
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	return resp
}
// deleteTpm performs an authenticated DELETE against the TPM API. The
// signature covers uri+timestamp. Fatal on any request-construction or
// transport error.
func deleteTpm(uri string) *http.Response {
	tr := &http.Transport{
		// NOTE(review): certificate verification is disabled here;
		// confirm this is intentional for the target deployment.
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	// Keep the timestamp in its own name instead of shadowing the time
	// package (the original declared a variable named `time`).
	timestamp := strconv.FormatInt(time.Now().Unix(), 10)
	hash := hmac256(uri+timestamp, viper.GetString("privkey"))
	req, err := http.NewRequest("DELETE", "https://"+viper.GetString("domain")+"/index.php/"+uri, nil)
	if err != nil {
		// BUGFIX: previously ignored — a malformed URL left req nil and
		// client.Do would panic.
		log.Fatal(err)
	}
	req.Header.Add("X-Public-Key", viper.GetString("pubkey"))
	req.Header.Add("X-Request-Hash", hash)
	req.Header.Add("X-Request-Timestamp", timestamp)
	req.Header.Add("Content-Type", "application/json; charset=utf-8")
	client := &http.Client{Transport: tr}
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	return resp
}
// writeConfig serializes the in-memory config map as indented JSON to
// ~/.tpm.json (mode 0600: the file holds API keys) and reports the path.
// Fatal on any failure.
func writeConfig() {
	usr, err := user.Current()
	if err != nil {
		log.Fatal(err)
	}
	configFile := usr.HomeDir + "/.tpm.json"
	configJson, err := json.MarshalIndent(config, "", " ")
	if err != nil {
		// BUGFIX: previously only printed the error and fell through to
		// WriteFile, clobbering the config file with empty content.
		log.Fatal(err)
	}
	err = ioutil.WriteFile(configFile, configJson, 0600)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Written to", configFile)
}
|
package utils
import "math/rand"
// Random returns a pseudo-random int in [0, max). It returns 0 when
// max <= 0. (The previous float-scaling formula was slightly non-uniform,
// could return negative values for negative max, and its comment
// mis-stated the range as "> max".)
func Random(max int) int {
	if max <= 0 {
		return 0
	}
	// rand.Intn gives an exactly uniform draw; it panics for max <= 0,
	// hence the guard above.
	return rand.Intn(max)
}
|
package src
import "github.com/gomodule/redigo/redis"
// Config holds the tool's runtime settings: the source and target Redis
// addresses with their passwords, the output destination, and a count.
type Config struct {
	Source         string
	SourcePassword string
	Target         string
	TargetPassword string
	Output         string
	Count          int
}
// defaultRedisOpts assembles the standard dial options for a Redis
// connection: an optional password plus the shared connect/read/write
// timeouts.
func defaultRedisOpts(password string) []redis.DialOption {
	opts := make([]redis.DialOption, 0, 4)
	if password != "" {
		opts = append(opts, redis.DialPassword(password))
	}
	opts = append(opts,
		redis.DialConnectTimeout(TIMEOUT),
		redis.DialReadTimeout(TIMEOUT),
		redis.DialWriteTimeout(TIMEOUT),
	)
	return opts
}
|
package main
import (
"context"
"fmt"
"io"
"log"
"time"
"google.golang.org/grpc"
client "github.com/notsu/grpc-playground/01-basic/ping-service/proto"
)
var (
	// method selects which RPC demo main runs; edit it to exercise a
	// different call pattern (sayHello, lotsOfReplies, lotsOfGreetings,
	// bidiHello).
	method = "lotsOfReplies"
)
// main dials the pong service and runs the RPC demo selected by the
// package-level method variable.
func main() {
	fmt.Println("Run ping-service")
	ctx := context.Background()
	conn, err := grpc.Dial("pong:9000", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("did not connect: %s", err)
	}
	defer conn.Close()
	greeter := client.NewGreeterClient(conn)
	switch method {
	case "sayHello":
		sayHello(ctx, greeter)
	case "lotsOfReplies":
		lotsOfReplies(ctx, greeter)
	case "lotsOfGreetings":
		lotsOfGreetings(ctx, greeter)
	case "bidiHello":
		bidiHello(ctx, greeter)
	}
}
// sayHello performs the unary SayHello RPC and logs the reply message.
func sayHello(ctx context.Context, c client.GreeterClient) {
	resp, err := c.SayHello(ctx, &client.HelloRequest{Name: "pong"})
	if err != nil {
		log.Fatalf("Error when calling SayHello: %s", err)
	}
	log.Printf("Response from server: %s", resp.Message)
}
// lotsOfReplies performs the server-streaming LotsOfReplies RPC, logging
// every message until the server closes the stream (io.EOF).
func lotsOfReplies(ctx context.Context, c client.GreeterClient) {
	finished := make(chan bool)
	stream, err := c.LotsOfReplies(ctx, &client.HelloRequest{Name: "pong"})
	if err != nil {
		log.Fatalf("Error when connect to the server: %s", err)
	}
	// Drain the stream in a goroutine; signal finished on EOF.
	go func(s client.Greeter_LotsOfRepliesClient) {
		for {
			msg, recvErr := s.Recv()
			if recvErr == io.EOF {
				finished <- true
				return
			}
			if recvErr != nil {
				log.Fatalf("Error when receive a message: %s", recvErr)
			}
			log.Printf("Receive a message: %s", msg.Message)
		}
	}(stream)
	<-finished
	log.Println("Done!")
}
// lotsOfGreetings performs the client-streaming LotsOfGreetings RPC:
// it sends nine numbered requests and logs the single aggregated reply.
func lotsOfGreetings(ctx context.Context, c client.GreeterClient) {
	stream, err := c.LotsOfGreetings(ctx)
	if err != nil {
		log.Fatalf("Error when connect to the server: %s", err)
	}
	for i := 1; i < 10; i++ {
		if err := stream.Send(&client.HelloRequest{
			Name: fmt.Sprintf("Request number %d", i),
		}); err != nil {
			// The original ignored Send errors. Per gRPC convention the
			// real status is surfaced by CloseAndRecv, so stop sending
			// and fall through to it.
			break
		}
	}
	resp, err := stream.CloseAndRecv()
	if err != nil {
		// resp is nil on error; returning here fixes the nil pointer
		// dereference the original hit by logging resp.Message anyway.
		log.Printf("Failed to close and receive: %s", err)
		return
	}
	log.Printf("Receive a response: %s", resp.Message)
	log.Println("Done!")
}
// bidiHello performs the bidirectional BidiHello RPC: one goroutine logs
// every received message, another sends a numbered request every three
// seconds. The function returns once either side observes io.EOF.
func bidiHello(ctx context.Context, c client.GreeterClient) {
	done := make(chan bool)
	stream, err := c.BidiHello(ctx)
	if err != nil {
		log.Fatalf("Error when connect to the server: %s", err)
	}
	// Receiver: drain incoming messages until EOF.
	go func(stream client.Greeter_BidiHelloClient) {
		for {
			msg, err := stream.Recv()
			if err == io.EOF {
				done <- true
				return
			}
			if err != nil {
				log.Fatalf("Error when receive a message: %s", err)
			}
			log.Printf("Receive a message: %s", msg.Message)
		}
	}(stream)
	// Sender: emit one numbered request every three seconds.
	go func(stream client.Greeter_BidiHelloClient) {
		i := 1
		for {
			msg := fmt.Sprintf("Request number %d", i)
			err := stream.Send(&client.HelloRequest{
				Name: msg,
			})
			if err == io.EOF {
				done <- true
				return
			}
			if err != nil {
				// Fixed: the original logged "Error when receive a
				// message" here, which is misleading for a send failure.
				log.Fatalf("Error when sending a message: %s", err)
			}
			log.Printf("Send a message: %s", msg)
			i++
			time.Sleep(3 * time.Second)
		}
	}(stream)
	<-done
	log.Println("Done!")
}
|
package controllers
import (
"github.com/astaxie/beego"
"smartapp/helper"
"strings"
)
// PluginController is the base controller for plugin pages; it embeds
// BaseController and adds plugin-aware template resolution (see Show).
type PluginController struct {
	BaseController
}
// Show renders the template for the current plugin controller action.
// The template path is derived from the calling controller's source-file
// location under /plugins/... plus the current action name.
//
// NOTE(review): assumes the caller's file path contains "/plugins/" and
// "/controller/"; the SplitN index accesses below panic otherwise — confirm.
// NOTE(review): the tplName variadic parameter is currently unused.
func (c *PluginController) Show(tplName ...string) {
	//c.TplName=tplName
	// Locate the source file of the calling controller, one stack frame up.
	file := helper.GetControllerStackFile(1)
	// Strip everything up to /plugins/ and the trailing .go, then split
	// into plugin name and controller path.
	file = (strings.SplitN(file, "/plugins/", 2))[1]
	file = (strings.SplitN(file, ".go", 2))[0]
	files := strings.SplitN(file, "/controller/", 2)
	beego.Debug("当前url1", c.Ctx.Input.URL(), "当前文件:", file)
	_, act := c.GetControllerAndAction()
	if c.TplName == "" {
		// Default template: plugins/<plugin>/<controller-path>/<action>.<ext>
		c.TplName = "plugins/" + strings.ToLower(files[0]) + "/" + strings.ToLower(files[1]) + "/" + strings.ToLower(act) + "." + c.TplExt
		//c.TplName= "/plugins/test"+strings.TrimLeft(c.TplName,)
	} else if !strings.Contains(c.TplName, "plugins/") {
		// An explicit template outside plugins/ is re-rooted under the plugin.
		c.TplName = "plugins/" + strings.ToLower(files[0]) + "/" + strings.TrimLeft(c.TplName, "/")
	}
	_ = c.Render()
	return
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package utility
import (
"strings"
"github.com/mattermost/mattermost-cloud/model"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// nodeProblemDetector manages the node-problem-detector Helm utility for
// a cluster: it tracks the desired and actually-deployed chart versions.
type nodeProblemDetector struct {
	kubeconfigPath string                    // path to the cluster kubeconfig; required (see validate)
	logger         log.FieldLogger           // logger tagged with the utility name
	desiredVersion *model.HelmUtilityVersion // version we want deployed
	actualVersion  *model.HelmUtilityVersion // version currently deployed (nil if unknown)
}
// newNodeProblemDetectorOrUnmanagedHandle returns an unmanaged handle when
// the utility is marked unmanaged for this cluster; otherwise it returns a
// validated nodeProblemDetector handle.
func newNodeProblemDetectorOrUnmanagedHandle(cluster *model.Cluster, kubeconfigPath string, logger log.FieldLogger) (Utility, error) {
	desired := cluster.DesiredUtilityVersion(model.NodeProblemDetectorCanonicalName)
	actual := cluster.ActualUtilityVersion(model.NodeProblemDetectorCanonicalName)
	if model.UtilityIsUnmanaged(desired, actual) {
		return newUnmanagedHandle(model.NodeProblemDetectorCanonicalName, logger), nil
	}
	handle := newNodeProblemDetectorHandle(desired, cluster, kubeconfigPath, logger)
	if err := handle.validate(); err != nil {
		return nil, errors.Wrap(err, "node problem detector utility config is invalid")
	}
	return handle, nil
}
// newNodeProblemDetectorHandle builds an unvalidated handle from the
// desired version plus the cluster's recorded actual version.
func newNodeProblemDetectorHandle(desiredVersion *model.HelmUtilityVersion, cluster *model.Cluster, kubeconfigPath string, logger log.FieldLogger) *nodeProblemDetector {
	return &nodeProblemDetector{
		kubeconfigPath: kubeconfigPath,
		logger:         logger.WithField("cluster-utility", model.NodeProblemDetectorCanonicalName),
		desiredVersion: desiredVersion,
		actualVersion:  cluster.UtilityMetadata.ActualVersions.NodeProblemDetector,
	}
}
// validate checks that the handle has the configuration it needs to run.
func (n *nodeProblemDetector) validate() error {
	if n.kubeconfigPath == "" {
		return errors.New("kubeconfig path cannot be empty")
	}
	return nil
}
// Destroy deletes the node-problem-detector Helm release from the cluster.
func (n *nodeProblemDetector) Destroy() error {
	helm := n.newHelmDeployment(n.logger)
	return helm.Delete()
}
// Migrate is a no-op for this utility; nothing needs migrating.
func (n *nodeProblemDetector) Migrate() error {
	return nil
}
// CreateOrUpgrade installs or upgrades the node-problem-detector Helm
// release and then records the version that was actually deployed.
func (n *nodeProblemDetector) CreateOrUpgrade() error {
	logger := n.logger.WithField("node-problem-detector-action", "upgrade")
	h := n.newHelmDeployment(logger)
	// Idiom: scoped error check plus a direct return instead of the
	// original `err = ...; return err` tail.
	if err := h.Update(); err != nil {
		return err
	}
	return n.updateVersion(h)
}
// DesiredVersion returns the version this utility should be running.
func (n *nodeProblemDetector) DesiredVersion() *model.HelmUtilityVersion {
	return n.desiredVersion
}
// ActualVersion returns the currently-deployed version with the chart
// name prefix ("node-problem-detector-") stripped, or nil when unknown.
func (n *nodeProblemDetector) ActualVersion() *model.HelmUtilityVersion {
	if n.actualVersion == nil {
		return nil
	}
	return &model.HelmUtilityVersion{
		Chart:      strings.TrimPrefix(n.actualVersion.Version(), "node-problem-detector-"),
		ValuesPath: n.actualVersion.Values(),
	}
}
// Name returns the canonical utility name.
func (n *nodeProblemDetector) Name() string {
	return model.NodeProblemDetectorCanonicalName
}
// newHelmDeployment constructs the Helm deployment descriptor for the
// deliveryhero/node-problem-detector chart in its dedicated namespace.
func (n *nodeProblemDetector) newHelmDeployment(logger log.FieldLogger) *helmDeployment {
	return newHelmDeployment(
		"deliveryhero/node-problem-detector",
		"node-problem-detector",
		"node-problem-detector",
		n.kubeconfigPath,
		n.desiredVersion,
		defaultHelmDeploymentSetArgument,
		logger,
	)
}
// ValuesPath returns the Helm values file path of the desired version,
// or "" when no desired version is set.
func (n *nodeProblemDetector) ValuesPath() string {
	if n.desiredVersion == nil {
		return ""
	}
	return n.desiredVersion.Values()
}
// updateVersion refreshes the cached actual version from the deployed
// Helm release.
func (n *nodeProblemDetector) updateVersion(h *helmDeployment) error {
	version, err := h.Version()
	if err != nil {
		return err
	}
	n.actualVersion = version
	return nil
}
|
package utils
import (
"bufio"
"os"
"strconv"
)
// ReadLines reads the named file and returns its contents as a slice of
// lines. Open and scan errors abort via Check.
func ReadLines(filename string) []string {
	file, err := os.Open(filename)
	Check(err)
	defer file.Close()
	var lines []string
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		lines = append(lines, scanner.Text())
	}
	Check(scanner.Err())
	return lines
}
// ToInt converts s to an int, aborting via Check on parse failure.
func ToInt(s string) int {
	result, err := strconv.Atoi(s)
	Check(err)
	return result
}
// ToInt64 converts a base-10 string to int64, aborting via Check on
// parse failure.
func ToInt64(s string) int64 {
	result, err := strconv.ParseInt(s, 10, 64)
	Check(err)
	return result
}
|
package service
import (
"fmt"
"log"
"sync"
"time"
dto "../dto"
m "../model"
repo "../repository"
u "../utils"
)
// EmailService provides scheduling, sending and listing of emails.
type EmailService struct{}

// Emails is the shared repository of emails keyed by sender email id.
var Emails = repo.Emails

// isSendGridActive flags SendGrid availability.
// NOTE(review): never read or written in the visible code — confirm use.
var isSendGridActive = true

// mu guards concurrent access to the shared Emails map.
var mu sync.Mutex
// SendEmail validates the DTO, sends the email immediately when its
// scheduled time has already passed (SendGrid first, Amazon SES as a
// fallback) and records it in the in-memory repository. It returns a
// response map describing the outcome.
func (emailService *EmailService) SendEmail(emailDTO dto.EmailDTO) map[string]interface{} {
	if resp, ok := emailService.Validate(emailDTO); !ok {
		return resp
	}
	emailID := Sender
	content := make([]m.Content, len(emailDTO.Content))
	for i, c := range emailDTO.Content {
		content[i] = m.Content{Type: c.Type, Value: c.Value}
	}
	email := m.Email{
		From:          emailID,
		To:            emailDTO.To,
		Cc:            emailDTO.Cc,
		Bcc:           emailDTO.Bcc,
		Subject:       emailDTO.Subject,
		Content:       content,
		Status:        m.SCHEDULED,
		ScheduledTime: emailDTO.ScheduledTime,
	}
	resp := u.Message(true, m.SCHEDULED)
	// Send right away only if the email is due now.
	if verifyTime(email) {
		// Try SendGrid first. (The original wrapped this in an
		// always-true conditional; flattened here.)
		isEmailToBeSentUsingSendGrid := true
		res, err := SendEmailUsingSendGridServer(email)
		if err != nil || res == 400 {
			log.Printf("Could not use Send Grid server hence using Amazon Email Service %v", err)
			isEmailToBeSentUsingSendGrid = false
		}
		if res == 202 {
			email.Status = m.SENT
			resp = u.Message(true, m.SENT)
		}
		// Send email using Amazon SES if Send Grid has failed to deliver.
		if !isEmailToBeSentUsingSendGrid {
			awsRes, awsErr := SendEmailUsingAmazonSES(email)
			if awsErr != nil {
				resp = u.Message(true, m.FAILED)
				email.Status = m.FAILED
			} else if awsRes != nil {
				resp = u.Message(true, m.SENT)
				email.Status = m.SENT
			}
		}
	}
	// Fix: the read-modify-write of the shared map must happen entirely
	// under the lock; the original locked only the final write, so the
	// unguarded read raced concurrent senders and could drop emails.
	mu.Lock()
	Emails[emailID] = append(Emails[emailID], email)
	mu.Unlock()
	return resp
}
// GetEmails returns all emails recorded for the configured Sender.
func (emailService *EmailService) GetEmails() []m.Email {
	// Fix: reads of the shared map must hold the same lock SendEmail
	// uses for writes; the original read without synchronization.
	mu.Lock()
	defer mu.Unlock()
	return Emails[Sender]
}
// taskWithParams prints its two arguments to stdout.
// NOTE(review): not called anywhere in the visible code — confirm use.
func taskWithParams(a int, b string) {
	fmt.Println(a, b)
}
// Validate checks that the DTO carries every required field. It returns
// a response map plus false when validation fails, true otherwise.
func (emailService *EmailService) Validate(emailDTO dto.EmailDTO) (map[string]interface{}, bool) {
	if len(emailDTO.To) == 0 {
		// Fix: corrected the "Recepient" spelling in the error message.
		return u.Message(false, "Recipient email id is empty in the payload"), false
	}
	// All the required parameters are present.
	return u.Message(true, "success"), true
}
// SendScheduledEmails walks the recorded emails for Sender and sends
// every one whose scheduled time has passed, updating its status in
// place (SendGrid first, Amazon SES as a fallback).
//
// NOTE(review): the return value was unconditionally false in the
// original and is preserved as-is in case callers depend on it.
func (emailService *EmailService) SendScheduledEmails() bool {
	// Fix: snapshot the slice header under the lock; the original read
	// the shared map with no synchronization, racing SendEmail's writes.
	// The element updates below still target the shared backing array,
	// matching the original design.
	mu.Lock()
	emails := Emails[Sender]
	mu.Unlock()
	for i := range emails {
		email := emails[i]
		// Only emails that are due and still SCHEDULED are sent.
		if !verifyTime(email) {
			continue
		}
		// Try SendGrid first. (The original wrapped this in an
		// always-true conditional; flattened here.)
		sentViaSendGrid := true
		res, err := SendEmailUsingSendGridServer(email)
		if err != nil || res == 400 {
			log.Printf("Could not use Send Grid server hence using Amazon Email Service %v", err)
			sentViaSendGrid = false
		}
		if res == 202 {
			email.Status = m.SENT
		}
		// Send email using Amazon SES if Send Grid has failed to deliver.
		if !sentViaSendGrid {
			awsRes, awsErr := SendEmailUsingAmazonSES(email)
			if awsErr != nil {
				email.Status = m.FAILED
			} else if awsRes != nil {
				email.Status = m.SENT
			}
		}
		emails[i] = email
	}
	return false
}
// verifyTime reports whether the email is still SCHEDULED and its
// scheduled time (layout "02 Jan 06 15:04 MST") has already passed.
func verifyTime(email m.Email) bool {
	layout := "02 Jan 06 15:04 MST"
	emailTime, err := time.Parse(layout, email.ScheduledTime)
	if err != nil {
		// Fix: the original ignored parse errors, so a malformed
		// timestamp yielded the zero time and made the email look due
		// immediately. Treat unparseable schedules as not due.
		return false
	}
	return emailTime.Before(time.Now().UTC()) && email.Status == m.SCHEDULED
}
|
package migrate
import (
"github.com/bendrucker/terraform-cloud-migrate/configwrite"
"github.com/hashicorp/hcl/v2"
)
// New builds a Migration for the Terraform configuration at path,
// assembling the rewrite steps implied by config.
func New(path string, config Config) (*Migration, hcl.Diagnostics) {
	writer, diags := configwrite.New(path)
	baseSteps := configwrite.Steps{
		&configwrite.RemoteBackend{Config: config.Backend},
		&configwrite.TerraformWorkspace{Variable: config.WorkspaceVariable},
		&configwrite.Tfvars{Filename: configwrite.TfvarsFilename},
	}
	steps := configwrite.NewSteps(writer, baseSteps)
	if config.ModulesDir != "" {
		// Modules under ModulesDir also need their remote state rewritten.
		remote := &configwrite.RemoteState{
			RemoteBackend: config.Backend,
			Path:          config.ModulesDir,
		}
		remote.WithWriter(writer)
		steps = steps.Append(remote)
	}
	return &Migration{steps}, diags
}
// Migration bundles the ordered configwrite steps of a migration run.
type Migration struct {
	steps configwrite.Steps
}
// Changes computes the file changes produced by all migration steps.
func (m *Migration) Changes() (configwrite.Changes, hcl.Diagnostics) {
	return m.steps.Changes()
}
|
package gorm
import (
"github.com/porter-dev/porter/internal/models"
ints "github.com/porter-dev/porter/internal/models/integrations"
"gorm.io/gorm"
)
// AutoMigrate creates or updates the database schema for every core
// model and every integration model registered by the application.
func AutoMigrate(db *gorm.DB) error {
	return db.AutoMigrate(
		&models.Project{},
		&models.Role{},
		&models.User{},
		&models.Release{},
		&models.Session{},
		&models.GitRepo{},
		&models.Registry{},
		&models.HelmRepo{},
		&models.Cluster{},
		&models.ClusterCandidate{},
		&models.ClusterResolver{},
		&models.Infra{},
		&models.GitActionConfig{},
		&models.Invite{},
		&models.AuthCode{},
		&models.DNSRecord{},
		&models.PWResetToken{},
		&models.NotificationConfig{},
		&ints.KubeIntegration{},
		&ints.BasicIntegration{},
		&ints.OIDCIntegration{},
		&ints.OAuthIntegration{},
		&ints.GCPIntegration{},
		&ints.AWSIntegration{},
		&ints.TokenCache{},
		&ints.ClusterTokenCache{},
		&ints.RegTokenCache{},
		&ints.HelmRepoTokenCache{},
		&ints.GithubAppInstallation{},
		&ints.GithubAppOAuthIntegration{},
		&ints.SlackIntegration{},
	)
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql_test
import (
"context"
gosql "database/sql"
"testing"
"github.com/cockroachdb/cockroach/pkg/sql/tests"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
// TestCommentOnIndex verifies that COMMENT ON INDEX stores a comment,
// that the comment survives TRUNCATE, and that setting it to NULL
// removes it (all observed via pg_class/obj_description).
func TestCommentOnIndex(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	params, _ := tests.CreateTestServerParams()
	s, db, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop(context.Background())
	if _, err := db.Exec(`
CREATE DATABASE d;
SET DATABASE = d;
CREATE TABLE t (c INT, INDEX t_c_idx (c));
`); err != nil {
		t.Fatal(err)
	}
	// Each case runs a statement, then checks the comment visible for
	// the index. Cases run in order and build on one another.
	testCases := []struct {
		exec   string
		query  string
		expect gosql.NullString
	}{
		{
			`COMMENT ON INDEX t_c_idx IS 'index_comment'`,
			`SELECT obj_description(oid) from pg_class WHERE relname='t_c_idx';`,
			gosql.NullString{String: `index_comment`, Valid: true},
		},
		{
			// TRUNCATE must not lose the index comment.
			`TRUNCATE t`,
			`SELECT obj_description(oid) from pg_class WHERE relname='t_c_idx';`,
			gosql.NullString{String: `index_comment`, Valid: true},
		},
		{
			// Setting the comment to NULL clears it.
			`COMMENT ON INDEX t_c_idx IS NULL`,
			`SELECT obj_description(oid) from pg_class WHERE relname='t_c_idx';`,
			gosql.NullString{Valid: false},
		},
	}
	for _, tc := range testCases {
		if _, err := db.Exec(tc.exec); err != nil {
			t.Fatal(err)
		}
		row := db.QueryRow(tc.query)
		var comment gosql.NullString
		if err := row.Scan(&comment); err != nil {
			t.Fatal(err)
		}
		if tc.expect != comment {
			t.Fatalf("expected comment %v, got %v", tc.expect, comment)
		}
	}
}
// TestCommentOnIndexWhenDropTable verifies that dropping a table also
// removes any comment stored for its indexes in system.comments.
func TestCommentOnIndexWhenDropTable(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	params, _ := tests.CreateTestServerParams()
	s, db, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop(context.Background())
	if _, err := db.Exec(`
CREATE DATABASE d;
SET DATABASE = d;
CREATE TABLE t (c INT, INDEX t_c_idx (c));
`); err != nil {
		t.Fatal(err)
	}
	if _, err := db.Exec(`COMMENT ON INDEX t_c_idx IS 'index_comment'`); err != nil {
		t.Fatal(err)
	}
	if _, err := db.Exec(`DROP TABLE t`); err != nil {
		t.Fatal(err)
	}
	// After the drop, system.comments must be empty: expect ErrNoRows.
	row := db.QueryRow(`SELECT comment FROM system.comments LIMIT 1`)
	var comment string
	err := row.Scan(&comment)
	if !errors.Is(err, gosql.ErrNoRows) {
		if err != nil {
			t.Fatal(err)
		}
		t.Fatal("comment remain")
	}
}
// TestCommentOnIndexWhenDropIndex verifies that dropping an index also
// removes its comment from system.comments.
func TestCommentOnIndexWhenDropIndex(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	params, _ := tests.CreateTestServerParams()
	s, db, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop(context.Background())
	if _, err := db.Exec(`
CREATE DATABASE d;
SET DATABASE = d;
CREATE TABLE t (c INT, INDEX t_c_idx (c));
`); err != nil {
		t.Fatal(err)
	}
	if _, err := db.Exec(`COMMENT ON INDEX t_c_idx IS 'index_comment'`); err != nil {
		t.Fatal(err)
	}
	if _, err := db.Exec(`DROP INDEX t_c_idx`); err != nil {
		t.Fatal(err)
	}
	// After the drop, system.comments must be empty: expect ErrNoRows.
	row := db.QueryRow(`SELECT comment FROM system.comments LIMIT 1`)
	var comment string
	err := row.Scan(&comment)
	if !errors.Is(err, gosql.ErrNoRows) {
		if err != nil {
			t.Fatal(err)
		}
		t.Fatal("comment remain")
	}
}
|
// Copyright 2021 The Perses Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"github.com/perses/common/app"
"github.com/perses/perses/internal/api/core"
"github.com/perses/perses/internal/api/core/middleware"
"github.com/perses/perses/internal/api/front"
"github.com/perses/perses/internal/api/shared/dependency"
"github.com/perses/perses/internal/config"
"github.com/sirupsen/logrus"
)
// banner is the ASCII-art startup banner passed to the runner via
// SetBanner. NOTE(review): the %s placeholder is presumably filled by
// the runner's banner formatting — confirm against the app package.
const banner = `
 ______
| ___ \
| |_/ /__ _ __ ___ ___ ___
| __/ _ \ '__/ __|/ _ \/ __|
| | | __/ | \__ \ __/\__ \
\_| \___|_| |___/\___||___/ %s
All your monitoring dashboards in one place. <\
\\
--------------==========================================>|||<*>//////]
//
</
`
// main wires up and starts the Perses API server: it resolves the
// configuration, builds the persistence and service managers, registers
// the API and frontend, and runs the HTTP server.
func main() {
	configFile := flag.String("config", "", "Path to the YAML configuration file for the API. Configuration settings can be overridden when using environment variables.")
	dbFolder := flag.String("db.folder", "", "Path to the folder to use as a database. In case the flag is not used, Perses requires a connection to etcd.")
	dbExtension := flag.String("db.extension", "yaml", "The extension of the file to read and use when creating a file. Valid values: 'yaml' or 'json'.")
	flag.Parse()
	// load the config from file or/and from environment
	conf, err := config.Resolve(*configFile, *dbFolder, *dbExtension)
	if err != nil {
		logrus.WithError(err).Fatalf("error reading configuration from file %q or from environment", *configFile)
	}
	persistenceManager, err := dependency.NewPersistenceManager(conf.Database)
	if err != nil {
		logrus.WithError(err).Fatal("unable to instantiate the persistence manager")
	}
	serviceManager := dependency.NewServiceManager(persistenceManager)
	persesAPI := core.NewPersesAPI(serviceManager)
	persesFrontend := front.NewPersesFrontend()
	runner := app.NewRunner().WithDefaultHTTPServer("perses").SetBanner(banner)
	// register the API
	runner.HTTPServerBuilder().
		APIRegistration(persesAPI).
		APIRegistration(persesFrontend).
		// Proxy middleware forwards datasource requests to their targets.
		Middleware(middleware.Proxy(persistenceManager.GetDatasource(), persistenceManager.GetGlobalDatasource()))
	// start the application
	runner.Start()
}
|
package controllers
import (
"net/http"
"github.com/astaxie/beego"
"github.com/gorilla/websocket"
models "../models"
)
// https://github.com/gorilla/websocket
// http://learn.javascript.ru/websockets
// WSController serves WebSocket connections for chat-style clients.
type WSController struct {
	beego.Controller
}
// upgrader upgrades plain HTTP requests to WebSocket connections.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	// Allow socket connections from any origin.
	// NOTE(review): this disables origin checking entirely, which permits
	// cross-site WebSocket connections — confirm this is intended.
	CheckOrigin: func(r *http.Request) bool { return true },
}
// Get upgrades the request to a WebSocket, registers the new client with
// the hub, starts its write pump and then blocks reading messages.
func (w *WSController) Get() {
	// Upgrade the HTTP connection to a WebSocket.
	ws, err := upgrader.Upgrade(w.Ctx.ResponseWriter, w.Ctx.Request, nil)
	if _, ok := err.(websocket.HandshakeError); ok {
		http.Error(w.Ctx.ResponseWriter, "Not a websocket handshake", 400)
		return
	} else if err != nil {
		beego.Error(err.Error())
		return
	}
	// Register the new client with the hub.
	c := &models.Client{
		Send: make(chan []byte, 512),
		Ws:   ws,
	}
	models.H.Register <- c
	// Writes run in their own goroutine; reads block this handler.
	go c.WritePump()
	c.ReadPump()
}
|
package lc49
// 字母异位词分组
// https://leetcode-cn.com/problems/group-anagrams/
import "sort"
// GroupAnagrams groups strings that are anagrams of each other
// (https://leetcode-cn.com/problems/group-anagrams/).
// Each string's runes are sorted to form a canonical key; strings
// sharing a key belong to the same group. Group order follows Go's map
// iteration order and is therefore unspecified.
// Time O(n * k log k), space O(n * k), with k the maximum string length.
func GroupAnagrams(strs []string) [][]string {
	stringMap := map[string][]string{}
	for _, s := range strs {
		cArray := []rune(s)
		sort.SliceStable(cArray, func(i, j int) bool {
			return cArray[i] < cArray[j]
		})
		key := string(cArray)
		// append on a missing (nil) entry allocates a new slice, so the
		// original's explicit existence check was redundant.
		stringMap[key] = append(stringMap[key], s)
	}
	// Pre-size the result to the number of groups.
	result := make([][]string, 0, len(stringMap))
	for _, val := range stringMap {
		result = append(result, val)
	}
	return result
}
|
package config
import (
"fmt"
"github.com/fsnotify/fsnotify"
"github.com/spf13/viper"
)
// Config is the root application configuration, unmarshalled from the
// JSON config file by this package's init.
type Config struct {
	MysqlAdmin       MysqlAdmin       // MySQL admin database settings
	CourseConfig     CourseConfig     // course asset settings
	Qiniu            Qiniu            // Qiniu storage credentials
	CasbinConfig     CasbinConfig     // Casbin access-control settings
	RedisAdmin       RedisAdmin       // Redis admin settings
	System           System           // runtime environment settings
	Mongodb          Mongodb          // primary MongoDB connection
	Tomongodb        Tomongodb        // secondary MongoDB connection
	Testmongodb      Testmongodb      // test MongoDB connection
	JWT              JWT              // JWT signing settings
	NatsConfig       NatsConfig       // NATS connection settings
	LiveCourseConfig LiveCourseConfig // live-course streaming settings
	Tencent          Tencent          // Tencent cloud credentials
	UploadConfig     UploadConfig     // upload path settings
}
// System holds runtime environment settings.
type System struct {
	UseMultipoint bool
	Env           string
}

// JWT holds token-signing settings.
type JWT struct {
	SigningKey string
}

// CasbinConfig holds Casbin access-control settings.
type CasbinConfig struct {
	ModelPath string // path to the casbin model file
}

// MysqlAdmin holds the MySQL admin database configuration.
type MysqlAdmin struct {
	Username string
	Password string
	Path     string
	Dbname   string
	Config   string
}

// RedisAdmin holds the Redis admin database configuration.
type RedisAdmin struct {
	Addr     string
	Password string
	DB       int
}

// Qiniu holds the Qiniu storage key configuration.
type Qiniu struct {
	AccessKey string
	SecretKey string
}

// Tencent holds Tencent cloud credentials and callback settings.
type Tencent struct {
	SecretId                  string
	SecretKey                 string
	Region                    string
	SdkAppId                  int64
	SdkAppKey                 string
	AppId                     int64
	BizId                     int64
	CallbackKey               string
	TranscodeFileCallbackUrl  string
	TranscodeVideoCallbackUrl string
	OwnerAccount              string
}

// Mongodb holds the primary MongoDB connection settings.
type Mongodb struct {
	Hosts       string
	User        string
	Passwd      string
	PoolLimit   uint64
	ReadReferer string
	ReplicaSet  string
}

// Tomongodb holds the secondary MongoDB connection settings.
type Tomongodb struct {
	Hosts       string
	User        string
	Passwd      string
	PoolLimit   uint64
	ReadReferer string
	ReplicaSet  string
}

// Testmongodb holds the test MongoDB connection settings.
type Testmongodb struct {
	Hosts       string
	User        string
	Passwd      string
	PoolLimit   uint64
	ReadReferer string
	ReplicaSet  string
}

// CourseConfig holds course-related settings.
type CourseConfig struct {
	AssetsUrl string
}

// UploadConfig holds upload path settings.
type UploadConfig struct {
	UploadAssets string
}

// NatsConfig holds NATS connection settings.
type NatsConfig struct {
	Hosts string
}

// LiveCourseConfig holds live-course streaming settings.
type LiveCourseConfig struct {
	LivePushDomain    string
	LivePushDomainKey string
	LivePullDomain    string
	LivePullDomainKey string
}
// GinVueAdminconfig is the global configuration, populated by init.
var GinVueAdminconfig Config

// init loads static/config/config.json into GinVueAdminconfig and
// watches the file for changes.
func init() {
	v := viper.New()
	v.SetConfigName("config")           // config file name (without extension)
	v.AddConfigPath("./static/config/") // first search path
	v.SetConfigType("json")
	err := v.ReadInConfig() // search the path and read the config data
	if err != nil {
		panic(fmt.Errorf("Fatal error config file: %s \n", err))
	}
	v.WatchConfig()
	v.OnConfigChange(func(e fsnotify.Event) {
		fmt.Println("Config file changed:", e.Name)
	})
	// NOTE(review): an unmarshal error is only printed, leaving the
	// config zero-valued — confirm this best-effort behavior is intended.
	if err := v.Unmarshal(&GinVueAdminconfig); err != nil {
		fmt.Println(err)
	}
}
|
/*
* Copyright (c) 2020. Uriel Márquez All Rights Reserved
* https://umarquez.c0d3.mx
*/
package day3
import (
"strings"
)
// Vector is a 2D integer pair used as both a position and a step offset.
type Vector [2]int

// Add returns the component-wise sum of v and vec.
func (v Vector) Add(vec Vector) Vector {
	x := v[0] + vec[0]
	y := v[1] + vec[1]
	return Vector{x, y}
}
// CountTreesUntilTheBottom counts '#' cells hit while moving from start
// by steps (x, y) until y runs past the last row. The x coordinate wraps
// around because the pattern repeats horizontally.
func CountTreesUntilTheBottom(pattern string, start Vector, steps Vector) int {
	position := start
	rows := strings.Split(pattern, "\n")
	trees := 0
	for position[1] < len(rows) {
		row := rows[position[1]]
		if len(row) == 0 {
			// Fix: a blank line (e.g. from a trailing newline in the
			// input) previously caused an index panic and a mod-by-zero;
			// skip it instead.
			position = position.Add(steps)
			continue
		}
		if row[position[0]] == '#' {
			trees++
		}
		position = position.Add(steps)
		// Wrap x; adding len(row) before the modulo keeps the result
		// non-negative even for leftward (negative x) steps.
		position[0] = (position[0] + len(row)) % len(row)
	}
	return trees
}
// MultiplyTreesOf runs CountTreesUntilTheBottom once per slope in steps
// and returns the product of the resulting tree counts.
func MultiplyTreesOf(pattern string, start Vector, steps []Vector) int {
	total := 1
	for _, step := range steps {
		total *= CountTreesUntilTheBottom(pattern, start, step)
	}
	return total
}
|
package book
// Volume represents a book that consists of chapters.
type Volume struct {
	// Chapters of the volume.
	Chapters []Chapter `json:"chapters"`
	// Metadata carries book-level information (see the Metadata type).
	Metadata Metadata `json:"metadata"`
}
|
package utils
import (
"fmt"
"regexp"
"strconv"
"strings"
)
// sizeRe matches a positive integer (no leading zero) followed by a
// decimal size unit, e.g. "10KB". Compiled once at package scope instead
// of on every call.
var sizeRe = regexp.MustCompile("^([1-9][0-9]*)(B|KB|MB|GB)$")

// FromTextSize parses a human-readable size such as "10 KB" or "1gb"
// (case-insensitive, spaces ignored) into bytes, using decimal units
// (1 KB = 1000 B).
func FromTextSize(size string) (int64, error) {
	size = strings.Replace(strings.TrimSpace(strings.ToUpper(size)), " ", "", -1)
	strs := sizeRe.FindStringSubmatch(size)
	if len(strs) != 3 {
		return 0, fmt.Errorf("text doesn't contain a valid size")
	}
	num, err := strconv.ParseInt(strs[1], 10, 64)
	if err != nil {
		return 0, fmt.Errorf("error converting string to int: %s", err)
	}
	// Decimal multipliers; built only after validation succeeds. The
	// original's int64(num) conversion was redundant — num is int64.
	multipliers := map[string]int64{
		"B":  1,
		"KB": 1000,
		"MB": 1000000,
		"GB": 1000000000,
	}
	return num * multipliers[strs[2]], nil
}
|
package files
import (
"github.com/gin-gonic/gin"
"github.com/sunil-bansiwal/file_download_manager/model/downloads"
"net/http"
)
// GetDownloadedFiles responds with the ids of all downloaded files as a
// single JSON array.
func GetDownloadedFiles(c *gin.Context) {
	fileDB := downloads.FilesDB
	// Fix: the original called c.JSON once per map entry, emitting
	// several concatenated JSON documents (and status lines) in one HTTP
	// response. Collect the ids and write exactly one JSON body instead.
	ids := make([]interface{}, 0, len(fileDB))
	for id := range fileDB {
		ids = append(ids, id)
	}
	c.JSON(http.StatusOK, ids)
}
|
package employee
import "fmt"
// Employee holds basic personal data for an employee.
type Employee struct {
	FirstName string
	LastName  string
	Age       int
	// Gender as a boolean flag.
	// NOTE(review): which boolean value maps to which gender is not
	// defined anywhere visible — confirm and document the convention.
	Gender bool
}

// ToString prints the employee's fields to stdout on a single line.
func (e Employee) ToString() {
	fmt.Printf("Employee: {firstName:%s, lastName:%s, age:%d, gender:%v}\n", e.FirstName, e.LastName, e.Age, e.Gender)
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package nearbyfixture
import (
"context"
"strconv"
"time"
nearbycommon "chromiumos/tast/common/cros/nearbyshare"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/crossdevice"
"chromiumos/tast/testing"
)
// mediums selects which online transfer media Nearby Share may use
// (see the switch in NewNearbyShareLogin).
type mediums int

const (
	// defaultMediums leaves the medium feature flags untouched.
	defaultMediums mediums = iota
	// webRTCAndWLAN enables both the WebRTC and WLAN upgrade mediums.
	webRTCAndWLAN
	// webRTCOnly enables WebRTC and disables WLAN.
	webRTCOnly
	// wlanOnly enables WLAN and disables WebRTC.
	wlanOnly
)
// NewNearbyShareLogin creates a fixture that logs in and enables Nearby Share.
// Note that nearbyShareGAIALogin inherits from nearbyShareAndroidSetup.
func NewNearbyShareLogin(arcEnabled, backgroundScanningEnabled, useAndroidAccount bool, m mediums) testing.FixtureImpl {
	// Baseline Chrome options: GWP-ASan heap diagnostics, SplitSettingsSync
	// disabled, and verbose Nearby Share logging/certificate flags.
	defaultNearbyOpts := []chrome.Option{
		chrome.EnableFeatures("GwpAsanMalloc", "GwpAsanPartitionAlloc"),
		chrome.DisableFeatures("SplitSettingsSync"),
		chrome.ExtraArgs("--nearby-share-certificate-validity-period-hours=4", "--nearby-share-num-private-certificates=1", "--nearby-share-verbose-logging", "--enable-logging", "--vmodule=*blue*=1", "--vmodule=*nearby*=1"),
	}
	if arcEnabled {
		// Enable ARC with its sync flags disabled plus ARC Nearby sharing.
		defaultNearbyOpts = append(defaultNearbyOpts, chrome.ARCEnabled(), chrome.EnableFeatures("ArcNearbySharing"), chrome.ExtraArgs(arc.DisableSyncFlags()...))
	}
	if backgroundScanningEnabled {
		defaultNearbyOpts = append(defaultNearbyOpts, chrome.EnableFeatures("BluetoothAdvertisementMonitoring"),
			chrome.EnableFeatures("NearbySharingBackgroundScanning"))
	}
	// Select the permitted online transfer mediums.
	switch m {
	case webRTCAndWLAN:
		defaultNearbyOpts = append(defaultNearbyOpts, chrome.EnableFeatures("NearbySharingWebRtc"), chrome.EnableFeatures("NearbySharingWifiLan"))
	case webRTCOnly:
		defaultNearbyOpts = append(defaultNearbyOpts, chrome.EnableFeatures("NearbySharingWebRtc"), chrome.DisableFeatures("NearbySharingWifiLan"))
	case wlanOnly:
		defaultNearbyOpts = append(defaultNearbyOpts, chrome.DisableFeatures("NearbySharingWebRtc"), chrome.EnableFeatures("NearbySharingWifiLan"))
	}
	// NOTE(review): backgroundScanningEnabled influences only the option
	// list and is not stored on the fixture struct — confirm that is
	// intended.
	return &nearbyShareLoginFixture{
		opts:              defaultNearbyOpts,
		arcEnabled:        arcEnabled,
		useAndroidAccount: useAndroidAccount,
	}
}
func init() {
const (
// These are the default GAIA credentials that will be used to sign in on CrOS. Use the optional "custom" vars below to specify you'd like to specify your own credentials while running locally on personal devices.
defaultCrOSUsername = "nearbyshare.cros_username"
defaultCrOSPassword = "nearbyshare.cros_password"
// These vars can be used from the command line when running tests locally to configure the tests to run on personal GAIA accounts.
// Use these vars to log in with your own GAIA credentials. If running in-contacts tests with an Android device, it is expected that the CrOS user and Android user are already mutual contacts.
customCrOSUsername = "cros_username"
customCrOSPassword = "cros_password"
// Set this var to True to prevent the tests from clearing existing user accounts from the DUT.
keepState = nearbycommon.KeepStateVar
)
// Basic login fixtures for general CrOS<->Android sharing. The Android account for these fixtures uses the modulefood version of Nearby Share.
testing.AddFixture(&testing.Fixture{
Name: "nearbyShareGAIALogin",
Desc: "CrOS login with GAIA and Nearby Share flags enabled",
Contacts: []string{
"chromeos-sw-engprod@google.com",
},
Parent: "nearbyShareAndroidSetup",
Impl: NewNearbyShareLogin(false, false, false, defaultMediums),
Vars: []string{
defaultCrOSUsername,
defaultCrOSPassword,
customCrOSUsername,
customCrOSPassword,
defaultAndroidUsername,
defaultAndroidPassword,
keepState,
},
SetUpTimeout: 3 * time.Minute,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: resetTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "nearbyShareGAIALoginAndroidAccount",
Desc: "CrOS login with Android nearby share account and Nearby Share enabled",
Contacts: []string{
"chromeos-sw-engprod@google.com",
},
Parent: "nearbyShareAndroidSetup",
Impl: NewNearbyShareLogin(false, false, true, defaultMediums),
Vars: []string{
defaultCrOSUsername,
defaultCrOSPassword,
customCrOSUsername,
customCrOSPassword,
defaultAndroidUsername,
defaultAndroidPassword,
keepState,
},
SetUpTimeout: 3 * time.Minute,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: resetTimeout,
})
// Basic login fixtures for general CrOS<->Android sharing. The Android account for these fixtures uses the production version of Nearby Share.
testing.AddFixture(&testing.Fixture{
Name: "nearbyShareGAIALoginProd",
Desc: "CrOS login with GAIA and Nearby Share flags enabled",
Contacts: []string{
"chromeos-sw-engprod@google.com",
},
Parent: "nearbyShareAndroidSetupProd",
Impl: NewNearbyShareLogin(false, false, false, defaultMediums),
Vars: []string{
defaultCrOSUsername,
defaultCrOSPassword,
customCrOSUsername,
customCrOSPassword,
prodAndroidUsername,
prodAndroidPassword,
keepState,
},
SetUpTimeout: 3 * time.Minute,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: resetTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "nearbyShareGAIALoginAndroidAccountProd",
Desc: "CrOS login with Android nearby share account and Nearby Share enabled",
Contacts: []string{
"chromeos-sw-engprod@google.com",
},
Parent: "nearbyShareAndroidSetupProd",
Impl: NewNearbyShareLogin(false, false, true, defaultMediums),
Vars: []string{
defaultCrOSUsername,
defaultCrOSPassword,
customCrOSUsername,
customCrOSPassword,
prodAndroidUsername,
prodAndroidPassword,
keepState,
},
SetUpTimeout: 3 * time.Minute,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: resetTimeout,
})
// Basic login fixtures for general CrOS<->Android sharing. The Android account for these fixtures uses the dev version of Nearby Share.
testing.AddFixture(&testing.Fixture{
Name: "nearbyShareGAIALoginDev",
Desc: "CrOS login with GAIA and Nearby Share flags enabled",
Contacts: []string{
"chromeos-sw-engprod@google.com",
},
Parent: "nearbyShareAndroidSetupDev",
Impl: NewNearbyShareLogin(false, false, false, defaultMediums),
Vars: []string{
defaultCrOSUsername,
defaultCrOSPassword,
customCrOSUsername,
customCrOSPassword,
devAndroidUsername,
devAndroidPassword,
keepState,
},
SetUpTimeout: 3 * time.Minute,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: resetTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "nearbyShareGAIALoginAndroidAccountDev",
Desc: "CrOS login with Android nearby share account and Nearby Share enabled",
Contacts: []string{
"chromeos-sw-engprod@google.com",
},
Parent: "nearbyShareAndroidSetupDev",
Impl: NewNearbyShareLogin(false, false, true, defaultMediums),
Vars: []string{
defaultCrOSUsername,
defaultCrOSPassword,
customCrOSUsername,
customCrOSPassword,
devAndroidUsername,
devAndroidPassword,
keepState,
},
SetUpTimeout: 3 * time.Minute,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: resetTimeout,
})
// Fixture for testing shares initiated via background scanning.
testing.AddFixture(&testing.Fixture{
Name: "nearbyShareGAIALoginBackgroundScanningEnabled",
Desc: "CrOS login with GAIA; Nearby Share and Background scanning flags enabled",
Contacts: []string{
"chromeos-sw-engprod@google.com",
},
Parent: "nearbyShareAndroidSetup",
Impl: NewNearbyShareLogin(false, true, false, defaultMediums),
Vars: []string{
defaultCrOSUsername,
defaultCrOSPassword,
customCrOSUsername,
customCrOSPassword,
defaultAndroidUsername,
defaultAndroidPassword,
keepState,
},
SetUpTimeout: 3 * time.Minute,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: resetTimeout,
})
// Fixture for testing shares initiated from the ARC sharesheet.
testing.AddFixture(&testing.Fixture{
Name: "nearbyShareGAIALoginARCEnabled",
Desc: "CrOS login with GAIA, Nearby Share flags enabled, and ARC enabled",
Contacts: []string{
"chromeos-sw-engprod@google.com",
"arc-app-dev@google.com",
},
Parent: "nearbyShareAndroidSetup",
Impl: NewNearbyShareLogin(true, false, false, defaultMediums),
Vars: []string{
defaultCrOSUsername,
defaultCrOSPassword,
customCrOSUsername,
customCrOSPassword,
defaultAndroidUsername,
defaultAndroidPassword,
keepState,
},
SetUpTimeout: 3 * time.Minute,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: resetTimeout,
})
// Fixtures for testing different online transfer media (WebRTC and WLAN).
testing.AddFixture(&testing.Fixture{
Name: "nearbyShareGAIALoginWebRTCAndWLAN",
Desc: "CrOS login with GAIA; use WebRTC and WLAN upgrade mediums",
Contacts: []string{
"chromeos-sw-engprod@google.com",
},
Parent: "nearbyShareAndroidSetup",
Impl: NewNearbyShareLogin(false, false, false, webRTCAndWLAN),
Vars: []string{
defaultCrOSUsername,
defaultCrOSPassword,
customCrOSUsername,
customCrOSPassword,
keepState,
},
SetUpTimeout: 3 * time.Minute,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: resetTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "nearbyShareGAIALoginWebRTCOnly",
Desc: "CrOS login with GAIA; only use WebRTC upgrade medium",
Contacts: []string{
"chromeos-sw-engprod@google.com",
},
Parent: "nearbyShareAndroidSetup",
Impl: NewNearbyShareLogin(false, false, false, webRTCOnly),
Vars: []string{
defaultCrOSUsername,
defaultCrOSPassword,
customCrOSUsername,
customCrOSPassword,
keepState,
},
SetUpTimeout: 3 * time.Minute,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: resetTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "nearbyShareGAIALoginWLANOnly",
Desc: "CrOS login with GAIA; only use WLAN upgrade medium",
Contacts: []string{
"chromeos-sw-engprod@google.com",
},
Parent: "nearbyShareAndroidSetup",
Impl: NewNearbyShareLogin(false, false, false, wlanOnly),
Vars: []string{
defaultCrOSUsername,
defaultCrOSPassword,
customCrOSUsername,
customCrOSPassword,
keepState,
},
SetUpTimeout: 3 * time.Minute,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: resetTimeout,
})
}
// nearbyShareLoginFixture implements the CrOS-side Nearby Share login fixtures.
// SetUp starts a Chrome session with GAIA credentials (optionally also booting
// ARC) and re-validates connectivity to the Android device from the parent fixture.
type nearbyShareLoginFixture struct {
	opts []chrome.Option // Chrome options accumulated before chrome.New is called in SetUp
	cr *chrome.Chrome // active Chrome session owned by this fixture
	arcEnabled bool // when true, SetUp also starts ARC
	arc *arc.ARC // ARC handle when arcEnabled; nil otherwise
	useAndroidAccount bool // log CrOS in with the Android phone's GAIA account
}
// SetUp logs Chrome in with GAIA credentials — preferring user-supplied vars,
// then the Android phone's account (when useAndroidAccount is set), then the
// default credentials — optionally starts ARC, re-establishes adb/snippet
// connectivity, and returns a *FixtData for dependent fixtures and tests.
func (f *nearbyShareLoginFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
	// Android device info from parent fixture
	androidDevice := s.ParentValue().(*FixtData).AndroidDevice
	androidDeviceName := s.ParentValue().(*FixtData).AndroidDeviceName
	androidUsername := s.ParentValue().(*FixtData).AndroidUsername
	loggedIn := s.ParentValue().(*FixtData).AndroidLoggedIn
	// Re-establish adb if the connection dropped since the parent fixture ran.
	if err := androidDevice.IsConnected(ctx); err != nil {
		s.Log("Android device is no longer reachable via adb. Reconnecting")
		adbDevice, _, err := crossdevice.AdbSetup(ctx)
		if err != nil {
			s.Fatal("Failed to reconnect to adb device: ", err)
		}
		androidDevice.SetADBDevice(ctx, adbDevice)
	}
	// Allocate time for saving logs in case of failure.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()
	// Save logcat so we have Android logs even if fixture setup fails.
	defer androidDevice.DumpLogs(cleanupCtx, s.OutDir(), "fixture_setup_logcat.txt")
	// Default GAIA credentials; may be overridden below.
	crosUsername := s.RequiredVar("nearbyshare.cros_username")
	crosPassword := s.RequiredVar("nearbyshare.cros_password")
	customUser, userOk := s.Var("cros_username")
	customPass, passOk := s.Var("cros_password")
	if userOk && passOk {
		s.Log("Logging in with user-provided credentials")
		crosUsername = customUser
		crosPassword = customPass
	} else if f.useAndroidAccount {
		// Logging in on the same account as the Phone ensures that certificates are distributed to the CrOS device. Android prepends the logged in account to contacts. This works around the delay in syncing contacts from contacts.google.com to the Phones local address book, causing the CrOS device to fail during discovery because it is not able to dec.
		s.Log("Logging in with Android GAIA credentials")
		// Pick credentials matching the Nearby channel the phone is on.
		switch s.ParentValue().(*FixtData).AndroidNearbyChannel {
		case modulefood:
			crosUsername = s.RequiredVar(defaultAndroidUsername)
			crosPassword = s.RequiredVar(defaultAndroidPassword)
		case prod:
			crosUsername = s.RequiredVar(prodAndroidUsername)
			crosPassword = s.RequiredVar(prodAndroidPassword)
		case dev:
			crosUsername = s.RequiredVar(devAndroidUsername)
			crosPassword = s.RequiredVar(devAndroidPassword)
		}
	} else {
		s.Log("Logging in with default GAIA credentials")
	}
	f.opts = append(f.opts, chrome.GAIALogin(chrome.Creds{User: crosUsername, Pass: crosPassword}))
	// Optionally preserve existing on-device state across login.
	if val, ok := s.Var(nearbycommon.KeepStateVar); ok {
		b, err := strconv.ParseBool(val)
		if err != nil {
			s.Fatalf("Unable to convert %v var to bool: %v", nearbycommon.KeepStateVar, err)
		}
		if b {
			f.opts = append(f.opts, chrome.KeepState())
		}
	}
	cr, err := chrome.New(
		ctx,
		f.opts...,
	)
	if err != nil {
		s.Fatal("Failed to start Chrome: ", err)
	}
	f.cr = cr
	// Starting ARC restarts ADB, which kills the connection to the snippet.
	// Starting it here (before we check the connection and attempt a reconnect) will ensure the snippet connection is up.
	if f.arcEnabled {
		a, err := arc.New(ctx, s.OutDir())
		if err != nil {
			s.Fatal("Failed to start ARC: ", err)
		}
		f.arc = a
	}
	// Sometimes during login the tcp connection to the snippet server and/or adb is lost.
	// Check we can still connect to the adb device.
	if err := androidDevice.IsConnected(ctx); err != nil {
		s.Log("Android device is no longer reachable via adb. Reconnecting")
		adbDevice, _, err := crossdevice.AdbSetup(ctx)
		if err != nil {
			s.Fatal("Failed to reconnect to adb device: ", err)
		}
		androidDevice.SetADBDevice(ctx, adbDevice)
	}
	// If we cannot do a simple snippet rpc call, reconnect to the snippet server.
	if _, err := androidDevice.GetNearbySharingVersion(ctx); err != nil {
		s.Log("Lost connection to the Snippet server. Reconnecting")
		if err := androidDevice.ReconnectToSnippet(ctx); err != nil {
			s.Fatal("Failed to reconnect to the snippet server: ", err)
		}
	}
	// Lock chrome after all Setup is complete so we don't block other fixtures.
	chrome.Lock()
	return &FixtData{
		Chrome: cr,
		CrOSUsername: crosUsername,
		AndroidDevice: androidDevice,
		AndroidDeviceName: androidDeviceName,
		AndroidUsername: androidUsername,
		AndroidLoggedIn: loggedIn,
		ARC: f.arc,
	}
}
// TearDown releases the Chrome lock and shuts down the Chrome session and,
// when running, ARC.
func (f *nearbyShareLoginFixture) TearDown(ctx context.Context, s *testing.FixtState) {
	chrome.Unlock()
	err := f.cr.Close(ctx)
	if err != nil {
		s.Log("Failed to close Chrome connection: ", err)
	}
	f.cr = nil
	if a := f.arc; a != nil {
		a.Close(ctx)
		f.arc = nil
	}
}
// Reset verifies between tests that the existing Chrome session is still
// responsive and restores it to a clean state.
func (f *nearbyShareLoginFixture) Reset(ctx context.Context) error {
	err := f.cr.Responded(ctx)
	if err != nil {
		return errors.Wrap(err, "existing Chrome connection is unusable")
	}
	if err = f.cr.ResetState(ctx); err != nil {
		return errors.Wrap(err, "failed resetting existing Chrome session")
	}
	return nil
}
// PreTest redirects ARC log collection to the current test's output
// directory before each test (no-op when ARC is disabled).
func (f *nearbyShareLoginFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
	if f.arcEnabled {
		if err := f.arc.ResetOutDir(ctx, s.OutDir()); err != nil {
			// Message fixed: original read "Failed to to reset".
			s.Error("Failed to reset outDir field of ARC object: ", err)
		}
	}
}
// PostTest saves ARC-related log files after each test (no-op when ARC is
// disabled).
func (f *nearbyShareLoginFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
	if f.arcEnabled {
		if err := f.arc.SaveLogFiles(ctx); err != nil {
			// Message fixed: original read "Failed to to save".
			s.Error("Failed to save ARC-related log files: ", err)
		}
	}
}
|
package cafe
import "internetCafe/tourist"
// Computer struct defines a computer from the internet cafe
type Computer struct {
	// User occupying the computer; nil when the computer is free
	User *tourist.Tourist
}
// IsFree tells if a computer is currently used by a tourist.
// It reports true when no tourist occupies the computer.
func (c *Computer) IsFree() bool {
	return c.User == nil
}
|
package services
// presetStack maps a preset name to the ordered list of service
// constructors registered for that preset.
var presetStack = map[string][]serviceConstructor{
	"blockchain": {
		NewDiscoveryService,
		NewHeartbeatService,
		NewBlockChainService,
	},
}
|
package lc
import "github.com/phea/leetcode-go/types"
// Time: O(n)
// Benchmark: 108ms 7.5mb | 90%
// walk appends the values of the tree rooted at node to stack via an
// in-order traversal (left, value, right).
func walk(node *types.TreeNode, stack *[]int) {
	if node != nil {
		walk(node.Left, stack)
		*stack = append(*stack, node.Val)
		walk(node.Right, stack)
	}
}
// getAllElements returns the values of both BSTs merged into a single
// ascending slice: each tree is flattened in-order, then the two sorted
// sequences are merged two-pointer style.
func getAllElements(root1 *types.TreeNode, root2 *types.TreeNode) []int {
	left, right := []int{}, []int{}
	walk(root1, &left)
	walk(root2, &right)
	merged := make([]int, len(left)+len(right))
	i, j := 0, 0
	for k := range merged {
		switch {
		case i < len(left) && j < len(right):
			if left[i] <= right[j] {
				merged[k] = left[i]
				i++
			} else {
				merged[k] = right[j]
				j++
			}
		case i < len(left):
			merged[k] = left[i]
			i++
		default:
			merged[k] = right[j]
			j++
		}
	}
	return merged
}
|
package addsubcommands
import (
"fmt"
"os"
snmpsimclient "github.com/inexio/snmpsim-restapi-go-client"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// UserToEngineCmd represents the userToEngine command
var UserToEngineCmd = &cobra.Command{
	Use:   "user-to-engine",
	Args:  cobra.ExactArgs(0),
	Short: "Adds an user to an engine",
	Long:  `Adds the user with a given user-id to the engine with the given engine-id.`,
	Run: func(cmd *cobra.Command, args []string) {
		// Load the client data from the config.
		baseURL := viper.GetString("mgmt.http.baseUrl")
		username := viper.GetString("mgmt.http.authUsername")
		password := viper.GetString("mgmt.http.authPassword")
		// Create a new client. Attach err to each log event so the root
		// cause is not lost (the original logged only static messages).
		client, err := snmpsimclient.NewManagementClient(baseURL)
		if err != nil {
			log.Error().
				Err(err).
				Msg("Error while creating management client")
			os.Exit(1)
		}
		err = client.SetUsernameAndPassword(username, password)
		if err != nil {
			log.Error().
				Err(err).
				Msg("Error while setting username and password")
			os.Exit(1)
		}
		// Read in the engine-id.
		engineID, err := cmd.Flags().GetInt("engine")
		if err != nil {
			log.Error().
				Err(err).
				Msg("Error while retrieving engine-id")
			os.Exit(1)
		}
		// Read in the user-id.
		userID, err := cmd.Flags().GetInt("user")
		if err != nil {
			log.Error().
				Err(err).
				Msg("Error while retrieving user-id")
			os.Exit(1)
		}
		// Add the user to the engine.
		err = client.AddUserToEngine(engineID, userID)
		if err != nil {
			log.Error().
				Err(err).
				Msg("Error while adding user to engine")
			os.Exit(1)
		}
		fmt.Println("User", userID, "has been added to engine", engineID)
	},
}
// init registers the required --user and --engine integer flags on
// UserToEngineCmd.
func init() {
	// Set user flag.
	UserToEngineCmd.Flags().Int("user", 0, "Id of the user that is to be added to the engine")
	err := UserToEngineCmd.MarkFlagRequired("user")
	if err != nil {
		// Include err so the underlying cobra failure is visible.
		log.Error().
			Err(err).
			Msg("Could not mark 'user' flag required")
		os.Exit(1)
	}
	// Set engine flag.
	UserToEngineCmd.Flags().Int("engine", 0, "Id of the engine to that the user will be added")
	err = UserToEngineCmd.MarkFlagRequired("engine")
	if err != nil {
		log.Error().
			Err(err).
			Msg("Could not mark 'engine' flag required")
		os.Exit(1)
	}
}
|
package main
import (
	"bufio"
	"fmt"
	"os"
	"sort"
	"strconv"
	"strings"
)
// Rule is an inclusive range [low, high] of accepted values.
type Rule [2]int

// Validate reports whether v lies within the rule's inclusive bounds.
func (r *Rule) Validate(v int) bool {
	if v < r[0] {
		return false
	}
	return v <= r[1]
}

// Rules is a set of ranges; a value is accepted if any range contains it.
type Rules []Rule

// Validate reports whether at least one rule in the set accepts v.
func (r *Rules) Validate(v int) bool {
	for i := range *r {
		rule := (*r)[i]
		if rule.Validate(v) {
			return true
		}
	}
	return false
}
// Ticket is one ticket's field values, in column order.
type Ticket []int

// Data is the parsed puzzle input: named rules, our own ticket, the raw
// nearby tickets, and (after FindValidTickets) the valid subset of them.
type Data struct {
	Rules map[string]Rules // rule name -> accepted ranges
	Ticket Ticket // our own ticket
	NearbyTicket []Ticket // all nearby tickets as parsed
	NearbyTicketValid []Ticket // cache filled by FindValidTickets
}
// ValidateRule reports whether v is accepted by at least one named rule.
func (d *Data) ValidateRule(v int) bool {
	for _, rules := range d.Rules {
		if !rules.Validate(v) {
			continue
		}
		return true
	}
	return false
}
// FirstInvalidTicketValue returns (value, false) for the first value of t
// that no rule accepts; if every value is valid it returns (0, true).
func (d *Data) FirstInvalidTicketValue(t Ticket) (int, bool) {
	for _, v := range t {
		if d.ValidateRule(v) {
			continue
		}
		return v, false
	}
	return 0, true
}
// CheckCompleteInvalidTicket reports whether every value of t fails all
// rules. An empty ticket vacuously qualifies, matching the original
// count-based logic.
func (d *Data) CheckCompleteInvalidTicket(t Ticket) bool {
	for _, tv := range t {
		if d.ValidateRule(tv) {
			return false
		}
	}
	return true
}
// CheckCompleteValidTicket reports whether every value of t is accepted
// by at least one rule. An empty ticket vacuously qualifies.
func (d *Data) CheckCompleteValidTicket(t Ticket) bool {
	for _, tv := range t {
		if !d.ValidateRule(tv) {
			return false
		}
	}
	return true
}
// FindFirstRuleName returns the name of a rule accepting v, or "" if none.
// Names are scanned in sorted order so the result is deterministic; the
// original ranged directly over the map, returning an arbitrary match.
func (d *Data) FindFirstRuleName(v int) string {
	names := make([]string, 0, len(d.Rules))
	for n := range d.Rules {
		names = append(names, n)
	}
	sort.Strings(names)
	for _, n := range names {
		// Copy to a local so the pointer-receiver Validate can be called.
		r := d.Rules[n]
		if r.Validate(v) {
			return n
		}
	}
	return ""
}
// FindRuleNames returns a map with an entry (value 1) for every rule
// that accepts v.
func (d *Data) FindRuleNames(v int) map[string]int {
	matches := map[string]int{}
	for name, rule := range d.Rules {
		if !rule.Validate(v) {
			continue
		}
		matches[name]++
	}
	return matches
}
// FindTicketRuleName returns the rule name matching the most values of t.
// NOTE(review): ties are broken arbitrarily (map iteration order), exactly
// as in the original implementation.
func (d *Data) FindTicketRuleName(t Ticket) string {
	counts := map[string]int{}
	for _, v := range t {
		for name, rule := range d.Rules {
			if rule.Validate(v) {
				counts[name]++
			}
		}
	}
	best, bestCount := "", 0
	for name, c := range counts {
		if c > bestCount {
			best, bestCount = name, c
		}
	}
	return best
}
// FindNamesForIdx returns the rule names that accept column idx of every
// valid nearby ticket, each mapped to its match count (equal to the number
// of valid tickets for every surviving entry).
// Side effect: calls FindValidTickets, which refreshes d.NearbyTicketValid.
// The original also built a per-ticket map mm that was never read; that
// dead code is removed here.
func (d *Data) FindNamesForIdx(idx int) map[string]int {
	m := map[string]int{}
	tt := d.FindValidTickets()
	for _, v := range tt {
		for n, r := range d.Rules {
			if r.Validate(v[idx]) {
				m[n]++
			}
		}
	}
	// Keep only rules that matched this column on every valid ticket.
	// (Deleting entries while ranging over a map is safe in Go.)
	for n, c := range m {
		if c != len(tt) {
			delete(m, n)
		}
	}
	return m
}
// FindCompleteInvalidIndex returns the index of the first nearby ticket
// whose values all fail every rule, or -1 when no such ticket exists.
func (d *Data) FindCompleteInvalidIndex() int {
	for idx := range d.NearbyTicket {
		if d.CheckCompleteInvalidTicket(d.NearbyTicket[idx]) {
			return idx
		}
	}
	return -1
}
// FindCompleteValidIndex returns the index of the first nearby ticket
// whose values are all accepted by some rule, or -1 when none exists.
func (d *Data) FindCompleteValidIndex() int {
	for idx := range d.NearbyTicket {
		if d.CheckCompleteValidTicket(d.NearbyTicket[idx]) {
			return idx
		}
	}
	return -1
}
// FindValidTickets filters NearbyTicket down to tickets whose values all
// satisfy at least one rule, caches the result in NearbyTicketValid, and
// returns it.
func (d *Data) FindValidTickets() []Ticket {
	valid := make([]Ticket, 0, len(d.NearbyTicket))
	for _, t := range d.NearbyTicket {
		if _, ok := d.FirstInvalidTicketValue(t); ok {
			valid = append(valid, t)
		}
	}
	d.NearbyTicketValid = valid
	return valid
}
// parseFile reads an input file with three blank-line separated sections:
// rules ("name: a-b or c-d"), our own ticket, and nearby tickets
// (comma-separated integers), and returns the parsed Data.
// Exits the process if the file cannot be opened; the original ignored the
// os.Open error and would have panicked on the first Scan, and also never
// closed the file.
func parseFile(filename string) *Data {
	data := &Data{
		Rules:        map[string]Rules{},
		Ticket:       Ticket{},
		NearbyTicket: []Ticket{},
	}
	prev := ""
	file, err := os.Open(filename)
	if err != nil {
		fmt.Fprintf(os.Stderr, "cannot open %s: %v\n", filename, err)
		os.Exit(1)
	}
	defer file.Close()
	fscanner := bufio.NewScanner(file)
	step := 0 // 0 = rules, 1 = own ticket, >=2 = nearby tickets
	for fscanner.Scan() {
		s := fscanner.Text()
		if s == "" || s == "\n" {
			// Blank line separates the sections.
			step++
			continue
		}
		p := strings.Split(s, ":")
		value := ""
		if len(p) == 1 {
			value = p[0]
		} else if len(p[1]) == 0 {
			// Header such as "your ticket:" — remember the label, no data.
			prev = p[0]
			continue
		} else {
			prev = p[0]
			value = p[1]
		}
		if step == 0 {
			// Rule line: "<name>: <a>-<b> or <c>-<d>".
			for _, v := range strings.Split(strings.TrimSpace(value), " or ") {
				pv := strings.Split(v, "-")
				start, _ := strconv.Atoi(pv[0]) // input assumed well-formed
				end, _ := strconv.Atoi(pv[1])
				if _, ok := data.Rules[prev]; !ok {
					data.Rules[prev] = Rules{}
				}
				data.Rules[prev] = append(data.Rules[prev], Rule{start, end})
			}
		} else if step == 1 {
			for _, vs := range strings.Split(value, ",") {
				i, _ := strconv.Atoi(vs)
				data.Ticket = append(data.Ticket, i)
			}
		} else {
			v := Ticket{}
			for _, vs := range strings.Split(value, ",") {
				i, _ := strconv.Atoi(vs)
				v = append(v, i)
			}
			data.NearbyTicket = append(data.NearbyTicket, v)
		}
	}
	return data
}
// processMain1 prints the parsed data and the ticket scanning error rate:
// the sum over nearby tickets of the first value that satisfies no rule.
func processMain1(data *Data) {
	fmt.Println("----------")
	fmt.Printf("%#v\n", data)
	s := 0
	for _, t := range data.NearbyTicket {
		v, ok := data.FirstInvalidTicketValue(t)
		// ok is true when the whole ticket is valid, in which case v is 0.
		// Only invalid values contribute; the original summed on ok==true,
		// which always added zero and produced a total of 0.
		if !ok {
			fmt.Printf("%#v: %d\n", t, v)
			s += v
		}
	}
	fmt.Printf("Total: %d\n", s) // fixed output typo "Toal"
}
// main1 runs part 1 against the example input, then the real input.
func main1() {
	processMain1(parseFile("list.test.txt"))
	processMain1(parseFile("list.txt"))
}
|
package main
import "fmt"
var number = [] int {1, 2, 3, 4, 5, 6}
func main() {
// i := 0
// for i < 6 {
// fmt.Println(number[i])
// i++
// }
// for i := 0; i < 6; i++ {
// fmt.Println(number[i])
// }
for i,x := range number {
fmt.Println("第", i, "个数为", x)
}
} |
package main
import (
"flag"
"fmt"
"github.com/emersion/go-imap"
"github.com/emersion/go-imap/client"
_ "github.com/emersion/go-message/charset"
"github.com/emersion/go-message/mail"
"io"
"io/ioutil"
"log"
"os"
"regexp"
"strconv"
)
// init configures the standard logger to prefix entries with the default
// date/time flags plus the full file path and line number.
func init() {
	log.SetFlags(log.LstdFlags | log.Llongfile)
}
// Command-line configuration, populated by flag parsing in main.
var (
	server, email, password string // IMAP server address (host:port), account name, password
	workernum int // requested concurrency; parsed and validated but not otherwise used in the visible code
)
// PutData appends email as one CSV record (one address per line) to
// ./data.csv, creating the file if needed. Errors are returned to the
// caller instead of terminating the process, matching the signature.
func PutData(email string) (err error) {
	const csvFile = "./data.csv"
	// O_WRONLY is required: without an access-mode flag the file is opened
	// read-only (flag value 0 == O_RDONLY) and the Write below fails.
	f, err := os.OpenFile(csvFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0755)
	if err != nil {
		return fmt.Errorf("opening %s: %w", csvFile, err)
	}
	defer func() {
		// Surface the close error unless a write error already occurred.
		if cerr := f.Close(); cerr != nil && err == nil {
			err = fmt.Errorf("closing %s: %w", csvFile, cerr)
		}
	}()
	if _, err = f.Write([]byte(email + "\n")); err != nil {
		return fmt.Errorf("writing record: %w", err)
	}
	return nil
}
func main() {
//获取命令行参数
flag.StringVar(&server, "server", "", "imap服务地址(包含端口)")
flag.StringVar(&email, "email", "", "邮箱名")
flag.StringVar(&password, "password", "", "密码")
flag.IntVar(&workernum, "workernum", 32, "并发数:")
flag.Parse()
if flag.NFlag() < 3 {
flag.PrintDefaults()
log.Fatal("参数缺失")
}
if server == "" || email == "" || password == "" || workernum == 0 {
log.Fatal("服务器地址,用户名,密码,参数错误")
}
//连接imap服务
imapClient, err := client.Dial(server)
if err != nil {
log.Fatal(err)
}
//登陆
if err := imapClient.Login(email, password); err != nil {
log.Fatal(err)
}
// Select INBOX
mbox, err := imapClient.Select("INBOX", false)
if err != nil {
log.Fatal(err)
}
// Get the last message
if mbox.Messages == 0 {
log.Fatal("No message in mailbox")
}
seqSet := new(imap.SeqSet)
seqSet.AddRange(1, mbox.Messages)
// Get the whole message body
var section imap.BodySectionName
items := []imap.FetchItem{section.FetchItem()}
messages := make(chan *imap.Message, 10)
go func() {
if err := imapClient.Fetch(seqSet, items, messages); err != nil {
log.Fatal(err)
}
}()
var i uint32
i = 1
for {
if i == mbox.Messages {
break
}
msg := <-messages
if msg == nil {
fmt.Println("Server didn't returned message")
continue
}
r := msg.GetBody(§ion)
if r == nil {
fmt.Println("Server didn't returned message body")
continue
}
// Create a new mail reader
mr, err := mail.CreateReader(r)
if err != nil {
fmt.Println(err)
continue
}
// Process each message's part
//for {
p, err := mr.NextPart()
if err == io.EOF {
break
} else if err != nil {
fmt.Println(err)
continue
}
b, _ := ioutil.ReadAll(p.Body)
reEmail := `[\w\.]+@\w+\.[a-z]{2,3}(\.[a-z]{2,3})?`
re := regexp.MustCompile(reEmail)
//-1 表示匹配所有 如果输入5 表示只匹配5个
email := re.FindStringSubmatch(string(b))
if len(email) > 0 {
err = PutData(email[0])
if err != nil {
fmt.Println(err)
}
fmt.Println(strconv.Itoa(int(i))+": "+email[0])
}
//}
i++
}
} |
package main
import "fmt"
// ListNode is a node of a singly linked list.
type ListNode struct {
	Val int // payload value
	Next *ListNode // next node; nil terminates the list
}
// Print writes the list values to stdout separated by "->", terminated by
// a newline (an empty list prints just the newline).
func Print(list *ListNode) {
	for n := list; n != nil; n = n.Next {
		fmt.Print(n.Val)
		if n.Next != nil {
			fmt.Print("->")
		}
	}
	fmt.Println()
}
// swapPairs swaps every two adjacent nodes of the list in place and
// returns the new head (LeetCode 24). For an odd-length list the final
// node stays put. O(n) time, O(1) extra space, using a dummy head.
func swapPairs(head *ListNode) *ListNode {
	dummy := &ListNode{Next: head}
	prev := dummy
	for prev.Next != nil && prev.Next.Next != nil {
		first := prev.Next
		second := first.Next
		// Relink: prev -> second -> first -> rest.
		first.Next = second.Next
		second.Next = first
		prev.Next = second
		prev = first
	}
	return dummy.Next
}
// main builds the list 1->2->3->4 and prints the pairwise-swapped result
// (2->1->4->3).
func main() {
	var input *ListNode
	// Build 1->2->3->4 by prepending in reverse order.
	for _, v := range []int{4, 3, 2, 1} {
		input = &ListNode{Val: v, Next: input}
	}
	Print(swapPairs(input))
}
|
package main
import "fmt"
// person holds basic identity data.
type person struct {
	first string // given name
	last string // family name
	age int // age in years
}

// secretAgent embeds person, so person's fields are promoted onto it.
type secretAgent struct {
	person // embedded type, promoted to the outer type; also known as anonymous field
	license bool // whether the agent holds a license
}
// main demonstrates struct embedding: fields of the embedded person are
// promoted onto secretAgent and reachable both directly and through the
// explicit person field.
func main() {
	bond := secretAgent{
		person:  person{first: "James", last: "Bond", age: 32},
		license: true,
	}
	fmt.Println(bond)
	fmt.Println(bond.first)        // promoted to the top level of the type
	fmt.Println(bond.person.first) // fully-qualified access, useful on name collisions
}
|
package server
import (
"context"
"encoding/json"
"fmt"
"net/http"
"github.com/99designs/gqlgen/graphql/handler"
"github.com/99designs/gqlgen/graphql/handler/extension"
"github.com/99designs/gqlgen/graphql/handler/lru"
"github.com/99designs/gqlgen/graphql/handler/transport"
"github.com/99designs/gqlgen/graphql/playground"
"github.com/ddouglas/ledger/internal/account"
"github.com/ddouglas/ledger/internal/auth"
"github.com/ddouglas/ledger/internal/gateway"
"github.com/ddouglas/ledger/internal/importer"
"github.com/ddouglas/ledger/internal/item"
resolvers "github.com/ddouglas/ledger/internal/server/gql"
"github.com/ddouglas/ledger/internal/server/gql/dataloaders"
"github.com/ddouglas/ledger/internal/server/gql/generated"
"github.com/ddouglas/ledger/internal/transaction"
"github.com/ddouglas/ledger/internal/user"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/newrelic/go-agent/v3/newrelic"
"github.com/sirupsen/logrus"
)
// server bundles the HTTP server with every service dependency required by
// the REST and GraphQL handlers. Construct with New; start with Run.
type server struct {
	port uint // TCP port the HTTP server listens on
	logger *logrus.Logger
	auth auth.Service
	loaders dataloaders.Service // GraphQL dataloaders
	importer importer.Service
	gateway gateway.Service
	newrelic *newrelic.Application // APM instrumentation
	user user.Service
	account account.Service
	item item.Service
	transaction transaction.Service
	server *http.Server // built in New, started by Run
}
// New wires all service dependencies into a server and prepares (but does
// not start) the underlying http.Server with the router attached.
func New(
	port uint,
	newrelic *newrelic.Application,
	logger *logrus.Logger,
	auth auth.Service,
	loaders dataloaders.Service,
	gateway gateway.Service,
	user user.Service,
	importer importer.Service,
	account account.Service,
	item item.Service,
	transaction transaction.Service,
) *server {
	s := &server{
		port:        port,
		newrelic:    newrelic,
		logger:      logger,
		auth:        auth,
		loaders:     loaders,
		gateway:     gateway,
		user:        user,
		importer:    importer,
		account:     account,
		item:        item,
		transaction: transaction,
	}
	addr := fmt.Sprintf(":%d", s.port)
	s.server = &http.Server{
		Addr:    addr,
		Handler: s.buildRouter(),
	}
	return s
}
// Run logs the listening port and blocks serving HTTP until the server exits.
func (s *server) Run() error {
	entry := s.logger.WithField("service", "server")
	entry.Infof("Starting on Port %d", s.port)
	return s.server.ListenAndServe()
}
// GracefullyShutdown gracefully shuts down the HTTP API, allowing in-flight
// requests to finish until ctx is done.
func (s *server) GracefullyShutdown(ctx context.Context) error {
	s.logger.Info("attempting to shutdown server gracefully")
	err := s.server.Shutdown(ctx)
	return err
}
// buildRouter assembles the chi router: public health/playground/webhook
// endpoints, then an authorization-guarded group containing the REST API
// and the GraphQL handler.
func (s *server) buildRouter() *chi.Mux {
	r := chi.NewRouter()
	// Global middleware: request logging, CORS, and a JSON content type.
	r.Use(
		s.requestLogger(s.logger),
		s.cors,
		middleware.SetHeader("content-type", "application/json"),
	)
	// Unauthenticated endpoints.
	r.Get("/health", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	r.Get("/playground", playground.Handler("GraphQL playground", "/graphql"))
	r.Post("/external/plaid/v1/webhook", s.handlePlaidPostV1Webhook)
	r.Post("/external/plaid/v1/link/token", s.handlePlaidPostLinkToken)
	r.Post("/external/auth0/v1/exchange", s.handleAuth0PostCodeExchange)
	// Everything below requires authorization.
	r.Group(func(r chi.Router) {
		r.Use(s.authorization)
		r.Get("/retool/auth", func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		})
		r.Get("/items", s.handleGetUserItems)
		// r.Post("/items", s.handlePostUserItems)
		r.Get("/items/{itemID}/accounts", s.handleGetItemAccounts)
		r.Get("/items/{itemID}/accounts/{accountID}", s.handleGetItemAccount)
		r.Get("/items/{itemID}", s.handleGetUserItem)
		r.Delete("/items/{itemID}", s.handleDeleteUserItem)
		r.Get("/items/{itemID}/accounts/{accountID}/transactions", s.handleGetAccountTransactions)
		r.Put("/items/{itemID}/accounts/{accountID}/transactions", s.handleUpdateTransactions)
		r.Get("/items/{itemID}/accounts/{accountID}/transactions/{transactionID}", s.handleGetAccountTransaction)
		r.Patch("/items/{itemID}/accounts/{accountID}/transactions/{transactionID}", s.handlePatchAccountTransaction)
		r.Get("/items/{itemID}/accounts/{accountID}/transactions/{transactionID}/receipt", s.handleGetAccountTransactionReceiptURL)
		r.Post("/items/{itemID}/accounts/{accountID}/transactions/{transactionID}/receipt", s.handlePostAccountTransactionReceipt)
		r.Delete("/items/{itemID}/accounts/{accountID}/transactions/{transactionID}/receipt", s.handleDeleteAccountTransactionReceipt)
		// ##### GraphQL Handler #####
		// NOTE(review): this local variable shadows the imported handler package.
		handler := handler.New(
			generated.NewExecutableSchema(
				generated.Config{
					Resolvers: resolvers.New(
						s.logger,
						s.account,
						s.gateway,
						s.item,
						s.loaders,
						s.transaction,
					),
				},
			),
		)
		handler.AddTransport(transport.POST{})
		handler.AddTransport(transport.MultipartForm{})
		handler.Use(extension.Introspection{})
		handler.SetQueryCache(lru.New(1000))
		handler.Use(extension.AutomaticPersistedQuery{
			Cache: lru.New(100),
		})
		r.Handle("/graphql", handler)
	})
	return r
}
// func PrintRoutes(r chi.Routes) {
// var printRoutes func(parentPattern string, r chi.Routes)
// printRoutes = func(parentPattern string, r chi.Routes) {
// rts := r.Routes()
// parentPattern = strings.TrimSuffix(parentPattern, "/*")
// for _, rt := range rts {
// if rt.SubRoutes == nil {
// fmt.Println(parentPattern, "+", rt.Pattern)
// } else {
// pat := rt.Pattern
// subRoutes := rt.SubRoutes
// printRoutes(parentPattern+pat, subRoutes)
// }
// }
// }
// printRoutes("", r)
// }
// closeRequestBody closes the request body, reporting any close error to
// New Relic instead of failing the request.
func closeRequestBody(ctx context.Context, r *http.Request) {
	if err := r.Body.Close(); err != nil {
		newrelic.FromContext(ctx).NoticeError(err)
	}
}
// writeResponse writes code (only when it is not 200, which WriteHeader
// would otherwise emit implicitly on first write) and JSON-encodes data
// when present. ctx is currently unused but kept for signature symmetry
// with writeError.
func (s *server) writeResponse(ctx context.Context, w http.ResponseWriter, code int, data interface{}) {
	if code != http.StatusOK {
		w.WriteHeader(code)
	}
	if data == nil {
		return
	}
	// Encode errors are deliberately ignored: headers are already sent.
	_ = json.NewEncoder(w).Encode(data)
}
// writeError records err on the request log context and responds with
// {"message": err.Error()} at the given status code; a nil err writes
// only the status code.
func (s *server) writeError(ctx context.Context, w http.ResponseWriter, code int, err error) {
	if err == nil {
		s.writeResponse(ctx, w, code, nil)
		return
	}
	LogEntrySetField(ctx, "error", err)
	body := map[string]interface{}{
		"message": err.Error(),
	}
	s.writeResponse(ctx, w, code, body)
}
|
package res
// Code inspired, and partly borrowed, from SubList in nats-server
// https://github.com/nats-io/nats-server/blob/master/server/sublist.go
// Common byte variables for wildcards and token separator.
const (
	pmark = '$' // prefix of a named placeholder token
	pwild = '*' // anonymous placeholder token
	fwild = '>' // full wildcard; only valid as the final token
	btsep = '.' // token separator in patterns and resource names
)
const invalidPattern = "res: invalid pattern"
// Mux stores patterns and efficiently retrieves pattern handlers.
type Mux struct {
	pattern string // pattern this Mux is rooted at ("" for an unmounted root)
	plen int // token count of pattern; that many leading tokens are skipped in get
	root *node // root of the pattern trie
	parent *Mux // set by Mount when this Mux is attached to another
}
// A registered handler
type regHandler struct {
	Handler // the handler configuration as registered
	group group // derived from Handler.Group and the pattern via parseGroup
}
// A node represents one part of the path, and has pointers
// to the next nodes, including wildcards.
// Only one instance of handlers may exist per node.
type node struct {
	hs *regHandler // Handlers on this node
	params []pathParam // path parameters for the handlers
	nodes map[string]*node // exact-token children, keyed by token
	param *node // child for $placeholder / * tokens
	wild *node // Wild card node (">"); terminal by construction
}
// A pathParam represents a parameter part of the resource name.
type pathParam struct {
	name string // name of the parameter
	idx int // token index of the parameter
}
// nodeMatch is the result of matching a resource name against the trie:
// the matched handlers and the extracted path-parameter values.
type nodeMatch struct {
	hs *regHandler
	params map[string]string
}
// NewMux returns a new root Mux rooted at the given pattern.
func NewMux(pattern string) *Mux {
	m := &Mux{
		pattern: pattern,
		root:    &node{},
	}
	m.plen = len(splitPattern(pattern))
	return m
}
// Handle registers the handler functions for the given resource subpattern.
//
// A pattern may contain placeholders that act as wildcards and are parsed
// into request.PathParams:
//   s.Handle("user.$id", handler) // named placeholder: "user.10", "user.foo", ...
//   s.Handle("user.*", handler)   // anonymous placeholder
//   s.Handle("data.>", handler)   // full wildcard, last part only: "data.foo", "data.foo.bar", ...
//
// Handle panics if the pattern is already registered or the handler
// options conflict.
func (m *Mux) Handle(subpattern string, hf ...Option) {
	var h Handler
	for i := range hf {
		hf[i].SetOption(&h)
	}
	m.AddHandler(subpattern, h)
}
// AddHandler registers a handler for the given resource subpattern.
// The pattern syntax is the same as described for Handle.
func (m *Mux) AddHandler(subpattern string, hs Handler) {
	reg := &regHandler{
		Handler: hs,
		group:   parseGroup(hs.Group, subpattern),
	}
	m.add(subpattern, reg)
}
// Mount attaches another Mux at a given pattern.
// When mounting, any pattern set on the sub Mux will be merged with the subpattern.
// Panics if sub is already mounted, if the target is the root, or if the
// resulting pattern is already registered.
func (m *Mux) Mount(subpattern string, sub *Mux) {
	if sub.parent != nil {
		panic("res: already mounted")
	}
	spattern := mergePattern(sub.pattern, subpattern)
	if spattern == "" {
		panic("res: attempting to mount to root")
	}
	// fetch with sub.root as mount node grafts sub's trie into m's trie;
	// if the node returned is not sub.root, the pattern was already taken.
	n, _ := m.fetch(spattern, sub.root)
	if n != sub.root {
		panic("res: attempting to mount to existing pattern: " + mergePattern(m.pattern, spattern))
	}
	sub.pattern = spattern
	sub.parent = m
}
// Route creates a new empty Mux, lets fn configure it (when non-nil),
// mounts it at subpattern, and returns it.
func (m *Mux) Route(subpattern string, fn func(m *Mux)) *Mux {
	sub := NewMux("")
	if fn != nil {
		fn(sub)
	}
	m.Mount(subpattern, sub)
	return sub
}
// add registers hs under subpattern, panicking on an invalid pattern or a
// duplicate registration.
func (m *Mux) add(subpattern string, hs *regHandler) {
	n, params := m.fetch(subpattern, nil)
	if n.hs != nil {
		panic("res: registration already done for pattern " + mergePattern(m.pattern, subpattern))
	}
	n.hs = hs
	n.params = params
}
// fetch gets (creating as needed) the trie node for a given subpattern
// (not including the Mux path), returning it together with the named
// path parameters encountered. When mount is non-nil it is grafted in as
// the node for the final token. An invalid pattern will cause panic.
func (m *Mux) fetch(subpattern string, mount *node) (*node, []pathParam) {
	tokens := splitPattern(subpattern)
	var params []pathParam
	l := m.root
	var n *node
	var doMount bool
	for i, t := range tokens {
		// Only the last token may receive the mount node.
		if mount != nil && i == len(tokens)-1 {
			doMount = true
		}
		lt := len(t)
		if lt == 0 {
			panic(invalidPattern)
		}
		if t[0] == pmark || t[0] == pwild {
			// Placeholder token ($name or *); bare "$"/"*" is invalid.
			if lt == 1 {
				panic(invalidPattern)
			}
			if t[0] == pmark {
				name := t[1:]
				// Validate subpattern is unique
				for _, p := range params {
					if p.name == name {
						panic("res: placeholder " + t + " found multiple times in pattern: " + mergePattern(m.pattern, subpattern))
					}
				}
				params = append(params, pathParam{name: name, idx: i})
			}
			if l.param == nil {
				if doMount {
					l.param = mount
				} else {
					l.param = &node{}
				}
			}
			n = l.param
		} else if t[0] == fwild {
			// Validate the full wildcard is last
			if lt > 1 || i < len(tokens)-1 {
				panic(invalidPattern)
			}
			if l.wild == nil {
				if doMount {
					panic("res: attempting to mount on full wildcard pattern: " + mergePattern(m.pattern, subpattern))
				}
				l.wild = &node{}
			}
			n = l.wild
		} else {
			// Exact token: create the child map/node lazily.
			if l.nodes == nil {
				l.nodes = make(map[string]*node)
				if doMount {
					n = mount
				} else {
					n = &node{}
				}
				l.nodes[t] = n
			} else {
				n = l.nodes[t]
				if n == nil {
					if doMount {
						n = mount
					} else {
						n = &node{}
					}
					l.nodes[t] = n
				}
			}
		}
		l = n
	}
	return l, params
}
// get parses the resource name and gets the registered handlers and
// any path params.
// It will assume the first tokens match the Mux path (if any).
// Returns nil, nil if there is no match.
func (m *Mux) get(rname string) (*regHandler, map[string]string) {
	// Skip the first plen tokens (the Mux's own path prefix) while
	// tokenizing the remainder of the resource name.
	pl := m.plen
	var tokens []string
	if len(rname) > 0 {
		tokens = make([]string, 0, 32)
		start := 0
		for i := 0; i < len(rname); i++ {
			if rname[i] == btsep {
				if pl > 0 {
					pl-- // still consuming the Mux path prefix
				} else {
					tokens = append(tokens, rname[start:i])
				}
				start = i + 1
			}
		}
		// Append the final token only if the whole prefix was consumed.
		if pl == 0 {
			tokens = append(tokens, rname[start:])
		}
	}
	var nm nodeMatch
	matchNode(m.root, tokens, 0, &nm)
	return nm.hs, nm.params
}
// matchNode recursively matches toks[i:] against the trie rooted at l,
// trying the exact-token child first and then the placeholder child,
// finally falling back to the full wildcard. On a complete match it fills
// nm with the handlers and path-parameter values and returns true.
func matchNode(l *node, toks []string, i int, nm *nodeMatch) bool {
	t := toks[i]
	i++
	// Two passes: c==2 tries the exact child, c==1 the placeholder child.
	c := 2
	n := l.nodes[t]
	for c > 0 {
		// Does the node exist
		if n != nil {
			// Check if it is the last token
			if len(toks) == i {
				// Check if this node has handlers
				if n.hs != nil {
					nm.hs = n.hs
					// Check if we have path parameters for the handlers
					if len(n.params) > 0 {
						// Create a map with path parameter values
						nm.params = make(map[string]string, len(n.params))
						for _, pp := range n.params {
							nm.params[pp.name] = toks[pp.idx]
						}
					}
					return true
				}
			} else {
				// Match against next node
				if matchNode(n, toks, i, nm) {
					return true
				}
			}
		}
		// To avoid repeating code above, set node to test to l.param
		// and run it all again.
		n = l.param
		c--
	}
	// Check full wild card
	// NOTE(review): this branch populates nm.hs but still returns false;
	// callers read nm rather than the boolean, so the match is surfaced —
	// confirm the false return is intentional.
	if l.wild != nil {
		n = l.wild
		nm.hs = n.hs
		if len(n.params) > 0 {
			// Create a map with path parameter values
			nm.params = make(map[string]string, len(n.params))
			for _, pp := range n.params {
				nm.params[pp.name] = toks[pp.idx]
			}
		}
	}
	return false
}
// splitPattern splits a dot-separated pattern into its tokens.
// The empty pattern yields nil; "a.b" yields ["a", "b"].
func splitPattern(p string) []string {
	if p == "" {
		return nil
	}
	tokens := make([]string, 0, 32)
	start := 0
	for i := range p {
		if p[i] == btsep {
			tokens = append(tokens, p[start:i])
			start = i + 1
		}
	}
	return append(tokens, p[start:])
}
// mergePattern joins two patterns with a dot separator; when either side
// is empty the other is returned unchanged.
func mergePattern(a, b string) string {
	switch {
	case a == "":
		return b
	case b == "":
		return a
	default:
		return a + "." + b
	}
}
// hasResources reports whether any registered handler can serve resource
// requests (Get, Call, Auth, or New).
func (m *Mux) hasResources() bool {
	return hasPattern(m.root, func(n *node) bool {
		if n.hs == nil {
			return false
		}
		return n.hs.Get != nil || len(n.hs.Call) > 0 || len(n.hs.Auth) > 0 || n.hs.New != nil
	})
}
// hasAccess reports whether any registered handler defines an Access check.
func (m *Mux) hasAccess() bool {
	return hasPattern(m.root, func(n *node) bool {
		return n.hs != nil && n.hs.Access != nil
	})
}
// hasPattern walks the subject tree rooted at n (wildcard, parameter, then
// literal children) and reports whether test returns true for any visited
// node.
func hasPattern(n *node, test func(n *node) bool) bool {
	if w := n.wild; w != nil && test(w) {
		return true
	}
	if p := n.param; p != nil {
		if test(p) || hasPattern(p, test) {
			return true
		}
	}
	for _, child := range n.nodes {
		if test(child) || hasPattern(child, test) {
			return true
		}
	}
	return false
}
|
package encoding
import (
"bytes"
"context"
"github.com/grafana/tempo/tempodb/encoding/common"
)
// recordAppender is a read-only Appender backed by a fixed, pre-built record
// slice. RecordsForID scans forward from a Find result, which presumably
// relies on the slice being sorted by ID — confirm with callers.
type recordAppender struct {
	records []common.Record
}
// NewRecordAppender returns an appender that stores records only.
func NewRecordAppender(records []common.Record) Appender {
	a := recordAppender{records: records}
	return &a
}
// Append is unsupported for a recordAppender: the record set is fixed at
// construction time, so this always returns common.ErrUnsupported.
func (a *recordAppender) Append(id common.ID, b []byte) error {
	return common.ErrUnsupported
}
// Records returns the record slice supplied at construction.
func (a *recordAppender) Records() []common.Record {
	return a.records
}
// RecordsForID returns the contiguous run of records whose ID equals id,
// starting at the index located by Find. It returns nil when the located
// index is out of range, and an empty slice when the record at that index
// does not match.
func (a *recordAppender) RecordsForID(id common.ID) []common.Record {
	_, idx, _ := common.Records(a.records).Find(context.Background(), id)
	if idx < 0 || idx >= len(a.records) {
		return nil
	}
	matches := make([]common.Record, 0, 1)
	for ; idx < len(a.records); idx++ {
		if !bytes.Equal(a.records[idx].ID, id) {
			break
		}
		matches = append(matches, a.records[idx])
	}
	return matches
}
// Length returns the number of stored records.
func (a *recordAppender) Length() int {
	return len(a.records)
}
// DataLength always returns 0: this appender tracks records only and holds no
// underlying object data.
func (a *recordAppender) DataLength() uint64 {
	return 0
}
// Complete is a no-op; there is nothing to flush for a record-only appender.
func (a *recordAppender) Complete() error {
	return nil
}
|
package cookie
// Copyright 2016-2017 MediaMath
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"io"
"net/http"
"net/http/httptest"
"net/url"
"os"
"reflect"
"testing"
"time"
)
var (
	// prod is the production API base URL used as the cookie target in tests.
	prod, _ = url.Parse("https://api.mediamath.com")
)
func setup() {
os.Setenv("T1_API_USERNAME", "user")
os.Setenv("T1_API_PASSWORD", "password")
os.Setenv("T1_API_KEY", "apikey")
}
// setupServer starts a test HTTP server that answers every request with the
// contents of filename, the given status code, and content type cType
// (defaulting to mediaTypeJSON when empty). It panics if the fixture file
// cannot be opened.
func setupServer(statusCode int, filename, cType string) *httptest.Server {
	if cType == "" {
		cType = mediaTypeJSON
	}
	handler := func(w http.ResponseWriter, r *http.Request) {
		f, err := os.Open(filename)
		if err != nil {
			panic(err)
		}
		defer f.Close()
		w.Header().Set("Content-Type", cType)
		w.WriteHeader(statusCode)
		io.Copy(w, f)
	}
	return httptest.NewServer(http.HandlerFunc(handler))
}
// TestCredentialsFromEnv verifies that credentials are read from the T1_*
// environment variables.
func TestCredentialsFromEnv(t *testing.T) {
	setup()
	c := GetCredentialsFromEnv()
	cases := []struct {
		name, want, got string
	}{
		{"username", "user", c.Username},
		{"password", "password", c.Password},
		{"api key", "apikey", c.APIKey},
	}
	for _, tc := range cases {
		if tc.got != tc.want {
			t.Errorf("env %s: want %v, got %v", tc.name, tc.want, tc.got)
		}
	}
}
// TestConfigEncode verifies that credentials encode to the expected URL
// values.
func TestConfigEncode(t *testing.T) {
	setup()
	got := GetCredentialsFromEnv().Encode()
	want := url.Values{
		"user":     {"user"},
		"password": {"password"},
		"api_key":  {"apikey"},
	}
	if !reflect.DeepEqual(want, got) {
		t.Errorf("config encode: want %v, got %v", want, got)
	}
}
// TestNewClient verifies that New attaches a cookie jar and applies the
// default 300-second timeout.
func TestNewClient(t *testing.T) {
	c, err := New(Config{}, nil)
	if err != nil {
		t.Errorf("new: %v", err)
	}
	if c.Jar == nil {
		t.Error("new: expected cookie jar, got none attached")
	}
	if exp := 300 * time.Second; c.Timeout != exp {
		t.Errorf("new timeout: want %v, got %v", exp, c.Timeout)
	}
}
// TestSetSession verifies that SetSession stores the session ID in an
// "adama_session" cookie for the production URL.
func TestSetSession(t *testing.T) {
	c, _ := New(Config{}, nil)
	err := SetSession(c, "mysessionid", prod)
	if err != nil {
		t.Errorf("set session: %v", err)
	}
	cooks := c.Jar.Cookies(prod)
	if len(cooks) == 0 {
		t.Fatal("set session: no cookies set")
	}
	cook := cooks[0]
	if want, got := "adama_session", cook.Name; want != got {
		t.Errorf("cookie name: want %v, got %v", want, got)
	}
	if want, got := "mysessionid", cook.Value; want != got {
		t.Errorf("cookie value: want %v, got %v", want, got)
	}
}
// TestValidLogin verifies that Login succeeds against a server returning a
// valid login response.
func TestValidLogin(t *testing.T) {
	setup()
	conf := GetCredentialsFromEnv()
	c, _ := New(conf, nil)
	s := setupServer(200, "testdata/valid_login.json", "")
	defer s.Close()
	u, _ := url.Parse(s.URL)
	if err := Login(c, u, conf); err != nil {
		t.Errorf("valid login: %v", err)
	}
}
// TestDeveloperInactive verifies that Login surfaces the "Developer Inactive"
// HTML body from a 403 response as an error.
func TestDeveloperInactive(t *testing.T) {
	setup()
	conf := GetCredentialsFromEnv()
	c, _ := New(conf, nil)
	s := setupServer(403, "testdata/invalid_developerinactive.html", "text/xml")
	defer s.Close()
	u, _ := url.Parse(s.URL)
	err := Login(c, u, conf)
	if err == nil {
		t.Error("dev inactive: expected an error, got none")
	} else if exp, e := "login: <h1>Developer Inactive</h1>\n", err.Error(); e != exp {
		t.Errorf("dev inactive: want %v, got %v", exp, e)
	}
}
// TestAuthError verifies that Login surfaces the authentication error from a
// 401 JSON response.
func TestAuthError(t *testing.T) {
	setup()
	conf := GetCredentialsFromEnv()
	c, _ := New(conf, nil)
	s := setupServer(401, "testdata/invalid_autherror.json", "")
	defer s.Close()
	u, _ := url.Parse(s.URL)
	err := Login(c, u, conf)
	if err == nil {
		t.Error("auth error: expected an error, got none")
	} else if exp, e := "login: Authentication error", err.Error(); e != exp {
		// Fixed copy-pasted "dev inactive" label so a failure here points at
		// the right test.
		t.Errorf("auth error: want %v, got %v", exp, e)
	}
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package cgroup provides an interface to read and write configuration to
// cgroup.
package cgroup
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/cenkalti/backoff"
specs "github.com/opencontainers/runtime-spec/specs-go"
"golang.org/x/sync/errgroup"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/cleanup"
"gvisor.dev/gvisor/pkg/log"
)
const (
	// Filesystem type names for cgroup mounts as they appear in
	// /proc/[pid]/mountinfo.
	cgroupv1FsName = "cgroup"
	cgroupv2FsName = "cgroup2"
	// procRoot is the procfs root this module uses.
	procRoot = "/proc"
	// cgroupRoot is the cgroupfs root this module uses.
	cgroupRoot = "/sys/fs/cgroup"
)
// controllers maps cgroup v1 controller names to their configuration
// implementations. Controllers absent from this map are discarded when
// parsing /proc/[pid]/cgroup.
var controllers = map[string]controller{
	"blkio":    &blockIO{},
	"cpu":      &cpu{},
	"cpuset":   &cpuSet{},
	"hugetlb":  &hugeTLB{},
	"memory":   &memory{},
	"net_cls":  &networkClass{},
	"net_prio": &networkPrio{},
	"pids":     &pids{},
	// These controllers either don't have anything in the OCI spec or is
	// irrelevant for a sandbox.
	"cpuacct":    &noop{},
	"devices":    &noop{},
	"freezer":    &noop{},
	"perf_event": &noop{},
	"rdma":       &noop{},
	"systemd":    &noop{},
}
// IsOnlyV2 checks whether cgroups V2 is enabled and V1 is not, by testing
// the filesystem type mounted at cgroupRoot.
func IsOnlyV2() bool {
	stat := unix.Statfs_t{}
	if err := unix.Statfs(cgroupRoot, &stat); err != nil {
		// It's not used for anything important, assume not V2 on failure.
		return false
	}
	return stat.Type == unix.CGROUP2_SUPER_MAGIC
}
// setOptionalValueInt writes val to cgroup file 'name' under 'path', treating
// a nil or zero value as "not set" (no-op).
func setOptionalValueInt(path, name string, val *int64) error {
	if val == nil || *val == 0 {
		// Nothing requested; leave the file untouched.
		return nil
	}
	return setValue(path, name, strconv.FormatInt(*val, 10))
}
// setOptionalValueUint writes val to cgroup file 'name' under 'path',
// treating a nil or zero value as "not set" (no-op).
func setOptionalValueUint(path, name string, val *uint64) error {
	if val == nil || *val == 0 {
		// Nothing requested; leave the file untouched.
		return nil
	}
	return setValue(path, name, strconv.FormatUint(*val, 10))
}
// setOptionalValueUint32 is setOptionalValueUint for 32-bit values.
func setOptionalValueUint32(path, name string, val *uint32) error {
	if val == nil || *val == 0 {
		return nil
	}
	return setValue(path, name, strconv.FormatUint(uint64(*val), 10))
}
// setOptionalValueUint16 is setOptionalValueUint for 16-bit values.
func setOptionalValueUint16(path, name string, val *uint16) error {
	if val == nil || *val == 0 {
		return nil
	}
	return setValue(path, name, strconv.FormatUint(uint64(*val), 10))
}
// setValue writes data to the cgroup file 'name' under 'path'.
func setValue(path, name, data string) error {
	target := filepath.Join(path, name)
	log.Debugf("Setting %q to %q", target, data)
	return writeFile(target, []byte(data), 0700)
}
// writeFile is similar to ioutil.WriteFile() but doesn't create the file if it
// doesn't exist.
func writeFile(path string, data []byte, perm os.FileMode) error {
f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, perm)
if err != nil {
return err
}
defer f.Close()
_, err = f.Write(data)
return err
}
func getValue(path, name string) (string, error) {
fullpath := filepath.Join(path, name)
out, err := ioutil.ReadFile(fullpath)
if err != nil {
return "", err
}
return string(out), nil
}
// getInt reads the cgroup file 'name' under 'path' and parses it as an int,
// ignoring surrounding whitespace.
func getInt(path, name string) (int, error) {
	raw, err := getValue(path, name)
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(raw))
}
// fillFromAncestor sets the value of a cgroup file from the first ancestor
// that has content. It does nothing if the file in 'path' has already been
// set, and returns the value the file ends up holding.
func fillFromAncestor(path string) (string, error) {
	out, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}
	val := strings.TrimSpace(string(out))
	if val != "" {
		// File is set, stop here.
		return val, nil
	}
	// File is not set, recurse to parent and then set here.
	name := filepath.Base(path)
	parent := filepath.Dir(filepath.Dir(path))
	val, err = fillFromAncestor(filepath.Join(parent, name))
	if err != nil {
		return "", err
	}
	// Fix: a write failure previously returned ("", nil), silently reporting
	// success with an empty value. Propagate the error instead.
	if err := writeFile(path, []byte(val), 0700); err != nil {
		return "", err
	}
	return val, nil
}
// countCpuset returns the number of CPU in a string formatted like:
//
//	"0-2,7,12-14  # bits 0, 1, 2, 7, 12, 13, and 14 set" - man 7 cpuset
func countCpuset(cpuset string) (int, error) {
	count := 0
	for _, part := range strings.Split(cpuset, ",") {
		bounds := strings.Split(part, "-")
		switch len(bounds) {
		case 1:
			// A single CPU number.
			if _, err := strconv.Atoi(bounds[0]); err != nil {
				return 0, err
			}
			count++
		case 2:
			// An inclusive "start-end" range.
			start, err := strconv.Atoi(bounds[0])
			if err != nil {
				return 0, err
			}
			end, err := strconv.Atoi(bounds[1])
			if err != nil {
				return 0, err
			}
			if start < 0 || end < 0 || start > end {
				return 0, fmt.Errorf("invalid cpuset: %q", part)
			}
			count += end - start + 1
		default:
			return 0, fmt.Errorf("invalid cpuset: %q", part)
		}
	}
	return count, nil
}
// loadPaths loads cgroup paths for given 'pid', may be set to 'self'. It
// reads /proc/[pid]/cgroup for controller-to-path mappings and
// /proc/self/mountinfo to translate host paths for nested containers.
func loadPaths(pid string) (map[string]string, error) {
	procCgroup, err := os.Open(filepath.Join(procRoot, pid, "cgroup"))
	if err != nil {
		return nil, err
	}
	defer procCgroup.Close()
	// Load mountinfo for the current process, because it's where cgroups is
	// being accessed from.
	mountinfo, err := os.Open(filepath.Join(procRoot, "self/mountinfo"))
	if err != nil {
		return nil, err
	}
	defer mountinfo.Close()
	return loadPathsHelper(procCgroup, mountinfo, IsOnlyV2())
}
// loadPathsHelper parses a /proc/[pid]/cgroup stream (cgroup) and a
// /proc/[pid]/mountinfo stream (mountinfo) and returns a map from controller
// name to cgroup path. With 'unified' set, the v2 hierarchy entry is recorded
// under cgroup2Key. Paths are made relative to the mount root when the
// hierarchy is mounted from a submount (nested containers).
func loadPathsHelper(cgroup, mountinfo io.Reader, unified bool) (map[string]string, error) {
	paths := make(map[string]string)
	scanner := bufio.NewScanner(cgroup)
	for scanner.Scan() {
		// Format: ID:[name=]controller1,controller2:path
		// Example: 2:cpu,cpuacct:/user.slice
		tokens := strings.Split(scanner.Text(), ":")
		if len(tokens) != 3 {
			return nil, fmt.Errorf("invalid cgroups file, line: %q", scanner.Text())
		}
		// An empty controller list denotes the v2 unified hierarchy entry.
		if len(tokens[1]) == 0 && unified {
			paths[cgroup2Key] = tokens[2]
			continue
		}
		if len(tokens[1]) == 0 {
			continue
		}
		for _, ctrlr := range strings.Split(tokens[1], ",") {
			// Remove prefix for cgroups with no controller, eg. systemd.
			ctrlr = strings.TrimPrefix(ctrlr, "name=")
			// Discard unknown controllers.
			if _, ok := controllers[ctrlr]; ok {
				paths[ctrlr] = tokens[2]
			}
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	// For nested containers, in /proc/[pid]/cgroup we see paths from host,
	// which don't exist in container, so recover the container paths here by
	// double-checking with /proc/[pid]/mountinfo
	mountScanner := bufio.NewScanner(mountinfo)
	haveCg2Path := false
	for mountScanner.Scan() {
		// Format: ID parent major:minor root mount-point options opt-fields - fs-type source super-options
		// Example: 39 32 0:34 / /sys/fs/cgroup/devices rw,noexec shared:18 - cgroup cgroup rw,devices
		fields := strings.Fields(mountScanner.Text())
		if len(fields) < 9 {
			// Skip mounts that are not cgroup mounts.
			continue
		}
		switch fields[len(fields)-3] {
		case cgroupv1FsName:
			// Cgroup controller type is in the super-options field.
			superOptions := strings.Split(fields[len(fields)-1], ",")
			for _, opt := range superOptions {
				// Remove prefix for cgroups with no controller, eg. systemd.
				opt = strings.TrimPrefix(opt, "name=")
				// Only considers cgroup controllers that are registered, and skip other
				// irrelevant options, e.g. rw.
				if cgroupPath, ok := paths[opt]; ok {
					rootDir := fields[3]
					if rootDir != "/" {
						// When cgroup is in submount, remove repeated path components from
						// cgroup path to avoid duplicating them.
						relCgroupPath, err := filepath.Rel(rootDir, cgroupPath)
						if err != nil {
							return nil, err
						}
						paths[opt] = relCgroupPath
					}
				}
			}
		case cgroupv2FsName:
			// Translate the v2 path only once, for the first cgroup2 mount.
			if cgroupPath, ok := paths[cgroup2Key]; !haveCg2Path && ok {
				root := fields[3]
				relCgroupPath, err := filepath.Rel(root, cgroupPath)
				if err != nil {
					return nil, err
				}
				haveCg2Path = true
				paths[cgroup2Key] = relCgroupPath
			}
		}
	}
	if err := mountScanner.Err(); err != nil {
		return nil, err
	}
	return paths, nil
}
// Cgroup represents a cgroup configuration.
type Cgroup interface {
	// Install creates and configures the cgroup according to res.
	Install(res *specs.LinuxResources) error
	// Uninstall removes what Install created.
	Uninstall() error
	// Join adds the current process to the cgroup and returns a function
	// that restores the previous cgroup membership.
	Join() (func(), error)
	// CPUQuota returns the CPU quota as a fraction (quota/period).
	CPUQuota() (float64, error)
	// CPUUsage returns the accumulated CPU usage (cpuacct.usage for v1).
	CPUUsage() (uint64, error)
	// NumCPU returns the number of CPUs configured for the cgroup.
	NumCPU() (int, error)
	// MemoryLimit returns the memory limit in bytes.
	MemoryLimit() (uint64, error)
	// MakePath builds the filesystem path for the given controller.
	MakePath(controllerName string) string
}
// cgroupV1 represents a group inside all controllers. For example:
//
//	Name='/foo/bar' maps to /sys/fs/cgroup/<controller>/foo/bar on
//	all controllers.
//
// If Name is relative, it uses the parent cgroup path to determine the
// location. For example:
//
//	Name='foo/bar' and Parent[ctrl]="/user.slice", then it will map to
//	/sys/fs/cgroup/<ctrl>/user.slice/foo/bar
type cgroupV1 struct {
	// Name is the cgroup path, absolute or relative to Parents.
	Name string `json:"name"`
	// Parents maps controller name to the parent path used when Name is
	// relative.
	Parents map[string]string `json:"parents"`
	// Own marks controllers whose directories this instance created (and
	// should therefore remove on Uninstall).
	Own map[string]bool `json:"own"`
}
// NewFromSpec creates a new Cgroup instance if the spec includes a cgroup path.
// Returns nil otherwise. Cgroup paths are loaded based on the current process.
// If useSystemd is true, the Cgroup will be created and managed with
// systemd. This requires systemd (>=v244) to be running on the host and the
// cgroup path to be in the form `slice:prefix:name`.
func NewFromSpec(spec *specs.Spec, useSystemd bool) (Cgroup, error) {
	if spec.Linux == nil {
		return nil, nil
	}
	if spec.Linux.CgroupsPath == "" {
		return nil, nil
	}
	return NewFromPath(spec.Linux.CgroupsPath, useSystemd)
}
// NewFromPath creates a new Cgroup instance from the specified relative path.
// Cgroup paths are loaded based on the current process.
// If useSystemd is true, the Cgroup will be created and managed with
// systemd. This requires systemd (>=v244) to be running on the host and the
// cgroup path to be in the form `slice:prefix:name`.
func NewFromPath(cgroupsPath string, useSystemd bool) (Cgroup, error) {
	return new("self", cgroupsPath, useSystemd)
}
// NewFromPid loads cgroup for the given process.
// If useSystemd is true, the Cgroup will be created and managed with
// systemd. This requires systemd (>=v244) to be running on the host and the
// cgroup path to be in the form `slice:prefix:name`.
func NewFromPid(pid int, useSystemd bool) (Cgroup, error) {
	return new(strconv.Itoa(pid), "", useSystemd)
}
// new creates the appropriate Cgroup (v1 or v2) for the given process. 'pid'
// is a numeric pid or "self". Relative cgroupsPath values are resolved
// against the process's current cgroup paths.
// NOTE(review): this function shadows the builtin new() within the package.
func new(pid, cgroupsPath string, useSystemd bool) (Cgroup, error) {
	var (
		parents map[string]string
		err     error
		cg      Cgroup
	)
	// If path is relative, load cgroup paths for the process to build the
	// relative paths.
	if !filepath.IsAbs(cgroupsPath) {
		parents, err = loadPaths(pid)
		if err != nil {
			return nil, fmt.Errorf("finding current cgroups: %w", err)
		}
	}
	if IsOnlyV2() {
		// The cgroupsPath is in a special `slice:prefix:name` format for systemd
		// that should not be modified.
		if p, ok := parents[cgroup2Key]; ok && !useSystemd {
			// The cgroup of current pid will have tasks in it and we can't use
			// that, instead, use the its parent which should not have tasks in it.
			cgroupsPath = filepath.Join(filepath.Dir(p), cgroupsPath)
		}
		// Assume that for v2, cgroup is always mounted at cgroupRoot.
		cg, err = newCgroupV2(cgroupRoot, cgroupsPath, useSystemd)
		if err != nil {
			return nil, err
		}
	} else {
		cg = &cgroupV1{
			Name:    cgroupsPath,
			Parents: parents,
			Own:     make(map[string]bool),
		}
	}
	log.Debugf("New cgroup for pid: %s, %T: %+v", pid, cg, cg)
	return cg, nil
}
// CgroupJSON is a wrapper for Cgroup that can be encoded to JSON.
type CgroupJSON struct {
	Cgroup Cgroup
}

// cgroupJSONv1 tags a v1 cgroup with its wrapper key for (un)marshaling.
type cgroupJSONv1 struct {
	Cgroup *cgroupV1 `json:"cgroupv1"`
}

// cgroupJSONv2 tags a v2 cgroup with its wrapper key for (un)marshaling.
type cgroupJSONv2 struct {
	Cgroup *cgroupV2 `json:"cgroupv2"`
}

// cgroupJSONSystemd tags a systemd-managed cgroup with its wrapper key.
type cgroupJSONSystemd struct {
	Cgroup *cgroupSystemd `json:"cgroupsystemd"`
}

// cgroupJSONUnknown is marshaled when no concrete cgroup type is present.
type cgroupJSONUnknown struct {
	Cgroup any `json:"cgroupunknown"`
}
// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON. It inspects the
// wrapper key present in the payload to pick the concrete Cgroup type; if no
// known key is present, Cgroup is left nil.
func (c *CgroupJSON) UnmarshalJSON(data []byte) error {
	m := map[string]json.RawMessage{}
	if err := json.Unmarshal(data, &m); err != nil {
		return err
	}
	// Try each known wrapper key in priority order; the first present wins.
	candidates := []struct {
		key      string
		newValue func() Cgroup
	}{
		{"cgroupv1", func() Cgroup { return &cgroupV1{} }},
		{"cgroupv2", func() Cgroup { return &cgroupV2{} }},
		{"cgroupsystemd", func() Cgroup { return &cgroupSystemd{} }},
	}
	for _, cand := range candidates {
		rm, ok := m[cand.key]
		if !ok {
			continue
		}
		cg := cand.newValue()
		if err := json.Unmarshal(rm, cg); err != nil {
			return err
		}
		c.Cgroup = cg
		return nil
	}
	c.Cgroup = nil
	return nil
}
// MarshalJSON implements json.Marshaler.MarshalJSON, wrapping the concrete
// cgroup type under its identifying JSON key.
func (c *CgroupJSON) MarshalJSON() ([]byte, error) {
	switch cg := c.Cgroup.(type) {
	case nil:
		return json.Marshal(cgroupJSONUnknown{})
	case *cgroupV1:
		return json.Marshal(cgroupJSONv1{Cgroup: cg})
	case *cgroupV2:
		return json.Marshal(cgroupJSONv2{Cgroup: cg})
	case *cgroupSystemd:
		return json.Marshal(cgroupJSONSystemd{Cgroup: cg})
	default:
		return nil, nil
	}
}
// Install creates and configures cgroups according to 'res'. If cgroup path
// already exists, it means that the caller has already provided a
// pre-configured cgroups, and 'res' is ignored.
func (c *cgroupV1) Install(res *specs.LinuxResources) error {
	log.Debugf("Installing cgroup path %q", c.Name)
	// Clean up partially created cgroups on error. Errors during cleanup itself
	// are ignored.
	clean := cleanup.Make(func() { _ = c.Uninstall() })
	defer clean.Clean()
	// Controllers can be symlinks to a group of controllers (e.g. cpu,cpuacct).
	// So first check what directories need to be created. Otherwise, when
	// the directory for one of the controllers in a group is created, it will
	// make it seem like the directory already existed and it's not owned by the
	// other controllers in the group.
	var missing []string
	for key := range controllers {
		path := c.MakePath(key)
		if _, err := os.Stat(path); err != nil {
			missing = append(missing, key)
		} else {
			log.Debugf("Using pre-created cgroup %q: %q", key, path)
		}
	}
	for _, key := range missing {
		ctrlr := controllers[key]
		// Create the controller directory; if creation reports it should be
		// skipped and the controller is optional, verify the spec allows
		// skipping before moving on.
		if skip, err := createController(c, key); skip && ctrlr.optional() {
			if err := ctrlr.skip(res); err != nil {
				return err
			}
			log.Infof("Skipping cgroup %q, err: %v", key, err)
			continue
		} else if err != nil {
			return err
		}
		// Only set controllers that were created by me.
		c.Own[key] = true
		path := c.MakePath(key)
		if err := ctrlr.set(res, path); err != nil {
			return err
		}
	}
	// All controllers configured; disarm the cleanup.
	clean.Release()
	return nil
}
// createController creates the controller directory, checking that the
// controller is enabled in the system. It returns a boolean indicating whether
// the controller should be skipped (e.g. controller is disabled). In case it
// should be skipped, it also returns the error it got.
func createController(c Cgroup, name string) (bool, error) {
	// The controller must exist under the cgroup root to be usable at all.
	ctrlrPath := filepath.Join(cgroupRoot, name)
	if _, err := os.Stat(ctrlrPath); err != nil {
		return os.IsNotExist(err), err
	}
	path := c.MakePath(name)
	log.Debugf("Creating cgroup %q: %q", name, path)
	if err := os.MkdirAll(path, 0755); err != nil {
		// A read-only cgroupfs (e.g. running inside a container) is skippable.
		return errors.Is(err, unix.EROFS), err
	}
	return false, nil
}
// Uninstall removes the settings done in Install(). If cgroup path already
// existed when Install() was called, Uninstall is a noop.
func (c *cgroupV1) Uninstall() error {
	log.Debugf("Deleting cgroup %q", c.Name)
	g, ctx := errgroup.WithContext(context.Background())
	for key := range controllers {
		if !c.Own[key] {
			// cgroup is managed by caller, don't touch it.
			continue
		}
		path := c.MakePath(key)
		log.Debugf("Removing cgroup controller for key=%q path=%q", key, path)
		// If we try to remove the cgroup too soon after killing the sandbox we
		// might get EBUSY, so we retry for a few seconds until it succeeds.
		// NOTE(review): ctx is re-derived from the previous iteration's ctx,
		// so the 5s timeouts nest across controllers, and the defer'd cancels
		// only run when Uninstall returns — confirm this is intentional.
		ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
		defer cancel()
		b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)
		fn := func() error {
			err := unix.Rmdir(path)
			if os.IsNotExist(err) {
				// Already gone; treat as success.
				return nil
			}
			return err
		}
		// Run deletions in parallel to remove all directories even if there are
		// failures/timeouts in other directories.
		g.Go(func() error {
			if err := backoff.Retry(fn, b); err != nil {
				return fmt.Errorf("removing cgroup path %q: %w", path, err)
			}
			return nil
		})
	}
	return g.Wait()
}
// Join adds the current process to the all controllers. Returns function that
// restores cgroup to the original state.
func (c *cgroupV1) Join() (func(), error) {
	// First save the current state so it can be restored.
	paths, err := loadPaths("self")
	if err != nil {
		return nil, err
	}
	var undoPaths []string
	for ctrlr, path := range paths {
		// Skip controllers we don't handle.
		if _, ok := controllers[ctrlr]; ok {
			fullPath := filepath.Join(cgroupRoot, ctrlr, path)
			undoPaths = append(undoPaths, fullPath)
		}
	}
	// The cleanup re-joins every saved cgroup path; it runs automatically if
	// joining fails part-way through.
	cu := cleanup.Make(func() {
		for _, path := range undoPaths {
			log.Debugf("Restoring cgroup %q", path)
			// Writing the value 0 to a cgroup.procs file causes
			// the writing process to be moved to the corresponding
			// cgroup. - cgroups(7).
			if err := setValue(path, "cgroup.procs", "0"); err != nil {
				log.Warningf("Error restoring cgroup %q: %v", path, err)
			}
		}
	})
	defer cu.Clean()
	// Now join the cgroups.
	for key, ctrlr := range controllers {
		path := c.MakePath(key)
		log.Debugf("Joining cgroup %q", path)
		// Writing the value 0 to a cgroup.procs file causes the writing process to
		// be moved to the corresponding cgroup - cgroups(7).
		if err := setValue(path, "cgroup.procs", "0"); err != nil {
			// Missing optional controllers are tolerated.
			if ctrlr.optional() && os.IsNotExist(err) {
				continue
			}
			return nil, err
		}
	}
	// Success: hand the restore function to the caller and cancel the
	// automatic restore.
	return cu.Release(), nil
}
// CPUQuota returns the CFS CPU quota as a fraction of a CPU (quota/period).
// It returns -1 with a nil error when no quota is enforced (quota or period
// unset or non-positive).
func (c *cgroupV1) CPUQuota() (float64, error) {
	path := c.MakePath("cpu")
	quota, err := getInt(path, "cpu.cfs_quota_us")
	if err != nil {
		return -1, err
	}
	period, err := getInt(path, "cpu.cfs_period_us")
	if err != nil {
		return -1, err
	}
	if quota <= 0 || period <= 0 {
		// Fix: previously returned "err", which is provably nil at this
		// point; return nil explicitly so the no-quota case is unambiguous.
		return -1, nil
	}
	return float64(quota) / float64(period), nil
}
// CPUUsage returns the total CPU usage of the cgroup, read from
// cpuacct.usage.
func (c *cgroupV1) CPUUsage() (uint64, error) {
	raw, err := getValue(c.MakePath("cpuacct"), "cpuacct.usage")
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimSpace(raw), 10, 64)
}
// NumCPU returns the number of CPUs configured in 'cpuset/cpuset.cpus'.
func (c *cgroupV1) NumCPU() (int, error) {
	raw, err := getValue(c.MakePath("cpuset"), "cpuset.cpus")
	if err != nil {
		return 0, err
	}
	return countCpuset(strings.TrimSpace(raw))
}
// MemoryLimit returns the memory limit in bytes, read from
// memory.limit_in_bytes.
func (c *cgroupV1) MemoryLimit() (uint64, error) {
	raw, err := getValue(c.MakePath("memory"), "memory.limit_in_bytes")
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimSpace(raw), 10, 64)
}
// MakePath builds a path to the given controller, resolving a relative Name
// against the controller's parent path when one is known.
func (c *cgroupV1) MakePath(controllerName string) string {
	leaf := c.Name
	if parent, ok := c.Parents[controllerName]; ok {
		leaf = filepath.Join(parent, c.Name)
	}
	return filepath.Join(cgroupRoot, controllerName, leaf)
}
// controller is the per-controller configuration interface for cgroup v1.
type controller interface {
	// optional controllers don't fail if not found.
	optional() bool
	// set applies resource limits to controller.
	set(*specs.LinuxResources, string) error
	// skip is called when controller is not found to check if it can be safely
	// skipped or not based on the spec.
	skip(*specs.LinuxResources) error
}
// noop is a controller with no settings to apply; it never fails and is
// always optional.
type noop struct{}

// optional implements controller.optional. Receiver normalized to the
// unnamed form for consistency with the other methods.
func (*noop) optional() bool {
	return true
}

// set implements controller.set as a no-op.
func (*noop) set(*specs.LinuxResources, string) error {
	return nil
}

// skip implements controller.skip as a no-op.
func (*noop) skip(*specs.LinuxResources) error {
	return nil
}
// mandatory is embedded by controllers that must exist on the host.
type mandatory struct{}

func (*mandatory) optional() bool {
	return false
}

// skip panics: Install only calls skip for optional controllers, so reaching
// this is a programmer error.
func (*mandatory) skip(*specs.LinuxResources) error {
	panic("cgroup controller is not optional")
}
// memory configures the (mandatory) memory controller.
type memory struct {
	mandatory
}

// set applies the spec's memory limits (hard, soft, swap, kernel, kernel TCP,
// swappiness) and the OOM-killer toggle to the controller at path.
func (*memory) set(spec *specs.LinuxResources, path string) error {
	if spec == nil || spec.Memory == nil {
		return nil
	}
	if err := setOptionalValueInt(path, "memory.limit_in_bytes", spec.Memory.Limit); err != nil {
		return err
	}
	if err := setOptionalValueInt(path, "memory.soft_limit_in_bytes", spec.Memory.Reservation); err != nil {
		return err
	}
	if err := setOptionalValueInt(path, "memory.memsw.limit_in_bytes", spec.Memory.Swap); err != nil {
		return err
	}
	if err := setOptionalValueInt(path, "memory.kmem.limit_in_bytes", spec.Memory.Kernel); err != nil {
		return err
	}
	if err := setOptionalValueInt(path, "memory.kmem.tcp.limit_in_bytes", spec.Memory.KernelTCP); err != nil {
		return err
	}
	if err := setOptionalValueUint(path, "memory.swappiness", spec.Memory.Swappiness); err != nil {
		return err
	}
	// "1" disables the OOM killer for this cgroup.
	if spec.Memory.DisableOOMKiller != nil && *spec.Memory.DisableOOMKiller {
		if err := setValue(path, "memory.oom_control", "1"); err != nil {
			return err
		}
	}
	return nil
}
// cpu configures the (mandatory) cpu controller.
type cpu struct {
	mandatory
}

// set applies CPU shares, CFS quota/period, and realtime period/runtime from
// the spec.
func (*cpu) set(spec *specs.LinuxResources, path string) error {
	if spec == nil || spec.CPU == nil {
		return nil
	}
	if err := setOptionalValueUint(path, "cpu.shares", spec.CPU.Shares); err != nil {
		return err
	}
	if err := setOptionalValueInt(path, "cpu.cfs_quota_us", spec.CPU.Quota); err != nil {
		return err
	}
	if err := setOptionalValueUint(path, "cpu.cfs_period_us", spec.CPU.Period); err != nil {
		return err
	}
	if err := setOptionalValueUint(path, "cpu.rt_period_us", spec.CPU.RealtimePeriod); err != nil {
		return err
	}
	return setOptionalValueInt(path, "cpu.rt_runtime_us", spec.CPU.RealtimeRuntime)
}
// cpuSet configures the (mandatory) cpuset controller.
type cpuSet struct {
	mandatory
}

// set applies cpuset.cpus and cpuset.mems from the spec, inheriting missing
// values from the nearest ancestor cgroup.
func (*cpuSet) set(spec *specs.LinuxResources, path string) error {
	// cpuset.cpus and mems are required fields, but are not set on a new cgroup.
	// If not set in the spec, get it from one of the ancestors cgroup.
	if spec == nil || spec.CPU == nil || spec.CPU.Cpus == "" {
		if _, err := fillFromAncestor(filepath.Join(path, "cpuset.cpus")); err != nil {
			return err
		}
	} else {
		if err := setValue(path, "cpuset.cpus", spec.CPU.Cpus); err != nil {
			return err
		}
	}
	if spec == nil || spec.CPU == nil || spec.CPU.Mems == "" {
		_, err := fillFromAncestor(filepath.Join(path, "cpuset.mems"))
		return err
	}
	return setValue(path, "cpuset.mems", spec.CPU.Mems)
}
// blockIO configures the (mandatory) blkio controller.
type blockIO struct {
	mandatory
}

// set applies blkio weights (overall, leaf, and per-device) and per-device
// throughput/IOPS throttles from the spec.
func (*blockIO) set(spec *specs.LinuxResources, path string) error {
	if spec == nil || spec.BlockIO == nil {
		return nil
	}
	if err := setOptionalValueUint16(path, "blkio.weight", spec.BlockIO.Weight); err != nil {
		return err
	}
	if err := setOptionalValueUint16(path, "blkio.leaf_weight", spec.BlockIO.LeafWeight); err != nil {
		return err
	}
	// Per-device entries use the "major:minor value" format.
	for _, dev := range spec.BlockIO.WeightDevice {
		if dev.Weight != nil {
			val := fmt.Sprintf("%d:%d %d", dev.Major, dev.Minor, *dev.Weight)
			if err := setValue(path, "blkio.weight_device", val); err != nil {
				return err
			}
		}
		if dev.LeafWeight != nil {
			val := fmt.Sprintf("%d:%d %d", dev.Major, dev.Minor, *dev.LeafWeight)
			if err := setValue(path, "blkio.leaf_weight_device", val); err != nil {
				return err
			}
		}
	}
	if err := setThrottle(path, "blkio.throttle.read_bps_device", spec.BlockIO.ThrottleReadBpsDevice); err != nil {
		return err
	}
	if err := setThrottle(path, "blkio.throttle.write_bps_device", spec.BlockIO.ThrottleWriteBpsDevice); err != nil {
		return err
	}
	if err := setThrottle(path, "blkio.throttle.read_iops_device", spec.BlockIO.ThrottleReadIOPSDevice); err != nil {
		return err
	}
	return setThrottle(path, "blkio.throttle.write_iops_device", spec.BlockIO.ThrottleWriteIOPSDevice)
}
// setThrottle writes one "major:minor rate" line per device to the given
// blkio throttle file.
func setThrottle(path, name string, devs []specs.LinuxThrottleDevice) error {
	for _, dev := range devs {
		entry := fmt.Sprintf("%d:%d %d", dev.Major, dev.Minor, dev.Rate)
		if err := setValue(path, name, entry); err != nil {
			return err
		}
	}
	return nil
}
// networkClass configures the optional net_cls controller.
type networkClass struct{}

func (*networkClass) optional() bool {
	return true
}

// set writes the network class ID from the spec, if any.
func (*networkClass) set(spec *specs.LinuxResources, path string) error {
	if spec == nil || spec.Network == nil {
		return nil
	}
	return setOptionalValueUint32(path, "net_cls.classid", spec.Network.ClassID)
}

// skip fails only if the spec actually requires a class ID, since then the
// missing controller cannot be safely ignored.
func (*networkClass) skip(spec *specs.LinuxResources) error {
	if spec != nil && spec.Network != nil && spec.Network.ClassID != nil {
		return fmt.Errorf("Network.ClassID set but net_cls cgroup controller not found")
	}
	return nil
}
// networkPrio configures the optional net_prio controller.
type networkPrio struct{}

func (*networkPrio) optional() bool {
	return true
}

// set writes one "interface priority" entry per configured priority.
func (*networkPrio) set(spec *specs.LinuxResources, path string) error {
	if spec == nil || spec.Network == nil {
		return nil
	}
	for _, prio := range spec.Network.Priorities {
		val := fmt.Sprintf("%s %d", prio.Name, prio.Priority)
		if err := setValue(path, "net_prio.ifpriomap", val); err != nil {
			return err
		}
	}
	return nil
}

// skip fails only if the spec actually sets priorities, since then the
// missing controller cannot be safely ignored.
func (*networkPrio) skip(spec *specs.LinuxResources) error {
	if spec != nil && spec.Network != nil && len(spec.Network.Priorities) > 0 {
		return fmt.Errorf("Network.Priorities set but net_prio cgroup controller not found")
	}
	return nil
}
// pids configures the optional pids controller.
type pids struct{}

func (*pids) optional() bool {
	return true
}

// skip fails only if the spec actually sets a positive pid limit.
func (*pids) skip(spec *specs.LinuxResources) error {
	if spec != nil && spec.Pids != nil && spec.Pids.Limit > 0 {
		return fmt.Errorf("Pids.Limit set but pids cgroup controller not found")
	}
	return nil
}

// set writes the pid limit, treating non-positive limits as unset.
func (*pids) set(spec *specs.LinuxResources, path string) error {
	if spec == nil || spec.Pids == nil || spec.Pids.Limit <= 0 {
		return nil
	}
	val := strconv.FormatInt(spec.Pids.Limit, 10)
	return setValue(path, "pids.max", val)
}
// hugeTLB configures the optional hugetlb controller.
type hugeTLB struct{}

func (*hugeTLB) optional() bool {
	return true
}

// skip fails only if the spec actually sets hugepage limits.
func (*hugeTLB) skip(spec *specs.LinuxResources) error {
	if spec != nil && len(spec.HugepageLimits) > 0 {
		return fmt.Errorf("HugepageLimits set but hugetlb cgroup controller not found")
	}
	return nil
}

// set writes one hugetlb.<pagesize>.limit_in_bytes file per configured limit.
func (*hugeTLB) set(spec *specs.LinuxResources, path string) error {
	if spec == nil {
		return nil
	}
	for _, limit := range spec.HugepageLimits {
		name := fmt.Sprintf("hugetlb.%s.limit_in_bytes", limit.Pagesize)
		val := strconv.FormatUint(limit.Limit, 10)
		if err := setValue(path, name, val); err != nil {
			return err
		}
	}
	return nil
}
|
package main
import (
"bufio"
"log"
"strings"
)
// item is a node in the parsed outline: a line's text plus its nested
// children.
type item struct {
	Name     string
	Children []*item
}
// parse builds an item tree from an indentation-based outline, one node per
// line, where the number of leading spaces encodes nesting depth (see
// getLevel). The first level-0 line becomes the root; it terminates the
// program (log.Fatal) on a malformed line.
func parse(data string) item {
	scanner := bufio.NewScanner(strings.NewReader(data))
	var rootItem item
	var currentLevelItem *item
	lastLevel := 0
	lineNumber := 0
	for scanner.Scan() {
		lineNumber++
		line := scanner.Text()
		level := getLevel(line)
		lineItem := item{
			Name:     strings.TrimLeft(line, " "),
			Children: make([]*item, 0),
		}
		if level == 0 {
			// A level-0 line (re)starts the tree as the new root.
			rootItem = lineItem
			currentLevelItem = &rootItem
			// NOTE(review): lastLevel is set to 1 here rather than 0 —
			// confirm whether that is intentional for validating the
			// following line's indentation step.
			lastLevel = 1
		} else {
			// A line needs a root to attach to and may go at most one level
			// deeper than its predecessor.
			if rootItem.Name == "" || level-lastLevel > 1 {
				log.Fatal("syntax error in line: ", lineNumber)
			}
			// Walk the right-most spine down to the parent for this level.
			// NOTE(review): Children[len-1] is indexed without an emptiness
			// check; some malformed inputs may panic here — confirm inputs
			// are pre-validated.
			currentLevelItem = &rootItem
			// as we start from root level, start with level - 1
			for i := level - 1; i > 0; i-- {
				currentLevelItem = currentLevelItem.Children[len(currentLevelItem.Children)-1]
			}
			// log.Println("append at ", currentLevelItem)
			currentLevelItem.Children = append(currentLevelItem.Children, &lineItem)
			lastLevel = level
		}
	}
	return rootItem
}
// getLevel returns the number of leading spaces in line, which encodes the
// line's nesting depth (one space per level).
func getLevel(line string) int {
	// Count leading spaces directly; the previous implementation rebuilt the
	// string with strings.Replace once per level, which was O(n^2).
	level := 0
	for level < len(line) && line[level] == ' ' {
		level++
	}
	return level
}
|
package assertion
import (
"fmt"
"reflect"
)
// panicFormatWithType is the panic message used when expected and
// actual have different dynamic types.
// (Fixed typo: "bot got" -> "but got".)
const panicFormatWithType = "expected %v (type %v) but got %v (type %v)"

// panicFormat is the panic message used when same-typed values differ.
const panicFormat = "expected %v but got %v"
// Equals panics when actual differs from expected, either in dynamic
// type (using the type-annotated message) or in value.
func Equals(expected, actual interface{}) {
	expectedType := reflect.TypeOf(expected)
	actualType := reflect.TypeOf(actual)
	if expectedType != actualType {
		panic(fmt.Sprintf(panicFormatWithType, expected, expectedType, actual, actualType))
	}
	if isEqual(expected, actual) {
		return
	}
	panic(fmt.Sprintf(panicFormat, expected, actual))
}
// isEqual reports whether expected and actual are equal in value:
// scalars compare with ==, while arrays, slices, and maps compare
// element-wise (recursively), so nested containers are supported.
//
// This replaces a ~800-line type switch that enumerated each concrete
// slice/map type and had several defects:
//   - `case []map[interface{}]interface{}` asserted the value to
//     map[interface{}]interface{}, a guaranteed panic;
//   - map cases read missing keys as zero values, so maps with
//     disjoint keys but zero values compared as equal;
//   - recursive calls on container elements panicked (failed type
//     assertion) when element types differed instead of returning false.
func isEqual(expected, actual interface{}) bool {
	if expected == nil || actual == nil {
		// Equal only when both are nil.
		return expected == actual
	}
	expectedVal := reflect.ValueOf(expected)
	actualVal := reflect.ValueOf(actual)
	// Mismatched dynamic types are simply unequal (Equals already
	// panics for top-level type mismatches before calling here).
	if expectedVal.Type() != actualVal.Type() {
		return false
	}
	switch expectedVal.Kind() {
	case reflect.Array, reflect.Slice:
		if expectedVal.Len() != actualVal.Len() {
			return false
		}
		for i := 0; i < expectedVal.Len(); i++ {
			if !isEqual(expectedVal.Index(i).Interface(), actualVal.Index(i).Interface()) {
				return false
			}
		}
		return true
	case reflect.Map:
		if expectedVal.Len() != actualVal.Len() {
			return false
		}
		for _, key := range expectedVal.MapKeys() {
			actualItem := actualVal.MapIndex(key)
			// A key present in expected but absent from actual means
			// the maps differ (previously this silently compared
			// against the zero value).
			if !actualItem.IsValid() {
				return false
			}
			if !isEqual(expectedVal.MapIndex(key).Interface(), actualItem.Interface()) {
				return false
			}
		}
		return true
	default:
		// Scalars and other comparable kinds fall back to ==, matching
		// the original behavior for ints, strings, floats, etc.
		return expected == actual
	}
}
|
package internal
import (
"context"
"database/sql"
pb "github.com/lucasantarella/business-profiles-grpc-golib"
"log"
"lucasantarella.com/businesscards/models"
"lucasantarella.com/businesscards/utils"
"time"
)
// Server implements the business-profiles gRPC service, backed by the
// SQL database handle in Db.
type Server struct {
	Db *sql.DB
}
// GetProfileSocialLinks returns all social links stored for the
// profile named in the request, converted to their protobuf form.
func (s *Server) GetProfileSocialLinks(ctx context.Context, in *pb.GetProfileSocialLinksRequest) (*pb.GetProfileSocialLinksResponse, error) {
	defer utils.TimeTrack(time.Now(), "GetProfileSocialLinks")
	log.Printf("onGetProfileSocialLinks")
	// Look up the raw rows through the model layer.
	socialModel := models.ProfilesSocial{}
	socials, err := socialModel.FindByProfileID(s.Db, int64(in.ProfileId))
	if err != nil {
		return &pb.GetProfileSocialLinksResponse{}, err
	}
	// Convert each row to its protobuf message.
	links := make([]*pb.ProfileSocialLink, 0, len(socials))
	for _, social := range socials {
		links = append(links, &pb.ProfileSocialLink{
			Provider: pb.ProfileSocialLink_SocialProvider(social.Type),
			Value:    social.Value.String,
		})
	}
	return &pb.GetProfileSocialLinksResponse{Links: links}, nil
}
// GetProfileExperiences is not yet implemented and panics if called.
func (s *Server) GetProfileExperiences(context.Context, *pb.GetProfileExperiencesRequest) (*pb.GetProfileExperiencesResponse, error) {
	panic("implement me")
}
// CreateProfile is not yet implemented; it logs and times the call,
// then panics.
func (s *Server) CreateProfile(context.Context, *pb.CreateProfileRequest) (*pb.Profile, error) {
	defer utils.TimeTrack(time.Now(), "CreateProfile")
	log.Printf("onCreateProfile")
	panic("implement me")
}
// GetProfile loads the profile identified by the request's Id and
// returns its protobuf representation.
func (s *Server) GetProfile(ctx context.Context, in *pb.GetProfileRequest) (*pb.Profile, error) {
	defer utils.TimeTrack(time.Now(), "GetProfile")
	log.Printf("onGetProfile")
	// Fetch through the model layer.
	profile := models.Profiles{}
	if err := profile.Find(s.Db, int64(in.Id)); err != nil {
		return nil, err
	}
	return profile.ToPbProfile(), nil
}
// UpdateProfile is not yet implemented; it logs and times the call,
// then panics.
func (s *Server) UpdateProfile(context.Context, *pb.UpdateProfileRequest) (*pb.Profile, error) {
	defer utils.TimeTrack(time.Now(), "UpdateProfile")
	log.Printf("onUpdateProfile")
	panic("implement me")
}
|
package transformer
import (
"expvar"
"flag"
"fmt"
"log"
"os"
"sort"
"strings"
"github.com/dustin/go-humanize"
"github.com/sburnett/transformer/store"
)
// A PipelineStage is a single step of data processing: it reads
// records from Reader, passes each one to Transformer, and writes the
// resulting records to Writer. The Name is purely informational; it is
// used for logging and for selecting stages on the command line.
type PipelineStage struct {
	Name        string
	Transformer Transformer
	Reader      store.Reader
	Writer      store.Writer
}
// Pipeline is an ordered list of stages, run sequentially.
type Pipeline []PipelineStage

// StageNames returns the Name of every stage, in pipeline order.
func (p Pipeline) StageNames() []string {
	var result []string
	for _, s := range p {
		result = append(result, s.Name)
	}
	return result
}
// stagesDone counts pipeline stages completed so far and currentStage
// names the stage currently executing; both are published through
// expvar for runtime monitoring.
var stagesDone *expvar.Int
var currentStage *expvar.String

func init() {
	stagesDone = expvar.NewInt("StagesComplete")
	currentStage = expvar.NewString("CurrentStage")
}
// PipelineThunk lazily constructs a Pipeline, so pipelines are only
// built after command-line flags have been parsed.
type PipelineThunk func() Pipeline

// Convenience function to parse command line arguments, figure out which
// pipeline to run and configure that pipeline to run.
//
// The first positional argument selects a pipeline from pipelineThunks.
// -list_stages prints the chosen pipeline's stage names and exits.
// -run_only restricts execution to a comma-separated list of stage
// names; -run_from starts at the first listed stage found and runs it
// plus everything after it. Unknown pipelines or stage names print
// usage/diagnostics to stderr and terminate the process.
func ParsePipelineChoice(pipelineThunks map[string]PipelineThunk) (string, Pipeline) {
	runOnly := flag.String("run_only", "", "Comma separated list of stages to run.")
	runAfter := flag.String("run_from", "", "Run this stage and all stages following it.")
	listStages := flag.Bool("list_stages", false, "List the stages in the pipeline and exit.")
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage of %s [global flags] <pipeline> [pipeline flags]:\n", os.Args[0])
		fmt.Fprintln(os.Stderr, " [global flags] can be:")
		flag.PrintDefaults()
		var pipelineNames []string
		for name := range pipelineThunks {
			pipelineNames = append(pipelineNames, name)
		}
		sort.Strings(pipelineNames)
		fmt.Fprintln(os.Stderr, " <pipeline> is one of these:", strings.Join(pipelineNames, ", "))
		fmt.Fprintln(os.Stderr, " Pass '-help' to a pipeline to see [pipeline flags]")
	}
	flag.Parse()
	if flag.NArg() < 1 {
		flag.Usage()
		os.Exit(1)
	}
	pipelineName := flag.Arg(0)
	pipelineThunk, ok := pipelineThunks[pipelineName]
	if !ok {
		fmt.Fprintf(os.Stderr, "Invalid pipeline!\n\n")
		flag.Usage()
		os.Exit(1)
	}
	pipeline := pipelineThunk()
	if *listStages {
		fmt.Fprintln(os.Stderr, strings.Join(pipeline.StageNames(), "\n"))
		os.Exit(0)
	}
	if len(*runOnly) > 0 {
		// Collect the requested stages in the order they were named on
		// the command line (not pipeline order).
		stageNames := strings.Split(*runOnly, ",")
		var stagesToRun []PipelineStage
		for _, stageName := range stageNames {
			foundStage := false
			for _, stage := range pipeline {
				if stage.Name == stageName {
					stagesToRun = append(stagesToRun, stage)
					foundStage = true
					break
				}
			}
			if !foundStage {
				fmt.Fprintf(os.Stderr, "Invalid stage in pipeline %s\n", pipelineName)
				fmt.Fprintf(os.Stderr, "Possible stages:\n %s\n", strings.Join(pipeline.StageNames(), "\n "))
				os.Exit(1)
			}
		}
		return pipelineName, stagesToRun
	}
	if len(*runAfter) > 0 {
		// Return the pipeline suffix starting at the first stage whose
		// name matches any entry of -run_from.
		stageNames := strings.Split(*runAfter, ",")
		for idx, stage := range pipeline {
			for _, stageName := range stageNames {
				if stage.Name == stageName {
					return pipelineName, pipeline[idx:]
				}
			}
		}
		fmt.Fprintf(os.Stderr, "Invalid stage in pipeline %s\n", pipelineName)
		fmt.Fprintf(os.Stderr, "Possible stages:\n %s\n", strings.Join(pipeline.StageNames(), "\n "))
		os.Exit(1)
	}
	return pipelineName, pipeline
}
// RunPipeline runs a set of pipeline stages sequentially, with no
// parallelism between stages. Progress is published through the
// CurrentStage and StagesComplete expvars and logged per stage.
func RunPipeline(pipeline Pipeline) {
	for idx, stage := range pipeline {
		currentStage.Set(stage.Name)
		log.Printf("Running %s pipeline stage: %v", humanize.Ordinal(idx+1), stage.Name)
		RunTransformer(stage.Transformer, stage.Reader, stage.Writer)
		stagesDone.Add(1)
	}
	log.Printf("All stages complete")
}
|
package forest_test
import (
"testing"
forest "git.sr.ht/~whereswaldon/forest-go"
"git.sr.ht/~whereswaldon/forest-go/fields"
"golang.org/x/crypto/openpgp"
)
// MakeIdentityOrSkip builds a throwaway OpenPGP identity for tests,
// skipping the test when key/signer/content setup fails (environment
// problems) and failing it only when forest.NewIdentity itself errors
// with valid inputs.
//
// Fix: the error from forest.NewNativeSigner was previously discarded
// (immediately overwritten by the next assignment), so a failed signer
// construction went unnoticed.
func MakeIdentityOrSkip(t *testing.T) (*forest.Identity, forest.Signer) {
	privkey, err := openpgp.NewEntity("forest-test", "comment", "email@email.io", nil)
	if err != nil {
		t.Skip("Failed to create private key", err)
	}
	signer, err := forest.NewNativeSigner(privkey)
	if err != nil {
		t.Skip("Failed to create signer", err)
	}
	username, err := fields.NewQualifiedContent(fields.ContentTypeUTF8String, []byte("Test Name"))
	if err != nil {
		t.Skip("Failed to qualify username", err)
	}
	metadata, err := fields.NewQualifiedContent(fields.ContentTypeUTF8String, []byte{})
	if err != nil {
		t.Skip("Failed to qualify metadata", err)
	}
	identity, err := forest.NewIdentity(signer, username, metadata)
	if err != nil {
		t.Error("Failed to create Identity with valid parameters", err)
	}
	return identity, signer
}
// TestIdentityValidatesSelf verifies that a freshly created identity
// passes both ID and signature validation unmodified.
func TestIdentityValidatesSelf(t *testing.T) {
	identity, _ := MakeIdentityOrSkip(t)
	if correct, err := forest.ValidateID(identity, *identity.ID()); err != nil || !correct {
		t.Error("ID validation failed on unmodified node", err)
	}
	if correct, err := forest.ValidateSignature(identity, identity); err != nil || !correct {
		t.Error("Signature validation failed on unmodified node", err)
	}
}
// TestIdentityValidationFailsWhenTampered verifies that changing the
// identity's name after signing causes both ID and signature
// validation to fail.
func TestIdentityValidationFailsWhenTampered(t *testing.T) {
	identity, _ := MakeIdentityOrSkip(t)
	// Tamper with the signed content.
	identity.Name.Blob = fields.Blob([]byte("whatever"))
	if correct, err := forest.ValidateID(identity, *identity.ID()); err == nil && correct {
		t.Error("ID validation succeeded on modified node", err)
	}
	if correct, err := forest.ValidateSignature(identity, identity); err == nil && correct {
		t.Error("Signature validation succeeded on modified node", err)
	}
}
// TestIdentitySerialize verifies that an identity round-trips through
// MarshalBinary/UnmarshalIdentity unchanged.
func TestIdentitySerialize(t *testing.T) {
	identity, _ := MakeIdentityOrSkip(t)
	buf, err := identity.MarshalBinary()
	if err != nil {
		t.Error("Failed to serialize identity", err)
	}
	id2, err := forest.UnmarshalIdentity(buf)
	if err != nil {
		t.Error("Failed to deserialize identity", err)
	}
	if !identity.Equals(id2) {
		t.Errorf("Deserialized identity should be the same as what went in, expected %v, got %v", identity, id2)
	}
}
|
package main
import (
"flag"
"fmt"
"log"
"os"
"text/tabwriter"
"upspin.io/client"
"upspin.io/config"
_ "upspin.io/dir/remote"
"upspin.io/flags"
_ "upspin.io/key/transports"
"upspin.io/subcmd"
"upspin.io/transports"
"upspin.io/upspin"
)
// state carries the shared subcmd state for the du command plus the
// client and directory entries used during a run.
type state struct {
	*subcmd.State
	client  *client.Client
	entries []*upspin.DirEntry
}
const help = `
Du tells you the disk usage of an upspin directory that you can access.
`

// main implements the "upspin du" command: it walks each path given on
// the command line, sums block sizes per directory, and prints the
// results (optionally human-readable via -h, optionally limited to -d
// levels of depth).
//
// Fix: corrected the typo in the user-facing error message
// ("happended" -> "happened").
func main() {
	const name = "du"
	log.SetFlags(0)
	log.SetPrefix("upspin du: ")
	s := &state{
		State: subcmd.NewState(name),
	}
	human := flag.Bool("h", false, "print size in human readable format")
	depth := flag.Int("d", -1, "depth to recur in directories")
	s.ParseFlags(flag.CommandLine, os.Args[1:], help, "du -h -d=<depth> <path>")
	cfg, err := config.FromFile(flags.Config)
	if err != nil && err != config.ErrNoFactotum {
		s.Exit(err)
	}
	transports.Init(cfg)
	s.State.Init(cfg)
	if flag.Arg(0) == "" {
		s.Exitf("must supply a path")
	}
	// done caches the accumulated size of each path so shared subtrees
	// are only walked once across all arguments.
	done := map[upspin.PathName]int64{}
	for _, entry := range s.GlobAllUpspin(flag.Args()) {
		root := &tree{DirEntry: entry}
		s.list(entry, root, done)
		// list appends exactly one node (the argument itself) under root.
		if len(root.children) != 1 {
			s.Exitf("something weird happened")
		}
		w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
		if *depth >= 0 {
			printDepthEntries(w, root.children[0], done, *human, *depth)
		} else {
			printEntries(w, root.children[0], done, *human)
		}
		w.Flush()
	}
	s.ExitNow()
}
// tree is a directory entry plus the entries found beneath it; du
// builds one per command-line argument and prints it bottom-up.
type tree struct {
	*upspin.DirEntry
	children []*tree
}

// String renders the node as its full upspin path name.
func (t *tree) String() string {
	return string(t.Name)
}
// list recursively walks entry, appending a tree node for it under
// parent and accumulating per-path byte totals into done. A directory's
// total is the sum of its contents; paths whose size is already cached
// in done (non-zero) are not re-walked. Glob failures terminate the
// program via s.Exit.
func (s *state) list(entry *upspin.DirEntry, parent *tree, done map[upspin.PathName]int64) {
	if entry.IsDir() {
		dirNode := &tree{DirEntry: entry}
		parent.children = append(parent.children, dirNode)
		dirContents, err := s.Client.Glob(upspin.AllFilesGlob(entry.Name))
		if err != nil {
			s.Exit(err)
		}
		for _, subEntry := range dirContents {
			// Recurse only when this entry has not been sized before;
			// either way its cached total is folded into the parent.
			if size, ok := done[subEntry.Name]; !ok || size == 0 {
				s.list(subEntry, dirNode, done)
			}
			done[entry.Name] += done[subEntry.Name]
		}
	}
	if entry.IsRegular() || entry.IsLink() {
		fileNode := &tree{DirEntry: entry}
		parent.children = append(parent.children, fileNode)
		done[entry.Name] += size(entry)
	}
}
// size returns the total byte size of all blocks in entry.
func size(entry *upspin.DirEntry) (size int64) {
	for _, block := range entry.Blocks {
		size += block.Size
	}
	return size
}
// printDepthEntries prints directory sizes for node and its
// descendants, children first, descending at most depth levels below
// node. Only directories are printed; files never are. A negative
// depth prints nothing.
func printDepthEntries(w *tabwriter.Writer, node *tree, sizes map[upspin.PathName]int64, human bool, depth int) {
	if depth < 0 {
		return
	}
	for _, child := range node.children {
		printDepthEntries(w, child, sizes, human, depth-1)
	}
	if node.IsDir() && human {
		fmt.Fprintln(w, humanEntry(node, sizes))
	} else if node.IsDir() {
		fmt.Fprintf(w, "%d\t%s\n", sizes[node.Name], node)
	}
}
// printEntries prints every node in the tree (files and directories),
// children first, either as raw byte counts or in human-readable form.
func printEntries(w *tabwriter.Writer, node *tree, sizes map[upspin.PathName]int64, human bool) {
	for _, child := range node.children {
		printEntries(w, child, sizes, human)
	}
	if !human {
		fmt.Fprintf(w, "%d\t%s\n", sizes[node.Name], node)
		return
	}
	fmt.Fprintln(w, humanEntry(node, sizes))
}
// humanEntry formats node's size in human-readable form (B/K/M/G/T,
// one decimal place) followed by its path, tab-separated for the
// tabwriter.
//
// Fix: the scaling loop previously kept dividing without bound, so a
// size of 1024^5 bytes or more indexed past the end of scaleName and
// panicked; the scale is now clamped to the largest unit (T).
func humanEntry(node *tree, sizes map[upspin.PathName]int64) string {
	scaleName := []string{"B", "K", "M", "G", "T"}
	fsize := float64(sizes[node.Name])
	scale := 0
	for fsize > 1024 && scale < len(scaleName)-1 {
		scale++
		fsize /= 1024
	}
	return fmt.Sprintf("%.1f%s\t%s", fsize, scaleName[scale], node)
}
|
package main
import "strings"
// LeetCode 125. Valid Palindrome
//
// Given a string, determine whether it is a palindrome, considering
// only alphanumeric characters and ignoring letter case. The empty
// string is defined to be a valid palindrome.
//
// Example 1: input "A man, a plan, a canal: Panama" -> true
// Example 2: input "race a car" -> false

// isPalindrome reports whether s reads the same forward and backward
// over its alphanumeric characters, case-insensitively.
func isPalindrome(s string) bool {
	lowered := strings.ToLower(s)
	// Keep only the alphanumeric bytes, then compare the filtered
	// sequence against its reverse.
	var kept []byte
	for i := 0; i < len(lowered); i++ {
		if isValid(lowered[i]) {
			kept = append(kept, lowered[i])
		}
	}
	for i, j := 0, len(kept)-1; i < j; i, j = i+1, j-1 {
		if kept[i] != kept[j] {
			return false
		}
	}
	return true
}

// isValid reports whether a is an ASCII letter or digit.
func isValid(a byte) bool {
	switch {
	case a >= '0' && a <= '9':
		return true
	case a >= 'a' && a <= 'z':
		return true
	case a >= 'A' && a <= 'Z':
		return true
	}
	return false
}
// main is intentionally empty; this file exists only to hold the
// LeetCode 125 solution above.
func main() {
}
|
package article_service
import (
"hanxiaolin/gin-demo/logging"
"hanxiaolin/gin-demo/models"
"hanxiaolin/gin-demo/pkg/gredis"
"hanxiaolin/gin-demo/service/cache_service"
"encoding/json"
"fmt"
)
// Article is the service-layer representation of an article. It
// carries both entity fields and the pagination parameters
// (PageNum/PageSize) used by list queries.
type Article struct {
	ID            int
	TagID         int
	Title         string
	Desc          string
	Content       string
	CoverImageUrl string
	State         int
	CreatedBy     string
	ModifiedBy    string

	PageNum  int
	PageSize int
}
// ExistByID reports whether an article with a.ID exists in the database.
func (a *Article) ExistByID() (bool, error) {
	return models.ExistArticleByID(a.ID)
}
// Get returns the article identified by a.ID, preferring the Redis
// cache and falling back to the database. On a database hit the result
// is cached for 3600 seconds; a failed cache write is returned as an
// error (preserving existing behavior).
//
// Fix: the json.Unmarshal error was previously ignored, so corrupt
// cache data could return a nil article with a nil error; unmarshal
// failures now fall through to the database like other cache errors.
func (a *Article) Get() (*models.Article, error) {
	cache := cache_service.Article{ID: a.ID}
	key := cache.GetArticleKey()
	if gredis.Exists(key) {
		data, err := gredis.Get(key)
		if err != nil {
			logging.Info(err)
		} else {
			var cacheArticle *models.Article
			if err := json.Unmarshal(data, &cacheArticle); err != nil {
				logging.Info(err)
			} else {
				return cacheArticle, nil
			}
		}
	}

	article, err := models.GetArticle(a.ID)
	if err != nil {
		fmt.Println(err)
		return nil, err
	}

	err = gredis.Set(key, article, 3600)
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	return article, nil
}
|
// Copyright 2017 The Aiicy Team.
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package models
import "github.com/Aiicy/AiicyDS/modules/mailer"
// mailerUser is a wrapper for satisfying the mailer.User interface
// without exposing *User directly to the mailer package.
type mailerUser struct {
	user *User
}
// Fix: the receiver was named "this", which violates Go conventions
// (receiver names should be short and never this/self); renamed to
// "mu" consistently across all methods. Behavior is unchanged.

// ID returns the wrapped user's numeric ID.
func (mu mailerUser) ID() int64 {
	return mu.user.ID
}

// DisplayName returns the wrapped user's display name.
func (mu mailerUser) DisplayName() string {
	return mu.user.DisplayName()
}

// Email returns the wrapped user's email address.
func (mu mailerUser) Email() string {
	return mu.user.Email
}

// GenerateActivateCode returns an account-activation code for the user.
func (mu mailerUser) GenerateActivateCode() string {
	return mu.user.GenerateActivateCode()
}

// GenerateEmailActivateCode returns an activation code bound to the
// given email address.
func (mu mailerUser) GenerateEmailActivateCode(email string) string {
	return mu.user.GenerateEmailActivateCode(email)
}

// NewMailerUser wraps u so it satisfies the mailer.User interface.
func NewMailerUser(u *User) mailer.User {
	return mailerUser{u}
}
|
package util
import (
"crypto/md5"
"fmt"
"io"
"github.com/lib/pq"
"github.com/pganalyze/collector/setup/query"
)
// PGHelperFn describes a pganalyze SQL helper function, split into the
// SQL that surrounds the function source and the source (prosrc)
// itself, so the source alone can be checksummed.
type PGHelperFn struct {
	// function name
	name string
	// everything before prosrc in function definition sql, including
	// the opening quote before the function source content starts
	head string
	// the function body, prosrc
	body string
	// everything after prosrc, including the closing quote at the
	// beginning
	tail string
}

// GetDefinition returns the complete CREATE FUNCTION statement.
func (pgfn *PGHelperFn) GetDefinition() string {
	return fmt.Sprintf("%s%s%s", pgfn.head, pgfn.body, pgfn.tail)
}

// Matches reports whether md5hash equals the MD5 digest of the
// function body, rendered as lowercase hex.
func (pgfn *PGHelperFn) Matches(md5hash string) bool {
	digest := md5.Sum([]byte(pgfn.body))
	return md5hash == fmt.Sprintf("%x", digest)
}
// ExplainHelper is the pganalyze.explain SECURITY DEFINER helper: it
// runs EXPLAIN (VERBOSE, FORMAT JSON) on a single statement, rejecting
// input containing a semicolon, and binds any parameters via PREPARE/
// EXECUTE. NOTE: body is checksummed by ValidateHelperFunction, so any
// change here must be deployed to the databases as well.
var ExplainHelper = PGHelperFn{
	name: "explain",
	head: "CREATE OR REPLACE FUNCTION pganalyze.explain(query text, params text[]) RETURNS text AS $$",
	body: `DECLARE
prepared_query text;
prepared_params text;
result text;
BEGIN
SELECT regexp_replace(query, ';+\s*\Z', '') INTO prepared_query;
IF prepared_query LIKE '%;%' THEN
RAISE EXCEPTION 'cannot run EXPLAIN when query contains semicolon';
END IF;
IF array_length(params, 1) > 0 THEN
SELECT string_agg(quote_literal(param) || '::unknown', ',') FROM unnest(params) p(param) INTO prepared_params;
EXECUTE 'PREPARE pganalyze_explain AS ' || prepared_query;
BEGIN
EXECUTE 'EXPLAIN (VERBOSE, FORMAT JSON) EXECUTE pganalyze_explain(' || prepared_params || ')' INTO STRICT result;
EXCEPTION WHEN OTHERS THEN
DEALLOCATE pganalyze_explain;
RAISE;
END;
DEALLOCATE pganalyze_explain;
ELSE
EXECUTE 'EXPLAIN (VERBOSE, FORMAT JSON) ' || prepared_query INTO STRICT result;
END IF;
RETURN result;
END`,
	tail: "$$ LANGUAGE plpgsql VOLATILE SECURITY DEFINER;",
}
// GetStatReplicationHelper defines pganalyze.get_stat_replication: a SECURITY
// DEFINER SQL wrapper exposing pg_catalog.pg_stat_replication to the collector.
var GetStatReplicationHelper = PGHelperFn{
	name: "get_stat_replication",
	head: "CREATE OR REPLACE FUNCTION pganalyze.get_stat_replication() RETURNS SETOF pg_stat_replication AS $$",
	body: "/* pganalyze-collector */ SELECT * FROM pg_catalog.pg_stat_replication;",
	tail: "$$ LANGUAGE sql VOLATILE SECURITY DEFINER;",
}
// ValidateHelperFunction checks whether fn exists in the pganalyze schema as a
// SECURITY DEFINER function owned by a superuser, and whether the MD5 of its
// current (trimmed) body matches fn's expected body. A missing function is
// reported as (false, nil) rather than an error.
func ValidateHelperFunction(fn PGHelperFn, runner *query.Runner) (bool, error) {
	sql := fmt.Sprintf(
		`SELECT md5(btrim(prosrc, E' \\n\\r\\t'))
FROM pg_proc INNER JOIN pg_user ON (pg_proc.proowner = pg_user.usesysid)
WHERE proname = %s
AND pronamespace::regnamespace::text = 'pganalyze'
AND prosecdef
AND pg_user.usesuper`,
		pq.QuoteLiteral(fn.name),
	)
	row, err := runner.QueryRow(sql)
	switch {
	case err == query.ErrNoRows:
		return false, nil
	case err != nil:
		return false, err
	}
	return fn.Matches(row.GetString(0)), nil
}
|
package styles
// Present for backwards compatibility.
// Each variable aliases the registry entry with the corresponding key.
//
// Deprecated: use styles.Get(name) instead.
var (
	Abap                = Registry["abap"]
	Algol               = Registry["algol"]
	AlgolNu             = Registry["algol_nu"]
	Arduino             = Registry["arduino"]
	Autumn              = Registry["autumn"]
	Average             = Registry["average"]
	Base16Snazzy        = Registry["base16-snazzy"]
	Borland             = Registry["borland"]
	BlackWhite          = Registry["bw"]
	CatppuccinFrappe    = Registry["catppuccin-frappe"]
	CatppuccinLatte     = Registry["catppuccin-latte"]
	CatppuccinMacchiato = Registry["catppuccin-macchiato"]
	CatppuccinMocha     = Registry["catppuccin-mocha"]
	Colorful            = Registry["colorful"]
	DoomOne             = Registry["doom-one"]
	DoomOne2            = Registry["doom-one2"]
	Dracula             = Registry["dracula"]
	Emacs               = Registry["emacs"]
	Friendly            = Registry["friendly"]
	Fruity              = Registry["fruity"]
	GitHubDark          = Registry["github-dark"]
	GitHub              = Registry["github"]
	GruvboxLight        = Registry["gruvbox-light"]
	Gruvbox             = Registry["gruvbox"]
	HrDark              = Registry["hrdark"]
	HrHighContrast      = Registry["hr_high_contrast"]
	Igor                = Registry["igor"]
	Lovelace            = Registry["lovelace"]
	Manni               = Registry["manni"]
	ModusOperandi       = Registry["modus-operandi"]
	ModusVivendi        = Registry["modus-vivendi"]
	Monokai             = Registry["monokai"]
	MonokaiLight        = Registry["monokailight"]
	Murphy              = Registry["murphy"]
	Native              = Registry["native"]
	Nord                = Registry["nord"]
	OnesEnterprise      = Registry["onesenterprise"]
	ParaisoDark         = Registry["paraiso-dark"]
	ParaisoLight        = Registry["paraiso-light"]
	Pastie              = Registry["pastie"]
	Perldoc             = Registry["perldoc"]
	Pygments            = Registry["pygments"]
	RainbowDash         = Registry["rainbow_dash"]
	RosePineDawn        = Registry["rose-pine-dawn"]
	RosePineMoon        = Registry["rose-pine-moon"]
	RosePine            = Registry["rose-pine"]
	Rrt                 = Registry["rrt"]
	SolarizedDark       = Registry["solarized-dark"]
	SolarizedDark256    = Registry["solarized-dark256"]
	SolarizedLight      = Registry["solarized-light"]
	SwapOff             = Registry["swapoff"]
	Tango               = Registry["tango"]
	Trac                = Registry["trac"]
	Vim                 = Registry["vim"]
	VisualStudio        = Registry["vs"]
	Vulcan              = Registry["vulcan"]
	WitchHazel          = Registry["witchhazel"]
	XcodeDark           = Registry["xcode-dark"]
	Xcode               = Registry["xcode"]
)
|
package storage
import (
"fmt"
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/state"
"github.com/lxc/lxd/lxd/storage/drivers"
"github.com/lxc/lxd/shared/api"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logger"
"github.com/lxc/lxd/shared/logging"
)
// MockBackend controls whether to run the storage logic in mock mode. When
// true, CreatePool and GetPoolByName return mockBackend instances instead of
// loading real storage drivers.
var MockBackend = false
// volIDFuncMake returns a closure, bound to poolID, that storage drivers can
// use to resolve a (volume type, volume name) pair to its database volume ID.
// This way the drivers need neither knowledge of their pool's ID nor direct
// database access.
func volIDFuncMake(state *state.State, poolID int64) func(volType drivers.VolumeType, volName string) (int64, error) {
	return func(volType drivers.VolumeType, volName string) (int64, error) {
		dbVolType, err := VolumeTypeToDBType(volType)
		if err != nil {
			return -1, err
		}

		// TODO add project support in the future by splitting the volName by "_".
		volID, _, err := state.Cluster.StoragePoolNodeVolumeGetTypeByProject("default", volName, dbVolType, poolID)
		if err == db.ErrNoSuchObject {
			return -1, fmt.Errorf("Volume doesn't exist")
		}
		if err != nil {
			return -1, err
		}

		return volID, nil
	}
}
// CreatePool creates a new storage pool on disk and returns a Pool interface.
// In mock mode a mockBackend is returned without touching drivers or disk.
func CreatePool(state *state.State, poolID int64, dbPool *api.StoragePool, op *operations.Operation) (Pool, error) {
	// Sanity checks.
	if dbPool == nil {
		return nil, ErrNilValue
	}

	// Ensure a config map exists.
	if dbPool.Config == nil {
		dbPool.Config = map[string]string{}
	}

	// Handle mock requests.
	if MockBackend {
		mock := mockBackend{}
		mock.name = dbPool.Name
		mock.state = state
		mock.logger = logging.AddContext(logger.Log, log.Ctx{"driver": "mock", "pool": mock.name})
		return &mock, nil
	}

	// Use a distinct local name so the logging helper package isn't shadowed.
	l := logging.AddContext(logger.Log, log.Ctx{"driver": dbPool.Driver, "pool": dbPool.Name})

	// Load the storage driver.
	driver, err := drivers.Load(state, dbPool.Driver, dbPool.Name, dbPool.Config, l, volIDFuncMake(state, poolID), validateVolumeCommonRules)
	if err != nil {
		return nil, err
	}

	// Setup the pool struct.
	backend := lxdBackend{}
	backend.driver = driver
	backend.id = poolID
	backend.name = dbPool.Name
	backend.state = state
	backend.logger = l

	// Create the pool itself on the storage device.
	if err := backend.create(dbPool, op); err != nil {
		return nil, err
	}

	return &backend, nil
}
// GetPoolByName retrieves the pool from the database by its name and returns a
// Pool interface. In mock mode a mockBackend is returned without touching the
// database or drivers.
func GetPoolByName(state *state.State, name string) (Pool, error) {
	// Handle mock requests.
	if MockBackend {
		mock := mockBackend{}
		mock.name = name
		mock.state = state
		mock.logger = logging.AddContext(logger.Log, log.Ctx{"driver": "mock", "pool": mock.name})
		return &mock, nil
	}

	// Load the database record.
	poolID, dbPool, err := state.Cluster.StoragePoolGet(name)
	if err != nil {
		return nil, err
	}

	// Ensure a config map exists.
	if dbPool.Config == nil {
		dbPool.Config = map[string]string{}
	}

	// Use a distinct local name so the logging helper package isn't shadowed.
	l := logging.AddContext(logger.Log, log.Ctx{"driver": dbPool.Driver, "pool": dbPool.Name})

	// Load the storage driver.
	driver, err := drivers.Load(state, dbPool.Driver, dbPool.Name, dbPool.Config, l, volIDFuncMake(state, poolID), validateVolumeCommonRules)
	if err != nil {
		return nil, err
	}

	// Setup the pool struct.
	backend := lxdBackend{}
	backend.driver = driver
	backend.id = poolID
	backend.name = dbPool.Name
	backend.state = state
	backend.logger = l

	return &backend, nil
}
|
// Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package schemaexpr
import (
"context"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
)
// ValidateUniqueWithoutIndexPredicate verifies that an expression is a valid
// unique without index predicate and, if so, returns the serialized expression
// with its columns dequalified.
//
// Validity requires all of the following:
//
//   - the expression results in a boolean;
//   - it refers only to columns in the table;
//   - it contains no subqueries;
//   - it contains no non-immutable, aggregate, window, or set-returning
//     functions.
func ValidateUniqueWithoutIndexPredicate(
	ctx context.Context,
	tn tree.TableName,
	desc catalog.TableDescriptor,
	pred tree.Expr,
	semaCtx *tree.SemaContext,
) (string, error) {
	serialized, _, err := DequalifyAndValidateExpr(
		ctx,
		desc,
		pred,
		types.Bool,
		"unique without index predicate",
		semaCtx,
		tree.VolatilityImmutable,
		&tn,
	)
	if err != nil {
		return "", err
	}
	return serialized, nil
}
|
package middleware
import (
"github.com/forease/i18n/v2/i18n"
"github.com/gin-gonic/gin"
)
var (
	// DefaultLang is the language used when I18N is given no override.
	DefaultLang = "zh_CN"
	// lang is the package-wide active language; I18N overwrites it and Tr
	// reads it.
	lang = DefaultLang
)
// I18N returns a no-op gin middleware. If an argument is supplied, the first
// one replaces the package-wide active language used by Tr.
func I18N(args ...string) gin.HandlerFunc {
	if len(args) != 0 {
		lang = args[0]
	}
	return func(*gin.Context) {}
}
// LoadLocales loads translation files from dir into the underlying i18n store.
func LoadLocales(dir string) error {
	return i18n.LoadLocales(dir)
}
// Tr translates format with args using the package-wide active language.
func Tr(format string, args ...interface{}) string {
	return i18n.Tr(lang, format, args...)
}
// getLang is a stub that always returns the empty string.
// NOTE(review): presumably intended to resolve a per-request language (e.g.
// from a header) but not implemented and not called within this file —
// confirm before relying on it.
func getLang() string {
	return ""
}
|
package middleware
import (
"movie-app/auth"
"movie-app/helper"
"movie-app/user"
"strings"
"github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
)
// AdminMiddleware returns a gin handler that only lets a request proceed when
// it carries a valid Bearer JWT whose claims resolve to an existing user with
// the "Admin" role. On any failure the request is handed to
// helper.AuthorizationHandling and the handler returns.
func AdminMiddleware(authService auth.Service, userService user.Service) gin.HandlerFunc {
	return func(c *gin.Context) {
		authHeader := c.GetHeader("Authorization")
		if !strings.Contains(authHeader, "Bearer") {
			helper.AuthorizationHandling(c)
			return
		}
		tokenString := ""
		arrayToken := strings.Split(authHeader, " ")
		if len(arrayToken) == 2 {
			tokenString = arrayToken[1]
		}
		token, err := authService.ValidateToken(tokenString)
		if err != nil {
			helper.AuthorizationHandling(c)
			return
		}
		claim, ok := token.Claims.(jwt.MapClaims)
		if !ok || !token.Valid {
			helper.AuthorizationHandling(c)
			return
		}
		// Guard the type assertion: a token without a numeric user_id claim
		// previously panicked here instead of being rejected.
		rawUserID, ok := claim["user_id"].(float64)
		if !ok {
			helper.AuthorizationHandling(c)
			return
		}
		account, err := userService.GetUserById(user.GetUserUriInput{ID: int(rawUserID)})
		if err != nil {
			helper.AuthorizationHandling(c)
			return
		}
		if claim["role"] != "Admin" {
			helper.AuthorizationHandling(c)
			return
		}
		c.Set("currentUser", account)
	}
}
|
package config
import (
"github.com/joho/godotenv"
"log"
"os"
)
var (
	// Database connection parameters.
	DatabaseURI  string
	DatabaseName string
	// JwtKey is the secret used for JWT signing/verification.
	JwtKey string
)
// init loads ./config/.env and populates the package-level configuration
// variables. The process exits if the file cannot be read.
func init() {
	err := godotenv.Load("./config/.env")
	if err != nil {
		// Name the file actually being loaded; the previous message referred
		// to a non-existent "config.env".
		log.Fatal("Error loading ./config/.env file: ", err)
	}
	// Populate configuration from the environment.
	DatabaseURI = os.Getenv("DATABASE_URI")
	DatabaseName = os.Getenv("DATABASE_NAME")
	JwtKey = os.Getenv("JWT_KEY")
}
|
package generator
import (
"bufio"
"fmt"
"html/template"
"io/ioutil"
"math"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/RomanosTrechlis/blog-generator/config"
"gopkg.in/yaml.v2"
)
// siteGenerator renders a static blog from a set of post source folders and
// site-wide configuration.
type siteGenerator struct {
	// Sources lists the folders containing individual post inputs.
	Sources []string
	// SiteInfo carries site-wide settings (paths, theme, paging, URLs).
	SiteInfo *config.SiteInformation
}
// NewSiteGenerator creates a new siteGenerator over the given post sources and
// site configuration.
func NewSiteGenerator(sources []string, siteInfo *config.SiteInformation) *siteGenerator {
	return &siteGenerator{sources, siteInfo}
}
// templatePath holds the active theme's template file path; it is set by
// Generate and read by writeHTML when reporting template-execution errors.
var templatePath string
// Generate starts the static blog generation: it resets the destination and
// archive directories, parses every source into a post, sorts posts newest
// first, and runs all generation tasks.
func (g *siteGenerator) Generate() (err error) {
	templatePath = g.SiteInfo.ThemeFolder + "template.html"
	fmt.Println("Generating Site...")

	// Recreate the destination root and its archive subdirectory.
	for _, dir := range []string{g.SiteInfo.DestFolder, fmt.Sprintf("%s/archive", g.SiteInfo.DestFolder)} {
		if err = clearAndCreateDestination(dir); err != nil {
			return err
		}
	}

	t, err := getTemplate(templatePath)
	if err != nil {
		return err
	}

	// Build one post per source folder.
	var posts []*post
	for _, src := range g.Sources {
		p, err := g.newPost(src)
		if err != nil {
			return err
		}
		posts = append(posts, p)
	}
	sort.Sort(byDateDesc(posts))

	if err = g.runTasks(g.createTasks(posts, t)); err != nil {
		return err
	}
	fmt.Println("Finished generating Site...")
	return nil
}
// newPost assembles a post from a source folder: its meta.yml, rendered HTML,
// and its images. The post name keeps the final "/"-separated path component
// (leading slash included), or the whole path when it contains no slash.
func (g *siteGenerator) newPost(path string) (p *post, err error) {
	meta, err := g.getPostMeta(path)
	if err != nil {
		return nil, err
	}
	html, err := getHTML(path)
	if err != nil {
		return nil, err
	}
	imagesDir, images, err := getImages(path)
	if err != nil {
		return nil, err
	}
	// Guard against paths without "/": strings.LastIndex returns -1 there,
	// and the previous path[-1:] slice expression panicked.
	name := path
	if idx := strings.LastIndex(path, "/"); idx >= 0 {
		name = path[idx:]
	}
	p = &post{name: name, meta: meta, html: html, imagesDir: imagesDir, images: images}
	return p, nil
}
// getPostMeta reads and parses <path>/meta.yml, converting its Date field with
// the site's configured date format.
func (g *siteGenerator) getPostMeta(path string) (*Meta, error) {
	filePath := fmt.Sprintf("%s/meta.yml", path)
	raw, err := ioutil.ReadFile(filePath)
	if err != nil {
		return nil, fmt.Errorf("error while reading file %s: %v", filePath, err)
	}
	var meta Meta
	if err := yaml.Unmarshal(raw, &meta); err != nil {
		return nil, fmt.Errorf("error reading yml in %s: %v", filePath, err)
	}
	parsed, err := time.Parse(g.SiteInfo.DateFormat, meta.Date)
	if err != nil {
		return nil, fmt.Errorf("error parsing date in %s: %v", filePath, err)
	}
	meta.ParsedDate = parsed
	return &meta, nil
}
// createTasks builds the full set of generation tasks for a run: one generator
// per post, paginated front-page listings, the archive, tag and category
// pages, the sitemap, the RSS feed, and static pages. The returned slice is
// consumed by runTasks.
func (g *siteGenerator) createTasks(posts []*post, t *template.Template) []Generator {
	generators := make([]Generator, 0)
	destination := g.SiteInfo.DestFolder
	// One generator per post.
	for _, post := range posts {
		pg := postGenerator{post, g.SiteInfo, t, destination}
		generators = append(generators, &pg)
	}
	tagPostsMap := createTagPostsMap(posts)
	// Front page: slice posts into pages of NumPostsFrontPage; page 1 goes to
	// the destination root, page N (N>1) to "<dest>/N".
	paging := g.SiteInfo.NumPostsFrontPage
	numOfPages := getNumberOfPages(posts, paging)
	for i := 0; i < numOfPages; i++ {
		to := destination
		if i != 0 {
			to = fmt.Sprintf("%s/%d", destination, i+1)
		}
		toP := (i + 1) * paging
		// The final page takes whatever posts remain.
		if (i + 1) == numOfPages {
			toP = len(posts)
		}
		lg := &listingGenerator{posts[i*paging : toP], t, g.SiteInfo, to, "", i + 1, numOfPages}
		generators = append(generators, lg)
	}
	// Archive: a single unpaginated listing of every post.
	ag := listingGenerator{posts, t, g.SiteInfo, fmt.Sprintf("%s/archive", destination), "Archive", 0, 0}
	// Tag pages.
	tg := tagsGenerator{
		tagPostsMap: tagPostsMap,
		template:    t,
		siteInfo:    g.SiteInfo,
	}
	// Category pages.
	catPostsMap := createCatPostsMap(posts)
	ct := categoriesGenerator{
		catPostsMap: catPostsMap,
		template:    t,
		destination: destination,
		siteInfo:    g.SiteInfo,
	}
	// Sitemap.
	sg := sitemapGenerator{
		posts:            posts,
		tagPostsMap:      tagPostsMap,
		categoryPostsMap: catPostsMap,
		destination:      destination,
		blogURL:          g.SiteInfo.BlogURL,
	}
	// RSS feed.
	rg := rssGenerator{
		posts:       posts,
		destination: destination,
		siteInfo:    g.SiteInfo,
	}
	// Static pages: templated ones are rendered, plain ones copied verbatim.
	fileToDestination := make(map[string]string)
	templateToFile := make(map[string]string)
	for _, row := range g.SiteInfo.StaticPages {
		if row.IsTemplate {
			templateToFile[g.SiteInfo.ThemeFolder+row.File] = fmt.Sprintf("%s/%s", destination, row.To)
			continue
		}
		fileToDestination[g.SiteInfo.ThemeFolder+row.File] = fmt.Sprintf("%s/%s", destination, row.To)
	}
	statg := staticsGenerator{
		fileToDestination: fileToDestination,
		templateToFile:    templateToFile,
		template:          t,
		siteInfo:          g.SiteInfo,
	}
	generators = append(generators, &ag, &tg, &ct, &sg, &rg, &statg)
	return generators
}
// runTasks executes every generator concurrently, capping in-flight work at 50
// goroutines, and returns the first generation error (nil if all succeed).
func (g *siteGenerator) runTasks(generators []Generator) (err error) {
	var wg sync.WaitGroup
	finished := make(chan bool, 1)
	// One buffer slot per generator: with the previous buffer of 1, every
	// goroutine after the first failure blocked forever on its error send
	// once this function had returned, leaking goroutines.
	errors := make(chan error, len(generators))
	// pool is a counting semaphore bounding concurrency at 50.
	pool := make(chan struct{}, 50)
	for _, generator := range generators {
		wg.Add(1)
		go func(g Generator) {
			defer wg.Done()
			pool <- struct{}{}
			defer func() { <-pool }()
			err := g.Generate()
			if err != nil {
				errors <- err
			}
		}(generator)
	}
	go func() {
		wg.Wait()
		close(finished)
	}()
	select {
	case <-finished:
		// All goroutines are done; still report an error that arrived while
		// we were not yet selecting (previously such an error could be
		// silently dropped when racing the finished signal).
		select {
		case err := <-errors:
			return err
		default:
			return nil
		}
	case err := <-errors:
		return err
	}
}
// htmlConfig bundles everything writeHTML needs to render one output page.
type htmlConfig struct {
	path       string // output directory; the page is written to <path>/index.html
	pageTitle  string // page-specific title ("" means use the blog title alone)
	pageNum    int    // current listing page number
	maxPageNum int    // last listing page number (pageNum == maxPageNum disables "next")
	isPost     bool   // whether the page is a single post
	temp       *template.Template
	content    template.HTML // pre-rendered page body
	siteInfo   *config.SiteInformation
}
// writeHTML renders the page template into "<h.path>/index.html", filling the
// template data from the htmlConfig fields and wiring prev/next page numbers
// for listing navigation.
func (h htmlConfig) writeHTML() error {
	filePath := fmt.Sprintf("%s/index.html", h.path)
	f, err := os.Create(filePath)
	if err != nil {
		return fmt.Errorf("error creating file %s: %v", filePath, err)
	}
	defer f.Close()
	w := bufio.NewWriter(f)
	next := h.pageNum + 1
	prev := h.pageNum - 1
	// On the last page a next-page number of 0 signals "no next page".
	if h.pageNum == h.maxPageNum {
		next = 0
	}
	td := IndexData{
		Name:          h.siteInfo.Author,
		Year:          time.Now().Year(),
		HTMLTitle:     getHTMLTitle(h.pageTitle, h.siteInfo.BlogTitle),
		PageTitle:     h.pageTitle,
		Content:       h.content,
		CanonicalLink: buildCanonicalLink(h.path, h.siteInfo.BlogURL),
		PageNum:       h.pageNum,
		NextPageNum:   next,
		PrevPageNum:   prev,
		URL:           buildCanonicalLink(h.path, h.siteInfo.BlogURL),
		IsPost:        h.isPost,
	}
	err = h.temp.Execute(w, td)
	if err != nil {
		return fmt.Errorf("error executing template %s: %v", templatePath, err)
	}
	// Flush the buffered writer before the deferred Close.
	err = w.Flush()
	if err != nil {
		return fmt.Errorf("error writing file %s: %v", filePath, err)
	}
	return nil
}
// getHTMLTitle builds the document title: the blog title alone for untitled
// pages, otherwise "<page title> - <blog title>".
func getHTMLTitle(pageTitle, blogTitle string) string {
	if pageTitle != "" {
		return fmt.Sprintf("%s - %s", pageTitle, blogTitle)
	}
	return blogTitle
}
// createTagPostsMap groups posts by lowercased tag name.
func createTagPostsMap(posts []*post) map[string][]*post {
	result := make(map[string][]*post)
	for _, p := range posts {
		for _, tag := range p.meta.Tags {
			key := strings.ToLower(tag)
			// append handles the missing-key (nil slice) case, so the
			// previous explicit presence check was redundant.
			result[key] = append(result[key], p)
		}
	}
	return result
}
// createCatPostsMap groups posts by lowercased category name.
func createCatPostsMap(posts []*post) map[string][]*post {
	result := make(map[string][]*post)
	for _, p := range posts {
		for _, cat := range p.meta.Categories {
			key := strings.ToLower(cat)
			// append handles the missing-key (nil slice) case, so the
			// previous explicit presence check was redundant.
			result[key] = append(result[key], p)
		}
	}
	return result
}
func getNumberOfPages(posts []*post, postsPerPage int) (n int) {
res := float64(len(posts)) / float64(postsPerPage)
n, _ = strconv.Atoi(fmt.Sprintf("%.0f", math.Ceil(res)))
return n
}
|
package main
import (
"chatroom/server/model"
"fmt"
"net"
"time"
)
// process serves a single client connection: it wraps conn in a Processor and
// runs its main loop, closing the connection when the loop returns.
func process(conn net.Conn) {
	// Close the connection when this handler exits.
	defer conn.Close()
	// Hand the connection to the central Processor.
	processor := &Processor{
		Conn: conn,
	}
	err := processor.process2()
	if err != nil {
		fmt.Println("客户端和服务器通讯协程错误,err=", err)
		return
	}
}
// initUserDao initializes the package-level user DAO.
// It must run after initPool: it captures the global redis pool created there.
func initUserDao(){
	model.MyUserDao = model.NewUserDao(pool)
}
// main boots the chat server: it initializes the redis pool and the user DAO,
// then accepts TCP connections on port 8889, serving each client in its own
// goroutine.
func main() {
	// Initialize the redis connection pool before anything that depends on it.
	initPool("localhost:6379", 16, 0, 300*time.Second)
	initUserDao()
	fmt.Println("服务器在8889端口监听.....")
	listen, err := net.Listen("tcp", "0.0.0.0:8889")
	if err != nil {
		fmt.Println("net.Listen err=", err)
		return
	}
	// Defer the Close only after the error check: the original deferred it
	// first, which dereferenced a nil listener when Listen failed.
	defer listen.Close()
	// Accept clients forever, one goroutine per connection.
	for {
		fmt.Println("等待客户端来连接服务器")
		conn, err := listen.Accept()
		if err != nil {
			fmt.Println("Listen.Accept err=", err)
			return
		}
		go process(conn)
	}
}
|
// Copyright (c) Mainflux
// SPDX-License-Identifier: Apache-2.0
package cassandra
import "github.com/gocql/gocql"
const (
	// table creates the messages table: one row per message, partitioned by
	// channel and ordered newest-first by time.
	table = `CREATE TABLE IF NOT EXISTS messages (
id uuid,
channel text,
subtopic text,
publisher text,
protocol text,
name text,
unit text,
value double,
string_value text,
bool_value boolean,
data_value blob,
sum double,
time double,
update_time double,
PRIMARY KEY (channel, time, id)
) WITH CLUSTERING ORDER BY (time DESC)`
	// jsonTable is a format template (%s = table name) for JSON message
	// tables, partitioned by channel and ordered newest-first by created.
	jsonTable = `CREATE TABLE IF NOT EXISTS %s (
id uuid,
channel text,
subtopic text,
publisher text,
protocol text,
created bigint,
payload text,
PRIMARY KEY (channel, created, id)
) WITH CLUSTERING ORDER BY (created DESC)`
)
// DBConfig contains Cassandra DB specific parameters.
type DBConfig struct {
	Hosts    []string // cluster contact points passed to gocql.NewCluster
	Keyspace string   // keyspace used by the session
	User     string   // credentials for PasswordAuthenticator
	Pass     string
	Port     int // CQL port
}
// Connect establishes a session to the Cassandra cluster described by cfg and
// ensures the messages table exists. The caller owns (and must Close) the
// returned session.
func Connect(cfg DBConfig) (*gocql.Session, error) {
	cluster := gocql.NewCluster(cfg.Hosts...)
	cluster.Keyspace = cfg.Keyspace
	cluster.Consistency = gocql.Quorum
	cluster.Authenticator = gocql.PasswordAuthenticator{
		Username: cfg.User,
		Password: cfg.Pass,
	}
	cluster.Port = cfg.Port

	session, err := cluster.CreateSession()
	if err != nil {
		return nil, err
	}

	if err := session.Query(table).Exec(); err != nil {
		// Don't leak the session when table creation fails.
		session.Close()
		return nil, err
	}

	return session, nil
}
|
package main
import (
"errors"
"fmt"
"os"
"strings"
"github.com/pepabo/undocker"
"github.com/urfave/cli"
)
// version is the CLI version reported by --version; defaults to "unknown"
// (presumably overridden at build time via -ldflags "-X main.version=..." —
// confirm against the build scripts).
var version = "unknown"
// main wires up the undocker CLI: global registry/tmpdir flags plus the
// "extract" and "show config" commands, all operating through an
// undocker.Undocker bound to stdout/stderr.
func main() {
	u := undocker.Undocker{
		Out: os.Stdout,
		Err: os.Stderr,
	}
	// opts is shared by every flag destination and command closure below.
	opts := undocker.Options{}

	app := cli.NewApp()
	app.Name = "undocker"
	app.Usage = "Decompose docker images."
	app.Version = version
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:        "registry-url, r",
			Usage:       "docker registry url",
			EnvVar:      "REGISTRY_URL",
			Destination: &opts.RegistryURL,
		},
		cli.StringFlag{
			Name:        "registry-user, u",
			Usage:       "docker registry login username",
			EnvVar:      "REGISTRY_USER",
			Destination: &opts.RegistryUser,
		},
		cli.StringFlag{
			Name:        "registry-pass, p",
			Usage:       "docker registry login password",
			EnvVar:      "REGISTRY_PASS",
			Destination: &opts.RegistryPass,
		},
		cli.StringFlag{
			Name:        "tmpdir",
			Value:       "/tmp/undocker",
			Usage:       "temporal directory to extract image",
			EnvVar:      "UNDOCKER_TMP_PATH",
			Destination: &opts.TmpPath,
		},
	}
	extractCommand := cli.Command{
		Name:      "extract",
		Aliases:   []string{"e"},
		Usage:     "Extract to rootfs.",
		ArgsUsage: "[image] [destination]",
		Flags: []cli.Flag{
			cli.BoolFlag{
				Name:        "overwrite-symlink-refs, s",
				Usage:       "Overwrite symbolic link references",
				Destination: &opts.Extract.OverwriteSymlinkRefs,
			},
		},
		Action: func(c *cli.Context) error {
			repo, tag, err := parseReference(c.Args().Get(0))
			if err != nil {
				// A bad image reference is a usage error: show help instead.
				return cli.ShowCommandHelp(c, "extract")
			}
			// Default the extraction destination to the current directory.
			dest := c.Args().Get(1)
			if dest == "" {
				dest = "."
			}
			return u.Extract(repo, tag, dest, opts)
		},
	}
	showCommand := cli.Command{
		Name:    "show",
		Aliases: []string{"s"},
		Usage:   "Show image informations",
		Subcommands: []cli.Command{
			{
				Name:      "config",
				Usage:     "Show image configuration",
				ArgsUsage: "[image]",
				Action: func(c *cli.Context) error {
					repo, tag, err := parseReference(c.Args().Get(0))
					if err != nil {
						return cli.ShowCommandHelp(c, "config")
					}
					return u.Config(repo, tag, opts)
				},
			},
			// {
			// Name:      "manifest",
			// Usage:     "Show image manifest",
			// ArgsUsage: "[image]",
			// Action: func(c *cli.Context) error {
			// repo, tag, err := parseReference(c.Args().Get(0))
			// if err != nil {
			// return cli.ShowCommandHelp(c, "config")
			// }
			// return u.Config(repo, tag, opts)
			// },
			// },
		},
	}
	app.Commands = append(app.Commands, extractCommand)
	app.Commands = append(app.Commands, showCommand)
	err := app.Run(os.Args)
	if err != nil {
		fmt.Println(err)
		// Exit code 3 distinguishes a failed run.
		os.Exit(3)
	}
}
// parseReference splits a docker image reference of the form "repo[:tag]"
// into repository and tag, defaulting the tag to "latest" when absent. An
// empty repository is rejected.
func parseReference(arg string) (repository, tag string, err error) {
	repo, rest, hasTag := strings.Cut(arg, ":")
	if repo == "" {
		return "", "", errors.New("Invalid image")
	}
	if !hasTag {
		return repo, "latest", nil
	}
	return repo, rest, nil
}
|
package model
import (
mgo "github.com/globalsign/mgo"
"github.com/simplejia/namesrv/mongo"
)
// Stat identifies a statistics record by name.
type Stat struct {
	Name string
}

// Regular reports whether the receiver is usable, i.e. non-nil.
func (stat *Stat) Regular() (ok bool) {
	return stat != nil
}
// Db returns the mongo database name backing this model.
func (stat *Stat) Db() (db string) {
	return "stat"
}
// Table returns the collection (table) name backing this model.
func (stat *Stat) Table() (table string) {
	return "num_day"
}
// GetC returns a collection handle for this model, backed by a fresh Copy of
// the pooled mongo session for the model's database.
// NOTE(review): the copied session is not closed here, so the caller appears
// to be responsible for closing it (via c.Database.Session.Close()) when
// done — confirm against call sites.
func (stat *Stat) GetC() (c *mgo.Collection) {
	db, table := stat.Db(), stat.Table()
	session := mongo.DBS[db]
	sessionCopy := session.Copy()
	c = sessionCopy.DB(db).C(table)
	return
}
// NewStat returns a zero-valued Stat ready for use.
func NewStat() *Stat {
	return &Stat{}
}
|
package dbctl
import (
"github.com/SuperTikuwa/mission-techdojo/model"
)
// InsertNewUser persists newUser, logging and returning any database error.
func InsertNewUser(newUser model.User) error {
	db := gormConnect()
	defer db.Close()
	result := db.Create(&newUser)
	if result.Error != nil {
		writeLog(failure, result.Error)
		return result.Error
	}
	return nil
}
// SelectUserByToken fetches the user whose token matches; on any lookup
// failure it logs the error and returns a zero-value user.
func SelectUserByToken(token string) model.User {
	db := gormConnect()
	defer db.Close()
	var found model.User
	err := db.Where("token = ?", token).First(&found).Error
	if err != nil {
		writeLog(failure, err)
		return model.User{}
	}
	return found
}
// UpdateUser applies user's fields to the row identified by its token,
// logging and returning any database error.
func UpdateUser(user model.User) error {
	db := gormConnect()
	defer db.Close()
	err := db.Model(&model.User{}).Where("token = ?", user.Token).Update(user).Error
	if err != nil {
		writeLog(failure, err)
		return err
	}
	return nil
}
// UserExists reports whether a user with the same token is already stored.
// The previous version opened an extra database connection it never used —
// SelectUserByToken manages its own connection — and returned the boolean
// through an if/else; both are removed.
func UserExists(user model.User) bool {
	found := SelectUserByToken(user.Token)
	return found.Name != "" || found.Token != ""
}
|
package helper
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"reflect"
"strings"
)
// StripPrefix behaves like http.StripPrefix but guarantees the downstream
// handler never sees an empty URL path: a fully-stripped path is rewritten
// to "/".
func StripPrefix(path string, handler http.Handler) http.Handler {
	normalize := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "" {
			r.URL.Path = "/"
		}
		handler.ServeHTTP(w, r)
	})
	return http.StripPrefix(path, normalize)
}
// Cors sets permissive CORS response headers, echoing the request's Origin
// header (falling back to "*" when absent) and allowing credentials plus the
// common methods and headers.
func Cors(w http.ResponseWriter, r *http.Request) {
	origin := r.Header.Get("Origin")
	if origin == "" {
		origin = "*"
	}
	h := w.Header()
	h.Set("Access-Control-Allow-Credentials", "true")
	h.Set("Access-Control-Allow-Origin", origin)
	h.Set("Access-Control-Allow-Methods", "GET,PUT,POST,DELETE,OPTIONS")
	h.Set("Access-Control-Allow-Headers", "X-Requested-With,Content-Type,Cache-Control")
}
// getContentType returns the request's Content-Type header value, matching
// the header key case-insensitively so even non-canonical keys are found.
func getContentType(r *http.Request) string {
	for key := range r.Header {
		if strings.ToLower(key) == "content-type" {
			return r.Header.Get(key)
		}
	}
	return ""
}

// GetPostParams returns the parameters sent by the POST method of an HTTP
// request, decoding JSON bodies (top-level string values only), URL-encoded
// forms, and multipart forms into url.Values.
func GetPostParams(r *http.Request) url.Values {
	contentType := getContentType(r)
	fmt.Println("Content-Type", contentType)
	switch {
	// Match by prefix/substring: the header may carry parameters such as
	// "application/json; charset=utf-8", which the previous exact string
	// comparison failed to recognize.
	case strings.HasPrefix(contentType, "application/json"):
		params := map[string]interface{}{}
		result := url.Values{}
		decoder := json.NewDecoder(r.Body)
		err := decoder.Decode(&params)
		if err != nil {
			fmt.Println(err)
		}
		for k, v := range params {
			// Only flat string values translate cleanly into url.Values.
			if reflect.ValueOf(v).Kind().String() == "string" {
				result.Set(k, v.(string))
			}
		}
		return result
	case strings.HasPrefix(contentType, "application/x-www-form-urlencoded"):
		r.ParseForm()
		return r.Form
	case strings.Contains(contentType, "multipart/form-data"):
		r.ParseMultipartForm(int64(10 * 1000))
		return r.Form
	}
	return url.Values{}
}
|
package asptr
// Int returns a pointer to a copy of v.
func Int(v int) *int {
	p := new(int)
	*p = v
	return p
}
// Int8 returns a pointer to a copy of v.
func Int8(v int8) *int8 {
	p := new(int8)
	*p = v
	return p
}
// Int16 returns a pointer to a copy of v.
func Int16(v int16) *int16 {
	p := new(int16)
	*p = v
	return p
}
// Int32 returns a pointer to a copy of v.
func Int32(v int32) *int32 {
	p := new(int32)
	*p = v
	return p
}
// Int64 returns a pointer to a copy of v.
func Int64(v int64) *int64 {
	p := new(int64)
	*p = v
	return p
}
// Uint returns a pointer to the input value.
// NOTE(review): despite its name this helper takes and returns plain int, not
// uint — it is indistinguishable from Int. Changing the signature here would
// break existing callers; confirm the intended type upstream.
func Uint(v int) *int {
	return &v
}
// Uint8 returns a pointer to a copy of v.
func Uint8(v uint8) *uint8 {
	p := new(uint8)
	*p = v
	return p
}
// Uint16 returns a pointer to a copy of v.
func Uint16(v uint16) *uint16 {
	p := new(uint16)
	*p = v
	return p
}
// Uint32 returns a pointer to a copy of v.
func Uint32(v uint32) *uint32 {
	p := new(uint32)
	*p = v
	return p
}
// Uint64 returns a pointer to a copy of v.
func Uint64(v uint64) *uint64 {
	p := new(uint64)
	*p = v
	return p
}
// String returns a pointer to a copy of v.
func String(v string) *string {
	p := new(string)
	*p = v
	return p
}
// Bool returns a pointer to a copy of v.
func Bool(v bool) *bool {
	p := new(bool)
	*p = v
	return p
}
// Float32 returns a pointer to a copy of v.
func Float32(v float32) *float32 {
	p := new(float32)
	*p = v
	return p
}
// Float64 returns a pointer to a copy of v.
func Float64(v float64) *float64 {
	p := new(float64)
	*p = v
	return p
}
// Complex64 returns a pointer to a copy of v.
func Complex64(v complex64) *complex64 {
	p := new(complex64)
	*p = v
	return p
}
// Complex128 returns a pointer to a copy of v.
func Complex128(v complex128) *complex128 {
	p := new(complex128)
	*p = v
	return p
}
|
package network_test
import (
"fmt"
"testing"
"github.com/wheatevo/wslroutesvc/network"
)
// missingNewIfaceRunner is a stub command runner whose every Run call fails,
// simulating an interface that cannot be queried.
type missingNewIfaceRunner struct{}

// Run always returns empty output and an error.
func (e *missingNewIfaceRunner) Run(name string, arg ...string) ([]byte, error) {
	return []byte(""), fmt.Errorf("Could not run command")
}
// foundNewIfaceRunner is a stub command runner that replays canned netsh
// output in sequence.
type foundNewIfaceRunner struct {
	// cmdCount is the number of Run calls made so far; it selects which
	// canned output the next call returns.
	cmdCount int
}
// netshIfaceOutput is canned netsh-style output for the WSL virtual adapter's
// interface parameters; TestNewIface feeds it to the parser to extract the
// interface index (IfIndex: 39).
const netshIfaceOutput = `
Interface vEthernet (WSL) Parameters
----------------------------------------------
IfLuid : ethernet_32775
IfIndex : 39
State : connected
Metric : 15
Link MTU : 1500 bytes
Reachable Time : 30500 ms
Base Reachable Time : 30000 ms
Retransmission Interval : 1000 ms
DAD Transmits : 3
Site Prefix Length : 64
Site Id : 1
Forwarding : disabled
Advertising : disabled
Neighbor Discovery : enabled
Neighbor Unreachability Detection : enabled
Router Discovery : dhcp
Managed Address Configuration : enabled
Other Stateful Configuration : enabled
Weak Host Sends : disabled
Weak Host Receives : disabled
Use Automatic Metric : enabled
Ignore Default Routes : disabled
Advertised Router Lifetime : 1800 seconds
Advertise Default Route : disabled
Current Hop Limit : 0
Force ARPND Wake up patterns : disabled
Directed MAC Wake up patterns : disabled
ECN capability : application
RA Based DNS Config (RFC 6106) : disabled
DHCP/Static IP coexistence : disabled
`
// netshConfigOutput is canned netsh-style output for the WSL adapter's IP
// configuration; TestNewIface uses it to check address/network parsing
// (172.29.192.1 within 172.29.192.0/20).
const netshConfigOutput = `
Configuration for interface "vEthernet (WSL)"
DHCP enabled: No
IP Address: 172.29.192.1
Subnet Prefix: 172.29.192.0/20 (mask 255.255.240.0)
InterfaceMetric: 15
Statically Configured DNS Servers: None
Register with which suffix: None
Statically Configured WINS Servers: None
`
// Run replays canned netsh output: interface parameters on the first call,
// interface configuration on the second, and empty output afterwards.
func (e *foundNewIfaceRunner) Run(name string, arg ...string) ([]byte, error) {
	var response string
	switch e.cmdCount {
	case 0:
		response = netshIfaceOutput
	case 1:
		response = netshConfigOutput
	}
	e.cmdCount++
	return []byte(response), nil
}
// TestNewIface covers both NewIface outcomes: a runner whose commands fail
// must yield an Iface with empty/nil values, while a runner replaying real
// netsh output must yield the parsed interface ID, IP, and network.
func TestNewIface(t *testing.T) {
	// when iface cannot be found
	iface := network.NewIface("some missing interface", &missingNewIfaceRunner{})
	if iface.ID != "" || iface.IP.String() != "<nil>" || iface.Network.String() != "<nil>" {
		t.Errorf("Expected interface to have empty values, received %v", iface)
	}
	// when iface is found
	iface = network.NewIface("vEthernet (WSL)", &foundNewIfaceRunner{0})
	if iface.ID != "39" || iface.IP.String() != "172.29.192.1" || iface.Network.String() != "172.29.192.0/20" {
		t.Errorf("Expected interface to have populated values, received %v", iface)
	}
}
|
package services
import (
"fmt"
"github.com/apulis/AIArtsBackend/configs"
"github.com/apulis/AIArtsBackend/models"
urllib "net/url"
)
// GetAllTraining lists userName's training jobs from the DLTS backend and
// returns the requested page of jobs plus the total job and page counts.
func GetAllTraining(userName string, page, size int, jobStatus, searchWord, orderBy, order string) ([]*models.Training, int, int, error) {
	// QueryEscape the user-supplied search word: it is placed in the query
	// string, where PathEscape would leave characters such as '&' and '='
	// unescaped and corrupt the parameter list.
	url := fmt.Sprintf(`%s/ListJobsV3?userName=%s&jobOwner=%s&vcName=%s&jobType=%s&pageNum=%d&pageSize=%d&jobStatus=%s&searchWord=%s&orderBy=%s&order=%s`,
		configs.Config.DltsUrl, userName, userName, models.DefaultVcName,
		models.JobTypeArtsTraining,
		page, size, jobStatus, urllib.QueryEscape(searchWord),
		orderBy, order)
	jobList := &models.JobList{}
	err := DoRequest(url, "GET", nil, nil, jobList)
	if err != nil {
		fmt.Printf("get all training err[%+v]", err)
		return nil, 0, 0, err
	}
	trainings := make([]*models.Training, 0)
	for _, v := range jobList.AllJobs {
		trainings = append(trainings, &models.Training{
			Id:          v.JobId,
			Name:        v.JobName,
			Engine:      v.JobParams.Image,
			DeviceType:  v.JobParams.GpuType,
			CodePath:    v.JobParams.CodePath,
			DeviceNum:   v.JobParams.Resourcegpu,
			StartupFile: v.JobParams.StartupFile,
			OutputPath:  v.JobParams.OutputPath,
			DatasetPath: v.JobParams.DatasetPath,
			Params:      nil,
			Status:      v.JobStatus,
			CreateTime:  v.JobTime,
			Desc:        v.JobParams.Desc,
		})
	}
	totalJobs := jobList.Meta.TotalJobs
	// Guard against size == 0 to avoid a divide-by-zero panic.
	totalPages := 0
	if size > 0 {
		totalPages = totalJobs / size
		if (totalJobs % size) != 0 {
			totalPages += 1
		}
	}
	return trainings, totalJobs, totalPages, nil
}
// CreateTraining submits a new training job to the DLTS backend on behalf of
// userName, translating the Training model into the PostJob parameter map and
// returning the new job's id.
func CreateTraining(userName string, training models.Training) (string, error) {
	url := fmt.Sprintf("%s/PostJob", configs.Config.DltsUrl)
	params := make(map[string]interface{})
	params["userName"] = userName
	params["jobName"] = training.Name
	params["jobType"] = models.JobTypeArtsTraining
	params["image"] = ConvertImage(training.Engine)
	params["gpuType"] = training.DeviceType
	params["resourcegpu"] = training.DeviceNum
	params["DeviceNum"] = training.DeviceNum
	params["cmd"] = "" // use StartupFile, params instead
	if configs.Config.InteractiveModeJob {
		params["cmd"] = "sleep infinity" // use StartupFile, params instead
	} else {
		// Build the launch command from the startup file's type.
		fileType, err := CheckStartFileType(training.StartupFile)
		if fileType == FILETYPE_PYTHON {
			params["cmd"] = "python " + training.StartupFile
		} else if fileType == FILETYPE_SHELL {
			params["cmd"] = "bash " + training.StartupFile
		}
		if err != nil {
			fmt.Printf("startupfile is invalid[%+v]\n", err)
			return "", err
		}
		for k, v := range training.Params {
			if k == "sudo" {
				// Grant sudo by prefixing the command.
				params["cmd"] = "sudo " + v + " " + params["cmd"].(string)
			} else if len(k) > 0 && len(v) > 0 {
				params["cmd"] = params["cmd"].(string) + " --" + k + " " + v + " "
			}
		}
		if len(training.DatasetPath) > 0 {
			params["cmd"] = params["cmd"].(string) + " --data_path " + training.DatasetPath
		}
		// Forward the visualization path through the script params.
		if len(training.VisualPath) > 0 {
			training.Params["visualPath"] = training.VisualPath
		}
		if len(training.OutputPath) > 0 {
			params["cmd"] = params["cmd"].(string) + " --output_path " + training.OutputPath
		}
	}
	params["startupFile"] = training.StartupFile
	params["datasetPath"] = training.DatasetPath
	params["codePath"] = training.CodePath
	params["outputPath"] = training.OutputPath
	params["scriptParams"] = training.Params
	params["desc"] = training.Desc
	params["containerUserId"] = 0
	params["jobtrainingtype"] = training.JobTrainingType // "RegularJob"
	params["preemptionAllowed"] = false
	params["workPath"] = "./"
	params["enableworkpath"] = true
	params["enabledatapath"] = true
	params["enablejobpath"] = true
	params["jobPath"] = "./"
	params["hostNetwork"] = false
	params["isPrivileged"] = false
	params["interactivePorts"] = false
	// NOTE(review): NumPs feeds "numpsworker" and NumPsWorker feeds "numps" —
	// these look swapped; confirm against the DLTS PostJob API field names.
	params["numpsworker"] = training.NumPs
	params["numps"] = training.NumPsWorker
	params["vcName"] = models.DefaultVcName
	params["team"] = models.DefaultVcName
	id := &models.JobId{}
	err := DoRequest(url, "POST", nil, params, id)
	if err != nil {
		fmt.Printf("create training err[%+v]\n", err)
		return "", err
	}
	return id.Id, nil
}
// DeleteTraining asks the DLTS backend to kill the job identified by id on
// behalf of userName.
func DeleteTraining(userName, id string) error {
	url := fmt.Sprintf("%s/KillJob?userName=%s&jobId=%s", configs.Config.DltsUrl, userName, id)
	job := &models.Job{}
	if err := DoRequest(url, "GET", nil, make(map[string]interface{}), job); err != nil {
		fmt.Printf("delete training err[%+v]\n", err)
		return err
	}
	return nil
}
// GetTraining fetches the detail of job id from the DLTS backend and maps it
// onto a models.Training value.
func GetTraining(userName, id string) (*models.Training, error) {
	url := fmt.Sprintf("%s/GetJobDetailV2?userName=%s&jobId=%s", configs.Config.DltsUrl, userName, id)
	params := make(map[string]interface{})
	job := &models.Job{}
	err := DoRequest(url, "GET", nil, params, job)
	if err != nil {
		// Fixed log message: this fetches a training, it does not create one.
		fmt.Printf("get training err[%+v]\n", err)
		return nil, err
	}
	// Map the backend job onto the API model. Each field is assigned exactly
	// once (the original assigned Status twice and Params nil-then-value).
	training := &models.Training{}
	training.Id = job.JobId
	training.Name = job.JobName
	training.Engine = UnConvertImage(job.JobParams.Image)
	training.DeviceNum = job.JobParams.Resourcegpu
	training.DeviceType = job.JobParams.GpuType
	training.Status = job.JobStatus
	training.CreateTime = job.JobTime
	training.JobTrainingType = job.JobParams.Jobtrainingtype
	training.CodePath = job.JobParams.CodePath
	training.StartupFile = job.JobParams.StartupFile
	training.OutputPath = job.JobParams.OutputPath
	training.DatasetPath = job.JobParams.DatasetPath
	training.Desc = job.JobParams.Desc
	training.Params = job.JobParams.ScriptParams
	return training, nil
}
// GetTrainingLog fetches one page (pageNum) of a job's log from the DLTS
// backend.
func GetTrainingLog(userName, id string, pageNum int) (*models.JobLog, error) {
	url := fmt.Sprintf("%s/GetJobLog?userName=%s&jobId=%s&page=%d", configs.Config.DltsUrl, userName, id, pageNum)
	jobLog := &models.JobLog{}
	// Anonymous struct matching the DLTS wire format for job logs.
	jobLogFromDlts := &struct {
		Cursor  string `json:"cursor,omitempty"`
		Log     string `json:"log,omitempty"`
		MaxPage int    `json:"max_page"`
	}{}
	err := DoRequest(url, "GET", nil, nil, jobLogFromDlts)
	if err != nil {
		// Fixed log message: this fetches a log, it does not create a training.
		fmt.Printf("get training log err[%+v]\n", err)
		return nil, err
	}
	jobLog.Cursor = jobLogFromDlts.Cursor
	jobLog.Log = jobLogFromDlts.Log
	jobLog.MaxPage = jobLogFromDlts.MaxPage
	return jobLog, nil
}
|
package main
import (
"encoding/json"
"errors"
"flag"
"io/ioutil"
"log"
"net/http"
"os"
"sort"
"strings"
"sync"
"time"
)
// apiKey authenticates requests to the prediction API; read once at startup.
var apiKey = os.Getenv("PREDICT_API_KEY")

// myLog writes timestamped, file:line-annotated messages to stderr.
var myLog = log.New(os.Stderr, "app: ", log.LstdFlags | log.Lshortfile)

// tagMap is the in-memory search index (tag -> ranked image URLs), loaded in main.
var tagMap TagMap
// PredictResp mirrors the JSON response of the prediction endpoint: a status
// plus, per input image, the list of predicted concepts with confidences.
type PredictResp struct {
	Status struct{
		Code int `json:"code"`
		Description string `json:"description"`
	} `json:"status"`
	Outputs []struct {
		// Input echoes back which image URL the predictions belong to.
		Input struct {
			Data struct {
				Image struct {
					Url string `json:"url"`
				} `json:"image"`
			} `json:"data"`
		} `json:"input"`
		// Data holds the predicted tags ("concepts") and their confidence values.
		Data struct {
			Concepts []struct {
				Id string `json:"id"`
				Name string `json:"name"`
				Value float64 `json:"value"`
			} `json:"concepts"`
		} `json:"data"`
	} `json:"outputs"`
}
// Pair associates an image URL (Key) with a prediction confidence (Value).
type Pair struct {
	Key string
	Value float64
}

// PairList is a sortable collection of Pairs, ordered by ascending Value;
// wrap with sort.Reverse for highest-confidence-first.
type PairList []Pair

// TagMap indexes lowercase tag names to the ranked image URLs for that tag.
type TagMap map[string]PairList

// Len, Less and Swap implement sort.Interface for PairList.
func (p PairList) Len() int { return len(p) }
func (p PairList) Less(i, j int) bool { return p[i].Value < p[j].Value }
func (p PairList) Swap(i, j int){ p[i], p[j] = p[j], p[i] }
// readMapFromJson loads a previously built TagMap from the JSON file at
// fileName. The process exits if the file is missing or unreadable, since
// the map is required for serving searches.
func readMapFromJson(fileName string) TagMap {
	jsonFile, err := os.Open(fileName)
	if err != nil {
		log.Println(errors.New("build the tagMap first with the command: -build=true [path to the image_file]"))
		log.Fatal(err)
	}
	defer jsonFile.Close()
	byteValue, err := ioutil.ReadAll(jsonFile)
	if err != nil {
		// Fixed: previously ignored — a failed read silently yielded an empty map.
		log.Fatal(err)
	}
	var tagMapFromJson TagMap
	if err := json.Unmarshal(byteValue, &tagMapFromJson); err != nil {
		// Fixed: previously ignored — corrupt JSON silently yielded an empty map.
		log.Fatal(err)
	}
	return tagMapFromJson
}
// buildMap builds the tag->images index from the newline-separated list of
// image URLs in imageFilePath, then saves it to ./tagMap.json.
// Prediction requests are throttled to ~7/sec and run concurrently; results
// are collected over predictChan.
func buildMap(imageFilePath string) { // build the tagMap with imageFile, save as json file
	imagesData, err := ioutil.ReadFile(imageFilePath)
	if err != nil {
		panic(err)
	}
	// NOTE(review): a trailing newline in the file yields an empty URL entry
	// here — confirm the input file has no blank lines.
	images := strings.Split(string(imagesData), "\n")
	m := make(map[string]map[string]float64)
	predictChan := make(chan *PredictResp, 100)
	rate := time.Second / 7
	throttle := time.Tick(rate)
	var wg sync.WaitGroup
	for _,s := range images {
		wg.Add(1)
		<-throttle
		go predict(apiKey, s, predictChan, &wg)
	}
	// Close the channel once every worker has reported so the collection
	// loop below terminates.
	go func() {
		wg.Wait()
		close(predictChan)
	}()
	for prediction := range predictChan {
		// NOTE(review): assumes every response carries at least one output —
		// an empty Outputs slice would panic here.
		pred := prediction.Outputs[0].Data.Concepts
		url := prediction.Outputs[0].Input.Data.Image.Url
		for _,t := range pred {
			if _, ok := m[t.Name]; ok {
				m[t.Name][url] = t.Value
			} else {
				m[t.Name] = make(map[string]float64)
				m[t.Name][url] = t.Value
			}
		}
	}
	// Convert tag->(url->score) into tag->PairList sorted highest score first.
	newMap := make(map[string][]Pair)
	for tag, url := range m {
		pl := make(PairList, len(m[tag]))
		i := 0
		for k, v := range url {
			pl[i] = Pair{k, v}
			i++
		}
		sort.Sort(sort.Reverse(pl))
		newMap[strings.ToLower(tag)] = pl
	}
	b, err := json.Marshal(newMap)
	if err != nil {
		log.Print(err)
	}
	jsonFile, err := os.Create("./tagMap.json")
	if err != nil {
		panic(err)
	}
	defer jsonFile.Close()
	jsonFile.Write(b)
}
// searchKeyword returns up to the top 10 image URLs recorded for keyword in
// tMap. An unknown keyword yields a nil slice.
func searchKeyword(keyword string, tMap TagMap) []string {
	val, ok := tMap[keyword]
	if !ok {
		return nil
	}
	bound := len(val)
	if bound > 10 {
		bound = 10
	}
	var urls []string
	for _, pair := range val[:bound] {
		urls = append(urls, pair.Key)
	}
	return urls
}
// predict posts photo_url to the Clarifai "general" model and sends the
// decoded response on c. It is meant to run as a goroutine; wg is marked
// done when the function returns.
//
// NOTE(review): every failure path calls myLog.Fatal, terminating the whole
// process from inside a worker goroutine — consider propagating errors.
func predict(api_key string, photo_url string, c chan *PredictResp, wg *sync.WaitGroup) {
	defer wg.Done()
	// Fixed: a timeout keeps one stuck request from hanging the build forever
	// (the zero-value client never times out).
	client := &http.Client{Timeout: 30 * time.Second}
	api_url := "https://api.clarifai.com/v2/models/aaa03c23b3724a16a56b629203edc62c/outputs" // Use the general model
	data_body := `{"inputs": [{"data": {"image": {"url":"` + photo_url + `"}}}]}`
	req, err := http.NewRequest("POST", api_url, strings.NewReader(data_body))
	if err != nil {
		myLog.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Key " + api_key)
	resp, err := client.Do(req)
	if err != nil {
		myLog.Fatal(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		myLog.Fatal(errors.New(resp.Status))
	}
	var rb *PredictResp
	if err := json.NewDecoder(resp.Body).Decode(&rb); err != nil {
		myLog.Fatal(err)
	}
	c <- rb
}
// searchHandler serves GET /search?tagName=... by returning a JSON array of
// the top image URLs for that tag.
func searchHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		myLog.Println(errors.New("method is not supported in /search endpoint: " + r.Method))
		// Fixed: previously only logged and fell through, still serving the
		// request; reject non-GET methods outright.
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	tagName := r.FormValue("tagName")
	urlList := searchKeyword(tagName, tagMap)
	// Fixed: the encoding error was silently discarded.
	if err := json.NewEncoder(w).Encode(&urlList); err != nil {
		myLog.Println(err)
	}
}
// main optionally (re)builds the tag map from a list of image URLs (-build),
// then loads tagMap.json and serves the static site plus the /search
// endpoint on port 5000.
func main() {
	buildPtr := flag.Bool("build", false, "build the image tag-map with command -build [path to the image_url .txt file")
	flag.Parse()
	imageFilePathInput := flag.Args()
	if *buildPtr {
		log.Println("Start building the tag map")
		// Default input file, overridden by the first positional argument.
		// NOTE(review): "imagest.txt" looks like a typo for "images.txt" —
		// confirm the intended default filename.
		imageFilePath := "imagest.txt"
		if len(imageFilePathInput) > 0 {
			imageFilePath = imageFilePathInput[0]
		}
		buildMap(imageFilePath)
		log.Println("Finished building the tag map")
	}
	tagMap = readMapFromJson("tagMap.json")
	log.Println("Listening to http://localhost:5000")
	http.Handle("/", http.FileServer(http.Dir("./public")))
	http.HandleFunc("/search", searchHandler)
	err := http.ListenAndServe(":5000", nil)
	if err != nil {
		myLog.Fatal(err)
	}
}
|
package awstest_test
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/stretchr/testify/assert"
"github.com/socialpoint-labs/bsk/awsx/awstest"
"github.com/socialpoint-labs/bsk/uuid"
)
// resourceTypes lists every AWS resource kind exercised by these tests.
var resourceTypes = []string{
	s3.ServiceName,
	sqs.ServiceName,
	kms.ServiceName,
	dynamodb.ServiceName,
	awstest.SQSFifoServiceName,
	// kinesis.ServiceName,
}
// TestCreateResource checks that each supported resource type can be created
// and is subsequently found to exist.
func TestCreateResource(t *testing.T) {
	for _, res := range resourceTypes {
		awstest.AssertResourceExists(t, awstest.CreateResource(res), res)
	}
}
// TestKMSAliasCreatedForResource verifies that creating a KMS resource also
// creates an alias targeting the new key.
func TestKMSAliasCreatedForResource(t *testing.T) {
	a := assert.New(t)
	keyID := awstest.CreateResource(kms.ServiceName)
	svc := kms.New(awstest.NewSession())
	res, err := svc.ListAliases(&kms.ListAliasesInput{Limit: aws.Int64(100)})
	a.NoError(err)
	exists := false
	// Fixed: the loop variable was named "a", shadowing the assert.Assertions
	// value declared above.
	for _, alias := range res.Aliases {
		if alias.TargetKeyId != nil && *alias.TargetKeyId == *keyID {
			exists = true
			break
		}
	}
	a.True(exists)
}
// TestAssertResourceExists checks the negative path: asserting on a random
// (nonexistent) resource name must report false. A throwaway *testing.T is
// used so the expected assertion failure does not fail this test.
func TestAssertResourceExists(t *testing.T) {
	mt := new(testing.T)
	for _, res := range resourceTypes {
		exists := awstest.AssertResourceExists(mt, aws.String(uuid.New()), res)
		assert.False(t, exists, res)
	}
}
|
package repository
import (
"fmt"
"time"
"github.com/meso-org/meso/config"
"github.com/beevik/guid"
)
//TODO: make this live in global somewhere

// JSONTime wraps time.Time so it serializes using the project-wide date
// format (config.Dateformat) instead of the default RFC 3339.
type JSONTime time.Time

// MarshalJSON renders t as a double-quoted string in config.Dateformat.
func (t JSONTime) MarshalJSON() ([]byte, error) {
	stamp := fmt.Sprintf("\"%s\"", time.Time(t).Format(config.Dateformat))
	return []byte(stamp), nil
}
// PositionID uniquely identifies a Position.
type PositionID string

// Position is an open posting at a facility, bounded by a start and end time.
type Position struct {
	PositionID PositionID
	FacilityID FacilityID
	// TODO: change to an enum Ie. Respiratory Therapist,
	Title string
	Description string
	StartDateTime JSONTime // serialized via JSONTime (config.Dateformat)
	EndDateTime JSONTime   // serialized via JSONTime (config.Dateformat)
}
// PositionRepository abstracts persistence for Positions.
type PositionRepository interface {
	Store(position *Position) error
	// Find looks a position up by a backend-specific key.
	Find(x interface{}) (*Position, error)
	FindAll() ([]*Position, error)
	FindByFacilityID(facilityID FacilityID) ([]*Position, error)
}
// GeneratePositionID returns a new random, GUID-backed PositionID.
func GeneratePositionID() PositionID {
	return PositionID(guid.NewString())
}
|
package p_01001_01100
// 1022. Sum of Root To Leaf Binary Numbers, https://leetcode.com/problems/sum-of-root-to-leaf-binary-numbers/
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// TreeNode is a binary tree node; for this problem Val is a binary digit.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// sumRootToLeaf interprets every root-to-leaf path as a binary number (most
// significant bit at the root) and returns the sum of those numbers.
func sumRootToLeaf(root *TreeNode) int {
	total := 0
	dfs(root, &total, 0)
	return total
}

// dfs walks the tree carrying the value accumulated so far in cur and adds
// the completed number into *sum at each leaf.
func dfs(root *TreeNode, sum *int, cur int) {
	if root == nil {
		return
	}
	cur = cur*2 + root.Val
	isLeaf := root.Left == nil && root.Right == nil
	if isLeaf {
		*sum += cur
	} else {
		dfs(root.Left, sum, cur)
		dfs(root.Right, sum, cur)
	}
}
|
package cmd
// game is a mock game configuration used by the cmd package.
type game struct {
	Name string   // display name of the game
	Target string // target the game maps to
}
|
package object
import (
"ganymede/vector"
"math"
)
// collisionType discriminates which collision shape an object uses.
type collisionType = int

const (
	// collisionCircle marks objects with a centre and radius.
	collisionCircle = iota
	// collisionBoundingBox marks axis-aligned rectangles.
	collisionBoundingBox
)

// collider is the minimal interface for anything that can collide.
type collider interface {
	GetCollisionType() collisionType
	GetPosition() vector.Vector
}

// boundingBoxCollider is a collider with rectangular dimensions; its
// position is its top-left corner.
type boundingBoxCollider interface {
	GetDimensions() vector.Vector
	collider
}

// circleCollider is a collider with a radius; its position is its centre.
type circleCollider interface {
	GetRadius() float64
	collider
}
// DetectCollision returns true if the two objects have collided, along with
// a separation vector (see the pair-specific handlers for its meaning).
// Dispatch is driven by o1's collision type.
func DetectCollision(o1 collider, o2 collider) (bool, vector.Vector) {
	switch o1.GetCollisionType() {
	case collisionCircle:
		return circleAnd(o1.(circleCollider), o2)
	case collisionBoundingBox:
		return boundingBoxAnd(o1.(boundingBoxCollider), o2)
	default:
		panic("Unknown collision type")
	}
}
// circleAnd resolves a collision where the first object is a circle,
// dispatching on the second object's collision type.
func circleAnd(c1 circleCollider, o2 collider) (bool, vector.Vector) {
	switch o2.GetCollisionType() {
	case collisionCircle:
		return circleAndCircle(c1, o2.(circleCollider))
	case collisionBoundingBox:
		return circleAndBB(c1, o2.(boundingBoxCollider))
	default:
		panic("Unknown collision type")
	}
}
// boundingBoxAnd resolves a collision where the first object is a bounding
// box, dispatching on the second object's collision type.
//
// NOTE(review): for the circle case the arguments are swapped into
// circleAndBB, so the returned separation vector is expressed from the
// circle's perspective — opposite to the other branches; confirm callers
// expect this.
func boundingBoxAnd(b1 boundingBoxCollider, o2 collider) (bool, vector.Vector) {
	o2Type := o2.GetCollisionType()
	switch o2Type {
	case collisionCircle:
		c2 := o2.(circleCollider)
		return circleAndBB(c2, b1)
	case collisionBoundingBox:
		b2 := o2.(boundingBoxCollider)
		return bBAndBB(b1, b2)
	default:
		panic("Unknown collision type")
	}
}
// bBAndBB reports whether two axis-aligned bounding boxes overlap. The
// returned vector is always the zero vector (no separation is computed for
// box/box pairs).
func bBAndBB(b1 boundingBoxCollider, b2 boundingBoxCollider) (bool, vector.Vector) {
	topLeft1 := b1.GetPosition()
	topLeft2 := b2.GetPosition()
	bottomRight1 := boundingBoxBottomRight(b1)
	bottomRight2 := boundingBoxBottomRight(b2)
	// if top left of one is lower than bottom right of other, or vice versa, no overlap
	// if top left of one is to right of bottom right of other, or vice versa, no overlap
	oneAboveTwo := topLeft1.Subtract(bottomRight2)
	twoAboveOne := topLeft2.Subtract(bottomRight1)
	// Any positive component means the boxes are separated on that axis.
	for _, val := range append(oneAboveTwo.GetVals(), twoAboveOne.GetVals()...) {
		if val > 0 {
			return false, vector.Vector{}
		}
	}
	return true, vector.Vector{}
}
// boundingBoxBottomRight computes the bottom-right corner of b: its position
// (top-left corner) offset by its dimensions.
func boundingBoxBottomRight(b boundingBoxCollider) vector.Vector {
	topLeft := b.GetPosition()
	return topLeft.Add(b.GetDimensions())
}
// circleAndCircle reports whether two circles overlap (centres no further
// apart than the sum of radii) and returns the vector from c2's centre to
// c1's centre.
func circleAndCircle(c1 circleCollider, c2 circleCollider) (bool, vector.Vector) {
	maxDistance := c1.GetRadius() + c2.GetRadius()
	collided := !distanceBetweenPointsIsGreaterThan(c1.GetPosition(), c2.GetPosition(), maxDistance)
	return collided, c1.GetPosition().Subtract(c2.GetPosition())
}
// distanceBetweenPointsIsGreaterThan reports whether p1 and p2 are further
// apart than distance. It compares squared distances, avoiding a square root.
func distanceBetweenPointsIsGreaterThan(p1, p2 vector.Vector, distance float64) bool {
	diff := p1.Subtract(p2)
	return diff.DotProduct(diff) > math.Pow(distance, 2)
}
// circleAndBB reports whether circle c and box b overlap. The returned
// vector runs from the box point nearest the circle's centre toward that
// centre (negated when the centre is inside the box).
func circleAndBB(c circleCollider, b boundingBoxCollider) (bool, vector.Vector) {
	// is circle centre inside box?
	pointNearestToCentre := nearestBoundingBoxEdge(c.GetPosition(), b)
	diffVector := c.GetPosition().Subtract(pointNearestToCentre)
	if isPointInsideBox(c.GetPosition(), b) {
		return true, diffVector.Scale(-1)
	}
	// Otherwise collide iff the nearest edge point is within the radius.
	collided := !distanceBetweenPointsIsGreaterThan(c.GetPosition(), pointNearestToCentre, c.GetRadius())
	return collided, diffVector
}
// isPointInsideBox reports whether point lies inside (or on the edge of)
// bounding box b.
func isPointInsideBox(point vector.Vector, b boundingBoxCollider) bool {
	// The point is inside iff, on every axis, it is no less than the
	// top-left corner and no greater than the bottom-right corner.
	vToTopLeft := point.Subtract(b.GetPosition())
	vToBottomRight := boundingBoxBottomRight(b).Subtract(point)
	for _, component := range vToTopLeft.GetVals() {
		if component < 0 {
			return false
		}
	}
	for _, component := range vToBottomRight.GetVals() {
		if component < 0 {
			return false
		}
	}
	return true
}
// nearestBoundingBoxEdge returns the point on (or in) box b closest to
// point: each coordinate is clamped to the box's extent on that axis.
func nearestBoundingBoxEdge(point vector.Vector, b boundingBoxCollider) vector.Vector {
	topLeft := b.GetPosition()
	bottomRight := boundingBoxBottomRight(b)
	p := point.GetVals()
	vToBottomRight := bottomRight.Subtract(point).GetVals() // if negative, point was bigger
	vToTopLeft := point.Subtract(topLeft).GetVals() // if negative, point was smaller
	nearestPointCoords := []float64{}
	// if outside of box edge, fix coord to that edge
	for i := range vToBottomRight {
		var newCoord float64
		if vToBottomRight[i] < 0 {
			newCoord = bottomRight.GetVals()[i]
		} else if vToTopLeft[i] < 0 {
			newCoord = topLeft.GetVals()[i]
		} else {
			newCoord = p[i]
		}
		nearestPointCoords = append(nearestPointCoords, newCoord)
	}
	return vector.NewVector(nearestPointCoords...)
}
|
package dao
import (
"mall/app/api/web/groups/conf"
"mall/app/api/web/groups/model"
"mall/lib/database/nosql/mongo"
"mall/lib/database/orm"
"mall/lib/database/redis"
"github.com/jinzhu/gorm"
)
// Dao bundles the three storage backends used by the groups service: a
// relational DB (gorm), MongoDB and Redis.
type Dao struct {
	orm *gorm.DB
	mgo *mongo.Mongo
	redigo *redis.Redis
}
// New builds a Dao from config, connecting the ORM, Mongo and Redis clients,
// and ensures the groups-visitor table exists.
func New(c *conf.Config) *Dao {
	db := orm.New(c.Orm)
	// Create the visitor table on first run.
	// NOTE(review): the CreateTable result is not checked — a failure here
	// is silent; confirm whether that is acceptable.
	if !db.HasTable(&model.EweiShopGroupsVisitor{}) {
		db.CreateTable(&model.EweiShopGroupsVisitor{})
	}
	return &Dao{
		orm: db,
		mgo: mongo.New(c.Mongo),
		redigo: redis.New(c.Redis),
	}
}
|
package logger
import (
"fmt"
"runtime"
"strings"
"sync"
"time"
"github.com/fatih/color"
"github.com/mattn/go-colorable"
)
// LogLevel is the logger verbosity; higher values log more.
type LogLevel uint

const (
	// ErrorLevel = 1
	ErrorLevel LogLevel = 1
	// WarnLevel = 2
	WarnLevel LogLevel = 2
	// InfoLevel = 3
	InfoLevel LogLevel = 3
	// DebugLevel = 4
	DebugLevel LogLevel = 4
)

// Pre-rendered level labels, colorized in init when the terminal supports it.
var (
	errorLabel string
	warnLabel string
	infoLabel string
	debugLabel string
)

// out is the shared color-capable writer; m serializes all log output.
var out *color.Color
var m sync.Mutex
// init renders the level labels once, with ANSI colors when the terminal
// supports them, and prepares the shared writer.
func init() {
	if !color.NoColor {
		errorLabel = "[" + color.New(color.FgRed).Sprint("ERROR") + "]"
		warnLabel = "[" + color.New(color.FgYellow).Sprint("WARN") + "]"
		infoLabel = "[" + color.New(color.FgCyan).Sprint("INFO") + "]"
		debugLabel = "[" + color.New(color.FgMagenta).Sprint("DEBUG") + "]"
	} else {
		// Plain labels when color output is disabled.
		errorLabel = "[ERROR]"
		warnLabel = "[WARN]"
		infoLabel = "[INFO]"
		debugLabel = "[DEBUG]"
	}
	out = color.New()
}
// logLevel is the current verbosity threshold; see SetLevel.
// NOTE(review): read by the log functions without holding m — a concurrent
// SetLevel would be a data race; confirm levels are set before logging starts.
var logLevel = InfoLevel

// Errorf logs formatted message of arguments. Errors go to stderr.
func Errorf(format string, msgs ...interface{}) {
	if logLevel < ErrorLevel {
		return
	}
	// m serializes output across all log functions.
	m.Lock()
	defer m.Unlock()
	msgs = append([]interface{}{getDebugPrefix()}, msgs...)
	out.Fprintf(colorable.NewColorableStderr(), errorLabel+"%s "+format+"\n", msgs...)
}

// Error logs message of arguments. Errors go to stderr.
func Error(msgs ...interface{}) {
	if logLevel < ErrorLevel {
		return
	}
	m.Lock()
	defer m.Unlock()
	cmsg := getDebugPrefix()
	msgs = append([]interface{}{errorLabel + cmsg}, msgs...)
	out.Fprintln(colorable.NewColorableStderr(), msgs...)
}

// Warnf logs formatted message of arguments.
func Warnf(format string, msgs ...interface{}) {
	if logLevel < WarnLevel {
		return
	}
	m.Lock()
	defer m.Unlock()
	msgs = append([]interface{}{getDebugPrefix()}, msgs...)
	out.Printf(warnLabel+"%s "+format+"\n", msgs...)
}

// Warn logs message of arguments.
func Warn(msgs ...interface{}) {
	if logLevel < WarnLevel {
		return
	}
	m.Lock()
	defer m.Unlock()
	cmsg := getDebugPrefix()
	msgs = append([]interface{}{warnLabel + cmsg}, msgs...)
	out.Println(msgs...)
}

// Infof logs formatted message of arguments.
func Infof(format string, msgs ...interface{}) {
	if logLevel < InfoLevel {
		return
	}
	m.Lock()
	defer m.Unlock()
	msgs = append([]interface{}{getDebugPrefix()}, msgs...)
	out.Printf(infoLabel+"%s "+format+"\n", msgs...)
}

// Info logs message of arguments.
func Info(msgs ...interface{}) {
	if logLevel < InfoLevel {
		return
	}
	m.Lock()
	defer m.Unlock()
	cmsg := getDebugPrefix()
	msgs = append([]interface{}{infoLabel + cmsg}, msgs...)
	out.Println(msgs...)
}

// Debugf logs formatted message of arguments.
func Debugf(format string, msgs ...interface{}) {
	if logLevel < DebugLevel {
		return
	}
	m.Lock()
	defer m.Unlock()
	msgs = append([]interface{}{getDebugPrefix()}, msgs...)
	out.Printf(debugLabel+"%s "+format+"\n", msgs...)
}

// Debug logs message of arguments.
func Debug(msgs ...interface{}) {
	if logLevel < DebugLevel {
		return
	}
	m.Lock()
	defer m.Unlock()
	cmsg := getDebugPrefix()
	msgs = append([]interface{}{debugLabel + cmsg}, msgs...)
	out.Println(msgs...)
}
// getDebugPrefix returns a "[time][file:line]" prefix for the log message.
// The prefix is only produced at DebugLevel; otherwise it is empty.
func getDebugPrefix() string {
	const voltDirName = "github.com/vim-volt/volt/"
	if logLevel < DebugLevel {
		return ""
	}
	// Caller(2) skips getDebugPrefix and the exported log function that
	// called it, landing on the user's call site.
	_, fn, line, _ := runtime.Caller(2)
	// Trim the path down to be relative to the repository root.
	idx := strings.Index(fn, voltDirName)
	if idx >= 0 {
		fn = fn[idx+len(voltDirName):]
	}
	return fmt.Sprintf("[%s][%s:%d]", time.Now().UTC().Format("15:04:05.000"), fn, line)
}
// SetLevel sets current log level to level.
// NOTE(review): logLevel is read by the log functions without holding m, so
// calling SetLevel concurrently with logging is a data race.
func SetLevel(level LogLevel) {
	logLevel = level
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package memo
import (
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/props"
)
// TestJoinCardinality exercises joinPropsHelper.cardinality for each join
// type under three filter classes ("true", "false", "other": unknown
// selectivity), checking the derived [min, max] row-count bounds from the
// input cardinalities.
func TestJoinCardinality(t *testing.T) {
	// c is shorthand for a [min, max] cardinality.
	c := func(min, max uint32) props.Cardinality {
		return props.Cardinality{Min: min, Max: max}
	}
	type testCase struct {
		left props.Cardinality
		right props.Cardinality
		expected props.Cardinality
	}
	testCaseGroups := []struct {
		joinType opt.Operator
		filter string // "true", "false", or "other"
		testCases []testCase
	}{
		{ // Inner join, true filter.
			joinType: opt.InnerJoinOp,
			filter: "true",
			testCases: []testCase{
				{left: c(0, 10), right: c(0, 10), expected: c(0, 100)},
				{left: c(5, 10), right: c(0, 10), expected: c(0, 100)},
				{left: c(0, 10), right: c(5, 10), expected: c(0, 100)},
				{left: c(5, 10), right: c(5, 10), expected: c(25, 100)},
			},
		},
		{ // Inner join, false filter.
			joinType: opt.InnerJoinOp,
			filter: "false",
			testCases: []testCase{
				{left: c(0, 10), right: c(0, 10), expected: c(0, 0)},
				{left: c(5, 10), right: c(0, 10), expected: c(0, 0)},
				{left: c(0, 10), right: c(5, 10), expected: c(0, 0)},
				{left: c(5, 10), right: c(5, 10), expected: c(0, 0)},
			},
		},
		{ // Inner join, other filter.
			joinType: opt.InnerJoinOp,
			filter: "other",
			testCases: []testCase{
				{left: c(0, 10), right: c(0, 10), expected: c(0, 100)},
				{left: c(5, 10), right: c(0, 10), expected: c(0, 100)},
				{left: c(0, 10), right: c(5, 10), expected: c(0, 100)},
				{left: c(5, 10), right: c(5, 10), expected: c(0, 100)},
			},
		},
		{ // Left join, true filter.
			joinType: opt.LeftJoinOp,
			filter: "true",
			testCases: []testCase{
				{left: c(0, 10), right: c(0, 10), expected: c(0, 100)},
				{left: c(5, 10), right: c(0, 10), expected: c(5, 100)},
				{left: c(0, 10), right: c(5, 10), expected: c(0, 100)},
				{left: c(5, 10), right: c(5, 10), expected: c(25, 100)},
			},
		},
		{ // Left join, false filter.
			joinType: opt.LeftJoinOp,
			filter: "false",
			testCases: []testCase{
				{left: c(0, 10), right: c(0, 10), expected: c(0, 10)},
				{left: c(5, 10), right: c(0, 10), expected: c(5, 10)},
				{left: c(0, 10), right: c(5, 10), expected: c(0, 10)},
				{left: c(5, 10), right: c(5, 10), expected: c(5, 10)},
			},
		},
		{ // Left join, other filter.
			joinType: opt.LeftJoinOp,
			filter: "other",
			testCases: []testCase{
				{left: c(0, 10), right: c(0, 10), expected: c(0, 100)},
				{left: c(5, 10), right: c(0, 10), expected: c(5, 100)},
				{left: c(0, 10), right: c(5, 10), expected: c(0, 100)},
				{left: c(5, 10), right: c(5, 10), expected: c(5, 100)},
			},
		},
		{ // Right join, true filter.
			joinType: opt.RightJoinOp,
			filter: "true",
			testCases: []testCase{
				{left: c(0, 10), right: c(0, 10), expected: c(0, 100)},
				{left: c(5, 10), right: c(0, 10), expected: c(0, 100)},
				{left: c(0, 10), right: c(5, 10), expected: c(5, 100)},
				{left: c(5, 10), right: c(5, 10), expected: c(25, 100)},
			},
		},
		{ // Right join, false filter.
			joinType: opt.RightJoinOp,
			filter: "false",
			testCases: []testCase{
				{left: c(0, 10), right: c(0, 10), expected: c(0, 10)},
				{left: c(5, 10), right: c(0, 10), expected: c(0, 10)},
				{left: c(0, 10), right: c(5, 10), expected: c(5, 10)},
				{left: c(5, 10), right: c(5, 10), expected: c(5, 10)},
			},
		},
		{ // Right join, other filter.
			joinType: opt.RightJoinOp,
			filter: "other",
			testCases: []testCase{
				{left: c(0, 10), right: c(0, 10), expected: c(0, 100)},
				{left: c(5, 10), right: c(0, 10), expected: c(0, 100)},
				{left: c(0, 10), right: c(5, 10), expected: c(5, 100)},
				{left: c(5, 10), right: c(5, 10), expected: c(5, 100)},
			},
		},
		{ // Full join, true filter.
			joinType: opt.FullJoinOp,
			filter: "true",
			testCases: []testCase{
				{left: c(0, 1), right: c(0, 1), expected: c(0, 2)},
				{left: c(1, 1), right: c(1, 1), expected: c(1, 2)},
				{left: c(0, 10), right: c(0, 10), expected: c(0, 100)},
				{left: c(5, 10), right: c(0, 10), expected: c(5, 100)},
				{left: c(0, 10), right: c(5, 10), expected: c(5, 100)},
				{left: c(5, 10), right: c(5, 10), expected: c(25, 100)},
				{left: c(7, 10), right: c(8, 10), expected: c(56, 100)},
				{left: c(8, 10), right: c(7, 10), expected: c(56, 100)},
			},
		},
		{ // Full join, false filter.
			joinType: opt.FullJoinOp,
			filter: "false",
			testCases: []testCase{
				{left: c(0, 1), right: c(0, 1), expected: c(0, 2)},
				{left: c(1, 1), right: c(1, 1), expected: c(2, 2)},
				{left: c(2, 5), right: c(3, 8), expected: c(5, 13)},
				{left: c(0, 10), right: c(0, 10), expected: c(0, 20)},
				{left: c(5, 10), right: c(0, 10), expected: c(5, 20)},
				{left: c(0, 10), right: c(5, 10), expected: c(5, 20)},
				{left: c(5, 10), right: c(5, 10), expected: c(10, 20)},
				{left: c(7, 10), right: c(8, 10), expected: c(15, 20)},
				{left: c(8, 10), right: c(7, 10), expected: c(15, 20)},
			},
		},
		{ // Full join, other filter.
			joinType: opt.FullJoinOp,
			filter: "other",
			testCases: []testCase{
				{left: c(0, 1), right: c(0, 1), expected: c(0, 2)},
				{left: c(1, 1), right: c(1, 1), expected: c(1, 2)},
				{left: c(2, 5), right: c(3, 8), expected: c(3, 40)},
				{left: c(0, 10), right: c(0, 10), expected: c(0, 100)},
				{left: c(5, 10), right: c(0, 10), expected: c(5, 100)},
				{left: c(0, 10), right: c(5, 10), expected: c(5, 100)},
				{left: c(5, 10), right: c(5, 10), expected: c(5, 100)},
				{left: c(7, 10), right: c(8, 10), expected: c(8, 100)},
				{left: c(8, 10), right: c(7, 10), expected: c(8, 100)},
			},
		},
	}
	// Run each case through a joinPropsHelper configured to match the group.
	for _, group := range testCaseGroups {
		t.Run(fmt.Sprintf("%s/%s", group.joinType, group.filter), func(t *testing.T) {
			for i, tc := range group.testCases {
				t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
					h := &joinPropsHelper{}
					h.joinType = group.joinType
					h.leftProps = &props.Relational{Cardinality: tc.left}
					h.rightProps = &props.Relational{Cardinality: tc.right}
					h.filterIsTrue = (group.filter == "true")
					h.filterIsFalse = (group.filter == "false")
					res := h.cardinality()
					if res != tc.expected {
						t.Errorf(
							"left=%s right=%s: expected %s, got %s\n", tc.left, tc.right, tc.expected, res,
						)
					}
				})
			}
		})
	}
}
|
package instance
import (
"fmt"
"os"
)
// Backend identifies the runtime/toolchain an instance is built around.
type Backend string

// Supported backend values.
const (
	Node Backend = "node"
	Npm Backend = "npm"
	Python Backend = "python"
	Web Backend = "web"
	Flask Backend = "flask"
)

// IsBackendValid reports whether bkend is one of the supported backends,
// printing a diagnostic to stderr when it is not.
func IsBackendValid(bkend Backend) bool {
	switch bkend {
	case Node, Npm, Python, Web, Flask:
		return true
	default:
		_, _ = fmt.Fprintf(os.Stderr, "invalid enum '%s'\n", bkend)
		return false
	}
}
|
package main
import (
"encoding/json"
"fmt"
"net/http"
"os"
)
// mobil models a car record with its ID, brand (Merk) and year (Tahun).
type mobil struct {
	ID int `json:"id"`
	Merk string `json:"merk"`
	Tahun int `json:"tahun"`
}
// database is a temporary in-memory store, keyed by mobil.ID.
var (
	database = make(map[int]mobil)
)
// JSON Respon
func setResp(res http.ResponseWriter, msg []byte, httpCode int) {
res.Header().Set("Content-type", "application/json")
res.WriteHeader(httpCode)
res.Write(msg)
}
// main wires up an in-memory car (mobil) database and serves a tiny JSON
// HTTP API on port 65.
//
// NOTE(review): database is a plain map mutated by handlers — concurrent
// requests race on it; guard with a mutex if this serves real traffic.
func main() {
	// Seed the in-memory database.
	database[1] = mobil{ID: 1, Merk: "Honda Jazz", Tahun: 2020}
	database[2] = mobil{ID: 2, Merk: "Toyota Avanza", Tahun: 2019}
	database[3] = mobil{ID: 3, Merk: "Suzuki Baleno", Tahun: 2007}

	// Health-check route. Message bodies fixed to be valid JSON objects
	// (the original `"{message}" : "..."` was not parseable JSON).
	http.HandleFunc("/", func(res http.ResponseWriter, req *http.Request) {
		msg := []byte(`{"message": "Server is up!"}`)
		setResp(res, msg, http.StatusOK)
	})

	// Return every car in the database.
	http.HandleFunc("/get-mobils", func(res http.ResponseWriter, req *http.Request) {
		if req.Method != "GET" {
			// Fixed: an unsupported verb previously answered 200 OK.
			msg := []byte(`{"message": "invalid http method"}`)
			setResp(res, msg, http.StatusMethodNotAllowed)
			return
		}
		var mobils []mobil
		for _, mobil := range database {
			mobils = append(mobils, mobil)
		}
		JSONmobils, err := json.Marshal(&mobils)
		if err != nil {
			msg := []byte(`{"message": "internal server error"}`)
			setResp(res, msg, http.StatusInternalServerError)
			return
		}
		setResp(res, JSONmobils, http.StatusOK)
	})

	// Create (or overwrite) a car record keyed by its ID.
	http.HandleFunc("/add-mobil", func(res http.ResponseWriter, req *http.Request) {
		if req.Method != "POST" {
			msg := []byte(`{"message": "invalid http method"}`)
			setResp(res, msg, http.StatusMethodNotAllowed)
			return
		}
		var mobilx mobil
		defer req.Body.Close()
		// Fixed: a malformed payload is the client's fault — 400, not 500.
		if err := json.NewDecoder(req.Body).Decode(&mobilx); err != nil {
			msg := []byte(`{"message": "error when parsing data"}`)
			setResp(res, msg, http.StatusBadRequest)
			return
		}
		database[mobilx.ID] = mobilx
		msg := []byte(`{"message": "success create data mobil"}`)
		setResp(res, msg, http.StatusOK)
	})

	// NOTE(review): port 65 is a privileged port — confirm it is intended.
	if err := http.ListenAndServe(":65", nil); err != nil {
		fmt.Print(err)
		os.Exit(1)
	}
}
|
package alipay
import (
	"errors"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"time"

	"github.com/acupple/alipay/enums"
	"github.com/acupple/alipay/models/refund"
)
// Refunds is the Alipay component responsible for refund requests.
type Refunds struct {
	Component
}
// NewRefunds builds a Refunds component bound to the given alipay account.
func NewRefunds(alipay Alipay) Refunds {
	// Fixed: the original returned a Pays value using invalid composite
	// literal syntax (Component.Alipay: ...), which does not compile.
	return Refunds{
		Component: Component{Alipay: alipay},
	}
}
// Refund submits a no-password refund request built from detail to the
// Alipay gateway and reports whether the gateway accepted it ("T").
func (r *Refunds) Refund(detail refund.RefundDetail) (bool, error) {
	refundParams, err := r.buildRefundParams(detail)
	if err != nil {
		return false, err
	}
	// "reqURL" instead of "url": the original variable shadowed the net/url
	// package. The parse error was also previously discarded.
	reqURL, err := url.Parse(GATEWAY + "_input_charset=" + r.Alipay.InputCharset)
	if err != nil {
		return false, err
	}
	q := reqURL.Query()
	for key, value := range refundParams {
		q.Set(key, value)
	}
	reqURL.RawQuery = q.Encode()
	resp, err := http.Get(reqURL.String())
	if err != nil {
		return false, err
	}
	// Fixed: the response body was never closed, leaking the connection.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return false, errors.New("Bad refund request")
	}
	bytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return false, err
	}
	// The gateway answers with a bare "T" on success.
	return string(bytes) == "T", nil
}
// buildRefundParams assembles the request parameter map for a no-password
// refund and signs it with MD5. It validates that the batch number and the
// refund detail entries are present.
func (r *Refunds) buildRefundParams(detail refund.RefundDetail) (map[string]string, error) {
	if detail.BatchNo == "" {
		return nil, errors.New("Refund Batch No. is empty")
	}
	if detail.DetailDatas == nil || len(detail.DetailDatas) <= 0 {
		return nil, errors.New("Refund detail datas are empty")
	}
	refundParams := make(map[string]string)
	for key, value := range r.Alipay.RefundConfig {
		refundParams[key] = value
	}
	refundParams[enums.SERVICE] = enums.REFUND_NO_PWD
	if r.Alipay.Email != "" {
		refundParams[enums.SELLER_EMAIL] = r.Alipay.Email
	}
	if detail.NotifyUrl != "" {
		refundParams[enums.NOTIFY_URL] = detail.NotifyUrl
	}
	refundParams[enums.SELLER_USER_ID] = r.Alipay.MerchantId
	// Fixed: Go formats times with the reference layout 2006-01-02 15:04:05;
	// the original "yyyy-MM-dd HH:mm:ss" layout emitted that literal string.
	refundParams[enums.REFUND_DATE] = time.Now().Format("2006-01-02 15:04:05")
	refundParams[enums.BATCH_NO] = detail.BatchNo
	// Fixed: an int cannot be assigned into a map[string]string (compile error).
	refundParams[enums.BATCH_NUM] = strconv.Itoa(len(detail.DetailDatas))
	refundParams[enums.DETAIL_DATA] = detail.FormatDetailDatas()
	r.BuildMD5SignParams(refundParams)
	return refundParams, nil
}
|
package main
import (
"github.com/yuwe1/pgim/api/logic"
"github.com/yuwe1/pgim/pkg/client/dbpool"
"github.com/yuwe1/pgim/pkg/logger"
"github.com/yuwe1/pgim/pkg/util"
common "github.com/yuwe1/pgim/pkg"
)
// main boots the logic server: it initialises shared config, opens a
// database session, seeds the UID generator and starts the RPC server.
func main() {
	common.Init()
	session, err, p, c := dbpool.GetSession()
	if err != nil {
		// Fixed: the error was previously only logged in a deferred func,
		// after session.DB had already been dereferenced — a nil session
		// would have panicked before the error was ever reported.
		logger.Sugar.Error(err)
		return
	}
	defer func() {
		if session != nil {
			session.Relase(p, c)
		}
	}()
	db := session.DB
	// Initialise the auto-increment id configuration.
	util.InitUID(db)
	// 初始化rpc client
	// Start the RPC server, then block forever.
	logic.StartRpcServer()
	logger.Logger.Info("logic server start")
	select {}
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package memevent implements the memory usage events controller, which
// periodically emits events via the eventchannel.
package memevent
import (
"time"
"gvisor.dev/gvisor/pkg/eventchannel"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/metric"
"gvisor.dev/gvisor/pkg/sentry/kernel"
pb "gvisor.dev/gvisor/pkg/sentry/kernel/memevent/memory_events_go_proto"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sync"
)
// totalTicks counts elapsed event periods; totalEvents counts emitted events.
var totalTicks = metric.MustCreateNewUint64Metric("/memory_events/ticks", false /*sync*/, "Total number of memory event periods that have elapsed since startup.")
var totalEvents = metric.MustCreateNewUint64Metric("/memory_events/events", false /*sync*/, "Total number of memory events emitted.")
// MemoryEvents describes the configuration for the global memory event emitter.
type MemoryEvents struct {
	// k is the kernel whose memory usage is reported.
	k *kernel.Kernel
	// The period is how often to emit an event. The memory events goroutine
	// will ensure a minimum of one event is emitted per this period, regardless
	// how of much memory usage has changed.
	period time.Duration
	// Writing to this channel indicates the memory goroutine should stop.
	stop chan struct{}
	// done is used to signal when the memory event goroutine has exited.
	done sync.WaitGroup
}
// New creates a new MemoryEvents that will emit usage events for k once per
// period.
func New(k *kernel.Kernel, period time.Duration) *MemoryEvents {
	m := &MemoryEvents{
		k:      k,
		period: period,
		stop:   make(chan struct{}),
	}
	return m
}
// Stop stops the memory usage events emitter goroutine. Stop must not be called
// concurrently with Start and may only be called once.
func (m *MemoryEvents) Stop() {
	// Closing stop signals run() to exit; Wait blocks until it has.
	close(m.stop)
	m.done.Wait()
}
// Start starts the memory usage events emitter goroutine. Start must not be
// called concurrently with Stop and may only be called once.
func (m *MemoryEvents) Start() {
	// A zero period disables the emitter entirely.
	if m.period == 0 {
		return
	}
	m.done.Add(1)
	go m.run() // S/R-SAFE: doesn't interact with saved state.
}
// run is the emitter goroutine body: it emits one event immediately, then
// one per period until Stop is called.
func (m *MemoryEvents) run() {
	defer m.done.Done()
	// Emit the first event immediately on startup.
	totalTicks.Increment()
	m.emit()
	ticker := time.NewTicker(m.period)
	defer ticker.Stop()
	for {
		select {
		case <-m.stop:
			return
		case <-ticker.C:
			totalTicks.Increment()
			m.emit()
		}
	}
}
// emit publishes a single MemoryUsageEvent combining the platform total with
// mapped memory. Fetch failures are logged and the event is skipped.
func (m *MemoryEvents) emit() {
	totalPlatform, err := m.k.MemoryFile().TotalUsage()
	if err != nil {
		log.Warningf("Failed to fetch memory usage for memory events: %v", err)
		return
	}
	snapshot, _ := usage.MemoryAccounting.Copy()
	total := totalPlatform + snapshot.Mapped
	totalEvents.Increment()
	eventchannel.Emit(&pb.MemoryUsageEvent{
		Mapped: snapshot.Mapped,
		Total:  total,
	})
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.