text stringlengths 11 4.05M |
|---|
package problem0190
import "testing"
// TestSolve logs reverseBits results for a zero input and a sample 32-bit
// pattern; it asserts nothing, it only prints the reversed values.
func TestSolve(t *testing.T) {
	t.Log(reverseBits(0))
	const sample = 0b00000010100101000001111010011100
	t.Log(reverseBits(sample))
}
|
// Adapter contains the functions and objects to convert vulcan library specific interfaces that are more generic
// into vulcan daemon specific interfaces and data structures.
package adapter
import (
"github.com/mailgun/vulcan"
"github.com/mailgun/vulcan/limit/connlimit"
"github.com/mailgun/vulcan/limit/tokenbucket"
"github.com/mailgun/vulcan/loadbalance/roundrobin"
"github.com/mailgun/vulcan/location/httploc"
"github.com/mailgun/vulcan/metrics"
"github.com/mailgun/vulcan/route/hostroute"
"github.com/mailgun/vulcan/route/pathroute"
. "github.com/mailgun/vulcand/backend"
"time"
)
// NewRateLimiter builds a token-bucket limiter from a vulcand RateLimit
// definition: rl.Variable selects the request property buckets are keyed on,
// while Requests/PeriodSeconds/Burst configure the refill rate.
func NewRateLimiter(rl *RateLimit) (*tokenbucket.TokenLimiter, error) {
	mapper, err := VariableToMapper(rl.Variable)
	if err != nil {
		return nil, err
	}
	period := time.Duration(rl.PeriodSeconds) * time.Second
	rate := tokenbucket.Rate{
		Units:  int64(rl.Requests),
		Period: period,
	}
	opts := tokenbucket.Options{Burst: rl.Burst}
	return tokenbucket.NewTokenLimiterWithOptions(mapper, rate, opts)
}
// NewConnLimiter builds a concurrent-connection limiter from a vulcand
// ConnLimit definition, keyed on the request property named by cl.Variable.
func NewConnLimiter(cl *ConnLimit) (*connlimit.ConnectionLimiter, error) {
	mapper, mapErr := VariableToMapper(cl.Variable)
	if mapErr != nil {
		return nil, mapErr
	}
	return connlimit.NewConnectionLimiter(mapper, cl.Connections)
}
// Adapter helps to convert vulcan library-specific interfaces to vulcand interfaces and data structures
type Adapter struct {
// proxy is the running vulcan reverse proxy whose routers, locations and
// load balancers the getter methods below unwrap via type assertions.
proxy *vulcan.Proxy
}
// NewAdapter wraps an existing vulcan proxy in an Adapter.
func NewAdapter(proxy *vulcan.Proxy) *Adapter {
	a := new(Adapter)
	a.proxy = proxy
	return a
}
// GetHostRouter returns the proxy's top-level router, which is expected to be
// a hostroute.HostRouter (the type assertion panics otherwise).
func (a *Adapter) GetHostRouter() *hostroute.HostRouter {
	router := a.proxy.GetRouter()
	return router.(*hostroute.HostRouter)
}
// GetPathRouter returns the per-path router registered for hostname, or nil
// when the host is unknown.
func (a *Adapter) GetPathRouter(hostname string) *pathroute.PathRouter {
	router := a.GetHostRouter().GetRouter(hostname)
	if router == nil {
		return nil
	}
	return router.(*pathroute.PathRouter)
}
// GetHttpLocation resolves hostname+locationId to the HTTP location behind
// them; it returns nil when either lookup misses.
func (a *Adapter) GetHttpLocation(hostname string, locationId string) *httploc.HttpLocation {
	pathRouter := a.GetPathRouter(hostname)
	if pathRouter == nil {
		return nil
	}
	loc := pathRouter.GetLocationById(locationId)
	if loc == nil {
		return nil
	}
	return loc.(*httploc.HttpLocation)
}
// GetHttpLocationLb returns the round-robin load balancer attached to the
// given location, or nil when the location does not exist.
func (a *Adapter) GetHttpLocationLb(hostname string, locationId string) *roundrobin.RoundRobin {
	location := a.GetHttpLocation(hostname, locationId)
	if location == nil {
		return nil
	}
	return location.GetLoadBalancer().(*roundrobin.RoundRobin)
}
// GetStats resolves hostname/locationId/endpointId to the endpoint's rolling
// request metrics and repackages them into an EndpointStats snapshot. It
// returns nil if any hop of the lookup chain (load balancer, endpoint,
// meter) is missing.
func (a *Adapter) GetStats(hostname, locationId, endpointId string) *EndpointStats {
rr := a.GetHttpLocationLb(hostname, locationId)
if rr == nil {
return nil
}
endpoint := rr.FindEndpointById(endpointId)
if endpoint == nil {
return nil
}
meterI := endpoint.GetMeter()
if meterI == nil {
return nil
}
// The meter is assumed to be a metrics.RollingMeter; the assertion panics
// on any other implementation.
meter := meterI.(*metrics.RollingMeter)
return &EndpointStats{
Successes: meter.SuccessCount(),
Failures: meter.FailureCount(),
PeriodSeconds: int(meter.WindowSize() / time.Second),
FailRate: meter.GetRate(),
}
}
|
package libModel
import (
"fmt"
)
const (
errSchemaFormat = "Schema Error:[%s]\n"
errUnknownTypeFormat = "unknown datatype: columnName:%s, columnType:[%s]"
)
// errSchema wraps errMsg in the standard schema error envelope
// ("Schema Error:[...]").
func errSchema(errMsg string) error {
	wrapped := fmt.Errorf(errSchemaFormat, errMsg)
	return wrapped
}
func errUnknownType(columnName, columnType string) error {
return errSchema(fmt.Sprintf(errUnknownTypeFormat, columnName, columnType))
}
|
package controller
import (
"walletApi/src/common"
"walletApi/src/model"
"github.com/astaxie/beego"
"github.com/dchest/captcha"
)
// MainController serves the site root: a login form (with captcha) for
// anonymous visitors, the dashboard for authenticated users.
type MainController struct {
beego.Controller
}
// Get renders login.html with a fresh captcha when no user is stored in the
// session, and index.html otherwise. On a session hit it also updates the
// user's last-login time.
func (c *MainController) Get() {
// Touch the session first so that c.CruSession is initialized before the
// Session() lookup below.
if c.CruSession == nil {
c.GetSession(common.USER_INFO)
}
u, ok := c.Ctx.Input.Session(common.USER_INFO).(*model.User)
if !ok {
// Anonymous visitor: issue a fresh 4-digit captcha and show the login form.
d := struct {
CaptchaId string
}{
captcha.NewLen(4),
}
c.Data["CaptchaId"] = d.CaptchaId
c.TplName = "login.html"
} else {
c.Data["userName"] = u.UserName
c.Data["nickName"] = u.NickName
// The "admin" account is treated as the administrator purely by name.
if u.UserName == "admin" {
c.Data["IsAdmin"] = true
}
model.UpdateLastLoginTime(u.UserName)
c.TplName = "index.html"
}
}
|
package nanokontrol2
import (
"github.com/telyn/midi/korg/korgsysex/format4"
"github.com/telyn/midi/sysex"
)
// ParseSysEx dispatches a format4 SysEx payload to the concrete nanoKONTROL2
// message type identified by its first data byte, parses the remaining bytes
// into it, and returns it as a generic sysex.SysExer.
//
// NOTE(review): an unrecognized message type falls through the switch and
// returns (nil, nil), so callers cannot distinguish "unknown type" from
// success unless they nil-check out — confirm this is intended.
// NOTE(review): when Parse fails, out is still assigned the partially parsed
// message alongside the non-nil err.
func ParseSysEx(in format4.Message) (out sysex.SysExer, err error) {
msgType := in.Data[0]
switch msgType {
case DataDumpRequestID:
msg := DataDumpRequest{}
err = msg.Parse(in.Data[1:])
out = msg
case DataDumpTwoByteResponseID:
msg := DataDumpTwoByteResponse{}
err = msg.Parse(in.Data[1:])
out = msg
case SetModeRequestID:
msg := SetModeRequest{}
err = msg.Parse(in.Data[1:])
out = msg
case SetModeResponseID:
msg := SetModeResponse{}
err = msg.Parse(in.Data[1:])
out = msg
}
return
}
|
package models
import (
"crypto/md5"
"fmt"
"github.com/astaxie/beego/orm"
"strconv"
"strings"
)
// TableName reports the database table backing AdminBackendUser, as required
// by the beego ORM.
func (a *AdminBackendUser) TableName() string {
	name := AdminBackendUserTBName()
	return name
}
// AdminBackendUserQueryParam carries the filter and paging options accepted
// by AdminBackendUserPageList.
type AdminBackendUserQueryParam struct {
BaseQueryParam
UserName string `json:"userName"` // fuzzy match (icontains)
RealName string `json:"realName"` // fuzzy match (icontains)
Mobile string `json:"mobile"` // fuzzy match (icontains, despite the original "exact" note)
Status string `json:"status"` // empty means "no filter"; otherwise exact match
Id int `json:"id"` // look up a single row by id
}
// AdminBackendUser is the back-office user entity.
//
// Fixed: struct tag pairs were written without separating spaces
// (`orm:"..."json:"..."form:"..."`), which `go vet`'s structtag check flags
// as incompatible with the documented reflect.StructTag format. Tag keys and
// values are unchanged, so ORM/JSON/form behavior is preserved.
type AdminBackendUser struct {
	Id       int    `orm:"pk;column(id)" json:"id" form:"id"`
	RealName string `orm:"column(real_name)" json:"realName" form:"realName"`
	UserName string `orm:"column(user_name)" json:"userName" form:"userName"`
	// Password (blanked before page-list responses).
	UserPwd string `orm:"column(user_pwd)" json:"userPwd" form:"userPwd"`
	// Super-user flag: 0 disabled, 1 enabled.
	IsSuper int `orm:"column(is_super)" json:"isSuper" form:"isSuper"`
	// Status: 0 disabled, 1 enabled.
	Status int `orm:"column(status)" json:"status" form:"status"`
	// Phone number.
	Mobile string `orm:"column(mobile);size(16)" json:"mobile" form:"mobile"`
	// Email address.
	Email string `orm:"column(email);size(256)" json:"email" form:"email"`
	// Avatar URL.
	Avatar  string `orm:"column(avatar);size(256)" json:"avatar" form:"avatar"`
	RoleIds []int  `orm:"-" form:"roleIds" json:"roleIds"`
	// Reverse side of the role<->user relation.
	// NOTE(review): `json:"_"` emits a literal "_" JSON key; if the intent
	// was to omit these fields from JSON, the tag should be `json:"-"`.
	AdminRoleBackendUserRel []*AdminRoleBackendUserRel `orm:"reverse(many)" json:"_"`
	ResourceUrlForList      []string                   `orm:"-" json:"_"`
	//CreateCourses []*AdminCourse `rom:"reverse(many)"` // reverse one-to-many (disabled)
	//Creator *AdminRoleBackendUserRel `orm:"rel(fk)"` // foreign key (disabled)
	RolesStr string `orm:"column(roles_str)" json:"-" form:"-"`
	// Joined relations.
	FinancialConfigHistoricalRecords []*FinancialProductHistoricalRecord `orm:"reverse(many)" json:"-" form:"-"`
	FinancialProducts                []*FinancialProduct                 `orm:"reverse(many)" json:"-" form:"-"`
	// Helper fields (not persisted).
	Ids       string   `orm:"-" json:"ids" form:"ids"`
	RolesName []string `orm:"-" json:"rolesName" form:"-"`
}
// GetToken derives a session token as the hex-encoded MD5 of the user's
// password concatenated with the user name.
func (u *AdminBackendUser) GetToken() string {
	sum := md5.Sum([]byte(u.UserPwd + u.UserName))
	return fmt.Sprintf("%x", sum)
}
// AdminBackendUserPageList returns one page of back-office users plus the
// total (unpaged) row count for the given filters. Each user's RoleIds are
// decoded from the comma-separated RolesStr column, matching role names are
// attached, and UserPwd is blanked before returning.
//
// NOTE(review): the errors from query.Count() and query.All() are discarded,
// so a database failure is indistinguishable from an empty result set.
// NOTE(review): params.Offset is treated as a 1-based page number, not a row
// offset — confirm against the callers.
func AdminBackendUserPageList(params *AdminBackendUserQueryParam) ([]*AdminBackendUser, int64) {
query := orm.NewOrm().QueryTable(AdminBackendUserTBName())
data := make([]*AdminBackendUser, 0)
// Default sort order; only "Id" is recognized.
sortorder := "Id"
switch params.Sort {
case "Id":
sortorder = "Id"
}
if params.Order == "desc" {
sortorder = "-" + sortorder
}
if params.UserName != "" {
query = query.Filter("username__icontains", params.UserName)
}
if params.RealName != "" {
query = query.Filter("realname__icontains", params.RealName)
}
if len(params.Mobile) > 0 {
query = query.Filter("mobile__icontains", params.Mobile)
}
if len(params.Status) > 0 {
query = query.Filter("status__iexact", params.Status)
}
total, _ := query.Count()
query = query.OrderBy(sortorder).Limit(params.Limit, (params.Offset-1)*params.Limit)
//query = query.RelatedSel("AdminRoleBackendUserRel")
query.All(&data)
// Collect the distinct role ids referenced by this page of users.
roleIds := make([]int, 0)
roleIdsMap := make(map[int]struct{})
for _, obj := range data {
list := strings.Split(obj.RolesStr, ",")
for _, s := range list {
if i, err := strconv.Atoi(s); err == nil {
obj.RoleIds = append(obj.RoleIds, i)
if _, found := roleIdsMap[i]; !found {
roleIdsMap[i] = struct{}{}
roleIds = append(roleIds, i)
}
}
}
}
// Resolve role names in a single batch lookup.
roleObjs := AdminRolesByIds(roleIds)
for _, obj := range data {
// Attach the display name of every role the user holds.
for _, rid := range obj.RoleIds {
if role, found := roleObjs[rid]; found {
obj.RolesName = append(obj.RolesName, role.Name)
}
}
// Never return the password.
obj.UserPwd = ""
}
return data, total
}
// AdminBackendUserOne loads a single back-office user by primary key.
func AdminBackendUserOne(id int) (*AdminBackendUser, error) {
	m := AdminBackendUser{Id: id}
	if err := orm.NewOrm().Read(&m); err != nil {
		return nil, err
	}
	return &m, nil
}
// AdminBackendUserOneByName loads a single back-office user by user name.
func AdminBackendUserOneByName(username string) (*AdminBackendUser, error) {
	var m AdminBackendUser
	err := orm.NewOrm().QueryTable(AdminBackendUserTBName()).Filter("userName", username).One(&m)
	if err != nil {
		return nil, err
	}
	return &m, nil
}
// AdminBackendUserOneMobile loads a single back-office user by phone number.
func AdminBackendUserOneMobile(mobile string) (*AdminBackendUser, error) {
	var m AdminBackendUser
	q := orm.NewOrm().QueryTable(AdminBackendUserTBName())
	if err := q.Filter("mobile", mobile).One(&m); err != nil {
		return nil, err
	}
	return &m, nil
}
// AdminBackendUserOneByUserName loads the user matching both user name and
// password; the stored password representation is compared verbatim.
func AdminBackendUserOneByUserName(username, userpwd string) (*AdminBackendUser, error) {
	var m AdminBackendUser
	q := orm.NewOrm().QueryTable(AdminBackendUserTBName())
	if err := q.Filter("userName", username).Filter("userPwd", userpwd).One(&m); err != nil {
		return nil, err
	}
	return &m, nil
}
|
package main
import (
"fmt"
"log"
"os"
"time"
"github.com/faiface/beep"
"github.com/faiface/beep/effects"
"github.com/faiface/beep/mp3"
"github.com/faiface/beep/speaker"
)
// main plays an MP3 on loop; each press of ENTER toggles pause, raises the
// volume by half a base-2 step and speeds playback up by another 10% of the
// original rate.
func main() {
	f, err := os.Open("../Miami_Slice_-_04_-_Step_Into_Me.mp3")
	if err != nil {
		log.Fatal(err)
	}
	streamer, format, err := mp3.Decode(f)
	if err != nil {
		log.Fatal(err)
	}
	defer streamer.Close()
	// Fixed: speaker.Init returns an error that was silently discarded; a
	// failed speaker initialization previously fell through to Play.
	if err := speaker.Init(format.SampleRate, format.SampleRate.N(time.Second/10)); err != nil {
		log.Fatal(err)
	}
	// Loop forever; Ctrl is the pause switch, wrapped by Volume and Resample.
	ctrl := &beep.Ctrl{Streamer: beep.Loop(-1, streamer), Paused: false}
	volume := &effects.Volume{
		Streamer: ctrl,
		Base:     2,
		Volume:   0,
		Silent:   false,
	}
	speedy := beep.ResampleRatio(4, 1, volume)
	speaker.Play(speedy)
	for {
		fmt.Print("Press [ENTER] to pause/resume. ")
		fmt.Scanln()
		// Mutate the playing pipeline only while the speaker is locked.
		speaker.Lock()
		ctrl.Paused = !ctrl.Paused
		volume.Volume += 0.5
		speedy.SetRatio(speedy.Ratio() + 0.1)
		speaker.Unlock()
	}
}
|
package models
import "github.com/jinzhu/gorm"
// GoodsType is a goods category; it embeds gorm.Model for the standard
// ID/timestamp/soft-delete columns.
type GoodsType struct {
gorm.Model
Name string
}
|
package exphttp
import (
"context"
"net/http"
)
func newContext(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
addr := r.RemoteAddr
newCtx := context.WithValue(r.Context(), "remote-addr", addr)
next.ServeHTTP(w, r.WithContext(newCtx))
})
}
|
package main
import (
"io/ioutil"
"log"
"net/http"
"reflect"
"sort"
"strings"
"github.com/labstack/echo"
"github.com/naoina/toml"
)
// routeHandler binds one route to the reflected handler method serving it.
type routeHandler struct {
handlerName string // fully qualified name, e.g. "main.UserInfoHandler.GetUserInfo"
httpMethod string // HTTP verb, or the pseudo-methods "FILE" / "STATIC"
method reflect.Value // resolved handler; zero Value until registerHandler runs
}
// validator describes one middleware and the routes it must NOT run on.
type validator struct {
routePrefix string
handlerName string // fully qualified name, e.g. "main.CommonValidator.CheckSession"
skipRoutes map[string]*interface{} // route prefixes excluded via a leading "!" in the config
validateMethod echo.MiddlewareFunc // resolved by registerValidator; nil until then
}
// MyEchoServer wraps an echo.Echo instance together with the route, handler
// and middleware wiring read from conf.toml.
type MyEchoServer struct {
server *echo.Echo // the wrapped echo server instance
route2Handler map[string]*routeHandler // one handler per route
route2Validators map[string][]*validator // several validators per route prefix
handler2Routes map[string][]string // one handler may serve several routes
handler2Validator map[string]*validator // one validator per middleware name
verifyPrefixs []string // route prefixes that require middleware checks
}
// Conf mirrors conf.toml: Routes maps "METHOD:/path" to a handler name,
// Validators maps a route prefix to its middleware handler names.
type Conf struct {
Routes map[string]string // route and its handler
Validators map[string][]string // route prefix and its validators
}
// initEnv reads conf.toml and builds the route/handler/middleware maps of a
// MyEchoServer; the actual handler and middleware methods are resolved later
// by registerHandler/registerValidator.
func initEnv() *MyEchoServer {
// Read the routing config file.
rdata, err := ioutil.ReadFile("./conf.toml")
checkErr("read routes.toml error: ", err)
var conf Conf
err = toml.Unmarshal(rdata, &conf)
checkErr("unmarshal toml data error: ", err)
// Build the bidirectional route <-> handler maps.
route2Handler := make(map[string]*routeHandler, 10)
handler2Routes := make(map[string][]string, 10)
for route, handler := range conf.Routes {
mr := strings.SplitN(route, ":", 2) // split e.g. "POST:/user"
method := http.MethodPost // default verb when the config omits one
if len(mr) > 1 {
reqMethod := strings.ToUpper(mr[0])
switch reqMethod {
case "FILE":
method = "FILE"
case "STATIC":
method = "STATIC"
default:
method = reqMethod
}
route = mr[1]
}
// route -> handler is one-to-one.
route2Handler[route] = &routeHandler{handlerName: handler, httpMethod: method}
// handler -> routes is one-to-many.
routes, ok := handler2Routes[handler]
if !ok {
routes = make([]string, 0, 2)
}
routes = append(routes, route)
sort.Strings(routes)
handler2Routes[handler] = routes
}
// Build the route <-> validator and handler <-> validator maps.
verifyPrefixs := make([]string, 0, len(conf.Validators))
route2Validators := make(map[string][]*validator, 10)
handler2Validator := make(map[string]*validator)
for route, handlers := range conf.Validators {
// route prefix -> validators is one-to-many.
verifier, ok := route2Validators[route]
if !ok {
verifier = make([]*validator, 0, len(handlers))
verifyPrefixs = append(verifyPrefixs, route)
}
// Walk this prefix's middleware list.
for _, handler := range handlers {
h := strings.TrimPrefix(handler, "!")
v, ok := handler2Validator[h]
// middleware name -> validator is one-to-one.
if !ok {
v = &validator{handlerName: h, skipRoutes: make(map[string]*interface{}, 2)}
handler2Validator[h] = v
}
// A handler prefixed with "!" disables this middleware for the prefix.
if strings.HasPrefix(handler, "!") {
v.skipRoutes[route] = nil
}
verifier = append(verifier, v)
}
route2Validators[route] = verifier
}
sort.Strings(verifyPrefixs)
// Assemble the wrapped echo server.
return &MyEchoServer{
server: echo.New(),
route2Handler: route2Handler,
route2Validators: route2Validators,
handler2Routes: handler2Routes,
handler2Validator: handler2Validator,
verifyPrefixs: verifyPrefixs,
}
}
// main wires conf.toml routes to their reflected handlers and middleware,
// then starts the server (start blocks until the listener fails).
func main() {
echoServer := initEnv()
echoServer.registerHandler(&UserInfoHandler{})
echoServer.registerValidator(&CommonValidator{})
echoServer.start()
// fmt.Printf("%+v", echoServer) // debug: dump the wired server
}
// start validates middleware injection, registers every configured route on
// the echo instance, and finally begins serving on :2333.
//
// Fixed: s.server.Start(":2333") was called INSIDE the route-registration
// loop, so the server started (and blocked) right after the first route was
// published; every remaining route was never registered. Start now runs once
// after the loop.
func (s *MyEchoServer) start() {
	// Ensure every configured middleware was injected via registerValidator.
	for r, vs := range s.route2Validators {
		for _, v := range vs {
			if nil == v.validateMethod {
				panic(r + " -> " + v.handlerName + " is NOT INJECT")
			}
		}
	}
	// Collect all routes and publish them in deterministic (sorted) order.
	routes := make([]string, 0, 10)
	for r := range s.route2Handler {
		routes = append(routes, r)
	}
	sort.Strings(routes)
	for _, route := range routes {
		handler := s.route2Handler[route]
		// FILE/STATIC routes need no Go method; everything else must have
		// been injected via registerHandler.
		m := strings.ToUpper(handler.httpMethod)
		if (handler.method.Kind() == reflect.Invalid) && m != "FILE" && m != "STATIC" {
			log.Panicf("ERROR:\nHandler Not Exist: %s -> %s", route, handler.handlerName)
		}
		// Adapt the reflected handler method to echo's signature; handler is
		// declared per iteration, so capturing it in the closure is safe.
		handleFunc := func(ctx echo.Context) error {
			context := reflect.ValueOf(ctx)
			handler.method.Call([]reflect.Value{context})
			return nil
		}
		// Gather the middleware whose prefixes match this route.
		usedValidators := make([]echo.MiddlewareFunc, 0, 10)
		for _, prefix := range s.verifyPrefixs {
			if strings.HasPrefix(route, prefix) {
				validators, ok := s.route2Validators[prefix]
				if ok {
				FLAG:
					for _, v := range validators {
						// Skip middleware that excluded this route via "!".
						for skipPrefix := range v.skipRoutes {
							if strings.HasPrefix(route, skipPrefix) {
								log.Printf("INFO:\nRoute Skipped Vlidator: %s -x-> %s", route, v.handlerName)
								continue FLAG
							}
						}
						usedValidators = append(usedValidators, v.validateMethod)
					}
				}
			}
		}
		// Publish the route according to its (pseudo-)method.
		switch m {
		case http.MethodGet:
			s.server.GET(route, handleFunc, usedValidators...)
		case http.MethodPost:
			s.server.POST(route, handleFunc, usedValidators...)
		case http.MethodHead:
			s.server.HEAD(route, handleFunc, usedValidators...)
		case "FILE":
			s.server.File(route, handler.handlerName)
		case "STATIC":
			s.server.Static(route, handler.handlerName)
		default:
			s.server.GET(route, handleFunc, usedValidators...)
		}
	}
	// Serve; Start blocks until the listener fails.
	log.Fatalln(s.server.Start(":2333"))
}
// UserInfoHandler carries the user-related route handlers that are resolved
// by name via reflection.
type UserInfoHandler struct{}
// GetUserInfo handles the user-info route; it reads the "name" form value
// from the request and logs it.
func (u *UserInfoHandler) GetUserInfo(ctx echo.Context) {
println("调用 GetUserInfo 处理业务逻辑")
println("获取请求的 name 参数,值为: ", ctx.FormValue("name"))
}
// CommonValidator carries middleware methods resolved by name via reflection.
type CommonValidator struct{}

// CheckSession is a session-checking middleware stub: it logs and forwards
// to the next handler.
//
// Fixed: the error returned by next(ctx) was discarded and the middleware
// always returned nil, so downstream failures never reached echo's error
// handler. The error is now propagated.
func (v *CommonValidator) CheckSession(next echo.HandlerFunc) echo.HandlerFunc {
	return func(ctx echo.Context) error {
		println("调用 CheckSession 通过 Session 中间件检测")
		return next(ctx)
	}
}
// registerValidator resolves, via reflection, every middleware method of v
// (e.g. CommonValidator.CheckSession) that conf.toml references, and wires it
// into the matching validator entries. It panics when a referenced method is
// missing or does not have the echo middleware signature.
func (s *MyEchoServer) registerValidator(v interface{}) *MyEchoServer {
vVal := reflect.ValueOf(v)
vType := vVal.Elem().Type()
vName := vType.String()
// Collect all route prefixes that carry middleware, in sorted order.
routes := make([]string, 0, 10)
for route := range s.route2Validators {
routes = append(routes, route)
}
sort.Strings(routes)
// Walk every route prefix that needs middleware.
used := false
for _, route := range routes {
// All middleware configured for this prefix.
validators := s.route2Validators[route]
for _, v := range validators {
// This validator struct provides the middleware for this prefix.
if strings.HasPrefix(v.handlerName, vName) {
handlerName := strings.TrimPrefix(strings.TrimPrefix(v.handlerName, vName), ".")
method := vVal.MethodByName(handlerName)
// Make sure the named method actually exists on v.
if method.Kind() == reflect.Invalid || method.IsNil() {
log.Panicf("ERROR:\nMethod %s Not Exist In %s", method, vName)
} else {
// Make sure it has the middleware signature
// func(echo.HandlerFunc) echo.HandlerFunc.
ok := method.Type().ConvertibleTo(reflect.TypeOf((func(echo.HandlerFunc) echo.HandlerFunc)(nil)))
if !ok {
log.Panicf("ERROR:\nMethod %s Not MiddlewareFunc", handlerName)
}
// Bind the resolved method to the validator entry.
v.validateMethod = method.Interface().(func(echo.HandlerFunc) echo.HandlerFunc)
used = true
}
log.Printf("VALIDATOR INFO:\nRegister Succeed: %s -> %s.%s", route, vName, handlerName)
}
}
}
if !used {
log.Printf("WARN:\nNot Used: %s", vName)
}
return s
}
// registerHandler resolves, via reflection, every route-handler method of h
// (e.g. UserInfoHandler.GetUserInfo) referenced by conf.toml and stores it on
// the corresponding routeHandler entry. It panics when a referenced method
// does not exist on h.
//
// NOTE(review): the HasPrefix(handler, rHPath) check does not depend on the
// inner route loop variable and could be hoisted above the route loop.
func (s *MyEchoServer) registerHandler(h interface{}) *MyEchoServer {
rHVal := reflect.ValueOf(h)
rHType := rHVal.Elem().Type()
rHPath := rHType.String() // package.Struct.HandlerFunc
// All handler names referenced by the config, in sorted order.
handlers := make([]string, 0, 10)
for handler := range s.handler2Routes {
handlers = append(handlers, handler)
}
sort.Strings(handlers)
used := false
// Walk every route of every configured handler.
for _, handler := range handlers {
routes := s.handler2Routes[handler]
for _, route := range routes {
// This handler struct provides the method for this route.
if strings.HasPrefix(handler, rHPath) {
handlerName := strings.TrimPrefix(strings.TrimPrefix(handler, rHPath), ".")
method := rHVal.MethodByName(handlerName)
if method.Kind() == reflect.Invalid || method.IsNil() {
log.Panicf("ERROR:\nMethod %s Not Exist In %s", method, rHPath)
}
// Bind the resolved method to the route entry.
s.route2Handler[route].method = method
used = true
log.Printf("ROUTE INFO:\nRegister Succeed: %s -> %s.%s", route, rHPath, handlerName)
}
}
}
if !used {
log.Printf("WARN:\nNot Used: %s", rHPath)
}
return s
}
// checkErr aborts the process (via log.Fatalln) with info and err when err
// is non-nil; it is a no-op otherwise.
func checkErr(info string, err error) {
	if err == nil {
		return
	}
	log.Fatalln(info, err)
}
|
package main
import (
"fmt"
"os"
)
const (
// goroutines is the number of workers that hand the counter value around,
// and also the counter value at which the program exits.
goroutines = 10
)
// main hands a counter value around `goroutines` workers over a 1-buffered
// channel; each worker takes the value, increments and republishes it, and
// the worker that reaches `goroutines` exits the whole process.
func main() {
	counter := make(chan int, 1)
	for i := 0; i < goroutines; i++ {
		go func(counter chan int) {
			val := <-counter
			val++
			fmt.Println("counter:", val)
			if val == goroutines {
				// The final increment terminates the program.
				os.Exit(0)
			}
			counter <- val
		}(counter)
	}
	// Seed the counter; the workers take over from here.
	counter <- 0
	// Fixed: the original `for {}` busy-wait spun at 100% CPU until the
	// exiting worker called os.Exit. An empty select blocks without burning
	// a core; the process still terminates via os.Exit above.
	select {}
}
|
package actions
import (
"fmt"
"log"
"syscall"
"time"
)
// note to self - syscall reboot magic numbers reference:
// https://golang.org/pkg/syscall/?GOOS=linux&GOARCH=mips64le

// SystemPower validates action ("reboot" or "shutdown") and kicks off the
// corresponding system call asynchronously. The returned Power status only
// reports that the action was started, not that it succeeded.
//
// Cleanup: the error result was named `error`, shadowing the predeclared
// error type inside the function body; it is renamed to err.
func SystemPower(action string) (actionResp Power, err error) {
	if err := validatePowerAction(action); err != nil {
		return Power{}, err
	}
	status := Power{}
	switch action {
	case "reboot":
		// Best-effort: runs in the background, failures are only logged.
		go reboot()
		status.Status = fmt.Sprintf("async %s in progress...", action)
		return status, nil
	case "shutdown":
		go shutdown()
		status.Status = fmt.Sprintf("async %s in progress...", action)
		return status, nil
	default:
		// Unreachable for validated input; kept as a defensive guard.
		return Power{}, fmt.Errorf("power action %s is invalid", action)
	}
}
// validatePowerAction reports nil when action is one of the supported power
// actions ("reboot", "shutdown") and an error otherwise.
func validatePowerAction(action string) error {
	switch action {
	case "reboot", "shutdown":
		return nil
	}
	return fmt.Errorf("power action is invalid")
}
// reboot issues the Linux reboot(2) syscall with LINUX_REBOOT_CMD_RESTART.
func reboot(){
//best effort no return in goroutine
// The 2s sleep gives the caller's HTTP response time to flush before the
// machine restarts.
time.Sleep(time.Second * 2)
// Magic numbers required by the reboot(2) ABI; see the syscall package
// docs for the GOOS=linux constants.
var linuxRebootMagic1 uintptr = 0xfee1dead
var linuxRebootMagic2 uintptr = 672274793
var linuxRebootCmdRestart uintptr = 0x1234567 //LINUX_REBOOT_CMD_RESTART
_,_, errno := syscall.Syscall(syscall.SYS_REBOOT, linuxRebootMagic1, linuxRebootMagic2, linuxRebootCmdRestart)
if errno != 0{
log.Printf("system reboot failed with error code %d", errno)
}
}
// shutdown issues the Linux reboot(2) syscall with LINUX_REBOOT_CMD_POWER_OFF.
func shutdown(){
//best effort no return in goroutine
// The 2s sleep gives the caller's HTTP response time to flush before the
// machine powers off.
time.Sleep(time.Second * 2)
// Magic numbers required by the reboot(2) ABI; see the syscall package
// docs for the GOOS=linux constants.
var linuxRebootMagic1 uintptr = 0xfee1dead
var linuxRebootMagic2 uintptr = 672274793
var linuxRebootCmdShutdown uintptr = 0x4321fedc //LINUX_REBOOT_CMD_POWER_OFF
_,_, errno := syscall.Syscall(syscall.SYS_REBOOT, linuxRebootMagic1, linuxRebootMagic2, linuxRebootCmdShutdown)
if errno != 0{
log.Printf("system shutdown failed with error code %d", errno)
}
}
|
package stub
import (
api "github.com/operator-framework/operator-sdk-samples/vault-operator/pkg/apis/vault/v1alpha1"
"github.com/operator-framework/operator-sdk-samples/vault-operator/pkg/vault"
"github.com/operator-framework/operator-sdk/pkg/sdk/handler"
"github.com/operator-framework/operator-sdk/pkg/sdk/types"
)
// NewHandler returns a new, stateless vault event handler.
func NewHandler() handler.Handler {
	h := &Handler{}
	return h
}
// Handler reacts to operator-sdk events for VaultService objects.
type Handler struct {
// Intentionally stateless.
}
// Handle reconciles VaultService objects; events for any other object type
// are ignored.
func (h *Handler) Handle(ctx types.Context, event types.Event) error {
	if vs, ok := event.Object.(*api.VaultService); ok {
		return vault.Reconcile(vs)
	}
	return nil
}
|
/*
Copyright 2021 RadonDB.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"reflect"
"github.com/presslabs/controller-util/syncer"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
apiv1alpha1 "github.com/radondb/radondb-mysql-kubernetes/api/v1alpha1"
"github.com/radondb/radondb-mysql-kubernetes/cluster"
clustersyncer "github.com/radondb/radondb-mysql-kubernetes/cluster/syncer"
)
// ClusterReconciler reconciles a Cluster object
type ClusterReconciler struct {
client.Client
// Scheme maps Go types to Kubernetes GroupVersionKinds.
Scheme *runtime.Scheme
// Recorder emits Kubernetes events for the reconciled clusters.
Recorder record.EventRecorder
}
// +kubebuilder:rbac:groups=mysql.radondb.com,resources=clusters,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=mysql.radondb.com,resources=clusters/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=mysql.radondb.com,resources=clusters/finalizers,verbs=update
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=configmaps;secrets;services;pods;persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=events,verbs=get;create;patch
// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create;update
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles;rolebindings,verbs=get;list;watch;create;update
// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=policy,resources=poddisruptionbudgets,verbs=get;list;watch;create;update;patch;delete
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the Cluster object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/reconcile
func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := log.FromContext(ctx).WithName("controllers").WithName("Cluster")
instance := cluster.New(&apiv1alpha1.Cluster{})
err := r.Get(ctx, req.NamespacedName, instance.Unwrap())
if err != nil {
if errors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
log.Info("instance not found, maybe removed")
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
}
if err = instance.Validate(); err != nil {
return ctrl.Result{}, err
}
// Snapshot the status so the deferred update only fires when a syncer
// actually changed it.
status := *instance.Status.DeepCopy()
defer func() {
if !reflect.DeepEqual(status, instance.Status) {
sErr := r.Status().Update(ctx, instance.Unwrap())
if sErr != nil {
log.Error(sErr, "failed to update cluster status")
}
}
}()
// ConfigMap and Secret are synced first; their resource versions are fed
// to the StatefulSet syncer below — presumably so the StatefulSet reacts
// when either changes (TODO confirm in NewStatefulSetSyncer).
configMapSyncer := clustersyncer.NewConfigMapSyncer(r.Client, instance)
if err = syncer.Sync(ctx, configMapSyncer, r.Recorder); err != nil {
return ctrl.Result{}, err
}
secretSyncer := clustersyncer.NewSecretSyncer(r.Client, instance)
if err = syncer.Sync(ctx, secretSyncer, r.Recorder); err != nil {
return ctrl.Result{}, err
}
cmRev := configMapSyncer.Object().(*corev1.ConfigMap).ResourceVersion
sctRev := secretSyncer.Object().(*corev1.Secret).ResourceVersion
// run the syncers for services, pdb and statefulset
syncers := []syncer.Interface{
clustersyncer.NewRoleSyncer(r.Client, instance),
clustersyncer.NewRoleBindingSyncer(r.Client, instance),
clustersyncer.NewServiceAccountSyncer(r.Client, instance),
clustersyncer.NewHeadlessSVCSyncer(r.Client, instance),
clustersyncer.NewLeaderSVCSyncer(r.Client, instance),
clustersyncer.NewFollowerSVCSyncer(r.Client, instance),
clustersyncer.NewStatefulSetSyncer(r.Client, instance, cmRev, sctRev),
clustersyncer.NewPDBSyncer(r.Client, instance),
}
// run the syncers; the first failure aborts the reconcile.
for _, sync := range syncers {
if err = syncer.Sync(ctx, sync, r.Recorder); err != nil {
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&apiv1alpha1.Cluster{}).
Owns(&appsv1.StatefulSet{}).
Owns(&corev1.ConfigMap{}).
Owns(&corev1.Service{}).
Owns(&rbacv1.Role{}).
Owns(&rbacv1.RoleBinding{}).
Owns(&corev1.ServiceAccount{}).
Owns(&corev1.Secret{}).
Owns(&policyv1beta1.PodDisruptionBudget{}).
Complete(r)
}
|
package handler
import (
"../domain/model"
"../domain/service"
user "../proto/user"
"context"
)
// User implements the user microservice RPC handlers on top of
// IUserDataService.
type User struct {
UserDataService service.IUserDataService
}
// Register creates a new user from the RPC request and reports success in
// the response message.
func (u *User) Register(ctx context.Context, userRegisterRequest *user.UserRegisterRequest, userRegisterResponse *user.UserRegisterResponse) error {
	newUser := &model.User{
		UserName:     userRegisterRequest.UserName,
		FirstName:    userRegisterRequest.FirstName,
		HashPassword: userRegisterRequest.Pwd,
	}
	if _, err := u.UserDataService.AddUser(newUser); err != nil {
		return err
	}
	userRegisterResponse.Message = "Added User successfully"
	return nil
}
// Login checks the supplied credentials and reports the result in the
// response.
//
// NOTE(review): Message is set to "Logged in successfully" even when isOk is
// false — confirm callers rely on IsSuccess rather than the message text.
func (u *User) Login(ctx context.Context, userLoginRequest *user.UserLoginRequest, userLoginResponse *user.UserLoginResponse) error {
isOk, err := u.UserDataService.CheckPwd(userLoginRequest.UserName, userLoginRequest.Pwd)
if err != nil {
return err
}
userLoginResponse.IsSuccess = isOk
userLoginResponse.Message = "Logged in successfully"
return nil
}
// GetUserInfo looks up a user by name and copies the result into the
// caller-supplied response message.
//
// Fixed: the handler previously reassigned its local userInfoResponse
// pointer (userInfoResponse = UserInfoTrans(...)), so the response struct
// the RPC framework passed in was never populated and callers always
// received an empty response. The result is now copied into the provided
// struct.
func (u *User) GetUserInfo(ctx context.Context, userInfoRequest *user.UserInfoRequest, userInfoResponse *user.UserInfoResponse) error {
	userInfo, err := u.UserDataService.FindUserByName(userInfoRequest.UserName)
	if err != nil {
		return err
	}
	*userInfoResponse = *UserInfoTrans(userInfo)
	return nil
}
// UserInfoTrans converts a domain user model into the RPC response type.
func UserInfoTrans(userModel *model.User) *user.UserInfoResponse {
	return &user.UserInfoResponse{
		UserName:  userModel.UserName,
		FirstName: userModel.FirstName,
		UserId:    userModel.ID,
	}
}
|
package htmlHeadings
import (
	"fmt"
	"log"
	"net/http"
	"testing"

	"github.com/PuerkitoBio/goquery"
)
// TestHtmlHeadings fetches a known page and checks the per-tag heading
// counts reported by FindHeadings against expected values.
func TestHtmlHeadings(t *testing.T) {
	testUrl := "https://www.htmldog.com/guides/html/beginner/headings/"
	expectedResult := map[string]int{"h1": 1, "h2": 4, "h3": 2, "h4": 0, "h5": 0, "h6": 0}
	doc, err := pingURL(testUrl)
	// Fixed: the test used t.Errorf and fell through, dereferencing the nil
	// *doc below and panicking whenever the fetch failed. Fatalf stops here.
	if err != nil {
		t.Fatalf("Failed: could not ping url: %v", err)
	}
	result := FindHeadings(*doc)
	for _, each := range result {
		if expectedResult[each.Heading] != each.Count {
			t.Errorf("Failed: expected headers not found for %s - expected : %d, actual : %d", each.Heading, expectedResult[each.Heading], each.Count)
		}
	}
}
// pingURL fetches url and parses the body into a goquery document.
//
// Fixed: a non-200 status previously called log.Fatalf, killing the entire
// test binary from inside a helper; it now logs and returns an error so the
// caller can fail just its own test.
func pingURL(url string) (*goquery.Document, error) {
	res, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		log.Printf("status code error: %d %s", res.StatusCode, res.Status)
		return nil, fmt.Errorf("status code error: %d %s", res.StatusCode, res.Status)
	}
	// Load the HTML document.
	doc, err := goquery.NewDocumentFromReader(res.Body)
	if err != nil {
		return nil, err
	}
	return doc, nil
}
|
package mysql
// TableRow holds per-table metadata (name, storage engine, collation).
// NOTE(review): field names match columns of MySQL's SHOW TABLE STATUS —
// confirm against the query that scans into this struct.
type TableRow struct {
Name string
Engine string
Collation string
}
// FieldRow holds per-column metadata.
// NOTE(review): field names match columns of MySQL's SHOW FULL COLUMNS —
// confirm against the query that scans into this struct.
type FieldRow struct {
Field string
Type string
Collation string
Null string
Default string
Extra string
Comment string
}
// IndexRow holds per-index metadata; the xorm tags map the struct to the
// column names of MySQL's SHOW INDEX output.
type IndexRow struct {
Name string `xorm:"Key_name"`
NonUnique bool `xorm:"Non_unique"`
Field string `xorm:"Column_name"`
}
|
// Package timesafeguard collects the time of other nodes and ensures the
// remote times are not diverging too much from the local time. This is useful
// to ensure the following situation does not happen:
//
// 1. In a network of 3 nodes, node 1 goes down.
//
// 2. Node A comes back up, but with a 1-hour clock drift due to the hardware
// clock being in local time instead of UTC. ntpd refuses to correct the
// time because the drift is too big.
//
// 3. Node A re-joins the network.
//
// 4. At some point, node A becomes the leader. Message timestamps made a jump
// from e.g. 1432323893 to 1432327493, i.e. one hour into the future.
//
// 5. At some point, a different node becomes the leader. When trying to apply
// a message, the node panics because the message timestamp is not
// monotonically increasing.
//
// 6. The network needs to be frozen for an hour to be healthy again.
//
// timesafeguard ensures that the time is not off by more than
// |ElectionTimeout|, which presents the lower bound on how long an election
// takes:
//
// 1. In order for a candidate to win an election, the candidate needs to have
// the most recently committed entry in its log, otherwise it will not get a
// majority of votes.
//
// 2. The only way to get the most recently committed entry is to receive an
// appendEntries RPC, which resets the election timer. Hence, the earliest
// point at which any node can start an election that has any chance to be
// successful is after |ElectionTimeout| passed.
//
// Since during an election, no new messages are committed, a clock drift of
// |ElectionTimeout| is okay: the first message on the new leader will be
// committed late enough that it has a higher timestamp than the last message
// on the old leader.
package timesafeguard
import (
"flag"
"fmt"
"log"
"strings"
"sync"
"time"
"github.com/robustirc/internal/health"
"github.com/stapelberg/glog"
)
// DisableTimesafeguard skips the remote-clock comparison when joining the
// network. Dangerous: see the package comment for the failure mode the
// safeguard normally prevents.
var DisableTimesafeguard = flag.Bool(
"disable_timesafeguard",
false,
"Disables checking whether the time is in sync with other nodes before joining the network (DANGEROUS!)")
// ElectionTimeout is the maximum tolerated clock drift; see the package
// comment for why a drift below the election duration is safe.
const ElectionTimeout = 2 * time.Second
// timeResult is one remote-time measurement bracketed by local timestamps.
type timeResult struct {
// Start is the point in local time before we started talking to the target.
Start time.Time
// End is the point in local time after we decoded the response.
End time.Time
// Result is the remote time of when the target replied.
Result time.Time
}
// String renders the measurement as local vs. remote time, with the
// round-trip duration shown as the uncertainty.
func (t *timeResult) String() string {
	uncertainty := t.End.Sub(t.Start)
	return fmt.Sprintf("Local: %v, Remote: %v ± %v", t.Start, t.Result, uncertainty)
}
// worstCaseDrift returns an upper bound on the remote clock's offset from
// the local clock: the absolute start-to-remote difference plus however long
// the measurement itself took.
func (t timeResult) worstCaseDrift() time.Duration {
	offset := t.Result.Sub(t.Start)
	if offset < 0 {
		offset = -offset
	}
	return offset + t.End.Sub(t.Start)
}
// getServerTime measures the clock of |server|, recording local timestamps
// immediately before the request and immediately after the reply was decoded.
// The full ServerStatus is returned too so callers can inspect e.g. Peers.
func getServerTime(server, networkPassword string) (timeResult, health.ServerStatus, error) {
	start := time.Now()
	status, err := health.GetServerStatus(server, networkPassword)
	return timeResult{
		Start:  start,
		End:    time.Now(),
		Result: status.CurrentTime,
	}, status, err
}
// collectTime queries all |servers| concurrently and returns one timeResult
// per server, in the same order. A server that failed leaves its slot at the
// zero value.
// The first error talking to any of |servers| will be returned.
func collectTime(servers []string, networkPassword string) ([]timeResult, error) {
	var wg sync.WaitGroup
	results := make([]timeResult, len(servers))
	// Buffered so every goroutine can report its error without blocking.
	errChan := make(chan error, len(servers))
	for idx, server := range servers {
		wg.Add(1)
		go func(idx int, server string) {
			defer wg.Done()
			result, _, err := getServerTime(server, networkPassword)
			if err != nil {
				errChan <- err
				return
			}
			// Each goroutine writes to a distinct slot; no lock needed.
			results[idx] = result
		}(idx, server)
	}
	wg.Wait()
	// All senders are done. Return the first error, if any; the previous
	// close-and-drain of the buffered channel was unnecessary work (the
	// remaining errors are simply garbage-collected) and used the
	// non-idiomatic `for _ = range` form.
	select {
	case err := <-errChan:
		return results, err
	default:
		return results, nil
	}
}
// timeInSync reports whether every measured remote clock is within
// |ElectionTimeout| of the local clock (worst case).
func timeInSync(results []timeResult) bool {
	inSync := true
	for _, r := range results {
		inSync = inSync && r.worstCaseDrift() < ElectionTimeout
	}
	return inSync
}
// synchronizedWithNetwork checks the collected measurements against local
// time. Zero-valued results (servers that could not be measured) are logged
// and ignored. On excessive drift it returns an error, unless the
// -disable_timesafeguard flag is set, in which case it only logs a warning.
func synchronizedWithNetwork(results []timeResult) error {
	log.Printf("Collected time measurements:\n")
	var nonZeroResults []timeResult
	for _, result := range results {
		if result.Result.IsZero() {
			log.Printf(" %s (ignoring)\n", result.String())
		} else {
			log.Printf(" %s\n", result.String())
			nonZeroResults = append(nonZeroResults, result)
		}
	}
	if timeInSync(nonZeroResults) {
		return nil
	}
	// Collect only the offending measurements for the error message.
	var errDetails []string
	for _, result := range nonZeroResults {
		if result.worstCaseDrift() >= ElectionTimeout {
			errDetails = append(errDetails, result.String())
		}
	}
	if *DisableTimesafeguard {
		log.Printf("Local time is too different from remote time, but timesafeguard is disabled.\nConflicting remote times: %s",
			strings.Join(errDetails, "\n"))
		return nil
	}
	return fmt.Errorf("Local time is too different from remote time, refusing to join network.\nConflicting remote times: %s",
		strings.Join(errDetails, "\n"))
}
// SynchronizedWithMasterAndNetwork returns an error if the time of either
// |join| or any other node in the network is too far off the local time.
func SynchronizedWithMasterAndNetwork(peerAddr, join, networkPassword string) error {
	result, status, err := getServerTime(join, networkPassword)
	if err != nil {
		// Fix: this used to call log.Fatalf, which exits the process and
		// made the documented error return unreachable for this path.
		// Return the error so callers decide how to handle it.
		return fmt.Errorf("could not join %q: %v", join, err)
	}
	// Measure every remaining peer, excluding ourselves and |join| (which we
	// just measured above).
	var peers []string
	for _, peer := range status.Peers {
		if peer != peerAddr && peer != join {
			peers = append(peers, peer)
		}
	}
	log.Printf("Collecting time of remaining nodes %v\n", peers)
	results, err := collectTime(peers, networkPassword)
	if err != nil {
		// Partial results are still useful, so only warn and continue.
		glog.Warningf("Could not collect time from all of %v: %v\n", peers, err)
	}
	results = append(results, result)
	return synchronizedWithNetwork(results)
}
// SynchronizedWithNetwork returns an error if the time of any of |peers| is
// too far off the local time.
func SynchronizedWithNetwork(peerAddr string, peers []string, networkPassword string) error {
	// Never ask ourselves for the time.
	var collectPeers []string
	for _, p := range peers {
		if p == peerAddr {
			continue
		}
		collectPeers = append(collectPeers, p)
	}
	log.Printf("Collecting time of remaining nodes %v\n", collectPeers)
	results, err := collectTime(collectPeers, networkPassword)
	if err != nil {
		// Partial results are still checked below; only warn here.
		glog.Warningf("Could not collect time from all of %v: %v\n", collectPeers, err)
	}
	return synchronizedWithNetwork(results)
}
|
package app
import (
"github.com/cosmos/cosmos-sdk/baseapp"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
sdk "github.com/cosmos/cosmos-sdk/types"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
markertypes "github.com/provenance-io/provenance/x/marker/types"
)
var (
	// noopHandler is installed for upgrade plans that need no migration
	// logic; it only logs that the plan was applied.
	noopHandler = func(ctx sdk.Context, plan upgradetypes.Plan) {
		ctx.Logger().Info("Applying no-op upgrade plan for release " + plan.Name)
	}
)

// appUpgradeHandler is the app-aware handler signature; it is adapted to
// upgradetypes.UpgradeHandler when registered with the upgrade keeper.
type appUpgradeHandler = func(*App, sdk.Context, upgradetypes.Plan)

// appUpgrade describes the store changes (module stores added, deleted, or
// renamed) and optional migration handler for one named upgrade plan.
type appUpgrade struct {
	Added   []string
	Deleted []string
	Renamed []storetypes.StoreRename
	Handler appUpgradeHandler
}
// handlers maps upgrade plan names to their store changes and migration
// logic. An empty appUpgrade means "no store changes, no-op handler".
var handlers = map[string]appUpgrade{
	"v0.2.0": {},
	"v0.2.1": {
		// v0.2.1 resets marker module params to their defaults.
		Handler: func(app *App, ctx sdk.Context, plan upgradetypes.Plan) {
			app.MarkerKeeper.SetParams(ctx, markertypes.DefaultParams())
		},
	},
	// TODO - Add new upgrade definitions here.
}
// InstallCustomUpgradeHandlers registers every upgrade in |handlers| with the
// app's upgrade keeper, adapting app-aware handlers and falling back to the
// no-op handler for plans without custom logic.
func InstallCustomUpgradeHandlers(app *App) {
	for name, upgrade := range handlers {
		handler := noopHandler
		if upgrade.Handler != nil {
			// Capture a per-iteration copy so the closure does not observe
			// values from later loop iterations.
			u := upgrade
			handler = func(ctx sdk.Context, plan upgradetypes.Plan) {
				u.Handler(app, ctx, plan)
			}
		}
		app.UpgradeKeeper.SetUpgradeHandler(name, handler)
	}
}
// CustomUpgradeStoreLoader provides upgrade handlers for store and application module upgrades at specified versions.
// It returns nil (meaning "use the default store loader") unless an upgrade
// plan with store changes is executing at the next block height.
func CustomUpgradeStoreLoader(app *App, info storetypes.UpgradeInfo) baseapp.StoreLoader {
	// Current upgrade info is empty or we are at the wrong height, skip this.
	if info.Name == "" || info.Height-1 != app.LastBlockHeight() {
		return nil
	}
	// Find the upgrade handler that matches this currently executing upgrade.
	for name, upgrade := range handlers {
		// If the plan is executing this block, set the store locator to create any
		// missing modules, delete unused modules, or rename any keys required in the plan.
		if info.Name == name && !app.UpgradeKeeper.IsSkipHeight(info.Height) {
			storeUpgrades := storetypes.StoreUpgrades{
				Added:   upgrade.Added,
				Renamed: upgrade.Renamed,
				Deleted: upgrade.Deleted,
			}
			// Plans without store changes need no special loader.
			if isEmptyUpgrade(storeUpgrades) {
				app.Logger().Info("No store upgrades required",
					"plan", name,
					"height", info.Height,
				)
				return nil
			}
			app.Logger().Info("Store upgrades",
				"plan", name,
				"height", info.Height,
				"upgrade.added", upgrade.Added,
				"upgrade.deleted", upgrade.Deleted,
				"upgrade.renamed", upgrade.Renamed,
			)
			return upgradetypes.UpgradeStoreLoader(info.Height, &storeUpgrades)
		}
	}
	return nil
}
// isEmptyUpgrade reports whether the upgrade specifies no store changes at all.
func isEmptyUpgrade(u storetypes.StoreUpgrades) bool {
	switch {
	case len(u.Added) > 0, len(u.Deleted) > 0, len(u.Renamed) > 0:
		return false
	default:
		return true
	}
}
|
package 数
// climbStairs returns the number of distinct ways to climb to step n when
// each move climbs either 1 or 2 steps (LeetCode 70).
//
// Fix: the previous version allocated a fixed dp slice of length 50, which
// panicked with index-out-of-range for n >= 50 (and for negative n); the
// table is now sized from n and small inputs are handled explicitly.
func climbStairs(n int) int {
	// n <= 0 keeps the original behavior of returning 0.
	if n <= 0 {
		return 0
	}
	if n <= 2 {
		return n
	}
	// dp[i] is the number of ways to reach step i.
	dp := make([]int, n+1)
	dp[1], dp[2] = 1, 2
	for i := 3; i <= n; i++ {
		dp[i] = dp[i-1] + dp[i-2]
	}
	return dp[n]
}
|
package connection
// Connector establishes connections to a remote endpoint.
type Connector interface {
	// Dial opens a new connection and returns a handle for reading/writing.
	Dial() (ConnectorReadWriter, error)
}

// ConnectorReadWriter is a command-oriented connection handle.
type ConnectorReadWriter interface {
	// Close releases the underlying connection.
	Close() error
	// Write sends a single command over the connection.
	Write(command string) error
	// Read returns the next payload received from the connection.
	Read() ([]byte, error)
}
/*
* @lc app=leetcode.cn id=509 lang=golang
*
* [509] 斐波那契数
*/
package solution
// @lc code=start
// fib returns the N-th Fibonacci number (fib(0)=0, fib(1)=1), computed
// iteratively in O(N) time and O(1) space.
func fib(N int) int {
	if N < 2 {
		return N
	}
	prev, curr := 0, 1
	for i := 2; i <= N; i++ {
		prev, curr = curr, prev+curr
	}
	return curr
}
// @lc code=end
|
package znr_test
import (
"fmt"
"reflect"
"strings"
"testing"
znr "github.com/billglover/zn-reader"
)
// TestKnownPhrases inserts a small vocabulary into a trie and verifies which
// phrases KnownPhrases extracts from several sample texts.
func TestKnownPhrases(t *testing.T) {
	vl := znr.VocabList{
		znr.Vocab{Writing: "你"},
		znr.Vocab{Writing: "是"},
		znr.Vocab{Writing: "好"},
		znr.Vocab{Writing: "友"},
		znr.Vocab{Writing: "你好"},
	}
	tr := znr.NewTrie()
	for _, v := range vl {
		tr.Insert(v.Writing)
	}
	cases := []struct {
		txt string
		tr  znr.Trie
		out []string
	}{
		{txt: "你好世界", tr: tr, out: []string{"你好"}},
		{txt: "你好世界,你今天怎么样?", tr: tr, out: []string{"你好", "你"}},
		{txt: "你是谁?", tr: tr, out: []string{"你", "是"}},
		{txt: "我是你的好朋友", tr: tr, out: []string{"是", "你", "好", "友"}},
	}
	for _, c := range cases {
		known, err := tr.KnownPhrases(c.txt)
		if err != nil {
			t.Error(err)
		}
		if reflect.DeepEqual(known, c.out) == false {
			t.Errorf("%v != %v", strings.Join(known, ","), c.out)
		}
	}
}

// BenchmarkKnownPhrases measures KnownPhrases over input prefixes of 256-2048
// characters taken from the txt fixture below.
func BenchmarkKnownPhrases(b *testing.B) {
	vl := znr.VocabList{
		znr.Vocab{Writing: "你"},
		znr.Vocab{Writing: "是"},
		znr.Vocab{Writing: "好"},
		znr.Vocab{Writing: "友"},
		znr.Vocab{Writing: "你好"},
	}
	tr := znr.NewTrie()
	for _, v := range vl {
		tr.Insert(v.Writing)
	}
	// Sub-benchmarks at doubling input lengths: 1<<8 .. 1<<11 bytes.
	for t := 8; t < 12; t++ {
		l := 1 << t
		b.Run(fmt.Sprintf("%03d", l), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				_, _ = tr.KnownPhrases(txt[0:l])
			}
		})
	}
}
// TestTrieInsert inserts only the cases flagged f=true and then checks that
// Find reports exactly those as present (including the empty string as absent).
func TestTrieInsert(t *testing.T) {
	cases := []struct {
		txt string
		f   bool // whether the word is inserted and hence expected to be found
	}{
		{txt: "北京", f: true},
		{txt: "中国", f: true},
		{txt: "北", f: true},
		{txt: "安静", f: false},
		{txt: "", f: false},
	}
	tr := znr.NewTrie()
	for _, c := range cases {
		if c.f == false {
			continue
		}
		tr.Insert(c.txt)
	}
	for _, c := range cases {
		if f := tr.Find(c.txt); f != c.f {
			t.Errorf("%s found: %t expected %t", c.txt, f, c.f)
		}
	}
}
// txt is the benchmark input corpus for BenchmarkKnownPhrases.
// Source: https://zh.wikipedia.org/wiki/中华人民共和国
var txt = `中华人民共和国,简称中国或大陸[註 1],是一个位於欧亚大陆东部的社会主义国家及主权国家,法定首都为北京[15]。中国領土陸地面積估約
960萬平方公里,是世界上纯陸地[註 14]面積第二大、陸地[註 15]面積第三大、總面積第三大或第四大的國家[註 16][16],其分為23個省份[註 17]、
5個自治區、4個直轄市和2個特別行政區。中國地势西高东低而呈現三级阶梯分布,大部分地区属于溫帶、副熱帶季风气候,地理景致與氣候型態丰富多樣,
有冰川、丹霞、黃土、沙漠、喀斯特等多种地貌[17],中国北方分布有乾草原和荒漠,南方有热带雨林,西部至西南部則有天山山脈、帕米爾高原、青藏高原、
喀喇崑崙山脈和喜馬拉雅山脈,东临太平洋。领海由渤海(内海)以及黄海、东海、南海三大边海组成[18],水域面积约470万平方千米,分布有大小岛屿7600个。
中国疆域東至黑龙江省佳木斯市抚远市的黑瞎子岛中部,西至新疆境内的帕米尔高原,北至黑龙江省大兴安岭地区的漠河县,南至南海曾母暗沙。
中国是目前世界上人口最多的國家,約有14億人(不包括香港、澳门特别行政区及未实际管辖的台湾省)[20],同時也是一个多民族国家,共有受到官方承認的
民族56個,其中汉族人口佔91.51%[21]。以普通话和规范汉字为国家通用语言文字,少数民族地区可使用自己民族的語言文字。自1986年实行九年義務教育制
度,就读公立学校的学生由政府提供其学费。
中国目前为世界第二大经济体,GDP总量仅次于美国。1978年改革開放後,中國成为經濟成長最快的經濟體之一[22][23]。当前,该国对外贸易额世界第一,
是世界上最大的商品出口國及第二大的進口國,依國內生產總值按購買力平價位列世界第一、而國際匯率則排名世界第二[24]。2018年,中国國內生產總值依
購買力平價為255,161.28亿美元[25],同比2017年增长16,914.92亿美元,增长速度为7.10% ,增量同样居全球第一,增速在总量排名前20的国家中位居
第一[12]。2000年时中国人均GDP仅有959美元,但到2019年,依据國際匯率計算,中国GDP为14.36万亿美元,人均GDP已超过1万美元,而部分东部沿海省
市的人均國內生產總值更已经超过2~2.5万美元,对于众多的14亿人口大国能够达到这个数值,表明中国的经济体量和消费市场巨大。改革开放以来,贫困问题
已得到改善,然而國民贫富差距大等社會问题仍需解决[26][27]。
科技方面,中国在航天航空、装备制造业、高速鐵路、新能源、核技术、超级计算机、量子网络、人工智能、5G通訊等領域有较强实力,研發經費則位居世界第
二,也是世界第二个超万亿美元投入研发的国家[28]。國防預算為世界第二高每年超过1700亿美元的军费投入,擁有世界規模最大作战力量的常備部隊及三位
一體的核打擊能力并拥有在亚太地区局部优势的作战能力和拥有一支蓝水海军的作战力量。[29][30]
1949年中国共产党领导中国人民解放军在内战中取得优势,实际控制了中國大陸,并于同年10月1日宣布建立中华人民共和国和中央人民政府,与遷至台灣地區
的中華民國政府形成兩岸分治的格局至今。该国成立初期遵循和平共处五项原则的外交政策,在1971年取得在聯合國的中國代表權同时继承了原中華民國的联合
国安理会常任理事国地位后陆续加入了部分联合国其他专门机构,并广泛参与重要國際組織例如国际奥委会、亚太经合组织、二十国集团、世界贸易组织,并成
为了上海合作组织、金砖国家、一带一路、亚洲基础设施投资银行、区域全面经济伙伴关系协定等国际合作组织项目的发起国和创始国。随着该国的国际影响力
增强,已被许多国家、组织、智库视为世界重要的潜在超级大國之一和世界经济重要支柱。`
|
package review
import (
"fmt"
"html"
"html/template"
"regexp"
"strings"
)
// ProductReview represents a client's product review
type ProductReview struct {
	ProductID    int    `json:"productid"` // reviewed product; 0 means missing
	Review       string `json:"review"`    // free-text review body
	ReviewerName string `json:"name"`
	EmailAddress string `json:"email"`
	Rating       int    `json:"rating"` // expected 1-5; 0 means missing
}
// Sanitize escapes html and javascript in the review, to help prevent XSS attacks
// NOTE(review): the text is HTML-escaped first and then JS-escaped, so the
// stored value is double-encoded — confirm the rendering layer expects this
// exact order before changing it.
func (r *ProductReview) Sanitize() {
	r.Review = html.EscapeString(r.Review)
	r.Review = template.JSEscapeString(r.Review)
}
// NotifyClient notifies a client about their review with the given msg and notifiers.
// Errors are collected and returned; one notifier failing does not stop the rest.
func (r *ProductReview) NotifyClient(msg string, approved bool, notifiers ...ClientNotifier) (errors []error) {
	for _, notifier := range notifiers {
		err := notifier.Notify(r, approved, msg)
		if err != nil {
			errors = append(errors, err)
		}
	}
	return errors
}
// ApproveReview vets the product review for approval using the passed in
// Reviewers; the review is approved only if every reviewer accepts it.
func (r *ProductReview) ApproveReview(reviewers ...Reviewer) bool {
	for _, rev := range reviewers {
		if !rev.Review(r) {
			return false
		}
	}
	return true
}
// Validate ensures all the input values in the review are valid, returning
// one error per violation (nil slice when the review is fully valid).
func (r *ProductReview) Validate() (errors []error) {
	// check params exist
	var params []string
	if r.ProductID == 0 {
		params = append(params, "Product ID")
	}
	if r.Review == "" {
		params = append(params, "Review Text")
	}
	if r.ReviewerName == "" {
		params = append(params, "Reviewer Name")
	}
	if r.EmailAddress == "" {
		params = append(params, "Email Address")
	}
	if r.Rating == 0 {
		params = append(params, "Rating")
	}
	if len(params) > 0 {
		err := fmt.Errorf("Missing param(s): %s", strings.Join(params, ", "))
		errors = append(errors, err)
	}
	// validate reviewer name: only letters and digits are allowed.
	// Fix: the previous pattern `^[a-zA-Z0-9].$` only ever matched
	// two-character strings, and the result was inverted — valid names were
	// flagged and invalid ones passed.
	if r.ReviewerName != "" {
		if ok, _ := regexp.MatchString(`^[a-zA-Z0-9]+$`, r.ReviewerName); !ok {
			err := fmt.Errorf("Invalid reviewer name format: please use only characters and digits")
			errors = append(errors, err)
		}
	}
	// validate email address format
	if r.EmailAddress != "" {
		regex := regexp.MustCompile(`^[A-Za-z0-9._%-]+@[A-Za-z0-9.-]+[.][A-Za-z]+$`)
		if !regex.MatchString(r.EmailAddress) {
			err := fmt.Errorf("Invalid email address format")
			errors = append(errors, err)
		} else if len(r.EmailAddress) > 50 {
			err := fmt.Errorf("Email address exceeds max limit of 50 characters")
			errors = append(errors, err)
		}
	}
	// validate rating range (0 already reported as missing above)
	if r.Rating != 0 && (r.Rating > 5 || r.Rating < 1) {
		err := fmt.Errorf("Rating must be a value in the range of 1 to 5")
		errors = append(errors, err)
	}
	// validate comment length, if any
	if r.Review != "" && len(r.Review) > 3850 {
		err := fmt.Errorf("Review length is limited to 3850 characters")
		errors = append(errors, err)
	}
	return errors
}
|
package main
import "fmt"
// sum adds up the elements of s and sends the total on c.
func sum(s []int, c chan int) {
	total := 0
	for i := range s {
		total += s[i]
	}
	c <- total
}
// fib sends the first n Fibonacci numbers (starting 0, 1) on c, then closes
// the channel so that receivers ranging over it terminate.
func fib(n int, c chan int) {
	a, b := 0, 1
	for ; n > 0; n-- {
		c <- a
		a, b = b, a+b
	}
	close(c)
}
// main demonstrates Go channel basics: splitting a sum across two goroutines,
// buffered channels, and ranging over a channel until it is closed.
func main() {
	s := []int{7, 2, 8, -9, 4, 0}
	c := make(chan int)
	go sum(s[:len(s)/2], c)
	go sum(s[len(s)/2:], c)
	// Receive the two partial sums (arrival order is unspecified).
	x, y := <-c, <-c
	fmt.Println(x, y, x+y)
	// Buffered channel: sends do not block until the buffer is full.
	ch := make(chan int, 2)
	ch <- 8
	ch <- 9
	fmt.Println(<-ch)
	fmt.Println(<-ch)
	// range and close:
	// `for i := range c` keeps receiving values from the channel until it is
	// closed. Only the sender should close a channel; sending on a closed
	// channel panics. Channels usually do not need closing at all, except to
	// terminate a range loop like the one below.
	z := make(chan int, 10)
	go fib(cap(z), z)
	//v, ok := <-ch
	for i := range z {
		fmt.Println(i)
	}
}
|
package exer9
const Message = "Hello world!" |
package repository
import (
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
"time"
)
// Ouin is one dictionary entry as returned by the sakura-paris dictionary API.
type Ouin struct {
	Heading string `json:"heading"` // entry headword
	Text    string `json:"text"`    // entry body text
	Page    int    `json:"page"`
	Offset  int    `json:"offset"`
}
// GetOuinList looks up |tango| in the 広辞苑 dictionary via the
// sakura-paris.org API and returns the matching entries. Any transport, read,
// or decode failure yields an empty (non-nil) slice.
func GetOuinList(tango string) []Ouin {
	v := url.Values{}
	v.Add("api", "1")
	v.Add("dict", "広辞苑")
	v.Add("q", tango)
	// Renamed from `url` — the original shadowed the net/url package.
	reqURL := "https://sakura-paris.org/dict/?" + v.Encode()
	client := &http.Client{Timeout: time.Duration(10) * time.Second}
	resp, err := client.Get(reqURL)
	if err != nil {
		return []Ouin{}
	}
	// Fix: close the body on every path. The defer was previously placed
	// after ReadAll, so the early returns on read/unmarshal errors leaked
	// the response body and its connection.
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return []Ouin{}
	}
	ouinList := make([]Ouin, 0)
	if err := json.Unmarshal(body, &ouinList); err != nil {
		return []Ouin{}
	}
	return ouinList
}
|
package matcher
import (
"github.com/fmstephe/matching_engine/coordinator"
"github.com/fmstephe/matching_engine/msg"
"net"
"runtime"
"strconv"
"testing"
)
// Because we are communicating via UDP, messages could arrive out of order, in practice they travel in-order via localhost
const (
matcherOrigin = iota
clientOrigin = iota
)
type netwkTesterMaker struct {
ip [4]byte
freePort int
}
func newMatchTesterMaker() MatchTesterMaker {
return &netwkTesterMaker{ip: [4]byte{127, 0, 0, 1}, freePort: 1201}
}
// Make wires a matcher to a test client via two in-memory coordinators that
// exchange messages over localhost UDP, consuming two fresh ports.
func (tm *netwkTesterMaker) Make() MatchTester {
	serverPort := tm.freePort
	tm.freePort++
	clientPort := tm.freePort
	tm.freePort++
	// Build matcher
	m := NewMatcher(100)
	coordinator.InMemory(mkReadConn(serverPort), mkWriteConn(clientPort), m, matcherOrigin, "Matching Engine", false)
	// Build client
	receivedMsgs := make(chan *msg.Message, 1000)
	toSendMsgs := make(chan *msg.Message, 1000)
	c := newClient(receivedMsgs, toSendMsgs)
	coordinator.InMemory(mkReadConn(clientPort), mkWriteConn(serverPort), c, clientOrigin, "Test Client ", false)
	return &netwkTester{receivedMsgs: receivedMsgs, toSendMsgs: toSendMsgs}
}
// netwkTester drives a matcher through the in-memory coordinator pair,
// exposing send/expect helpers used by the shared test suite.
type netwkTester struct {
	receivedMsgs chan *msg.Message
	toSendMsgs   chan *msg.Message
}

// Send queues m for delivery to the matcher.
func (nt *netwkTester) Send(t *testing.T, m *msg.Message) {
	nt.toSendMsgs <- m
}

// Expect blocks for the next message from the matcher and asserts it equals e.
func (nt *netwkTester) Expect(t *testing.T, e *msg.Message) {
	r := <-nt.receivedMsgs
	validate(t, r, e, 2)
}

// ExpectOneOf blocks for the next message and passes if it equals any of es.
func (nt *netwkTester) ExpectOneOf(t *testing.T, es ...*msg.Message) {
	r := <-nt.receivedMsgs
	for _, e := range es {
		if *e == *r {
			return
		}
	}
	t.Errorf("Expecting one of %v, received %v instead", es, r)
}

// Cleanup shuts the matcher down by sending a SHUTDOWN message.
func (nt *netwkTester) Cleanup(t *testing.T) {
	m := &msg.Message{}
	m.Kind = msg.SHUTDOWN
	nt.toSendMsgs <- m
}
// client is the test-side coordinator app: it records messages received from
// the matcher and forwards messages queued for sending.
type client struct {
	coordinator.AppMsgHelper
	receivedMsgs chan *msg.Message
	toSendMsgs   chan *msg.Message
}

// newClient wires a client to the given receive/send queues.
func newClient(receivedMsgs, toSendMsgs chan *msg.Message) *client {
	return &client{receivedMsgs: receivedMsgs, toSendMsgs: toSendMsgs}
}
// Run pumps messages in both directions: inbound messages are recorded on
// receivedMsgs, queued outbound messages are forwarded, and a SHUTDOWN
// message is echoed back before the loop terminates.
func (c *client) Run() {
	for {
		select {
		case m := <-c.In:
			// Fix: guard against nil before reading m.Kind. The original
			// checked `m != nil` only after dereferencing m.Kind, so a nil
			// message (e.g. from a closed channel) would panic before ever
			// reaching the check.
			if m == nil {
				continue
			}
			if m.Kind == msg.SHUTDOWN {
				c.Out <- m
				return
			}
			c.receivedMsgs <- m
		case m := <-c.toSendMsgs:
			c.Out <- m
		}
	}
}
// mkWriteConn opens a UDP connection for sending to the given local port,
// panicking on failure (test-only helper).
func mkWriteConn(port int) *net.UDPConn {
	conn, err := net.DialUDP("udp", nil, mustUDPAddr(port))
	if err != nil {
		panic(err)
	}
	return conn
}

// mkReadConn binds a UDP socket listening on the given port, panicking on
// failure (test-only helper).
func mkReadConn(port int) *net.UDPConn {
	conn, err := net.ListenUDP("udp", mustUDPAddr(port))
	if err != nil {
		panic(err)
	}
	return conn
}

// mustUDPAddr resolves ":port", panicking on failure.
func mustUDPAddr(port int) *net.UDPAddr {
	addr, err := net.ResolveUDPAddr("udp", ":"+strconv.Itoa(port))
	if err != nil {
		panic(err)
	}
	return addr
}
// validate fails the test when message m differs from the expected message e.
// stackOffset selects which caller frame to report in the failure message.
func validate(t *testing.T, m, e *msg.Message, stackOffset int) {
	if *m != *e {
		_, fname, lnum, _ := runtime.Caller(stackOffset)
		t.Errorf("\nExpecting: %v\nFound: %v \n%s:%d", e, m, fname, lnum)
	}
}
// TestRunCoordinatedTestSuite runs the shared match-tester suite against the
// UDP/coordinator-backed implementation.
func TestRunCoordinatedTestSuite(t *testing.T) {
	RunTestSuite(t, newMatchTesterMaker())
}
|
package data
import "time"
// Contest mirrors a row of the "Contests" table.
type Contest struct {
	ID       uint `gorm:"column:id;primary_key"`
	Name     string
	FreezeAt time.Time `gorm:"column:freezeAt"` // scoreboard freeze moment
	Start    time.Time
	End      time.Time
}

// TableName tells gorm which table backs Contest.
func (Contest) TableName() string {
	return "Contests"
}
// User mirrors a row of the "Users" table.
type User struct {
	ID        uint `gorm:"column:id;primary_key"`
	Name      string
	StrId     string `gorm:"column:strId"`     // external/login identifier
	GroupName string `gorm:"column:groupName"`
	IsAdmin   bool   `gorm:"column:isAdmin"`
	ContestID uint   `gorm:"column:ContestId"`
}

// TableName tells gorm which table backs User.
func (User) TableName() string {
	return "Users"
}
// Problem mirrors a row of the "Problems" table.
type Problem struct {
	ID        uint   `gorm:"column:id;primary_key"`
	Code      string `gorm:"type:text"`
	ContestID uint   `gorm:"column:ContestId"`
	// Fix: the tag previously read `gorm:"score"` without the `column:` key
	// used by every other mapped field; spell out the column mapping
	// explicitly (gorm's default naming for Score is "score", so the mapped
	// column is unchanged).
	Score uint `gorm:"column:score"`
}

// TableName tells gorm which table backs Problem.
func (Problem) TableName() string {
	return "Problems"
}
// Submission mirrors a row of the "Submissions" table.
type Submission struct {
	ID          uint `gorm:"column:id;primary_key"`
	Result      int  // judge result code; see the constants below
	ProblemCode string
	CreatedAt   time.Time `gorm:"column:createdAt"`
	ContestID   uint      `gorm:"column:ContestId"`
	ProblemID   uint      `gorm:"column:ProblemId"`
	UserID      uint      `gorm:"column:UserId"`
}

// TableName tells gorm which table backs Submission.
func (Submission) TableName() string {
	return "Submissions"
}
// Judge result codes stored in Submission.Result.
const (
	ACCEPTED            = 4
	PRESENTATION_ERROR  = 5
	WRONG_ANSWER        = 6
	TIME_LIMIT_EXCEED   = 7
	MEMORY_LIMIT_EXCEED = 8
	OUTPUT_LIMIT_EXCEED = 9
	RUNTIME_ERROR       = 10
	COMPILE_ERROR       = 11
)

// IsAccepted reports whether the judge result code means "accepted".
func IsAccepted(status int) bool {
	return status == ACCEPTED
}

// IsError reports whether the judge result code is anything but "accepted".
func IsError(status int) bool {
	return !IsAccepted(status)
}
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package checksum
import (
"bytes"
"io"
"strings"
"testing"
encrypt2 "github.com/pingcap/tidb/util/encrypt"
"github.com/stretchr/testify/require"
)
// TestChecksumReadAt writes two 5100-byte payloads through four nested
// checksum writers, then verifies ReadAt through equally nested readers,
// including the io.EOF behavior near the end of the data.
func TestChecksumReadAt(t *testing.T) {
	f := newFakeFile()
	w := newTestBuff("0123456789", 510)
	csw := NewWriter(NewWriter(NewWriter(NewWriter(f))))
	n1, err := csw.Write(w.Bytes())
	require.NoError(t, err)
	n2, err := csw.Write(w.Bytes())
	require.NoError(t, err)
	err = csw.Close()
	require.NoError(t, err)
	// assertReadAt reads 10 bytes at off through fresh nested readers and
	// checks the error, count, and bytes (unread tail stays zeroed).
	assertReadAt := func(off int64, assertErr error, assertN int, assertString string) {
		cs := NewReader(NewReader(NewReader(NewReader(f))))
		r := make([]byte, 10)
		n, err := cs.ReadAt(r, off)
		require.ErrorIs(t, err, assertErr)
		require.Equal(t, assertN, n)
		require.Equal(t, assertString, string(r))
	}
	assertReadAt(0, nil, 10, "0123456789")
	assertReadAt(5, nil, 10, "5678901234")
	// Only 5 payload bytes remain here, so the read reports io.EOF.
	assertReadAt(int64(n1+n2)-5, io.EOF, 5, "56789\x00\x00\x00\x00\x00")
}
// TestAddOneByte ensures that whether encrypted or not, when reading data,
// both the current block and the following block have errors.
func TestAddOneByte(t *testing.T) {
	t.Run("unencrypted", func(t *testing.T) {
		testAddOneByte(t, false)
	})
	t.Run("encrypted", func(t *testing.T) {
		testAddOneByte(t, true)
	})
}

// testAddOneByte corrupts the underlying byte stream by inserting one extra
// byte at offset 5000 and expects checksum failures from that block onwards.
func testAddOneByte(t *testing.T, encrypt bool) {
	f := newFakeFile()
	insertPos := 5000
	// fc injects a single zero byte into whichever write spans insertPos.
	fc := func(b []byte, offset int) []byte {
		if offset < insertPos && offset+len(b) >= insertPos {
			pos := insertPos - offset
			b = append(append(b[:pos], 0), b[pos:]...)
		}
		return b
	}
	ctrCipher, done := assertUnderlyingWrite(t, encrypt, f, fc)
	if done {
		return
	}
	// Read 10 bytes every 1000 offsets until EOF; blocks before the
	// corruption point read fine, everything at/after it fails.
	for i := 0; ; i++ {
		err := underlyingReadAt(f, encrypt, ctrCipher, 10, i*1000)
		if err == io.EOF {
			break
		}
		if i < 5 {
			require.NoError(t, err)
		} else {
			require.ErrorIs(t, err, errChecksumFail)
		}
	}
}
// TestDeleteOneByte ensures that whether encrypted or not, when reading data,
// both the current block and the following block have errors.
func TestDeleteOneByte(t *testing.T) {
	t.Run("unencrypted", func(t *testing.T) {
		testDeleteOneByte(t, false)
	})
	t.Run("encrypted", func(t *testing.T) {
		testDeleteOneByte(t, true)
	})
}

// testDeleteOneByte corrupts the underlying byte stream by dropping the byte
// just before offset 5000 and expects checksum failures from that block on.
func testDeleteOneByte(t *testing.T, encrypt bool) {
	f := newFakeFile()
	deletePos := 5000
	// fc removes one byte from whichever write spans deletePos.
	fc := func(b []byte, offset int) []byte {
		if offset < deletePos && offset+len(b) >= deletePos {
			pos := deletePos - offset
			b = append(b[:pos-1], b[pos:]...)
		}
		return b
	}
	ctrCipher, done := assertUnderlyingWrite(t, encrypt, f, fc)
	if done {
		return
	}
	// Read 10 bytes every 1000 offsets until EOF; blocks before the
	// corruption point read fine, everything at/after it fails.
	for i := 0; ; i++ {
		err := underlyingReadAt(f, encrypt, ctrCipher, 10, i*1000)
		if err == io.EOF {
			break
		}
		if i < 5 {
			require.NoError(t, err)
		} else {
			require.ErrorIs(t, err, errChecksumFail)
		}
	}
}
// TestModifyOneByte ensures that whether encrypted or not, when reading data,
// only the current block has error.
func TestModifyOneByte(t *testing.T) {
	t.Run("unencrypted", func(t *testing.T) {
		testModifyOneByte(t, false)
	})
	t.Run("encrypted", func(t *testing.T) {
		testModifyOneByte(t, true)
	})
}

// testModifyOneByte flips a single byte just before offset 5000; unlike
// insert/delete, length is preserved, so only that one block should fail.
func testModifyOneByte(t *testing.T, encrypt bool) {
	f := newFakeFile()
	modifyPos := 5000
	// fc decrements one byte in whichever write spans modifyPos.
	fc := func(b []byte, offset int) []byte {
		if offset < modifyPos && offset+len(b) >= modifyPos {
			pos := modifyPos - offset
			b[pos-1] = b[pos-1] - 1
		}
		return b
	}
	ctrCipher, done := assertUnderlyingWrite(t, encrypt, f, fc)
	if done {
		return
	}
	// Only the block containing the modified byte (i == 5) must fail.
	for i := 0; ; i++ {
		err := underlyingReadAt(f, encrypt, ctrCipher, 10, i*1000)
		if err == io.EOF {
			break
		}
		if i != 5 {
			require.NoError(t, err)
		} else {
			require.ErrorIs(t, err, errChecksumFail)
		}
	}
}
// TestReadEmptyFile ensures that whether encrypted or not, no error will occur.
func TestReadEmptyFile(t *testing.T) {
	t.Run("unencrypted", func(t *testing.T) {
		testReadEmptyFile(t, false)
	})
	t.Run("encrypted", func(t *testing.T) {
		testReadEmptyFile(t, true)
	})
}

// testReadEmptyFile reads at various offsets of an empty file and expects a
// clean io.EOF (never a checksum error) every time.
func testReadEmptyFile(t *testing.T, encrypt bool) {
	f := newFakeFile()
	var err error
	var ctrCipher *encrypt2.CtrCipher
	if encrypt {
		ctrCipher, err = encrypt2.NewCtrCipher()
		if err != nil {
			return
		}
	}
	for i := 0; i <= 10; i++ {
		var underlying io.ReaderAt = f
		if encrypt {
			underlying = encrypt2.NewReader(underlying, ctrCipher)
		}
		underlying = NewReader(underlying)
		r := make([]byte, 10)
		_, err := underlying.ReadAt(r, int64(i*1020))
		require.ErrorIs(t, err, io.EOF)
	}
}
// TestModifyThreeBytes ensures whether encrypted or not, when reading data,
// only the current block has error.
func TestModifyThreeBytes(t *testing.T) {
	t.Run("unencrypted", func(t *testing.T) {
		testModifyThreeBytes(t, false)
	})
	t.Run("encrypted", func(t *testing.T) {
		testModifyThreeBytes(t, true)
	})
}

// testModifyThreeBytes flips three bytes inside the full-sized (1024-byte)
// write spanning offset 5000; length is preserved so only that block fails.
func testModifyThreeBytes(t *testing.T, encrypt bool) {
	f := newFakeFile()
	modifyPos := 5000
	fc := func(b []byte, offset int) []byte {
		if offset < modifyPos && offset+len(b) >= modifyPos {
			// modify 3 bytes
			if len(b) == 1024 {
				b[200] = b[200] - 1
				b[300] = b[300] - 1
				b[400] = b[400] - 1
			}
		}
		return b
	}
	ctrCipher, done := assertUnderlyingWrite(t, encrypt, f, fc)
	if done {
		return
	}
	// Only the block containing the modified bytes (i == 5) must fail.
	for i := 0; ; i++ {
		err := underlyingReadAt(f, encrypt, ctrCipher, 10, i*1000)
		if err == io.EOF {
			break
		}
		if i != 5 {
			require.NoError(t, err)
		} else {
			require.ErrorIs(t, err, errChecksumFail)
		}
	}
}
// TestReadDifferentBlockSize ensures whether encrypted or not,
// the result is right for cases:
// 1. Read blocks using offset at once
// 2. Read all data at once.
func TestReadDifferentBlockSize(t *testing.T) {
	t.Run("unencrypted", func(t *testing.T) {
		testReadDifferentBlockSize(t, false)
	})
	t.Run("encrypted", func(t *testing.T) {
		testReadDifferentBlockSize(t, true)
	})
}

// testReadDifferentBlockSize writes 10200 payload bytes and exercises reads
// of various sizes/offsets crossing block boundaries, including EOF cases
// where the buffer is larger than the remaining data.
func testReadDifferentBlockSize(t *testing.T, encrypt bool) {
	f := newFakeFile()
	var err error
	var underlying io.WriteCloser = f
	var ctrCipher *encrypt2.CtrCipher
	if encrypt {
		ctrCipher, err = encrypt2.NewCtrCipher()
		if err != nil {
			return
		}
		underlying = encrypt2.NewWriter(underlying, ctrCipher)
	}
	underlying = NewWriter(underlying)
	w := newTestBuff("0123456789", 510)
	_, err = underlying.Write(w.Bytes())
	require.NoError(t, err)
	_, err = underlying.Write(w.Bytes())
	require.NoError(t, err)
	err = underlying.Close()
	require.NoError(t, err)
	assertReadAt := assertReadAtFunc(t, encrypt, ctrCipher)
	// 2000-3000, across 2 blocks
	assertReadAt(2000, make([]byte, 1000), nil, 1000, strings.Repeat("0123456789", 100), f)
	// 3005-6005, across 4 blocks
	assertReadAt(3005, make([]byte, 3000), nil, 3000, strings.Repeat("5678901234", 300), f)
	// 10000-10200, not eof
	assertReadAt(10000, make([]byte, 200), nil, 200, strings.Repeat("0123456789", 20), f)
	// 10000-10200, eof
	assertReadAt(10000, make([]byte, 201), io.EOF, 200, strings.Join([]string{strings.Repeat("0123456789", 20), "\x00"}, ""), f)
	// 5000-10200, not eof
	assertReadAt(5000, make([]byte, 5200), nil, 5200, strings.Repeat("0123456789", 520), f)
	// 5000-10200, eof
	assertReadAt(5000, make([]byte, 6000), io.EOF, 5200, strings.Join([]string{strings.Repeat("0123456789", 520), strings.Repeat("\x00", 800)}, ""), f)
	// 0-10200, not eof
	assertReadAt(0, make([]byte, 10200), nil, 10200, strings.Repeat("0123456789", 1020), f)
	// 0-10200, eof
	assertReadAt(0, make([]byte, 11000), io.EOF, 10200, strings.Join([]string{strings.Repeat("0123456789", 1020), strings.Repeat("\x00", 800)}, ""), f)
}
// TestWriteDifferentBlockSize ensures whether encrypted or not, after writing data,
// it can read data correctly for cases:
// 1. Write some block at once.
// 2. Write some block and append some block.
func TestWriteDifferentBlockSize(t *testing.T) {
	t.Run("unencrypted", func(t *testing.T) {
		testWriteDifferentBlockSize(t, false)
	})
	t.Run("encrypted", func(t *testing.T) {
		testWriteDifferentBlockSize(t, true)
	})
}

// testWriteDifferentBlockSize writes the same 10200 bytes into f1 in one call
// and into f2 in 100-byte batches, then checks that both produce identical
// on-disk bytes and read back the same payload.
func testWriteDifferentBlockSize(t *testing.T, encrypt bool) {
	f1 := newFakeFile()
	f2 := newFakeFile()
	var err error
	w := newTestBuff("0123456789", 510)
	// Double the payload to 10200 bytes by appending the buffer to itself.
	w.Write(w.Bytes())
	var ctrCipher *encrypt2.CtrCipher
	if encrypt {
		ctrCipher, err = encrypt2.NewCtrCipher()
		if err != nil {
			return
		}
	}
	var underlying1 io.WriteCloser = f1
	var underlying2 io.WriteCloser = f2
	if encrypt {
		underlying1 = encrypt2.NewWriter(underlying1, ctrCipher)
		underlying2 = encrypt2.NewWriter(underlying2, ctrCipher)
	}
	underlying1 = NewWriter(underlying1)
	underlying2 = NewWriter(underlying2)
	// Write all data.
	_, err = underlying1.Write(w.Bytes())
	require.NoError(t, err)
	err = underlying1.Close()
	require.NoError(t, err)
	// Write data by 100 bytes one batch.
	lastPos := 0
	for i := 100; ; i += 100 {
		if i >= len(w.Bytes()) {
			_, err = underlying2.Write(w.Bytes()[lastPos:])
			require.NoError(t, err)
			break
		}
		_, err = underlying2.Write(w.Bytes()[lastPos:i])
		require.NoError(t, err)
		lastPos = i
	}
	err = underlying2.Close()
	require.NoError(t, err)
	// check two files is same
	require.EqualValues(t, f1.buf.Bytes(), f2.buf.Bytes())
	// check data
	assertReadAt := assertReadAtFunc(t, encrypt, ctrCipher)
	assertReadAt(0, make([]byte, 10200), nil, 10200, strings.Repeat("0123456789", 1020), f1)
	assertReadAt(0, make([]byte, 10200), nil, 10200, strings.Repeat("0123456789", 1020), f2)
}
// TestChecksumWriter writes 1000 bytes, flushes explicitly, and verifies the
// data is readable and the write cache is empty afterwards.
func TestChecksumWriter(t *testing.T) {
	f := newFakeFile()
	buf := newTestBuff("0123456789", 100)
	// Write 1000 bytes and flush.
	w := NewWriter(f)
	n, err := w.Write(buf.Bytes())
	require.NoError(t, err)
	require.Equal(t, 1000, n)
	err = w.Flush()
	require.NoError(t, err)
	checkFlushedData(t, f, 0, 1000, 1000, nil, buf.Bytes())
	// All data flushed, so no data in cache.
	cacheOff := w.GetCacheDataOffset()
	require.Equal(t, int64(1000), cacheOff)
}

// TestChecksumWriterAutoFlush fills the writer to its block capacity (1020
// bytes) and verifies the next write triggers an automatic flush.
func TestChecksumWriterAutoFlush(t *testing.T) {
	f := newFakeFile()
	buf := newTestBuff("0123456789", 102)
	w := NewWriter(f)
	n, err := w.Write(buf.Bytes())
	require.NoError(t, err)
	require.Equal(t, len(buf.Bytes()), n)
	// This write will trigger flush.
	n, err = w.Write([]byte("0"))
	require.NoError(t, err)
	require.Equal(t, 1, n)
	checkFlushedData(t, f, 0, 1020, 1020, nil, buf.Bytes())
	cacheOff := w.GetCacheDataOffset()
	require.Equal(t, int64(len(buf.Bytes())), cacheOff)
}
// newTestBuff returns a buffer holding n consecutive copies of str.
func newTestBuff(str string, n int) *bytes.Buffer {
	var buf bytes.Buffer
	buf.Grow(n * len(str))
	for ; n > 0; n-- {
		buf.WriteString(str)
	}
	return &buf
}
// mockWriter wraps an io.WriteCloser and lets tests corrupt the byte stream
// via f before it reaches the underlying writer.
type mockWriter struct {
	err    error // sticky error returned by Close when set
	w      io.WriteCloser
	f      func(b []byte, offset int) []byte // corruption hook; may be nil
	offset int                               // cumulative bytes written downstream
}

// newMockWriter builds a mockWriter that applies corruption function f.
func newMockWriter(w io.WriteCloser, f func(b []byte, offset int) []byte) *mockWriter {
	return &mockWriter{w: w, f: f}
}

// Write forwards p (possibly corrupted by f) downstream. It reports len(p)
// as written so the callers above never observe a short write.
func (w *mockWriter) Write(p []byte) (n int, err error) {
	// always write successfully.
	n = len(p)
	if w.f != nil {
		p = w.f(p, w.offset)
	}
	nn, err := w.w.Write(p)
	if err != nil {
		return n, err
	}
	w.offset += nn
	return n, err
}

// Close forwards to the wrapped writer unless a sticky error was injected.
func (w *mockWriter) Close() (err error) {
	if w.err != nil {
		return w.err
	}
	return w.w.Close()
}
// assertUnderlyingWrite writes two 5100-byte payloads through (optionally)
// encryption and checksum layers on top of a corrupting mockWriter. It
// returns the cipher used (nil when unencrypted) and done=true when cipher
// creation failed and the caller should bail out.
func assertUnderlyingWrite(t *testing.T, encrypt bool, f io.WriteCloser, fc func(b []byte, offset int) []byte) (*encrypt2.CtrCipher, bool) {
	var underlying io.WriteCloser = newMockWriter(f, fc)
	var ctrCipher *encrypt2.CtrCipher
	var err error
	if encrypt {
		ctrCipher, err = encrypt2.NewCtrCipher()
		if err != nil {
			return nil, true
		}
		underlying = encrypt2.NewWriter(underlying, ctrCipher)
	}
	underlying = NewWriter(underlying)
	w := newTestBuff("0123456789", 510)
	_, err = underlying.Write(w.Bytes())
	require.NoError(t, err)
	_, err = underlying.Write(w.Bytes())
	require.NoError(t, err)
	err = underlying.Close()
	require.NoError(t, err)
	return ctrCipher, false
}

// underlyingReadAt reads n bytes at off through (optionally) decryption and
// checksum layers and returns the resulting error, if any.
func underlyingReadAt(f io.ReaderAt, encrypt bool, ctrCipher *encrypt2.CtrCipher, n, off int) error {
	var underlying = f
	if encrypt {
		underlying = encrypt2.NewReader(underlying, ctrCipher)
	}
	underlying = NewReader(underlying)
	r := make([]byte, n)
	_, err := underlying.ReadAt(r, int64(off))
	return err
}

// assertReadAtFunc returns a helper that reads into r at off through the
// layered readers and asserts the error, byte count, and resulting string.
func assertReadAtFunc(t *testing.T, encrypt bool, ctrCipher *encrypt2.CtrCipher) func(off int64, r []byte, assertErr error, assertN int, assertString string, f io.ReaderAt) {
	return func(off int64, r []byte, assertErr error, assertN int, assertString string, f io.ReaderAt) {
		var underlying = f
		if encrypt {
			underlying = encrypt2.NewReader(underlying, ctrCipher)
		}
		underlying = NewReader(underlying)
		n, err := underlying.ReadAt(r, off)
		require.ErrorIs(t, err, assertErr)
		require.Equal(t, assertN, n)
		require.Equal(t, assertString, string(r))
	}
}

// checkFlushedData reads readBufLen bytes at off through a plain checksum
// reader and asserts the count, error, and exact bytes that were flushed.
var checkFlushedData = func(t *testing.T, f io.ReaderAt, off int64, readBufLen int, assertN int, assertErr error, assertRes []byte) {
	readBuf := make([]byte, readBufLen)
	r := NewReader(f)
	n, err := r.ReadAt(readBuf, off)
	require.ErrorIs(t, err, assertErr)
	require.Equal(t, assertN, n)
	require.Equal(t, 0, bytes.Compare(readBuf, assertRes))
}
// newFakeFile returns an empty in-memory file for tests.
func newFakeFile() *fakeFile {
	return &fakeFile{buf: bytes.NewBuffer(nil)}
}

// fakeFile is an in-memory io.WriteCloser / io.ReaderAt backed by a buffer.
type fakeFile struct {
	buf *bytes.Buffer
}

// Write appends p to the in-memory contents.
func (f *fakeFile) Write(p []byte) (n int, err error) {
	return f.buf.Write(p)
}

// Close is a no-op; the data remains readable afterwards.
func (f *fakeFile) Close() error {
	return nil
}

// ReadAt copies bytes starting at off into p. io.EOF is reported exactly
// when the copy reaches the end of the stored data (including off past it).
func (f *fakeFile) ReadAt(p []byte, off int64) (n int, err error) {
	data := f.buf.Bytes()
	size := int64(len(data))
	if off > size {
		return 0, io.EOF
	}
	n = copy(p, data[off:])
	if int64(n) == size-off {
		err = io.EOF
	}
	return n, err
}
|
package db
import (
_ "github.com/mattn/go-sqlite3"
"github.com/stretchr/testify/assert"
)
// TestGetHarvestablePlant seeds two modules and verifies the harvest-ready
// position returned for each plant type.
func (s *StoreSuite) TestGetHarvestablePlant() {
	s.store.CreatePlantTypes()
	s.store.CreateModuleWithPlants(1, "Basil")
	s.store.CreateModuleWithPlants(2, "Lettuce")
	inputs := []*PlantType{{Name: "Basil"}, {Name: "Lettuce"}}
	expectedOutcome := []*PositionOnFarm{{3, 1}, {6, 2}}
	for i, element := range inputs {
		harvestablePlant, err := s.store.GetHarvestablePlant(element)
		if err != nil {
			s.T().Fatal(err)
		}
		// assert.Equal takes (t, expected, actual); the original passed the
		// arguments reversed, mislabelling values in failure output.
		assert.Equal(s.T(), expectedOutcome[i], harvestablePlant)
	}
}
// TestHarvestDone marks a plant harvested and verifies the status reply plus
// the resulting per-type plantable counts.
func (s *StoreSuite) TestHarvestDone() {
	s.store.CreatePlantTypes()
	s.store.CreateModuleWithPlants(1, "Basil")
	s.store.CreateModuleWithPlants(2, "Lettuce")
	harvestDone, err := s.store.HarvestDone(&PositionOnFarm{6, 1})
	if err != nil {
		s.T().Fatal(err)
	}
	// assert.Equal takes (t, expected, actual); the originals were reversed.
	expected := &Status{"harvest done"}
	assert.Equal(s.T(), expected, harvestDone)
	plantablePlants, err := s.store.GetPlantsPerType("plantable")
	if err != nil {
		s.T().Fatal(err)
	}
	dbChange := []*PlantsPerPlantType{{"Basil", 1}, {"Lettuce", 0}}
	assert.Equal(s.T(), dbChange, plantablePlants)
}
|
// generated by jsonenums -type=Privacy -suffix=_enum; DO NOT EDIT
package schema
import (
"encoding/json"
"fmt"
)
// Lookup tables between Privacy values and their JSON string names.
// These defaults use the Go identifier names; init below replaces them with
// the fmt.Stringer form when Privacy implements it.
var (
	_PrivacyNameToValue = map[string]Privacy{
		"PrivacyPersonal": PrivacyPersonal,
		"PrivacyPublic": PrivacyPublic,
		"PrivacyPrivate": PrivacyPrivate,
		"PrivacyProtected": PrivacyProtected,
		"PrivacySecret": PrivacySecret,
	}
	// Inverse of _PrivacyNameToValue, consulted by MarshalJSON.
	_PrivacyValueToName = map[Privacy]string{
		PrivacyPersonal: "PrivacyPersonal",
		PrivacyPublic: "PrivacyPublic",
		PrivacyPrivate: "PrivacyPrivate",
		PrivacyProtected: "PrivacyProtected",
		PrivacySecret: "PrivacySecret",
	}
)
// init rebuilds the name table using Privacy's String() output when the type
// implements fmt.Stringer, so the JSON representation tracks the Stringer
// form instead of the raw identifier names.
func init() {
	var v Privacy
	// Probing a single zero value is enough: Stringer is a property of the
	// type, not of individual values.
	if _, ok := interface{}(v).(fmt.Stringer); ok {
		_PrivacyNameToValue = map[string]Privacy{
			interface{}(PrivacyPersonal).(fmt.Stringer).String(): PrivacyPersonal,
			interface{}(PrivacyPublic).(fmt.Stringer).String(): PrivacyPublic,
			interface{}(PrivacyPrivate).(fmt.Stringer).String(): PrivacyPrivate,
			interface{}(PrivacyProtected).(fmt.Stringer).String(): PrivacyProtected,
			interface{}(PrivacySecret).(fmt.Stringer).String(): PrivacySecret,
		}
	}
}
// MarshalJSON is generated so Privacy satisfies json.Marshaler.
// It prefers the fmt.Stringer form and otherwise falls back to the value
// table; unknown values produce an error rather than bogus output.
func (r Privacy) MarshalJSON() ([]byte, error) {
	if s, ok := interface{}(r).(fmt.Stringer); ok {
		return json.Marshal(s.String())
	}
	s, ok := _PrivacyValueToName[r]
	if !ok {
		return nil, fmt.Errorf("invalid Privacy: %d", r)
	}
	return json.Marshal(s)
}
// UnmarshalJSON is generated so Privacy satisfies json.Unmarshaler.
// Only names present in _PrivacyNameToValue (as rewritten by init) are
// accepted; anything else is rejected with an error.
func (r *Privacy) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return fmt.Errorf("Privacy should be a string, got %s", data)
	}
	v, ok := _PrivacyNameToValue[s]
	if !ok {
		return fmt.Errorf("invalid Privacy %q", s)
	}
	*r = v
	return nil
}
|
package main
import "github.com/miguelhun/go-microservices/src/api/app"
// main boots the HTTP application; all wiring lives in the app package.
func main() {
	app.StartApplication()
}
|
package main
import (
"bytes"
"math"
"os"
"path/filepath"
"strings"
)
// Base URL for static assets (icons) served by the API.
const baseStaticURL = "http://localhost:8080/v1/MyCloud/static/"
// Base URL for user file downloads.
const baseFilesURL = "http://localhost:8080/v1/MyCloud/files/"
// Root directory under which per-client files live on disk.
const clientsBaseDir = "/home/orestis/MyCloud"
func getPathFromURLParam(par string) string {
dirs := strings.Split(par, "_")
path := ""
for _, dirName := range dirs {
path = filepath.Join(path, dirName)
}
return path
}
func getFileIconLink(filename string, isDir bool) string {
fileIcons := map[string]string{
".pdf": "pdf.jpg",
".py": "python.jpg",
}
const defaultFileIcon = "file.jpg"
const directoryIcon = "folder.jpg"
var b bytes.Buffer
if isDir {
b.WriteString(baseStaticURL)
b.WriteString(directoryIcon)
return b.String()
}
extension := filepath.Ext(filename)
icon, hasKey := fileIcons[extension]
if !hasKey {
b.WriteString(baseStaticURL)
b.WriteString(defaultFileIcon)
return b.String()
}
b.WriteString(baseStaticURL)
b.WriteString(icon)
return b.String()
}
func getFileLink(filename string) string {
var b bytes.Buffer
b.WriteString(baseFilesURL)
b.WriteString(filename) // append
//fmt.Println(b.String()) // abcdef
return b.String()
}
//Return the size of path Directory in Gb
func getDirSize(path string) (float64, error) {
var size int64
err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
size += info.Size()
}
return err
})
sizeGB := float64(size) / 1024.0 / 1024.0 / 1024.0
return toFixed(sizeGB, 3), err
}
func round(num float64) int {
return int(num + math.Copysign(0.5, num))
}
func toFixed(num float64, precision int) float64 {
output := math.Pow(10, float64(precision))
return float64(round(num*output)) / output
}
|
package config_test
import (
"testing"
"github.com/mjpitz/highlander-proxy/internal/config"
"github.com/stretchr/testify/require"
)
// TestRouteSlice checks that valid route strings are parsed and collected
// while an invalid one is rejected with the expected message.
func TestRouteSlice(t *testing.T) {
	const goodA = "tcp://0.0.0.0:8080|tcp://localhost:8080"
	const goodB = "tcp://0.0.0.0:8090|tcp://localhost:8090"
	const bad = "invalid:/address/scheme"
	routes := &config.RouteSlice{}
	err := routes.Set(goodA)
	require.Nil(t, err)
	err = routes.Set(goodB)
	require.Nil(t, err)
	err = routes.Set(bad)
	require.NotNil(t, err)
	require.Equal(t, "route: missing pipe", err.Error())
	slice := routes.Routes()
	require.Len(t, slice, 2)
	expected := []struct {
		fromHost, toHost, port string
	}{
		{"0.0.0.0:8080", "localhost:8080", "8080"},
		{"0.0.0.0:8090", "localhost:8090", "8090"},
	}
	for i, e := range expected {
		require.Equal(t, "tcp", slice[i].From().Scheme)
		require.Equal(t, e.fromHost, slice[i].From().Host)
		require.Equal(t, e.port, slice[i].From().Port())
		require.Equal(t, "tcp", slice[i].To().Scheme)
		require.Equal(t, e.toHost, slice[i].To().Host)
		require.Equal(t, e.port, slice[i].To().Port())
	}
}
|
package main
import "fmt"
// Tree is a basic binary tree node holding an int value.
type Tree struct {
	value int
	left  *Tree
	right *Tree
}

// buildTree returns the fixed sample tree:
//
//	      4
//	    /   \
//	   2     7
//	  / \   / \
//	 1   3 6   9
func buildTree() Tree {
	return Tree{
		value: 4,
		left:  &Tree{value: 2, left: &Tree{value: 1}, right: &Tree{value: 3}},
		right: &Tree{value: 7, left: &Tree{value: 6}, right: &Tree{value: 9}},
	}
}
// traverseTree prints the tree values in pre-order (node, left, right).
func traverseTree(t Tree) {
	fmt.Println(t.value)
	for _, child := range []*Tree{t.left, t.right} {
		if child != nil {
			traverseTree(*child)
		}
	}
}
// reverseTree prints a mirrored view of t while descending.
//
// NOTE(review): t is passed by value, so the left/right swap mutates only
// this stack copy — the caller's tree is unchanged afterwards (main's final
// Println shows the original shape). The recursive calls likewise receive
// copies. A persisting reversal would need a *Tree parameter, which would
// change the signature. Nodes with exactly one child are also never
// descended into because of the && condition.
func reverseTree(t Tree) {
	//fmt.Println(t.value)
	fmt.Println("----------------")
	if t.left != nil && t.right != nil {
		// Swap on the local copy, then print the swapped view.
		tmp := t.left
		t.left = t.right
		t.right = tmp
		fmt.Println(t)
		fmt.Println(t.left)
		fmt.Println(t.right)
		reverseTree(*t.left)
		reverseTree(*t.right)
	}
}
// main builds the sample tree, prints it, runs the (print-only) reversal
// walk, then prints the tree again.
func main() {
	t := buildTree()
	fmt.Println(t)
	reverseTree(t)
	fmt.Println(t)
}
|
package interfaceDemo
import "fmt"
// If an interface declares methods, a struct (or other named type) must
// implement all of them to satisfy the interface.
// Phone implements the Usber interface via a struct.
type Phone struct {
	Name string
}
// To satisfy the Usber interface, Phone must implement every method of the
// usb interface.
// start prints "<name> 启动" (powers the phone on).
func (p Phone) start() {
	fmt.Println(p.Name, "启动")
}
// stop prints "<name> 关闭" (powers the phone off).
func (p Phone) stop() {
	fmt.Println(p.Name, "关闭")
}
// main demonstrates interface satisfaction: a Phone value is assigned to a
// Usber interface variable and driven through it.
// NOTE(review): this package is "interfaceDemo", not "main", so this main
// is never a program entry point; Usber is presumably declared elsewhere in
// the package — confirm.
func main() {
	var phone Usber = Phone{
		"三星手机",
	}
	phone.start()
	phone.stop()
}
|
package zlog
import (
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"os"
)
// CutConf configures size/age-based log-file rotation for ZapCut.
type CutConf struct {
	FileName string // path of the active log file
	MaxSize int // max size of the active file before rotation
	MaxBackups int // number of rotated files to keep
	MaxAge int // max age of rotated files
	Compress bool // compress rotated files
	LocalTime bool // use local time in rotated file names
	BufferSize int // unit: MB
}
// ZapCut builds a zapcore.Core that JSON-encodes log records and writes
// them through a rotating Logger configured from c. Records are emitted
// only at or above l's current level, re-checked on every write.
func ZapCut(c CutConf, z zapcore.EncoderConfig, l zap.AtomicLevel) zapcore.Core {
	lum := &Logger{Filename: c.FileName, MaxSize: c.MaxSize, MaxBackups: c.MaxBackups, MaxAge: c.MaxAge, Compress: c.Compress, LocalTime: c.LocalTime, bufferSize: c.BufferSize}
	zlvl := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
		return lvl >= l.Level()
	})
	// os_Stat is presumably an indirection over os.Stat (test seam) — confirm.
	_, err := os_Stat(lum.Filename)
	if os.IsNotExist(err) {
		// Pre-create the log file when it does not exist yet.
		// NOTE(review): any error from openNew is silently discarded here.
		lum.openNew()
	}
	code := zapcore.NewJSONEncoder(z)
	return zapcore.NewCore(code, zapcore.AddSync(lum), zlvl)
}
|
package main
import (
"flag"
"log"
"time"
)
// configFile is the path of the TOML configuration, settable via -configfile.
var configFile string
// init registers the -configfile flag before main calls flag.Parse.
func init() {
	flag.StringVar(&configFile, "configfile", "./config.toml", "file that will be parsed for configuration")
}
// main polls the external IP on a fixed interval and reconciles the
// Cloudflare DNS record whenever the IP changes.
func main() {
	flag.Parse()
	config := readConfig()
	CfVars := getCloudflareObjects(config)
	log.Printf("Will check DNS record every %d minutes.\n", config.RefreshRate)
	// Get zoneID
	zoneID, err := getZoneID(config, CfVars)
	if err != nil {
		log.Fatal(err)
	}
	// Sentinel value so the first comparison always mismatches and the
	// record is reconciled on the first pass. The original declared this
	// INSIDE the loop, resetting it every iteration — so the change
	// detection never worked and the record was re-pushed on every tick.
	curIp := "1.1.1.1"
	for {
		// Get ext IP
		tmpIp, err := get_ext_ip(config.ExtIpUrl)
		if err != nil {
			log.Panic(err)
		}
		if tmpIp != curIp {
			curIp = tmpIp
			// Populate struct that will be added as record to CF
			subDomainRecord := createRecord(config, curIp, config.Subdomain)
			// List DNS records that matches subdomain in config file.
			dnsRecords, err := listDNSRecords(config, CfVars, zoneID, config.Subdomain)
			if err != nil {
				log.Println(err)
			}
			err = checkRecords(config, CfVars, zoneID, dnsRecords, subDomainRecord, curIp)
			if err != nil {
				log.Fatal(err)
			}
		}
		time.Sleep(time.Duration(config.RefreshRate) * time.Minute)
	}
}
|
package migrationfiles
import (
logging "github.com/ipfs/go-log"
"github.com/syndtr/goleveldb/leveldb"
)
// log is the package-wide logger for file migrations.
var log = logging.Logger("migrate-files")
// Initial00 is the first, placeholder migration: it only logs that it ran
// and never touches db.
func Initial00(db *leveldb.DB) error {
	log.Infof("00Initial Migration Run successfully")
	return nil
}
|
package main
import (
	"net/http"
	"sync"

	"github.com/gin-gonic/gin"
)
// main wires up a small gin server: two JSON endpoints, two HTML pages, and
// a wrapping demo counter, listening on :8080.
func main() {
	gin.SetMode(gin.ReleaseMode)
	r := gin.Default() // router
	r.LoadHTMLGlob("templates/*")
	// http://localhost:8080/
	r.GET("/", func(c *gin.Context) {
		c.JSON(200, gin.H{
			"message": "root",
		})
	})
	// http://localhost:8080/ping
	r.GET("/ping", func(c *gin.Context) {
		c.JSON(200, gin.H{
			"message": "pong",
		})
	})
	// http://localhost:8080/ginny
	r.GET("/ginny", func(c *gin.Context) {
		c.HTML(
			http.StatusOK,
			"index.html",
			gin.H{},
		)
	})
	// http://localhost:8080/counting
	// Handlers run concurrently, so the shared counter must be guarded; the
	// original mutated it unsynchronized (a data race under parallel load).
	count := 0
	var countMu sync.Mutex
	r.GET("/counting", func(c *gin.Context) {
		countMu.Lock()
		count++
		if count > 10 {
			count = 0
		}
		current := count
		countMu.Unlock()
		c.HTML(
			http.StatusOK,
			"counting.html",
			gin.H{"count": current},
		)
	})
	r.Run() // listen and serve on 0.0.0.0:8080 (for windows "localhost:8080")
}
|
package dolbutil_test
import (
"math/rand"
"sync"
. "github.com/bryanl/dolb/dolbutil"
. "github.com/onsi/ginkgo"
)
// Verifies that a rand.Rand built on dolbutil's Source can be shared by
// concurrent goroutines without tripping the race detector.
// NOTE(review): rand.Rand is not safe for concurrent use by itself; this
// presumably holds only because NewSource returns a locked source — confirm.
var _ = Describe("Random", func() {
	It("is concurrency safe", func() {
		rnd := rand.New(NewSource())
		var wg sync.WaitGroup
		for i := 0; i < 10; i++ {
			wg.Add(1)
			go func() {
				rnd.Int63()
				wg.Done()
			}()
		}
		wg.Wait()
	})
})
|
package main
import (
"fmt"
"time"
"github.com/shirou/gopsutil/cpu"
"github.com/shirou/gopsutil/mem"
)
// main prints a virtual-memory snapshot and per-CPU usage, then exercises
// ccSay and deadCode, which are presumably defined elsewhere in this
// package — confirm.
func main() {
	// Errors from gopsutil are deliberately ignored in this demo.
	v, _ := mem.VirtualMemory()
	fmt.Printf("Total: %v, Free:%v, UsedPercent:%f%%\n", v.Total, v.Free, v.UsedPercent)
	// Samples CPU usage per core over one second (blocks ~1s).
	percent, _ := cpu.Percent(time.Second, true)
	fmt.Println(percent)
	ccSay()
	if !deadCode {
		fmt.Println("i'm not dead")
	}
}
|
package main
/*
* @lc app=leetcode id=109 lang=golang
*
* [109] Convert Sorted List to Binary Search Tree
*/
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// sortedListToBST converts a sorted singly-linked list into a
// height-balanced BST by recursively splitting the list at its midpoint
// found with slow/fast pointers.
func sortedListToBST(head *ListNode) *TreeNode {
	if head == nil {
		return nil
	}
	// Local sentinel instead of the original package-level dummy, which was
	// shared mutable state and made the function non-reentrant.
	dummy := &ListNode{0, head}
	slow, fast := dummy, dummy
	for fast.Next != nil && fast.Next.Next != nil {
		slow = slow.Next
		fast = fast.Next.Next
	}
	// slow.Next is the middle node: it becomes the root, and the list is
	// cut into a left half (dummy.Next..slow) and right half after it.
	root := &TreeNode{slow.Next.Val, nil, nil}
	head2 := slow.Next.Next
	slow.Next = nil
	root.Left = sortedListToBST(dummy.Next)
	root.Right = sortedListToBST(head2)
	return root
}
|
// test-map project doc.go
/*
test-map document
*/
package main
|
// Code for running targets directly through Please.
package run
import (
"fmt"
"os"
"os/exec"
"strings"
"sync"
"syscall"
"golang.org/x/sync/errgroup"
"gopkg.in/op/go-logging.v1"
"build"
"core"
"output"
)
// log is the package logger for the run subsystem.
var log = logging.MustGetLogger("run")
// Run implements the running part of 'plz run'.
// It never forks: on success the process is replaced by the target via
// exec, and any failure inside run() is fatal.
func Run(graph *core.BuildGraph, label core.BuildLabel, args []string) {
	run(graph, label, args, false, false)
}
// Parallel runs a series of targets in parallel.
// Returns 0 if all succeeded, otherwise the exit code of the first failure
// reported by g.Wait (the original comment claimed a bool return).
// At most numTasks targets run concurrently: each errgroup goroutine
// submits a single task to the bounded pool and blocks until it finishes.
// err is written inside the pool task and read only after wg.Wait(), so
// the WaitGroup provides the required happens-before edge.
func Parallel(graph *core.BuildGraph, labels []core.BuildLabel, args []string, numTasks int, quiet bool) int {
	pool := NewGoroutinePool(numTasks)
	var g errgroup.Group
	for _, label := range labels {
		label := label // capture locally
		g.Go(func() (err error) {
			var wg sync.WaitGroup
			wg.Add(1)
			pool.Submit(func() {
				if e := run(graph, label, args, true, quiet); e != nil {
					err = e
				}
				wg.Done()
			})
			wg.Wait()
			return
		})
	}
	if err := g.Wait(); err != nil {
		log.Error("Command failed: %s", err)
		// NOTE(review): assumes every error is a *exitError; a foreign error
		// type here would panic the type assertion.
		return err.(*exitError).code
	}
	return 0
}
// Sequential runs a series of targets sequentially.
// Returns 0 if all succeeded, otherwise the exit code of the first failing
// target (the original comment claimed a bool return); remaining targets
// are not run after a failure.
func Sequential(graph *core.BuildGraph, labels []core.BuildLabel, args []string, quiet bool) int {
	for _, label := range labels {
		log.Notice("Running %s", label)
		if err := run(graph, label, args, true, quiet); err != nil {
			log.Error("%s", err)
			return err.code
		}
	}
	return 0
}
// run implements the internal logic about running a target.
// If fork is true then we fork to run the target and return any error from the subprocesses.
// If it's false this function never returns (because we either win or die; it's like
// Game of Thrones except rather less glamorous).
// quiet additionally captures the child's combined output into the returned
// error instead of streaming it to our stdout/stderr.
func run(graph *core.BuildGraph, label core.BuildLabel, args []string, fork, quiet bool) *exitError {
	target := graph.TargetOrDie(label)
	if !target.IsBinary {
		log.Fatalf("Target %s cannot be run; it's not marked as binary", label)
	}
	// ReplaceSequences always quotes stuff in case it contains spaces or special characters,
	// that works fine if we interpret it as a shell but not to pass it as an argument here.
	arg0 := strings.Trim(build.ReplaceSequences(target, fmt.Sprintf("$(out_exe %s)", target.Label)), "\"")
	// Handle targets where $(exe ...) returns something nontrivial
	splitCmd := strings.Split(arg0, " ")
	if !strings.Contains(splitCmd[0], "/") {
		// Probably it's a java -jar, we need an absolute path to it.
		cmd, err := exec.LookPath(splitCmd[0])
		if err != nil {
			log.Fatalf("Can't find binary %s", splitCmd[0])
		}
		splitCmd[0] = cmd
	}
	// Prepend the expanded command so args is a full argv (args[0] = binary).
	args = append(splitCmd, args...)
	log.Info("Running target %s...", strings.Join(args, " "))
	output.SetWindowTitle("plz run: " + strings.Join(args, " "))
	if !fork {
		// Plain 'plz run'. One way or another we never return from the following line.
		must(syscall.Exec(splitCmd[0], args, os.Environ()), args)
	}
	// Run as a normal subcommand.
	// Note that we don't connect stdin. It doesn't make sense for multiple processes.
	cmd := core.ExecCommand(splitCmd[0], args[1:]...) // args here don't include argv[0]
	if !quiet {
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		must(cmd.Start(), args)
		err := cmd.Wait()
		return toExitError(err, cmd, nil)
	}
	out, err := cmd.CombinedOutput()
	return toExitError(err, cmd, out)
}
// must aborts the program when err is non-nil, logging the command that
// failed; it is a no-op for a nil error.
func must(err error, cmd []string) {
	if err == nil {
		return
	}
	log.Fatalf("Error running command %s: %s", strings.Join(cmd, " "), err)
}
// toExitError attempts to extract the exit code from an error.
func toExitError(err error, cmd *exec.Cmd, out []byte) *exitError {
exitCode := 1
if err == nil {
return nil
} else if exitError, ok := err.(*exec.ExitError); ok {
// This is a little hairy; there isn't a good way of getting the exit code,
// but this should be reasonably portable (at least to the platforms we care about).
if status, ok := exitError.Sys().(syscall.WaitStatus); ok {
exitCode = status.ExitStatus()
}
}
return &exitError{
msg: fmt.Sprintf("Error running command %s: %s\n%s", strings.Join(cmd.Args, " "), err, string(out)),
code: exitCode,
}
}
type exitError struct {
msg string
code int
}
func (e *exitError) Error() string {
return e.msg
}
|
package main
import (
"fmt"
"time"
)
// display sleeps five seconds, prints a marker, then sends 1234 on ch.
// The send blocks until main receives, since ch is unbuffered.
func display(ch chan int) {
	time.Sleep(5 * time.Second)
	fmt.Println("Inside display()")
	ch <- 1234
}
// main starts display on a goroutine and blocks on the channel receive, so
// both prints below happen only after display's send (~5s later).
func main() {
	ch := make(chan int)
	go display(ch)
	x := <-ch
	fmt.Println("Inside main()")
	fmt.Println("Printing x in main() after taking from channel:", x)
}
|
package http
import (
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/paddycakes/arranmore-api/internal/sensor"
)
// Handler wires the mux router to the sensor service.
type Handler struct {
	Router  *mux.Router
	Service *sensor.Service
}
// Response - an object to store responses from the api
// Message carries human-readable status; Error holds error text (empty when
// unused — note both fields are always serialized).
type Response struct {
	Message string
	Error string
}
// NewHandler - returns a pointer to a Handler
// The Router field is populated later by SetupRoutes.
func NewHandler(service *sensor.Service) *Handler {
	return &Handler{Service: service}
}
// SetupRoutes - sets up all routes for application
// Registers the sensor-metrics endpoint and a JSON health check.
func (h *Handler) SetupRoutes() {
	fmt.Println("Setting Up Routes")
	h.Router = mux.NewRouter()
	h.Router.HandleFunc("/api/{apiKey}/sensor/{deviceId}/metrics", h.GetSensorMetrics).Methods("GET")
	h.Router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json; charset=UTF-8")
		// Header and status must be set before the body is encoded.
		w.WriteHeader(http.StatusOK)
		if err := json.NewEncoder(w).Encode(Response{Message: "I am alive!"}); err != nil {
			panic(err)
		}
	})
}
// GetSensorMetrics - retrieve metrics for sensor ID
// Looks up metrics for the {apiKey}/{deviceId} in the URL and writes them
// as JSON; service errors yield a 500 with a JSON error body.
func (h *Handler) GetSensorMetrics(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	vars := mux.Vars(r)
	apiKey := vars["apiKey"]
	deviceId := vars["deviceId"]
	metrics, err := h.Service.GetMetrics(apiKey, deviceId)
	if err != nil {
		sendErrorResponse(w, "Error Retrieving Sensor Metrics for Client ID", err)
		return
	}
	// WriteHeader must precede the body write; the original called it after
	// Encode, where it is a superfluous no-op (net/http logs a warning).
	w.WriteHeader(http.StatusOK)
	if err := json.NewEncoder(w).Encode(metrics); err != nil {
		panic(err)
	}
}
func sendErrorResponse(w http.ResponseWriter, message string, err error) {
w.WriteHeader(http.StatusInternalServerError)
if err := json.NewEncoder(w).Encode(Response{Message: message, Error: err.Error()}); err != nil {
panic(err)
}
} |
package blowcbc
import (
"crypto/blowfish"
)
// printBytes dumps each byte of pb as a decimal number followed by a comma,
// then a newline (debug helper using the built-in print/println).
func printBytes(pb []byte) {
	for _, b := range pb {
		print(b, ",")
	}
	println("")
}
// padd appends PKCS#7-style padding so the result length is a multiple of
// the block size bz; at least one full byte of padding is always added
// (a whole extra block when the input is already aligned).
func padd(blocks []byte, bz byte) []byte {
	bl := len(blocks)
	// Pad length computed with int arithmetic. The original computed
	// byte(bl) % bz, which truncates lengths >= 256 and yields a wrong
	// remainder for any block size that does not divide 256.
	p := int(bz) - bl%int(bz)
	padded := make([]byte, bl+p)
	copy(padded, blocks)
	for i := 0; i < p; i++ {
		padded[bl+i] = byte(p)
	}
	return padded
}
// depadd strips PKCS#7-style padding: the last byte states how many padding
// bytes to drop from the end.
func depadd(blocks []byte) []byte {
	n := len(blocks)
	pad := int(blocks[n-1])
	return blocks[:n-pad]
}
// CBCEncrypt pads pt, then encrypts it block-by-block in CBC mode with
// Blowfish under key, chaining from iv. The key-setup error is ignored, as
// in the original (NewCipher only rejects out-of-range key lengths).
func CBCEncrypt(pt, key, iv []byte) []byte {
	blow, _ := blowfish.NewCipher(key)
	defer blow.Reset()
	bz := blow.BlockSize()
	padded := padd(pt, byte(bz))
	encrypted := make([]byte, len(padded))
	for i := 0; i < len(padded); i += bz {
		// XOR the plaintext block with the previous ciphertext (or the IV).
		for j := 0; j < bz; j++ {
			padded[i+j] ^= iv[j]
		}
		// cipher Encrypt takes (dst, src); the original passed the arguments
		// reversed, encrypting the still-zero output buffer instead of the
		// XORed plaintext.
		blow.Encrypt(encrypted[i:i+bz], padded[i:i+bz])
		iv = encrypted[i : i+bz]
	}
	return encrypted
}
// CBCDecrypt reverses CBCEncrypt: it decrypts each block, XORs with the
// previous ciphertext block (or iv), and strips the padding.
func CBCDecrypt(ct, key, iv []byte) []byte {
	blow, _ := blowfish.NewCipher(key)
	defer blow.Reset()
	bz := blow.BlockSize()
	dc := make([]byte, len(ct))
	for i := 0; i < len(ct); i += bz {
		// cipher Decrypt takes (dst, src); the original passed the arguments
		// reversed, decrypting the zeroed scratch buffer over the ciphertext.
		blow.Decrypt(dc[i:i+bz], ct[i:i+bz])
		for j := 0; j < bz; j++ {
			dc[i+j] ^= iv[j]
		}
		iv = ct[i : i+bz]
	}
	decrypted := depadd(dc)
	return decrypted
}
|
package server
import (
"log"
"time"
"github.com/gomodule/redigo/redis"
)
var (
	// redisPool is the shared connection pool (initialized elsewhere).
	redisPool *redis.Pool
	// statIncrScript initializes a stats hash field to 0, sets the hash's
	// expiry, then increments the field — so it always returns 1.
	// NOTE(review): the expiry timestamp is passed as KEYS[3] although it is
	// not a key; this works on a single node but is unconventional and
	// cluster-unfriendly — confirm intent.
	statIncrScript *redis.Script = redis.NewScript(3, `
	local skey = KEYS[1]
	local sfield = KEYS[2]
	local sexp = KEYS[3]
	redis.call('HMSET', skey, sfield, 0)
	redis.call('EXPIREAT', skey, sexp)
	return redis.call('HINCRBY', skey, sfield, 1)
	`)
)
// statIncr increments field in the day-bucketed stats hash
// "<key>:YYYY-MM-DD", creating the hash with a one-month expiry on first
// use. It returns the new counter value, or 0 on a redis error (errors are
// logged, never returned).
func statIncr(c redis.Conn, key string, field string) int64 {
	var n, exp int64
	var exists int
	var err error
	key = key + ":" + time.Now().Format("2006-01-02")
	exists, err = redis.Int(c.Do("EXISTS", key))
	if err != nil {
		log.Printf("redis stat increment error (%s)", err.Error())
		return 0
	}
	if exists < 1 {
		// First hit today: create the field and set the expiry via the Lua
		// script. NOTE(review): EXISTS-then-create is a check-then-act race;
		// two concurrent first hits can both run the script, and its
		// HMSET ... 0 would reset a counter the other caller just bumped.
		exp = time.Now().AddDate(0, 1, 0).Unix()
		n, err = redis.Int64(statIncrScript.Do(c, key, field, exp))
		if err != nil {
			log.Printf("redis stat increment error (%s)", err.Error())
		}
		return n
	}
	n, err = redis.Int64(c.Do("HINCRBY", key, field, 1))
	if err != nil {
		log.Printf("redis stat increment error (%s)", err.Error())
	}
	return n
}
|
/*
Copyright (c) 2017-2018 Simon Schmidt
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package remote
import "time"
import "github.com/valyala/fasthttp"
import "github.com/maxymania/fastnntp-polyglot-labs/bucketstore"
import "github.com/maxymania/fastnntp-polyglot/buffer"
import "github.com/maxymania/fastnntp-polyglot-labs/bufferex"
import "github.com/maxymania/fastnntp-polyglot-labs/binarix"
import "fmt"
import "github.com/maxymania/fastnntp-polyglot-labs/bucketstore/degrader"
// HttpClient is the subset of fasthttp's client used here: one
// deadline-bounded request/response exchange.
type HttpClient interface{
	DoDeadline(req *fasthttp.Request, resp *fasthttp.Response, deadline time.Time) error
}
// required fills in the Host header fasthttp requires before sending; the
// placeholder value is presumably ignored by the bucket server — confirm.
func required(req *fasthttp.Request) {
	req.SetHost("unknown")
}
// Client talks to a single storage bucket (identified by uuid) through an
// HttpClient.
type Client struct{
	client HttpClient
	uuid []byte
	degr degrader.Degrader // failure degrader; not referenced by the methods visible here — confirm use
}
// NewClient returns a Client bound to the bucket identified by uuid.
func NewClient(c HttpClient, uuid []byte) *Client {
	return &Client{client: c, uuid: uuid}
}
// setUrl builds the request URI "/<uuid>/<encoded id>[/<date>]" in a pooled
// scratch buffer sized exactly for the pieces.
// NOTE(review): codec is assumed to be a package-level binary-to-text
// encoding — confirm.
func (c *Client) setUrl(req *fasthttp.Request, id []byte, date []byte) {
	encl := codec.EncodedLen(len(id))
	// 1 slash + uuid + 1 slash + encoded id + 1 slash + date.
	n := 1+len(c.uuid)+1+encl+1+len(date)
	buf := buffer.Get(n)
	defer buffer.Put(buf)
	uri := append(append(append((*buf)[:0],'/'),c.uuid...),'/')
	// Reserve encl bytes, then encode the id in place at the tail.
	uri = uri[:len(uri)+encl]
	codec.Encode(uri[len(uri)-encl:],id)
	if len(date)>0 { uri = append(append(uri,'/'),date...) }
	// Relies on SetRequestURIBytes copying, since buf is returned to the pool.
	req.SetRequestURIBytes(uri)
}
// Put uploads an article's overview, head and body under id into this
// client's bucket, with the expiry date encoded in the URL. The response
// status is mapped onto bucketstore errors; nil means stored.
func (c *Client) Put(id, overv, head, body []byte, expire time.Time) error {
	var buf [32]byte
	var numbuf [16]byte
	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	// Release on every path; the original leaked both pooled objects when
	// DoDeadline returned an error.
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)
	c.setUrl(req, id, expire.AppendFormat(buf[:0], URLDate))
	req.Header.SetMethod("PUT")
	// Section lengths travel as headers so the server can split the body.
	req.Header.SetBytesV("X-Over", binarix.Itoa(int64(len(overv)), numbuf[:0]))
	req.Header.SetBytesV("X-Head", binarix.Itoa(int64(len(head)), numbuf[:0]))
	req.Header.SetBytesV("X-Body", binarix.Itoa(int64(len(body)), numbuf[:0]))
	req.AppendBody(overv)
	req.AppendBody(head)
	req.AppendBody(body)
	required(req)
	if err := c.client.DoDeadline(req, resp, time.Now().Add(time.Second)); err != nil {
		return err
	}
	switch resp.StatusCode() {
	case fasthttp.StatusBadRequest:
		return bucketstore.EBadRequest
	case fasthttp.StatusCreated:
		return nil
	case fasthttp.StatusConflict:
		return bucketstore.EExists
	case fasthttp.StatusInsufficientStorage:
		return bucketstore.EOutOfStorage
	case fasthttp.StatusNotFound:
		return bucketstore.ENoBucket
	case statusTemporaryFailure:
		return bucketstore.ETemporaryFailure
	default:
		return bucketstore.EDiskFailure
	}
}
// Get fetches the requested sections of article id; non-nil pointers select
// overview, head and body respectively. ok reports whether every requested
// section was returned.
func (c *Client) Get(id []byte, overv, head, body *bufferex.Binary) (ok bool, err error) {
	var buf [1]byte
	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	// Release on every path; the original leaked both pooled objects when
	// DoDeadline returned an error.
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)
	// Encode the requested sections as a bitmask digit appended to the URL.
	if overv != nil {
		buf[0] |= 1
	}
	if head != nil {
		buf[0] |= 2
	}
	if body != nil {
		buf[0] |= 4
	}
	buf[0] += '0'
	c.setUrl(req, id, buf[:])
	req.Header.SetMethod("GET")
	required(req)
	err = c.client.DoDeadline(req, resp, time.Now().Add(time.Second))
	if err != nil {
		return
	}
	overl := binarix.Atoi(resp.Header.Peek("X-Over"))
	headl := binarix.Atoi(resp.Header.Peek("X-Head"))
	bodyl := binarix.Atoi(resp.Header.Peek("X-Body"))
	// A requested section reported with zero length is treated as missing.
	if overv != nil && overl == 0 {
		return
	}
	if head != nil && headl == 0 {
		return
	}
	if body != nil && bodyl == 0 {
		return
	}
	rdata := resp.Body()
	if int64(len(rdata)) != (overl + headl + bodyl) {
		return
	}
	// Relies on bufferex.NewBinary copying its input (the original had the
	// same reliance), since resp is released when this function returns.
	if overv != nil {
		*overv = bufferex.NewBinary(rdata[:overl])
	}
	rdata = rdata[overl:]
	if head != nil {
		*head = bufferex.NewBinary(rdata[:headl])
	}
	rdata = rdata[headl:]
	if body != nil {
		*body = bufferex.NewBinary(rdata)
	}
	ok = true
	return
}
// Expire asks the server to drop this bucket's articles older than expire.
func (c *Client) Expire(expire time.Time) error {
	var buf [32]byte
	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	// Release on every path; the original leaked both pooled objects when
	// DoDeadline returned an error.
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)
	req.Header.SetMethod("DELETE")
	req.SetRequestURI(fmt.Sprintf("/%s/%s", c.uuid, expire.AppendFormat(buf[:0], URLDate)))
	required(req)
	if err := c.client.DoDeadline(req, resp, time.Now().Add(time.Second)); err != nil {
		return err
	}
	if resp.StatusCode() != fasthttp.StatusNoContent {
		return bucketstore.EDiskFailure
	}
	return nil
}
// FreeStorage reports the bucket's free storage in bytes, taken from the
// X-Free-Storage response header of a HEAD request.
func (c *Client) FreeStorage() (int64, error) {
	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	// Release on every path; the original leaked both pooled objects when
	// DoDeadline returned an error.
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)
	req.Header.SetMethod("HEAD")
	req.SetRequestURI(fmt.Sprintf("/%s", c.uuid))
	required(req)
	if err := c.client.DoDeadline(req, resp, time.Now().Add(time.Second)); err != nil {
		return 0, err
	}
	if resp.StatusCode() != fasthttp.StatusNoContent {
		return 0, bucketstore.EDiskFailure
	}
	return binarix.Atoi(resp.Header.Peek("X-Free-Storage")), nil
}
// MultiClient talks to the bucket service without a fixed bucket; Submit
// lets the server choose one, the Over* methods address one explicitly.
type MultiClient struct{
	client HttpClient
}
// NewMultiClient wraps c in a MultiClient.
func NewMultiClient(c HttpClient) *MultiClient {
	return &MultiClient{client: c}
}
// Submit stores an article without a preselected bucket; on success the
// server reports the chosen bucket via the X-Bucket response header.
func (m *MultiClient) Submit(id, overv, head, body []byte, expire time.Time) (bucket bufferex.Binary, err error) {
	var numbuf [16]byte
	var buf [32]byte
	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	// Release on every path; the original leaked both pooled objects when
	// DoDeadline returned an error. (An unused prefix const was dropped.)
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)
	// Do it the inefficient way!
	req.SetRequestURI(fmt.Sprintf("/api.submit/%s/%s", codec.EncodeToString(id), expire.AppendFormat(buf[:0], URLDate)))
	req.Header.SetMethod("POST")
	req.Header.SetBytesV("X-Over", binarix.Itoa(int64(len(overv)), numbuf[:0]))
	req.Header.SetBytesV("X-Head", binarix.Itoa(int64(len(head)), numbuf[:0]))
	req.Header.SetBytesV("X-Body", binarix.Itoa(int64(len(body)), numbuf[:0]))
	req.AppendBody(overv)
	req.AppendBody(head)
	req.AppendBody(body)
	required(req)
	if err = m.client.DoDeadline(req, resp, time.Now().Add(time.Second)); err != nil {
		return
	}
	switch resp.StatusCode() {
	case fasthttp.StatusBadRequest:
		err = bucketstore.EBadRequest
	case fasthttp.StatusCreated:
		err = nil
	case fasthttp.StatusConflict:
		err = bucketstore.EExists
	case fasthttp.StatusInsufficientStorage:
		err = bucketstore.EOutOfStorage
	case fasthttp.StatusNotFound:
		err = bucketstore.ENoBucket
	case statusTemporaryFailure:
		err = bucketstore.ETemporaryFailure
	default:
		err = bucketstore.EDiskFailure
	}
	if err == nil {
		bucket = bufferex.NewBinary(resp.Header.Peek("X-Bucket"))
	}
	return
}
// OverPut stores an article in an explicitly named bucket.
func (m *MultiClient) OverPut(bucket []byte, id, overv, head, body []byte, expire time.Time) error {
	c := Client{client: m.client, uuid: bucket}
	return c.Put(id, overv, head, body, expire)
}

// OverGet fetches article sections from an explicitly named bucket.
func (m *MultiClient) OverGet(bucket []byte, id []byte, overv, head, body *bufferex.Binary) (bool, error) {
	c := Client{client: m.client, uuid: bucket}
	return c.Get(id, overv, head, body)
}

// OverExpire expires old articles in an explicitly named bucket.
func (m *MultiClient) OverExpire(bucket []byte, expire time.Time) error {
	c := Client{client: m.client, uuid: bucket}
	return c.Expire(expire)
}

// OverFreeStorage reports free storage for an explicitly named bucket.
func (m *MultiClient) OverFreeStorage(bucket []byte) (int64, error) {
	c := Client{client: m.client, uuid: bucket}
	return c.FreeStorage()
}
|
/*
* Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package persistence
import (
"fmt"
"time"
"github.com/aws-samples/reactive-refarch-cloudformation/services/kinesis-consumer/model"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
"github.com/nu7hatch/gouuid"
)
// PersistData is a function to persist data in DynamoDB.
// It stamps msg with a fresh UUID and the current time, marshals it, and
// PutItems it into tablename, returning the generated ID and any error.
func PersistData(ddb dynamodb.DynamoDB, tablename string, msg model.Message) (string, error) {
	id, err := uuid.NewV4()
	if err != nil {
		// The original continued past this failure and dereferenced the nil
		// uuid below, panicking; bail out instead.
		fmt.Printf("Failed creating UUID %s", err)
		return "", err
	}
	fmt.Printf("Created UUID %s", id)
	msg.ID = id.String()
	msg.UpdatedAt = time.Now().Format(time.StampMicro)
	av, err := dynamodbattribute.MarshalMap(msg)
	if err != nil {
		// The original went on to PutItem a bad attribute map.
		fmt.Println("Got error marshalling map:")
		fmt.Println(err.Error())
		return "", err
	}
	input := &dynamodb.PutItemInput{
		Item:      av,
		TableName: aws.String(tablename),
	}
	if _, err = ddb.PutItem(input); err != nil {
		fmt.Println("Got error calling PutItem:")
		fmt.Println(err.Error())
	}
	return msg.ID, err
}
|
package main
import (
"bufio"
"fmt"
"net/http"
"github.com/Symantec/Dominator/lib/html"
)
// adminDashboardType serves the HTML status page, delegating the page body
// to the configured HtmlWriter.
type adminDashboardType struct {
	htmlWriter html.HtmlWriter
}
// newAdminDashboard wraps an HtmlWriter in the dashboard HTTP handler.
func newAdminDashboard(htmlWriter html.HtmlWriter) *adminDashboardType {
	return &adminDashboardType{htmlWriter: htmlWriter}
}
// ServeHTTP renders the keymaster status page. Any path other than "/" is
// answered with 404 so the handler can be mounted at the root.
func (dashboard *adminDashboardType) ServeHTTP(w http.ResponseWriter,
	req *http.Request) {
	if req.URL.Path != "/" {
		http.NotFound(w, req)
		return
	}
	// Buffer the whole page and flush once at the end.
	writer := bufio.NewWriter(w)
	defer writer.Flush()
	fmt.Fprintln(writer, "<title>keymaster status page</title>")
	fmt.Fprintln(writer, "<body>")
	fmt.Fprintln(writer, "<center>")
	fmt.Fprintln(writer, "<h1>keymaster status page</h1>")
	fmt.Fprintln(writer, "</center>")
	html.WriteHeaderWithRequest(writer, req)
	fmt.Fprintln(writer, "<h3>")
	// The injected writer supplies the dashboard's dynamic content.
	dashboard.htmlWriter.WriteHtml(writer)
	fmt.Fprintln(writer, "</h3>")
	fmt.Fprintln(writer, "<hr>")
	html.WriteFooter(writer)
	fmt.Fprintln(writer, "</body>")
}
|
package point
import (
"fmt"
"math"
"sort"
)
// Point is a 2-D point with float64 coordinates.
type Point struct {
	X float64 `json:"x"`
	Y float64 `json:"y"`
}
// sortable adapts a point slice plus a comparison to sort.Interface.
type sortable struct {
	pts []Point
	less func(Point, Point) bool
}
// Sort sorts pts in place using less; with a nil less every comparison
// reports false, so all elements compare as equal.
func Sort(pts []Point, less func(Point, Point) bool) {
	sort.Sort(sortable{pts: pts, less: less})
}
// Find returns the index of the first point satisfying f, or -1 when no
// point matches or f is nil.
func Find(pts []Point, f func(Point) bool) int {
	if f == nil {
		return -1
	}
	for i := range pts {
		if f(pts[i]) {
			return i
		}
	}
	return -1
}
// Count returns how many points satisfy f; a nil predicate counts none.
func Count(pts []Point, f func(Point) bool) int {
	if f == nil {
		return 0
	}
	n := 0
	for i := range pts {
		if f(pts[i]) {
			n++
		}
	}
	return n
}
// Distance returns the Euclidean distance between a and b.
func Distance(a, b Point) float64 {
	dx, dy := a.X-b.X, a.Y-b.Y
	return math.Sqrt(dx*dx + dy*dy)
}
// Len reports the number of points.
func (s sortable) Len() int { return len(s.pts) }

// Swap exchanges the points at i and j.
func (s sortable) Swap(i, j int) { s.pts[i], s.pts[j] = s.pts[j], s.pts[i] }

// Less defers to the user comparison; a nil comparison orders nothing.
func (s sortable) Less(i, j int) bool {
	return s.less != nil && s.less(s.pts[i], s.pts[j])
}
// String renders the point as "(x, y)" with two decimal places.
func (p Point) String() string {
	return fmt.Sprintf("(%.2f, %.2f)", p.X, p.Y)
}
|
package metadata
import (
"strconv"
"encoding/json"
"incognito-chain/common"
metadataCommon "incognito-chain/metadata/common"
)
// PDEWithdrawalRequest - privacy dex withdrawal request
// It carries the withdrawer address, the token pair, and the amount of
// shares to withdraw; signing data lives in the embedded
// MetadataBaseWithSignature.
type PDEWithdrawalRequest struct {
	WithdrawerAddressStr string
	WithdrawalToken1IDStr string
	WithdrawalToken2IDStr string
	WithdrawalShareAmt uint64
	MetadataBaseWithSignature
}
// PDEWithdrawalRequestAction pairs a withdrawal request with the hash of
// the transaction that carried it and the shard it arrived on.
type PDEWithdrawalRequestAction struct {
	Meta PDEWithdrawalRequest
	TxReqID common.Hash
	ShardID byte
}
// PDEWithdrawalAcceptedContent describes an accepted withdrawal: which
// token/pool is deducted, by how much (pool value and shares), the payout
// address, and the originating tx/shard.
type PDEWithdrawalAcceptedContent struct {
	WithdrawalTokenIDStr string
	WithdrawerAddressStr string
	DeductingPoolValue uint64
	DeductingShares uint64
	PairToken1IDStr string
	PairToken2IDStr string
	TxReqID common.Hash
	ShardID byte
}
// NewPDEWithdrawalRequest builds a PDEWithdrawalRequest for the given
// withdrawer, token pair and share amount, with a signature-capable metadata
// base of the given metaType. The error result is always nil; it exists for
// signature symmetry with sibling constructors.
func NewPDEWithdrawalRequest(
	withdrawerAddressStr string,
	withdrawalToken1IDStr string,
	withdrawalToken2IDStr string,
	withdrawalShareAmt uint64,
	metaType int,
) (*PDEWithdrawalRequest, error) {
	req := &PDEWithdrawalRequest{
		WithdrawerAddressStr:      withdrawerAddressStr,
		WithdrawalToken1IDStr:     withdrawalToken1IDStr,
		WithdrawalToken2IDStr:     withdrawalToken2IDStr,
		WithdrawalShareAmt:        withdrawalShareAmt,
		MetadataBaseWithSignature: *NewMetadataBaseWithSignature(metaType),
	}
	return req, nil
}
// Hash digests the metadata base hash, the withdrawer address, both token
// IDs, the share amount and (when present) the signature bytes into a single
// common.Hash identifying this request.
func (pc PDEWithdrawalRequest) Hash() *common.Hash {
	record := pc.MetadataBase.Hash().String()
	record += pc.WithdrawerAddressStr
	record += pc.WithdrawalToken1IDStr
	record += pc.WithdrawalToken2IDStr
	record += strconv.FormatUint(pc.WithdrawalShareAmt, 10)
	// len(nil) == 0, so the former extra nil check was redundant; the digest
	// input is unchanged.
	if len(pc.Sig) != 0 {
		record += string(pc.Sig)
	}
	// final hash
	hash := common.HashH([]byte(record))
	return &hash
}
// HashWithoutSig hashes the request exactly like Hash but always excludes
// the signature bytes, so the digest is stable across signing.
func (pc PDEWithdrawalRequest) HashWithoutSig() *common.Hash {
	record := pc.MetadataBase.Hash().String() +
		pc.WithdrawerAddressStr +
		pc.WithdrawalToken1IDStr +
		pc.WithdrawalToken2IDStr +
		strconv.FormatUint(pc.WithdrawalShareAmt, 10)
	// final hash
	h := common.HashH([]byte(record))
	return &h
}
// UnmarshalJSON decodes raw into pc, accepting WithdrawalShareAmt either as
// a JSON number or as a quoted string (via metadataCommon.Uint64Reader).
// The receiver is only overwritten when decoding succeeds; previously a
// failed decode still clobbered *pc with partially-populated data.
func (pc *PDEWithdrawalRequest) UnmarshalJSON(raw []byte) error {
	var temp struct {
		WithdrawerAddressStr  string
		WithdrawalToken1IDStr string
		WithdrawalToken2IDStr string
		WithdrawalShareAmt    metadataCommon.Uint64Reader
		MetadataBaseWithSignature
	}
	if err := json.Unmarshal(raw, &temp); err != nil {
		return err
	}
	*pc = PDEWithdrawalRequest{
		WithdrawerAddressStr:      temp.WithdrawerAddressStr,
		WithdrawalToken1IDStr:     temp.WithdrawalToken1IDStr,
		WithdrawalToken2IDStr:     temp.WithdrawalToken2IDStr,
		WithdrawalShareAmt:        uint64(temp.WithdrawalShareAmt),
		MetadataBaseWithSignature: temp.MetadataBaseWithSignature,
	}
	return nil
}
|
package controller
import (
"encoding/json"
"fmt"
"log"
"net/http"
"github.com/oceanpkg/ocean-backend/internal/auth"
"github.com/oceanpkg/ocean-backend/internal/database"
"github.com/oceanpkg/ocean-backend/internal/util"
)
// UserData is a struct that defines the data format for the post request for
// a new user. It decodes both from JSON and from form data (schema tags);
// LastName is the only optional field.
// TODO(review): original note says "CHANGE TO FORM DATA" — confirm whether
// the JSON tags are still needed.
type UserData struct {
	Email     string `json:"email" schema:"email,required"`
	Username  string `json:"username" schema:"username,required"`
	FirstName string `json:"firstName" schema:"firstname,required"`
	LastName  string `json:"lastName" schema:"lastname"`
	Password  string `json:"password" schema:"password,required"`
}
// CompleteUser is the handler for completing a user registration after
// signing in through google/github. It validates the "oauth_create" cookie's
// JWT and, for now, only greets the claimed user; the actual profile update
// is still TODO (see comment below).
func CompleteUser(w http.ResponseWriter, r *http.Request) {
	c, err := r.Cookie("oauth_create")
	if err == http.ErrNoCookie {
		// no cookie means the user never went through the OAuth flow
		log.Printf("No cookie found")
		w.WriteHeader(http.StatusUnauthorized)
		return
	} else if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	claims, err := auth.VerifyJWTOauth(c.Value)
	if err == auth.ErrInvalidToken {
		log.Printf("Invalid Token")
		w.WriteHeader(http.StatusUnauthorized)
		return
	} else if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	// Parse additional information like password, etc and update user
	w.Write([]byte(fmt.Sprintf("Welcome %s!", claims.UserID)))
}
// CreateUser is a rest endpoint for creating a new user. It expects form
// data matching UserData, rejects duplicate emails/usernames with 409,
// hashes the password, and persists the new user.
func CreateUser(w http.ResponseWriter, r *http.Request) {
	err := r.ParseForm()
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	var data UserData
	// decoder is the package-level form decoder declared elsewhere in this package.
	err = decoder.Decode(&data, r.PostForm)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	// validate data
	if !isValidCreateData(&data) {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	// Duplicate check: a nil error means a matching row was found.
	// NOTE(review): a DB failure here is indistinguishable from "no
	// duplicate" — confirm that is acceptable.
	var duplicateUser database.User
	errEmail := database.DB.Where(&database.User{Email: data.Email}).First(&duplicateUser).Error
	errUsername := database.DB.Where(&database.User{Username: data.Username}).First(&duplicateUser).Error
	if errEmail == nil || errUsername == nil {
		w.WriteHeader(http.StatusConflict)
		return
	}
	//salt and hash password
	passwordHash, err := util.GenHash(data.Password)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	user := &database.User{
		Email:        data.Email,
		Username:     data.Username,
		FirstName:    data.FirstName,
		LastName:     data.LastName,
		PasswordHash: passwordHash,
	}
	// Previously the Create error was silently dropped; surface it as a 500.
	if err := database.DB.Create(user).Error; err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Write([]byte("User successfully created"))
}
// Login authenticates c against the users table and, on success, issues a
// JWT via auth.JWTGen. It reports whether login succeeded; on failure the
// appropriate status code has already been written to w.
func Login(w http.ResponseWriter, r *http.Request, c *auth.Credentials) bool {
	// SECURITY: log only the username. The previous code logged the whole
	// Credentials struct (plaintext password) and the DB user record
	// (password hash).
	log.Println("Login request for user:", c.Username)
	if !isValidLoginData(c) {
		w.WriteHeader(http.StatusBadRequest)
		return false
	}
	//get user by username
	var user database.User
	err := database.DB.Where(&database.User{Username: c.Username}).First(&user).Error
	log.Println("Attempted login, database lookup error:", err)
	// a lookup error (including "record not found") means the login fails
	if err != nil {
		w.WriteHeader(http.StatusUnauthorized)
		return false
	}
	//validate password
	if !util.VerifyPassword(user.PasswordHash, c.Password) {
		log.Println("Invalid Password")
		w.WriteHeader(http.StatusUnauthorized)
		return false
	}
	_, err = auth.JWTGen(w, r, c, user.ID, user.Email)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return false
	}
	return true
}
// LoginFrontEndWrapper is the endpoint for logging in from the front end:
// it decodes form-encoded credentials, delegates to Login, and redirects to
// the static site on success.
func LoginFrontEndWrapper(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	var creds auth.Credentials
	if err := decoder.Decode(&creds, r.PostForm); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	if !Login(w, r, &creds) {
		return // Login already wrote the failure status
	}
	http.Redirect(w, r, "/static/", http.StatusPermanentRedirect)
}
// LoginAPIWrapper is the endpoint for logging in from the cli: credentials
// arrive as a JSON body, and success is signalled with a plain 200.
func LoginAPIWrapper(w http.ResponseWriter, r *http.Request) {
	var creds auth.Credentials
	if err := json.NewDecoder(r.Body).Decode(&creds); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	if !Login(w, r, &creds) {
		return // Login already wrote the failure status
	}
	w.WriteHeader(http.StatusOK)
}
// isValidCreateData reports whether all required registration fields are
// present and the username/email are well-formed. LastName may be empty.
func isValidCreateData(data *UserData) bool {
	switch "" {
	case data.Email, data.Username, data.FirstName, data.Password:
		return false
	}
	return util.IsValidName(data.Username) && util.IsValidEmail(data.Email)
}
// isValidLoginData reports whether the credentials carry a non-empty,
// well-formed username and a non-empty password.
func isValidLoginData(c *auth.Credentials) bool {
	if c.Username != "" && c.Password != "" {
		return util.IsValidName(c.Username)
	}
	return false
}
|
package controller
import (
"encoding/json"
"entity"
"github.com/gorilla/mux"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"usecase"
)
// PdaController exposes the PDA manager's operations as HTTP handlers
// (routed with gorilla/mux path variables).
type PdaController struct {
	PdaManager usecase.PDAManager
}
// ListAllPDA writes the JSON array of all known PDA names with status 200.
func (pdaController *PdaController) ListAllPDA(writer http.ResponseWriter, request *http.Request) {
	names := pdaController.PdaManager.ListAllPDAs()
	// Previously the local was named "json", shadowing the encoding/json
	// package, and the marshal error was silently dropped.
	payload, err := json.Marshal(names)
	if err != nil {
		http.Error(writer, err.Error(), http.StatusInternalServerError)
		return
	}
	writer.WriteHeader(200)
	writer.Write(payload)
}
// OpenPDA creates a new PDA named by the "id" path parameter from the
// configuration carried in the request body. Responds 400 if the body cannot
// be read or the manager rejects the configuration.
func (pdaController *PdaController) OpenPDA(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	pda_id := params["id"]
	all, err := ioutil.ReadAll(request.Body)
	// Check the read error before using the bytes (the original converted
	// first and checked afterwards).
	if err != nil {
		http.Error(writer, err.Error(), http.StatusBadRequest)
		return
	}
	pdaConf := string(all)
	err = pdaController.PdaManager.CreateNewPDA(pda_id, pdaConf)
	if err != nil {
		http.Error(writer, err.Error(), http.StatusBadRequest)
	}
}
// ResetPDA returns the PDA identified by the "id" path parameter to its
// initial state. Always responds 200 with an empty body.
func (pdaController *PdaController) ResetPDA(writer http.ResponseWriter, request *http.Request) {
	id := mux.Vars(request)["id"]
	//call manager for resetting the pda id
	pdaController.PdaManager.Reset(id)
}
// PutsToken feeds one input token (the request body) to PDA "id" at stream
// position "position". The caller must have connected first — the session
// cookie must identify this PDA — otherwise a 501 is returned. On success
// the refreshed PDA cookie is written back.
func (pdaController *PdaController) PutsToken(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	pda_id := params["id"]
	pdaStatus := pdaController.getPDAStatus(request)
	if pdaStatus.PdaId == pda_id {
		all, _ := ioutil.ReadAll(request.Body)
		token := string(all)
		position, _ := strconv.Atoi(params["position"])
		//call manager to pass a token with position
		err := pdaController.PdaManager.Puts(pda_id, token, position, pdaStatus)
		if err != nil {
			http.Error(writer, err.Error(), http.StatusInternalServerError)
			// This return was missing: the handler previously fell through
			// and wrote a cookie on top of the error response.
			return
		}
		cookieDetails := pdaController.PdaManager.GetCookieFor(pdaStatus.ReplicaId, pdaStatus.PdaId)
		pdaController.WriteCookie(writer, cookieDetails)
	} else {
		http.Error(writer, "Connect to the PDA first!! then send token", 501)
	}
}
// PutsEOS signals end-of-stream to PDA "id". The "position" path parameter
// is parsed but the manager ignores where EOS lands. Requires a prior
// connect (cookie identifying this PDA); otherwise responds 502.
func (pdaController *PdaController) PutsEOS(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	pda_id := params["id"]
	position, _ := strconv.Atoi(params["position"])
	//call manager to call eos for pda_id, ignore position of eos
	pdaStatus := pdaController.getPDAStatus(request)
	if pdaStatus.PdaId == pda_id {
		err := pdaController.PdaManager.PutsEOS(pda_id, position, pdaStatus)
		if err != nil {
			http.Error(writer, err.Error(), http.StatusInternalServerError)
			return
		}
		// refresh the session cookie after a successful EOS
		cookieDetails := pdaController.PdaManager.GetCookieFor(pdaStatus.ReplicaId, pdaStatus.PdaId)
		pdaController.WriteCookie(writer, cookieDetails)
	} else {
		http.Error(writer, "Connect to the PDA first,then send EOS", 502)
	}
}
// IsPDAAccepted writes, as JSON, whether PDA "id" currently accepts its
// input. Requires a prior connect (cookie identifying this PDA); otherwise
// responds 503.
func (pdaController *PdaController) IsPDAAccepted(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	pda_id := params["id"]
	//call manager to call is_accepted method for pda_id
	pdaStatus := pdaController.getPDAStatus(request)
	if pdaStatus.PdaId == pda_id {
		is_accepted, err := pdaController.PdaManager.Is_accepted(pda_id, pdaStatus)
		if err != nil {
			http.Error(writer, err.Error(), http.StatusInternalServerError)
			return
		}
		// Marshal of a bool cannot fail; discard the error explicitly like
		// the sibling handlers do (it was previously assigned to err and
		// never checked).
		accepted, _ := json.Marshal(is_accepted)
		writer.Write(accepted)
	} else {
		http.Error(writer, "connect to the PDA first, then call is_accepted ", 503)
	}
}
// PeekStack writes, as JSON, the top "k" stack symbols of PDA "id".
// Requires a prior connect (cookie identifying this PDA); otherwise
// responds 504.
func (pdaController *PdaController) PeekStack(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	pda_id := params["id"]
	peek_k, _ := strconv.Atoi(params["k"])
	//call manager to call peek method for pda_id and position peek_k from top
	pdaStatus := pdaController.getPDAStatus(request)
	if pdaStatus.PdaId == pda_id {
		top_k, err := pdaController.PdaManager.Peek(pda_id, peek_k, pdaStatus)
		if err != nil {
			http.Error(writer, err.Error(), http.StatusInternalServerError)
			return
		}
		top_kjson, _ := json.Marshal(top_k)
		writer.Write(top_kjson)
	} else {
		http.Error(writer, "First,Connect to the member of Replica group! ", 504)
	}
}
// StackSize writes, as JSON, the current stack depth of PDA "id". Requires
// a prior connect (cookie identifying this PDA); otherwise responds 505.
func (pdaController *PdaController) StackSize(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	pda_id := params["id"]
	//call manager to return the value of stack.Size() for pda_id
	pdaStatus := pdaController.getPDAStatus(request)
	if pdaStatus.PdaId == pda_id {
		size, err := pdaController.PdaManager.Size(pda_id, pdaStatus)
		if err != nil {
			http.Error(writer, err.Error(), http.StatusInternalServerError)
			return
		}
		stacksize, _ := json.Marshal(size)
		writer.Write(stacksize)
	} else {
		http.Error(writer, "First,Connect to the member of Replica group! ", 505)
	}
}
// CurrentStatePDA writes, as JSON, the current automaton state of PDA "id".
// Requires a prior connect (cookie identifying this PDA); otherwise
// responds 506.
func (pdaController *PdaController) CurrentStatePDA(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	pda_id := params["id"]
	//call manager to return the value of pdaprocessor.current_state() for pda_id
	pdaStatus := pdaController.getPDAStatus(request)
	if pdaStatus.PdaId == pda_id {
		state, err := pdaController.PdaManager.Currentstate(pda_id, pdaStatus)
		if err != nil {
			http.Error(writer, err.Error(), http.StatusInternalServerError)
			return
		}
		currentstate, _ := json.Marshal(state)
		writer.Write(currentstate)
	} else {
		http.Error(writer, "First,Connect to the member of Replica group! ", 506)
	}
}
// QueuedTokenPDA writes, as JSON, the tokens queued (received but not yet
// consumed) by PDA "id". Requires a prior connect (cookie identifying this
// PDA); otherwise responds 507.
func (pdaController *PdaController) QueuedTokenPDA(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	pda_id := params["id"]
	//call manager to return the value of queued tokens( that method id yet to be implemented) for pda_id
	pdaStatus := pdaController.getPDAStatus(request)
	if pdaStatus.PdaId == pda_id {
		token, err := pdaController.PdaManager.Queued_token(pda_id, pdaStatus)
		if err != nil {
			http.Error(writer, err.Error(), http.StatusInternalServerError)
			return
		}
		q_token, _ := json.Marshal(token)
		writer.Write(q_token)
	} else {
		http.Error(writer, "First,Connect to the member of Replica group! ", 507)
	}
}
// SnapshotPDA writes a three-element JSON array for PDA "id": current state,
// queued tokens joined by ",", and the top "k" stack symbols joined by " ".
// Requires a prior connect (cookie identifying this PDA); otherwise
// responds 508.
func (pdaController *PdaController) SnapshotPDA(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	pda_id := params["id"]
	peek_k, _ := strconv.Atoi(params["k"])
	//call manager to call three methods for pda_id: pdaprocessor.current_state(), queued tokens, peek(peek_k)
	pdaStatus := pdaController.getPDAStatus(request)
	if pdaStatus.PdaId == pda_id {
		state, err := pdaController.PdaManager.Currentstate(pda_id, pdaStatus)
		if err != nil {
			http.Error(writer, err.Error(), http.StatusInternalServerError)
			return
		}
		token, err := pdaController.PdaManager.Queued_token(pda_id, pdaStatus)
		if err != nil {
			http.Error(writer, err.Error(), http.StatusInternalServerError)
			return
		}
		peek, err := pdaController.PdaManager.Peek(pda_id, peek_k, pdaStatus)
		if err != nil {
			http.Error(writer, err.Error(), http.StatusInternalServerError)
			return
		}
		out := make([]string, 0)
		out = append(out, state)
		out = append(out, strings.Join(token, ","))
		out = append(out, strings.Join(peek, " "))
		json_out, _ := json.Marshal(out)
		writer.Write(json_out)
	} else {
		http.Error(writer, "First,Connect to the member of Replica group! ", 508)
	}
}
// ClosePDA closes the PDA identified by the "id" path parameter, responding
// 500 if the manager reports an error.
func (pdaController *PdaController) ClosePDA(writer http.ResponseWriter, request *http.Request) {
	id := mux.Vars(request)["id"]
	//call manager to close the pda
	if err := pdaController.PdaManager.Close(id); err != nil {
		http.Error(writer, err.Error(), http.StatusInternalServerError)
	}
}
// DeletePDA removes the PDA identified by the "id" path parameter,
// responding 500 if the manager reports an error.
func (pdaController *PdaController) DeletePDA(writer http.ResponseWriter, request *http.Request) {
	id := mux.Vars(request)["id"]
	//call manager to delete the pda
	if err := pdaController.PdaManager.Deletepda(id); err != nil {
		http.Error(writer, err.Error(), http.StatusInternalServerError)
	}
}
// GetAllReplicaIds writes the JSON list of all replica group IDs with 200.
func (pdaController *PdaController) GetAllReplicaIds(writer http.ResponseWriter, request *http.Request) {
	data, _ := json.Marshal(pdaController.PdaManager.GetAllReplicaIds())
	writer.WriteHeader(200)
	writer.Write(data)
}
// CreateReplicaGroup creates replica group "gid" from the configuration in
// the request body. Responds 400 on a malformed ID, an unreadable body, or a
// manager rejection.
func (pdaController *PdaController) CreateReplicaGroup(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	replicaId, err := strconv.Atoi(params["gid"])
	if err != nil {
		http.Error(writer, "Invalid replica group ID provided", http.StatusBadRequest)
		return
	}
	all, err := ioutil.ReadAll(request.Body)
	if err != nil {
		http.Error(writer, err.Error(), http.StatusBadRequest)
		return
	}
	conf := string(all)
	err = pdaController.PdaManager.CreateNewReplicaGroup(replicaId, conf)
	if err != nil {
		// previously this error was assigned and silently dropped
		http.Error(writer, err.Error(), http.StatusBadRequest)
	}
}
// ResetAllMembers resets every member PDA of replica group "gid" and hands
// the client a fresh session cookie pointing at a random group member.
func (pdaController *PdaController) ResetAllMembers(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	replicaId, err := strconv.Atoi(params["gid"])
	if err != nil {
		http.Error(writer, "Invalid replica group ID provided", http.StatusBadRequest)
		return
	}
	pdaController.PdaManager.ResetReplicaMembers(replicaId)
	groupMember := pdaController.PdaManager.GetRandomMemberAddress(replicaId)
	cookieDetails := pdaController.PdaManager.GetCookieFor(replicaId, groupMember)
	pdaController.WriteCookie(writer, cookieDetails)
}
// GetMembersAddress writes the JSON list of member addresses for replica
// group "gid". Responds 400 on a malformed group ID.
func (pdaController *PdaController) GetMembersAddress(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	replicaId, err := strconv.Atoi(params["gid"])
	if err != nil {
		http.Error(writer, "Invalid replica group ID provided", http.StatusBadRequest)
		return
	}
	groupMembers := pdaController.PdaManager.GetMemberAddress(replicaId)
	writer.WriteHeader(200)
	data, _ := json.Marshal(groupMembers)
	writer.Write(data)
}
// ConnectToAMember binds the client session to a random member of replica
// group "gid": it writes a session cookie for that member and returns the
// member's address as JSON. If the client was already connected to the same
// group, the previous session's PDA progress (position, stack, state, queue,
// clock) is carried over into the new cookie.
func (pdaController *PdaController) ConnectToAMember(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	replicaId, err := strconv.Atoi(params["gid"])
	if err != nil {
		http.Error(writer, "Invalid replica group ID provided", http.StatusBadRequest)
		return
	}
	pdsStatus := pdaController.getPDAStatus(request)
	groupMember := pdaController.PdaManager.GetRandomMemberAddress(replicaId)
	cookieDetails := pdaController.PdaManager.GetCookieFor(replicaId, groupMember)
	// re-connect within the same group: preserve in-flight progress
	if len(request.Cookies()) > 0 && pdsStatus.ReplicaId == replicaId {
		cookieDetails.LastConsumedIndex = pdsStatus.LastConsumedIndex
		cookieDetails.Stack = pdsStatus.Stack
		cookieDetails.State = pdsStatus.State
		cookieDetails.InputQueue = pdsStatus.InputQueue
		cookieDetails.Clock = pdsStatus.Clock
	}
	pdaController.WriteCookie(writer, cookieDetails)
	data, _ := json.Marshal(groupMember)
	writer.WriteHeader(200)
	writer.Write(data)
}
// CloseReplicaGrp closes replica group "gid" along with all of its member
// PDAs. Responds 400 on a malformed group ID.
func (pdaController *PdaController) CloseReplicaGrp(writer http.ResponseWriter, request *http.Request) {
	gid, err := strconv.Atoi(mux.Vars(request)["gid"])
	if err != nil {
		http.Error(writer, "Invalid replica group ID provided", http.StatusBadRequest)
		return
	}
	pdaController.PdaManager.CloseReplicaGrpAndMembers(gid)
}
// DeleteReplicaGrp deletes replica group "gid" along with all of its member
// PDAs. Responds 400 on a malformed group ID.
func (pdaController *PdaController) DeleteReplicaGrp(writer http.ResponseWriter, request *http.Request) {
	gid, err := strconv.Atoi(mux.Vars(request)["gid"])
	if err != nil {
		http.Error(writer, "Invalid replica group ID provided", http.StatusBadRequest)
		return
	}
	pdaController.PdaManager.DeleteReplicaGrpAndMembers(gid)
}
// Joinpda adds PDA "id" to the replica group whose integer ID is the request
// body. Responds 400 when the body is unreadable or not an integer.
func (pdaController *PdaController) Joinpda(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	pdaId := params["id"]
	all, err := ioutil.ReadAll(request.Body)
	// Check the read error before using the bytes (the original converted
	// first and checked afterwards).
	if err != nil {
		http.Error(writer, "Please pass Replica ID in body of the request", http.StatusBadRequest)
		return
	}
	// Previously the Atoi error was discarded, silently joining group 0 on
	// malformed input.
	replicaId, err := strconv.Atoi(string(all))
	if err != nil {
		http.Error(writer, "Replica ID must be an integer", http.StatusBadRequest)
		return
	}
	pdaController.PdaManager.JoinAReplicaGrp(pdaId, replicaId)
}
// GetC3State echoes back, as JSON, the PDA and replica group the current
// session cookie is bound to (zero values when there is no valid cookie).
func (pdaController *PdaController) GetC3State(writer http.ResponseWriter, request *http.Request) {
	pdsStatus := pdaController.getPDAStatus(request)
	cookieInfo := entity.CookieInfo{
		PdaId:     pdsStatus.PdaId,
		ReplicaId: pdsStatus.ReplicaId,
	}
	data, _ := json.Marshal(cookieInfo)
	writer.WriteHeader(200)
	writer.Write(data)
}
// GetPDACode writes, as JSON, the stored configuration/code of PDA "id".
func (pdaController *PdaController) GetPDACode(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	pdaId := params["id"]
	code := pdaController.PdaManager.GetPDACode(pdaId)
	data, _ := json.Marshal(code)
	writer.WriteHeader(200)
	writer.Write(data)
}
// getPDAStatus resolves the client's session from the "pda" cookie, looking
// the referenced PDA up in the manager. Returns the zero PDAStatus when no
// "pda" cookie is present.
func (pdaController *PdaController) getPDAStatus(request *http.Request) entity.PDAStatus {
	cookies := request.Cookies()
	var pdaStatus entity.PDAStatus
	for i := range cookies {
		if cookies[i].Name == "pda" {
			cookieInfo := getCookieValue(cookies[i].Value)
			pdaStatus = pdaController.PdaManager.GetPDA(cookieInfo.ReplicaId, cookieInfo.PdaId)
		}
	}
	return pdaStatus
}
// getCookieValue reverses WriteCookie's encoding: it URL-unescapes the
// cookie value, restores the single quotes to double quotes, and decodes the
// JSON into a CookieInfo. A malformed value yields the zero CookieInfo
// (the unmarshal error is deliberately ignored).
func getCookieValue(value string) entity.CookieInfo {
	var cookieInfo entity.CookieInfo
	unescape, _ := url.PathUnescape(value)
	all := strings.ReplaceAll(unescape, "\"", "")
	all = strings.ReplaceAll(all, "'", "\"")
	bytes := []byte(all)
	json.Unmarshal(bytes, &cookieInfo)
	return cookieInfo
}
// WriteCookie sets the "pda" session cookie identifying the PDA and replica
// group of cookieDetails. Double quotes in the JSON payload are swapped for
// single quotes so the value survives the cookie syntax; getCookieValue
// performs the inverse.
func (pdaController *PdaController) WriteCookie(writer http.ResponseWriter, cookieDetails entity.PDAStatus) {
	cookieInfo := entity.CookieInfo{
		PdaId:     cookieDetails.PdaId,
		ReplicaId: cookieDetails.ReplicaId,
	}
	val, _ := json.Marshal(cookieInfo)
	cookie := &http.Cookie{
		Name:  "pda",
		Value: strings.ReplaceAll(string(val), "\"", "'"),
		Path:  "/",
	}
	http.SetCookie(writer, cookie)
}
|
package main
import (
"fmt"
)
// main prints a centered 5-row pyramid followed by an inverted pyramid.
func main() {
	upperTriangle()
	bottomTriangle()
}
// upperTriangle prints a 5-row centered pyramid of "* " cells. Row i
// (0-based) is padded with rows-i spaces and carries 2i+1 stars; the
// original emitted those stars from two back-to-back loops of i+1 and i.
func upperTriangle() {
	const rows = 5
	out := ""
	for row := 0; row < rows; row++ {
		for n := 0; n < rows-row; n++ {
			out += " "
		}
		for n := 0; n < 2*row+1; n++ {
			out += "* "
		}
		if row != rows-1 {
			out += "\n"
		}
	}
	fmt.Println(out)
}
// bottomTriangle prints a 6-row inverted pyramid of "* " cells. Row i
// (0-based, through rows inclusive) is padded with i spaces and carries
// 2*(rows-i)+1 stars; the original emitted those stars from two
// back-to-back loops of rows-i+1 and rows-i.
func bottomTriangle() {
	const rows = 5
	out := ""
	for row := 0; row <= rows; row++ {
		for n := 0; n < row; n++ {
			out += " "
		}
		for n := 0; n < 2*(rows-row)+1; n++ {
			out += "* "
		}
		if row != rows {
			out += "\n"
		}
	}
	fmt.Println(out)
}
|
package rbt
import (
"testing"
"math/rand"
"time"
)
var _,_ = rand.Seed, time.Now
// fill rbtree with random numbers as keys
// newtree inserts up to iters random integer keys (duplicates collapse, so
// the real size may be smaller), checks Size against the count of successful
// inserts, verifies the red-black invariants, and returns the map.
func newtree(t *testing.T, iters int) *RbMap {
	rand.Seed(time.Now().UnixNano())
	// rb tree with integer keys
	r := NewRbMap(func(k1, k2 interface{}) bool {
		return k1.(int) < (k2).(int)
	})
	if r == nil {
		t.Fatalf("Can't create map of size %d", iters)
	}
	realsize := 0
	for i := 0; i < iters; i++ {
		num := rand.Intn(100000000)
		// Insert reports whether the key was new
		if r.Insert(num, i) {
			realsize++
		}
	}
	if r.Size() != realsize {
		t.Fatalf("Realsize mismatch: %d/%d", r.Size(), realsize);
	}
	r.verify()
	return r
}
// xTestFill walks a 100k-entry tree forwards and backwards and checks both
// traversals visit exactly Size() nodes. The "x" prefix keeps it from
// matching the Test* pattern, i.e. it is deliberately disabled.
func xTestFill(t *testing.T) {
	r := newtree(t, 100000)
	cnt_forward, cnt_backward := 0, 0
	for n := r.First(); n != nil; cnt_forward++ {
		n = n.Next()
	}
	for n := r.Last(); n != nil; cnt_backward++ {
		n = n.Prev()
	}
	if cnt_forward != r.Size() || cnt_backward != r.Size() { t.FailNow(); }
}
// TestFind samples roughly every 1-9th key from a large tree, then for each
// sampled key verifies FindNode locates it, deletes it, and verifies a
// second Find no longer sees it.
func TestFind(t *testing.T) {
	r := newtree(t, 1000000)
	kl := make(map[int]int)
	n := r.First()
	// collect a pseudo-random sample of existing keys
	for i := 0; n != nil && i < r.Size()/11; i++ {
		adv := rand.Intn(9) + 1
		for j := 0; n != nil && j < adv; j++ {
			n = n.Next()
		}
		if n != nil {
			k := n.Key().(int)
			kl[k] = k
		}
	}
	for _, k := range kl {
		n := r.FindNode(k)
		if n == nil {
			// the %d verb previously had no argument (go vet error)
			t.Fatalf("Key %d not found", k)
		}
		if n.Key().(int) != k {
			t.Fatalf("Key mismatch: %d/%d", n.Key().(int), k)
		}
		r.DeleteNode(n)
		v := r.Find(k)
		if v != nil { t.Fatalf("Key %d, dup key %d", k, v) }
	}
}
// TestDelete drains a 100k tree front-to-back (re-verifying invariants at
// two checkpoints), then a fresh tree back-to-front, asserting the size
// reaches zero both times.
func TestDelete(t *testing.T) {
	r := newtree(t, 100000)
	i := 0
	for n := r.First(); nil != n; n = r.First() {
		r.DeleteNode(n)
		// spot-check the red-black invariants mid-drain
		if i == 10000 || i == 70000 {
			r.verify()
		}
		i++
	}
	if r.Size() != 0 { t.Fatalf("tree size non-null after delete") }
	r = newtree(t, 100000)
	for n := r.Last(); nil != n; n = r.Last() {
		r.DeleteNode(n)
	}
	if r.Size() != 0 { t.Fatalf("tree size non-null after delete") }
}
package logdir
import (
"fmt"
"os"
"path/filepath"
"sync"
"github.com/wchargin/tensorboard-data-server/fs"
"github.com/wchargin/tensorboard-data-server/io/run"
)
// LoaderBuilder specifies options for a Loader.
type LoaderBuilder struct {
	// FS is the filesystem to use for read operations.
	FS fs.Filesystem
	// Logdir is the root log directory to be loaded, as a path under FS.
	Logdir string
}
// Start starts a loader in a new goroutine. It starts dormant. Call Reload on
// returned *Loader to poll.
func (b LoaderBuilder) Start() *Loader {
	ll := &Loader{
		fs:      b.FS,
		logdir:  b.Logdir,
		readers: make(map[string]*run.Reader),
		data:    make(map[string]*run.Accumulator),
		// both channels are unbuffered: Reload and the worker rendezvous
		reload: make(chan struct{}),
		asleep: make(chan struct{}),
	}
	go ll.start()
	return ll
}
// Loader loads all runs under a log directory and accumulates their contents.
type Loader struct {
	// fs is the filesystem to use for read operations.
	fs fs.Filesystem
	// logdir is the root log directory being loaded, as a path under fs.
	logdir string
	// reload is an input channel that sees unit when this loader should
	// wake up.
	reload chan struct{}
	// asleep is an output channel that sees unit when this loader has read
	// to EOF and gone to sleep, to be awoken later via "reload".
	asleep chan struct{}
	// mu locks the readers and data maps, not any of their contents.
	mu sync.RWMutex
	// readers maps a run name to its active reader object.
	readers map[string]*run.Reader
	// data maps a run name to its event accumulator. Same domain as
	// readers.
	data map[string]*run.Accumulator
}
// Runs returns a snapshot of all runs, keyed by name. The returned map is
// owned by the caller; the *run.Accumulator values are still shared.
func (ll *Loader) Runs() map[string]*run.Accumulator {
	ll.mu.RLock()
	defer ll.mu.RUnlock()
	snapshot := make(map[string]*run.Accumulator, len(ll.data))
	for name, acc := range ll.data {
		snapshot[name] = acc
	}
	return snapshot
}
// Run returns the accumulator for the run with the given name (not path), or
// nil if there is no such run.
func (ll *Loader) Run(run string) *run.Accumulator {
	ll.mu.RLock()
	defer ll.mu.RUnlock()
	return ll.data[run]
}
// start runs in its own goroutine, created by LoaderBuilder.Start.
// Each wake-up on "reload" triggers one full cycle — discover run
// directories, sync readers, reload all runs — then signals "asleep" so the
// blocked Reload call can return. The loop ends when "reload" is closed.
func (ll *Loader) start() {
	for range ll.reload {
		rundirs, err := ll.rundirs()
		if err != nil {
			// discovery failure: report and still signal completion? No —
			// note that "asleep" is NOT signalled on this path, so a Reload
			// caller would block. TODO(review): confirm this is intended.
			fmt.Fprintf(os.Stderr, "discovering runs: %v\n", err)
			continue
		}
		ll.mkloaders(rundirs)
		ll.doreload()
		ll.asleep <- struct{}{}
	}
}
// A rundirs value maps a run name to its path.
type rundirs map[string]string
// rundirs finds all run directories under the logdir, by looking for all
// tfevents files. A run's name is its directory's path relative to the
// logdir root.
func (ll *Loader) rundirs() (rundirs, error) {
	result := make(rundirs)
	files, err := ll.fs.FindFiles(ll.logdir, "*tfevents*")
	if err != nil {
		return nil, err
	}
	for _, f := range files {
		d := filepath.Dir(f) // TODO(@wchargin): fs integration?
		// multiple event files in one directory are still one run
		if _, ok := result[d]; ok {
			continue
		}
		name, err := filepath.Rel(ll.logdir, d)
		if err != nil {
			return nil, err
		}
		result[name] = d
	}
	return result, nil
}
// mkloaders synchronizes ll.readers and ll.data with the provided rundirs:
// runs that vanished from disk are closed and dropped, newly discovered runs
// get a fresh reader and accumulator. Holds the write lock for the whole
// pass.
func (ll *Loader) mkloaders(rundirs rundirs) {
	ll.mu.Lock()
	defer ll.mu.Unlock()
	for k, rr := range ll.readers {
		if _, ok := rundirs[k]; ok {
			continue // still exists
		}
		fmt.Fprintf(os.Stderr, "removing run %q\n", k)
		// close failure is reported but the run is removed regardless
		if err := rr.Close(); err != nil {
			fmt.Fprintf(os.Stderr, "closing run %q: %v\n", k, err)
		}
		delete(ll.readers, k)
		delete(ll.data, k)
	}
	for k, dir := range rundirs {
		if _, ok := ll.readers[k]; ok {
			continue // already exists
		}
		fmt.Fprintf(os.Stderr, "discovered run %q\n", k)
		rr := run.ReaderBuilder{FS: ll.fs, Dir: dir}.Start()
		acc := run.NewAccumulator(rr)
		ll.readers[k] = rr
		ll.data[k] = acc
	}
}
// doreload reloads every known run reader concurrently and waits for all of
// them. The reader pointers are snapshotted under the read lock first so the
// reloads themselves run without holding ll.mu.
func (ll *Loader) doreload() {
	var wg sync.WaitGroup
	ll.mu.RLock()
	readers := make([]*run.Reader, len(ll.readers))
	{
		i := 0
		for _, rr := range ll.readers {
			readers[i] = rr
			i++
		}
	}
	ll.mu.RUnlock()
	wg.Add(len(readers))
	for _, rr := range readers {
		// one goroutine per run; rr is passed as an argument to avoid
		// loop-variable capture
		go func(rr *run.Reader) {
			rr.Reload()
			wg.Done()
		}(rr)
	}
	wg.Wait()
}
// Reload polls the log directory and reloads runs. It blocks until the reload
// finishes. Must not be called concurrently with any other Reload. May be
// called concurrently with reads.
func (ll *Loader) Reload() {
	// wake the worker, then wait for its completion signal
	ll.reload <- struct{}{}
	<-ll.asleep
}
// Close implements io.Closer. If there are multiple errors when closing
// underlying run readers, an arbitrary one is returned.
// NOTE(review): ll.readers is left populated and the worker goroutine is not
// stopped here — confirm callers never Reload after Close.
func (ll *Loader) Close() error {
	ll.mu.Lock()
	defer ll.mu.Unlock()
	ll.data = nil // gc
	var firstErr error
	for _, rr := range ll.readers {
		err := rr.Close()
		// keep only the first non-nil error
		if firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}
|
package fstack_test
import (
"bytes"
"io/ioutil"
"os"
"testing"
"github.com/Komosa/fstack"
)
// TestNonMod loads a three-line stack file, verifies Top/Size/Empty, and
// checks that Sync writes the file back unchanged.
func TestNonMod(t *testing.T) {
	f, err := ioutil.TempFile("", "fstack_test_file")
	fatalMaybe(err, t)
	defer os.Remove(f.Name())
	data := []byte(`bottom
middle
top
`)
	_, err = f.Write(data)
	// check the write before closing (it was previously checked only after
	// Close), and stop ignoring the Close error
	fatalMaybe(err, t)
	fatalMaybe(f.Close(), t)
	st, err := fstack.New(f.Name())
	fatalMaybe(err, t)
	if s := st.Top(); s != "top" {
		t.Errorf("top of stack should be `top`, got: %v", s)
	}
	if s := st.Size(); s != 3 {
		t.Errorf("stack should have exactly 3 elements, got: %v", s)
	}
	if st.Empty() {
		t.Error("stack should be not empty, got: empty=true")
	}
	err = st.Sync(0) // perms doesn't matter, file already exists
	fatalMaybe(err, t)
	got, err := ioutil.ReadFile(f.Name())
	fatalMaybe(err, t)
	if !bytes.Equal(data, got) {
		t.Errorf("file content differs, exp: %#+v, got: %#+v", data, got)
	}
}
// TestEmpty verifies that syncing a stack built from an empty file writes an
// empty file back.
func TestEmpty(t *testing.T) {
	f, err := ioutil.TempFile("", "fstack_test_file")
	fatalMaybe(err, t)
	defer os.Remove(f.Name())
	// the open handle was previously leaked; close it before handing the
	// path to fstack
	fatalMaybe(f.Close(), t)
	st, err := fstack.New(f.Name())
	fatalMaybe(err, t)
	err = st.Sync(0) // perms doesn't matter, file already exists
	fatalMaybe(err, t)
	got, err := ioutil.ReadFile(f.Name())
	fatalMaybe(err, t)
	exp := []byte{}
	if !bytes.Equal(exp, got) {
		t.Errorf("file content differs, exp: %#+v, got: %#+v", exp, got)
	}
}
// fatalMaybe aborts the test immediately when err is non-nil; a nil err is
// a no-op.
func fatalMaybe(err error, tb testing.TB) {
	if err == nil {
		return
	}
	tb.Fatal(err)
}
|
package profile
import (
"os"
"testing"
)
// TestWriteOne writes a single profile to a fresh JSON db file and reads it
// back, expecting exactly one entry.
func TestWriteOne(t *testing.T) {
	db := JsonFileDb{path: "/tmp/test_jsondb"}
	var err error
	profile := Profile{Name: "1", Email: "jsahd@as.com"}
	// os.Create's return values were previously discarded, leaking the file
	// handle and ignoring creation failure
	f, err := os.Create(db.path)
	if err != nil {
		t.Fatalf("Can't create db file: %s", err)
	}
	f.Close()
	if err = db.WriteProfiles([]Profile{profile}); err != nil {
		t.Errorf("Can't write one profile: %s", err)
	}
	// check file
	var profiles []Profile
	if profiles, err = db.GetProfiles(); err != nil {
		t.Errorf("Can't get profiles: %s", err)
	}
	if len(profiles) != 1 {
		t.Errorf("There must be the one element")
	}
}
// TestDelete writes three profiles, removes the middle one, and verifies the
// remaining two keep their original order.
func TestDelete(t *testing.T) {
	db := JsonFileDb{path: "/tmp/test_jsondb_td"}
	var err error
	var profiles []Profile
	// os.Create's return values were previously discarded, leaking the file
	// handle and ignoring creation failure
	f, err := os.Create(db.path)
	if err != nil {
		t.Fatalf("Can't create db file: %s", err)
	}
	f.Close()
	if err = db.WriteProfiles([]Profile{Profile{Name: "1", Email: "jsahd@as.com"},
		Profile{Name: "2", Email: "jsahd2@as.com"},
		Profile{Name: "3", Email: "jsahd3@as.com"}}); err != nil {
		t.Errorf("Can't write one profile: %s", err)
	}
	db.RemoveProfile("2")
	profiles, err = db.GetProfiles()
	if err != nil {
		t.Errorf("Can't get the profiles: %s", err)
	}
	for num, elem := range profiles {
		// these messages previously printed err (always nil here) instead of
		// the offending name
		if num == 0 && elem.Name != "1" {
			t.Errorf("Wrong content after deleting an element: got %q", elem.Name)
		}
		if num == 1 && elem.Name != "3" {
			t.Errorf("Wrong content after deleting an element: got %q", elem.Name)
		}
	}
}
// TestValidation checks that SetEmail accepts a well-formed address.
func TestValidation(t *testing.T) {
	var err error
	profile := Profile{Name: "1"}
	if err = profile.SetEmail("a.user13@mail.ru"); err != nil {
		t.Errorf("email validation failed")
	}
}
|
package crawl
import (
"time"
. "./base"
)
// Tdatas holds one timeframe's candle series plus the derived typing,
// segment and hub parsers. base/next link this series to its coarser and
// finer neighboring timeframes.
// NOTE(review): only Data is serialized ("data"); everything else is
// rebuilt, presumably by the parsers — confirm.
type Tdatas struct {
	Data    []Tdata `json:"data"`
	Typing  typing_parser
	Segment segment_parser
	Hub     hub_parser
	tag            string
	start          time.Time
	min_hub_height int
	base, next     *Tdatas
}
// tail copies views of the last `tail` elements of p's Data and Line slices
// into s. When a slice has fewer than `tail` elements the whole slice is
// shared. The views alias p's backing arrays — no copying happens.
func (p *typing_parser) tail(s *typing_parser, tail int) {
	if l := len(p.Data); l > 0 {
		start := l - tail
		// clamp: fewer elements than requested means take everything
		if start < 0 || start >= l {
			start = 0
		}
		s.Data = p.Data[start:]
	}
	if l := len(p.Line); l > 0 {
		start := l - tail
		if start < 0 || start >= l {
			start = 0
		}
		s.Line = p.Line[start:]
	}
}
// tail populates s with views of the last `tail` candles of p and the
// corresponding tails of the typing/segment/hub parsers. No-op when p holds
// no data. The views alias p's backing arrays.
func (p *Tdatas) tail(s *Tdatas, tail int) {
	l := len(p.Data)
	if l < 1 {
		return
	}
	start := l - tail
	// clamp: fewer elements than requested means take everything
	if start < 0 || start >= l {
		start = 0
	}
	s.Data = p.Data[start:]
	p.Typing.tail(&s.Typing, tail)
	p.Segment.tail(&s.Segment.typing_parser, tail)
	p.Hub.tail(&s.Hub.typing_parser, tail)
}
// Init configures the series: minimum hub height, a tag propagated to all
// three parsers, and links to the coarser (base) and finer (next) timeframe
// series.
func (p *Tdatas) Init(hub_height int, tag string, base, next *Tdatas) {
	p.min_hub_height = hub_height
	p.tag = tag
	p.Typing.tag = tag
	p.Segment.tag = tag
	p.Hub.tag = tag
	p.base = base
	p.next = next
}
// First_lastday_data returns the index of the first candle belonging to the
// last calendar day present in Data (0 when Data is empty). Only the final
// 250 entries are searched — presumably enough to cover one trading day at
// the finest timeframe (TODO confirm).
func (p *Tdatas) First_lastday_data() int {
	ldata := len(p.Data)
	if ldata < 1 {
		return 0
	}
	start := ldata - 240 - 10
	if start < 0 {
		start = 0
	}
	// midnight boundary of the most recent candle's day
	t := p.Data[ldata-1].Time.Truncate(time.Hour * 24)
	i, _ := (TdataSlice(p.Data[start:])).Search(t)
	return i + start
}
// Drop_lastday_data removes every candle belonging to the last calendar day
// in Data. If the whole series is within that day, Data is emptied.
func (p *Tdatas) Drop_lastday_data() {
	ldata := len(p.Data)
	if ldata < 1 {
		return
	}
	i := p.First_lastday_data()
	if i < 1 {
		p.Data = []Tdata{}
		return
	}
	// keep everything before the last day's first candle
	p.Data = p.Data[:i]
}
// Add inserts or updates one candle, keeping Data sorted by Time. It returns
// the index the candle ended up at and whether it was newly inserted (false
// means an existing candle at the same timestamp was overwritten). Candles
// with both zero Volume and zero Open are rejected as empty.
func (p *Tdatas) Add(data Tdata) (int, bool) {
	if data.Volume == 0 && data.Open == 0 {
		return 0, false
	}
	l := len(p.Data)
	if l < 1 {
		p.Data = append(p.Data, data)
		return 0, true
	}
	// fast path: strictly newer than the tail — plain append
	if data.Time.After(p.Data[l-1].Time) {
		p.Data = append(p.Data, data)
		return l, true
	}
	// fast path: same timestamp as the tail — overwrite in place
	if data.Time.Equal(p.Data[l-1].Time) {
		p.Data[l-1] = data
		return l - 1, false
	}
	// slow path: binary-search for the timestamp
	i, ok := (TdataSlice(p.Data)).Search(data.Time)
	if ok {
		p.Data[i] = data
		return i, false
	}
	if i < 1 {
		// insert at the front
		p.Data = append([]Tdata{data}, p.Data...)
	} else {
		// insert mid-slice: grow by one, shift the tail right, place data
		p.Data = append(p.Data, data)
		copy(p.Data[i+1:], p.Data[i:])
		p.Data[i] = data
	}
	return i, true
}
// latest_time returns the timestamp of the newest candle, or the
// package-level market_begin_day sentinel when the series is empty.
func (p *Tdatas) latest_time() time.Time {
	if len(p.Data) < 1 {
		return market_begin_day
	}
	return p.Data[len(p.Data)-1].Time
}
|
package block
import (
"errors"
"time"
"bytes"
"encoding/binary"
"reflect"
"github.com/jasoncodingnow/bitcoinLiteLite/consensus"
"github.com/jasoncodingnow/bitcoinLiteLite/crypto"
"github.com/jasoncodingnow/bitcoinLiteLite/tool"
)
// TransactionHeader is the fixed-size, hashable part of a transaction:
// sender/recipient public keys, the payload digest and length, a creation
// timestamp, and the proof-of-work nonce.
type TransactionHeader struct {
	From        []byte
	To          []byte
	PayloadHash []byte
	PayloadLen  uint32
	Timestamp   uint32
	Nonce       uint32
}
// Transaction couples a header with the signature over the header hash and
// the raw payload bytes.
type Transaction struct {
	Header    TransactionHeader
	Signature []byte
	Payload   []byte
}

// TransactionSlice is a list of transactions.
type TransactionSlice []Transaction
//NewTransaction create new transaction
// It stamps the current Unix time and precomputes the payload hash and
// length; the nonce and signature are filled in later (GenerateNonce, Sign).
func NewTransaction(from, to, payload []byte) *Transaction {
	t := &Transaction{Header: TransactionHeader{From: from, To: to}, Payload: payload}
	t.Header.Timestamp = uint32(time.Now().Unix())
	t.Header.PayloadHash = tool.SHA256(payload)
	t.Header.PayloadLen = uint32(len(payload))
	return t
}
// Hash returns the SHA-256 of the serialized header; the payload only
// participates via Header.PayloadHash. The marshal error is ignored —
// TransactionHeader.MarshalBinary (below) always returns a nil error.
func (t *Transaction) Hash() []byte {
	headerBytes, _ := t.Header.MarshalBinary()
	return tool.SHA256(headerBytes)
}
func (t *Transaction) Sign(privateKey string) []byte {
s, _ := crypto.Sign(t.Hash(), privateKey)
return []byte(s)
}
func (t *Transaction) VerifyTransaction(powPrefix []byte) bool {
h := t.Hash()
payloadHash := tool.SHA256(t.Payload)
return reflect.DeepEqual(payloadHash, t.Header.PayloadHash) && consensus.CheckProofOfWork(powPrefix, h) && crypto.Verify(h, string(t.Signature), string(t.Header.From))
}
// GenerateNonce performs proof-of-work: it increments the header nonce until
// the transaction hash satisfies the powPrefix target, then returns the nonce.
// (Each transaction thus costs a fixed amount of computation.)
func (t *Transaction) GenerateNonce(powPrefix []byte) uint32 {
	for !consensus.CheckProofOfWork(powPrefix, t.Hash()) {
		t.Header.Nonce++
	}
	return t.Header.Nonce
}
// MarshalBinary encodes the transaction as: fixed-size header, signature
// front-padded to TransactionSignatureSize, then the raw payload.
func (t *Transaction) MarshalBinary() ([]byte, error) {
	headerBinary, err := t.Header.MarshalBinary()
	if err != nil {
		return nil, err
	}
	if len(headerBinary) != TransactionHeaderSize {
		return nil, errors.New("header marshal len error")
	}
	return append(append(headerBinary, tool.FillBytesToFront(t.Signature, TransactionSignatureSize)...), t.Payload...), nil
}

// UnmarshalBinary decodes one transaction from data and returns the
// remaining bytes so a stream of transactions can be decoded sequentially.
// data must hold at least header+signature bytes.
func (t *Transaction) UnmarshalBinary(data []byte) ([]byte, error) {
	buf := bytes.NewBuffer(data)
	if len(data) < (TransactionHeaderSize + TransactionSignatureSize) {
		return nil, errors.New("data length error when unmarshal binary to transaction")
	}
	h := TransactionHeader{}
	if err := h.UnmarshalBinary(buf.Next(TransactionHeaderSize)); err != nil {
		return nil, err
	}
	t.Header = h
	// Presumably strips the zero front-padding added at marshal time —
	// see tool.SliceByteWhenEncount.
	t.Signature = tool.SliceByteWhenEncount(buf.Next(TransactionSignatureSize), 0)
	t.Payload = buf.Next(int(t.Header.PayloadLen))
	// Everything left over belongs to subsequent transactions.
	return buf.Next(MaxInt), nil
}
// MarshalBinary encodes the header as fixed-width fields: From and To padded
// to crypto.PublicKeyLen, the payload hash padded to PayloadHashSize, then
// PayloadLen, Timestamp and Nonce as little-endian uint32s.
// (binary.Write to a bytes.Buffer cannot fail for fixed-size values, so the
// errors are not checked.)
func (t *TransactionHeader) MarshalBinary() ([]byte, error) {
	buf := new(bytes.Buffer)
	buf.Write(tool.FillBytesToFront(t.From, crypto.PublicKeyLen))
	buf.Write(tool.FillBytesToFront(t.To, crypto.PublicKeyLen))
	buf.Write(tool.FillBytesToFront(t.PayloadHash, PayloadHashSize))
	binary.Write(buf, binary.LittleEndian, t.PayloadLen)
	binary.Write(buf, binary.LittleEndian, t.Timestamp)
	binary.Write(buf, binary.LittleEndian, t.Nonce)
	return buf.Bytes(), nil
}

// UnmarshalBinary decodes the fixed-width header fields written by
// MarshalBinary, stripping the zero padding from the byte-slice fields.
// NOTE(review): short input is not detected — binary.Read errors are ignored,
// leaving trailing fields zero; confirm callers always pass full headers.
func (t *TransactionHeader) UnmarshalBinary(data []byte) error {
	buf := bytes.NewBuffer(data)
	t.From = tool.SliceByteWhenEncount(buf.Next(crypto.PublicKeyLen), 0)
	t.To = tool.SliceByteWhenEncount(buf.Next(crypto.PublicKeyLen), 0)
	t.PayloadHash = tool.SliceByteWhenEncount(buf.Next(PayloadHashSize), 0)
	binary.Read(bytes.NewBuffer(buf.Next(4)), binary.LittleEndian, &t.PayloadLen)
	binary.Read(bytes.NewBuffer(buf.Next(4)), binary.LittleEndian, &t.Timestamp)
	binary.Read(bytes.NewBuffer(buf.Next(4)), binary.LittleEndian, &t.Nonce)
	return nil
}
// MarshalBinary concatenates the binary encoding of every transaction in the
// slice, in order.
func (t *TransactionSlice) MarshalBinary() ([]byte, error) {
	var out bytes.Buffer
	for _, tr := range *t {
		encoded, err := tr.MarshalBinary()
		if err != nil {
			return nil, err
		}
		out.Write(encoded)
	}
	return out.Bytes(), nil
}
// UnmarshalBinary decodes consecutive transactions from data, appending each
// to the slice, until fewer than header+signature bytes remain.
// NOTE(review): the loop condition uses '>' rather than '>=', so a trailing
// transaction with an empty payload that fits exactly is skipped — confirm
// whether that is intentional.
func (t *TransactionSlice) UnmarshalBinary(data []byte) error {
	d := data
	for len(d) > TransactionHeaderSize+TransactionSignatureSize {
		tr := &Transaction{}
		remain, err := tr.UnmarshalBinary(d)
		if err != nil {
			return err
		}
		*t = append(*t, *tr)
		d = remain
	}
	return nil
}
// Exists reports whether a transaction with the same signature as newTr is
// already present in the slice.
func (t TransactionSlice) Exists(newTr *Transaction) bool {
	target := newTr.Signature
	for i := range t {
		if reflect.DeepEqual(t[i].Signature, target) {
			return true
		}
	}
	return false
}
// AddTransaction returns a slice with newTr inserted so the list stays
// ordered by ascending header timestamp (newTr goes before the first
// existing transaction whose timestamp is >= its own).
func (t TransactionSlice) AddTransaction(newTr *Transaction) TransactionSlice {
	for i, tr := range t {
		if tr.Header.Timestamp >= newTr.Header.Timestamp {
			// Build a fresh slice. The previous
			// append(append(t[:i], *newTr), t[i:]...) shared t's backing
			// array: the inner append overwrote t[i] before the outer
			// append copied t[i:], duplicating newTr and losing t[i].
			out := make(TransactionSlice, 0, len(t)+1)
			out = append(out, t[:i]...)
			out = append(out, *newTr)
			return append(out, t[i:]...)
		}
	}
	return append(t, *newTr)
}
|
package config
import (
"fmt"
"io/ioutil"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/common/promlog"
yaml "gopkg.in/yaml.v2"
graphite "github.com/criteo/graphite-remote-adapter/client/graphite/config"
"github.com/criteo/graphite-remote-adapter/utils"
)
// Load parses the YAML input s into a Config, starting from DefaultConfig.
// The raw input is kept in the Config's original field.
func Load(s string) (*Config, error) {
	cfg := &Config{}
	*cfg = DefaultConfig
	if err := yaml.Unmarshal([]byte(s), cfg); err != nil {
		return nil, err
	}
	cfg.original = s
	return cfg, nil
}
// LoadFile reads filename and parses its contents as a YAML Config.
func LoadFile(logger log.Logger, filename string) (*Config, error) {
	level.Info(logger).Log("file", filename, "msg", "Loading configuration file")
	content, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	return Load(string(content))
}
// DefaultConfig is the default top-level configuration.
var DefaultConfig = Config{
	Web: webOptions{
		ListenAddress: "0.0.0.0:9201", // all interfaces, port 9201
		TelemetryPath: "/metrics",
	},
	Read: readOptions{
		Timeout:     5 * time.Minute,
		Delay:       1 * time.Hour,
		IgnoreError: true,
	},
	Write: writeOptions{
		Timeout: 5 * time.Minute,
	},
	Graphite: graphite.DefaultConfig,
}
// Config is the top-level configuration.
type Config struct {
	ConfigFile string               // config file path; not set by Load/LoadFile here — presumably set by the caller
	LogLevel   promlog.AllowedLevel // logging verbosity
	Web        webOptions           `yaml:"web,omitempty" json:"web,omitempty"`
	Read       readOptions          `yaml:"read,omitempty" json:"read,omitempty"`
	Write      writeOptions         `yaml:"write,omitempty" json:"write,omitempty"`
	Graphite   graphite.Config      `yaml:"graphite,omitempty" json:"graphite,omitempty"`
	// Catches all undefined fields and must be empty after parsing.
	XXX map[string]interface{} `yaml:",inline" json:"-"`
	// original is the input from which the Config was parsed.
	original string
}
// String renders the configuration as YAML, or an error marker if
// marshalling fails.
func (c Config) String() string {
	out, err := yaml.Marshal(c)
	if err != nil {
		return fmt.Sprintf("<error creating config string: %s>", err)
	}
	return string(out)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
// Decoding goes through an alias type so this method is not re-entered,
// and starts from DefaultConfig so omitted keys keep their defaults.
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type plain Config
	*c = DefaultConfig
	err := unmarshal((*plain)(c))
	if err != nil {
		return err
	}
	return utils.CheckOverflow(c.XXX, "config")
}
// webOptions configures the HTTP listener and telemetry endpoint.
type webOptions struct {
	ListenAddress string `yaml:"listen_address,omitempty" json:"listen_address,omitempty"`
	TelemetryPath string `yaml:"telemetry_path,omitempty" json:"telemetry_path,omitempty"`
	// Catches all undefined fields and must be empty after parsing.
	XXX map[string]interface{} `yaml:",inline" json:"-"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
// Starts from the web defaults; decodes via an alias type to avoid recursion.
func (opts *webOptions) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type plain webOptions
	*opts = DefaultConfig.Web
	err := unmarshal((*plain)(opts))
	if err != nil {
		return err
	}
	return utils.CheckOverflow(opts.XXX, "webOptions")
}
// readOptions configures the remote-read path (timeout, query delay, and
// whether read errors are ignored).
type readOptions struct {
	Timeout     time.Duration `yaml:"timeout,omitempty" json:"timeout,omitempty"`
	Delay       time.Duration `yaml:"delay,omitempty" json:"delay,omitempty"`
	IgnoreError bool          `yaml:"ignore_error,omitempty" json:"ignore_error,omitempty"`
	// Catches all undefined fields and must be empty after parsing.
	XXX map[string]interface{} `yaml:",inline" json:"-"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
// Starts from the read defaults; decodes via an alias type to avoid recursion.
func (opts *readOptions) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type plain readOptions
	*opts = DefaultConfig.Read
	err := unmarshal((*plain)(opts))
	if err != nil {
		return err
	}
	return utils.CheckOverflow(opts.XXX, "readOptions")
}
// writeOptions configures the remote-write path.
type writeOptions struct {
	Timeout time.Duration `yaml:"timeout,omitempty" json:"timeout,omitempty"`
	// Catches all undefined fields and must be empty after parsing.
	XXX map[string]interface{} `yaml:",inline" json:"-"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
// Starts from the write defaults; decodes via an alias type to avoid recursion.
func (opts *writeOptions) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type plain writeOptions
	*opts = DefaultConfig.Write
	err := unmarshal((*plain)(opts))
	if err != nil {
		return err
	}
	return utils.CheckOverflow(opts.XXX, "writeOptions")
}
|
package main
import (
"net/http"
)
// NotFoundRedirectRespWr wraps an http.ResponseWriter and suppresses
// 404 responses so the handler that owns it can redirect instead.
type NotFoundRedirectRespWr struct {
	http.ResponseWriter     // We embed http.ResponseWriter
	status              int // last status code passed to WriteHeader
}
// WriteHeader records the status code and forwards every status except
// 404 Not Found, which is swallowed so the wrapping handler can redirect.
func (w *NotFoundRedirectRespWr) WriteHeader(status int) {
	w.status = status
	if status == http.StatusNotFound {
		return // suppressed; see wrapHandler
	}
	w.ResponseWriter.WriteHeader(status)
}
// Write forwards the body unless a 404 was recorded, in which case the bytes
// are discarded while success is still reported to the caller.
func (w *NotFoundRedirectRespWr) Write(p []byte) (int, error) {
	if w.status == http.StatusNotFound {
		return len(p), nil // pretend the write succeeded
	}
	return w.ResponseWriter.Write(p)
}
// wrapHandler wraps h so that any 404 Not Found response it produces is
// converted into a redirect to "/" (302 Found); the 404 body is discarded
// by NotFoundRedirectRespWr.
func wrapHandler(h http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		nfrw := &NotFoundRedirectRespWr{ResponseWriter: w}
		h.ServeHTTP(nfrw, r)
		// Use the named constant instead of the magic number 404, matching
		// the checks in WriteHeader/Write.
		if nfrw.status == http.StatusNotFound {
			http.Redirect(w, r, "/", http.StatusFound)
		}
	}
}
// WebServer registers the static file server and the frontend API endpoints,
// then blocks serving HTTP on the configured frontend address/port.
// Depends on package globals: GLOBAL_HOST (addresses/ports), Log, HostIP,
// ConfigDefaultAddr and DEFAULT_STATIC_PATH — defined elsewhere in this package.
func WebServer() {
	// Static files; 404s are rewritten into redirects to "/" by wrapHandler.
	fs := wrapHandler( http.FileServer(http.Dir(DEFAULT_STATIC_PATH)))
	http.Handle("/", fs)
	/// FRONTEND CALL ///
	http.HandleFunc("/api/simpleRequest", FSimpleRequest)
	http.HandleFunc("/api/fileRequest", FFileRequest)
	http.HandleFunc("/api/fileResponse", FFileResponse)
	http.HandleFunc("/api/FDBRequest", FDBRequest)
	http.HandleFunc("/api/setLogin", FLogin)
	http.HandleFunc("/api/runExecCode", FExecCode)
	http.HandleFunc("/api/setOpMode", FOperationMode)
	http.HandleFunc("/api/setConfigs", FConfigUpdate)
	/// WEB SERVER LISTENER ///
	// Resolve the IP to advertise/bind. With no configured public address,
	// log each default interface's IP but bind to all interfaces (empty host).
	// NOTE(review): HostIP errors are ignored; a nil ip would panic on
	// ip.String() — confirm HostIP's failure behavior.
	ipPublic := ""
	if GLOBAL_HOST.PublicAddr == "" {
		for _, i := range ConfigDefaultAddr() {
			ip, _:= HostIP(i.Name)
			ipPublic = ip.String()
			Log("WebInterface on IP http://"+ipPublic+":"+GLOBAL_HOST.FrontendPort, nil, "1")
		}
		ipPublic = ""
	} else {
		ip, _:= HostIP(GLOBAL_HOST.PublicAddr)
		ipPublic = ip.String()
		Log("WebInterface on IP http://"+ipPublic+":"+GLOBAL_HOST.FrontendPort, nil, "1")
	}
	//readtime, _ := strconv.Atoi(config.CodeExecTimeout)
	//writetime, _ := strconv.Atoi(config.CodeExecTimeout)
	srv := &http.Server{
		Addr: ipPublic+":"+GLOBAL_HOST.FrontendPort,
		Handler: nil, // nil means http.DefaultServeMux, where handlers were registered
		//ReadTimeout: time.Duration(readtime) * 2 * time.Second,
		//WriteTimeout: time.Duration(writetime) * 3 * time.Second,
		//MaxHeaderBytes: 1 << 20,
	}
	// Blocks until the server fails; the error is routed through Log.
	err := srv.ListenAndServe()
	Log("", err, "0")
}
package main
import "fmt"
// main demonstrates stringShift on a small sample input.
func main() {
	fmt.Println("Perform String Shifts")
	_ = stringShift("abc", [][]int{{0, 4}, {1, 5}})
	// stringShift("abcdefg", [][]int{{1, 1}, {1, 1}, {0, 2}, {1, 3}})
}
// stringShift applies a sequence of rotations to s and returns the result.
// Each shift entry is [direction, amount]: direction 0 rotates left,
// direction 1 rotates right; amounts are taken modulo the string length.
//
// Fixes over the original: the shift amount is kept in a local (the original
// wrote val[1] back, mutating the caller's slices); the length used is the
// rune count, not the byte count (the original indexed a []rune with len(s),
// panicking on multibyte input); and an empty string no longer divides by zero.
func stringShift(s string, shift [][]int) string {
	r := []rune(s)
	l := len(r)
	if l == 0 {
		return s
	}
	for _, val := range shift {
		amt := val[1] % l
		if val[0] == 1 {
			// Right rotation: move the last amt runes to the front.
			pos := l - amt
			r = append(r[pos:], r[:pos]...)
		} else {
			// Left rotation: move the first amt runes to the back.
			r = append(r[amt:], r[:amt]...)
		}
	}
	return string(r)
}
|
// Go support for Protocol Buffers RPC, compatible with https://github.com/Baidu-ecom/Jprotobuf-rpc-socket
//
// Copyright 2002-2007 the original author or authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pbrpc
import (
"bytes"
"encoding/binary"
"errors"
"io"
"log"
"github.com/golang/protobuf/proto"
"github.com/golang/snappy"
)
// error log info definition
// ERR_IGNORE_ERR is a sentinel meaning "skip this package / close the
// connection" rather than a hard failure. (The "Ingore" typo is inside the
// runtime message and is kept for log compatibility.)
var ERR_IGNORE_ERR = errors.New("[marshal-001]Ingore error")

// ERR_META is returned when proto.Marshal yields a nil meta byte slice.
var ERR_META = errors.New("[marshal-003]Get nil value from Meta struct after marshal")
/*
Data package for baidu RPC.
all request and response data package should apply this.
-----------------------------------
| Head | Meta | Data | Attachment |
-----------------------------------
1. <Head> with fixed 12 byte length as follow format
----------------------------------------------
| PRPC | MessageSize(int32) | MetaSize(int32) |
----------------------------------------------
MessageSize = totalSize - 12(Fixed Head Size)
MetaSize = Meta object size
2. <Meta> body proto description as follow
message RpcMeta {
optional RpcRequestMeta request = 1;
optional RpcResponseMeta response = 2;
optional int32 compress_type = 3; // 0:nocompress 1:Snappy 2:gzip
optional int64 correlation_id = 4;
optional int32 attachment_size = 5;
optional ChunkInfo chuck_info = 6;
optional bytes authentication_data = 7;
};
message Request {
required string service_name = 1;
required string method_name = 2;
optional int64 log_id = 3;
};
message Response {
optional int32 error_code = 1;
optional string error_text = 2;
};
message ChunkInfo {
required int64 stream_id = 1;
required int64 chunk_id = 2;
};
3. <Data> customize transport data message.
4. <Attachment> attachment body data message
*/
/*
RpcDataPackage is one RPC wire package: fixed Head, protobuf Meta, optional
Data payload and optional Attachment (see the wire layout description above).
*/
type RpcDataPackage struct {
	Head       Header  // fixed 12-byte head: magic code + message/meta sizes
	Meta       RpcMeta // protobuf meta (request/response, compression, sizes)
	Data       []byte  // message payload, possibly compressed per Meta.CompressType
	Attachment []byte  // optional trailing attachment bytes
}
/*
NewRpcDataPackage creates a package with Request and Response meta structs
pre-initialized.
*/
func NewRpcDataPackage() *RpcDataPackage {
	pkg := &RpcDataPackage{}
	doInit(pkg)
	pkg.GetMeta().Response = &Response{}
	return pkg
}
/*
MagicCode sets the package magic code. Values whose length is not exactly
4 bytes are silently ignored.
*/
func (r *RpcDataPackage) MagicCode(magicCode string) {
	if len(magicCode) == 4 {
		r.Head.SetMagicCode([]byte(magicCode))
	}
}

// GetMagicCode returns the current magic code as a string.
func (r *RpcDataPackage) GetMagicCode() string {
	return string(r.Head.GetMagicCode())
}
/*
initRequest lazily allocates the Request meta struct if it is nil.
*/
func initRequest(r *RpcDataPackage) {
	if r.Meta.Request == nil {
		r.Meta.Request = &Request{}
	}
}

/*
initResponse lazily allocates the Response meta struct if it is nil.
*/
func initResponse(r *RpcDataPackage) {
	if r.Meta.Response == nil {
		r.Meta.Response = &Response{}
	}
}
/*
ServiceName sets the target service name; returns r for chaining.
*/
func (r *RpcDataPackage) ServiceName(serviceName string) *RpcDataPackage {
	initRequest(r)
	r.Meta.Request.ServiceName = &serviceName
	return r
}

/*
MethodName sets the target method name; returns r for chaining.
*/
func (r *RpcDataPackage) MethodName(methodName string) *RpcDataPackage {
	initRequest(r)
	r.Meta.Request.MethodName = &methodName
	return r
}

// SetData sets the payload bytes; returns r for chaining.
func (r *RpcDataPackage) SetData(Data []byte) *RpcDataPackage {
	r.Data = Data
	return r
}

// SetAttachment sets the attachment bytes; returns r for chaining.
func (r *RpcDataPackage) SetAttachment(Attachment []byte) *RpcDataPackage {
	r.Attachment = Attachment
	return r
}

// AuthenticationData sets the meta authentication bytes; returns r for chaining.
func (r *RpcDataPackage) AuthenticationData(authenticationData []byte) *RpcDataPackage {
	r.Meta.AuthenticationData = authenticationData
	return r
}

// CorrelationId sets the meta correlation id; returns r for chaining.
func (r *RpcDataPackage) CorrelationId(correlationId int64) *RpcDataPackage {
	r.Meta.CorrelationId = &correlationId
	return r
}

// CompressType selects payload compression (0 none, 1 snappy, 2 gzip —
// see the wire format comment above); returns r for chaining.
func (r *RpcDataPackage) CompressType(compressType int32) *RpcDataPackage {
	r.Meta.CompressType = &compressType
	return r
}

// LogId sets the request log id; returns r for chaining.
func (r *RpcDataPackage) LogId(logId int64) *RpcDataPackage {
	initRequest(r)
	r.Meta.Request.LogId = &logId
	return r
}

// GetLogId returns the request log id (zero when unset).
func (r *RpcDataPackage) GetLogId() int64 {
	initRequest(r)
	return r.Meta.Request.GetLogId()
}

// ErrorCode sets the response error code; returns r for chaining.
func (r *RpcDataPackage) ErrorCode(errorCode int32) *RpcDataPackage {
	initResponse(r)
	r.Meta.Response.ErrorCode = &errorCode
	return r
}

// ErrorText sets the response error text; returns r for chaining.
func (r *RpcDataPackage) ErrorText(errorText string) *RpcDataPackage {
	initResponse(r)
	r.Meta.Response.ErrorText = &errorText
	return r
}

// ExtraParams sets opaque request extra-parameter bytes; returns r for chaining.
func (r *RpcDataPackage) ExtraParams(extraParams []byte) *RpcDataPackage {
	initRequest(r)
	r.Meta.Request.ExtraParam = extraParams
	return r
}

// ChunkInfo sets stream/chunk ids for chunked transfer; returns r for chaining.
func (r *RpcDataPackage) ChunkInfo(streamId int64, chunkId int64) *RpcDataPackage {
	chunkInfo := ChunkInfo{}
	chunkInfo.StreamId = &streamId
	chunkInfo.ChunkId = &chunkId
	r.Meta.ChunkInfo = &chunkInfo
	return r
}
// doInit ensures both Request and Response meta structs are allocated.
func doInit(r *RpcDataPackage) {
	initRequest(r)
	initResponse(r)
}

// GetHead returns a copy of the package head.
func (r *RpcDataPackage) GetHead() Header {
	return r.Head
}

// GetMeta returns a pointer to the package meta (mutations affect r).
func (r *RpcDataPackage) GetMeta() *RpcMeta {
	return &r.Meta
}

// GetData returns the payload bytes.
func (r *RpcDataPackage) GetData() []byte {
	return r.Data
}

// GetAttachment returns the attachment bytes.
func (r *RpcDataPackage) GetAttachment() []byte {
	return r.Attachment
}
/*
WriteIO serializes the package and writes the resulting bytes to rw.
*/
func (r *RpcDataPackage) WriteIO(rw io.ReadWriter) error {
	bs, err := r.Write()
	if err != nil {
		return err
	}
	_, err = rw.Write(bs)
	return err
}
/*
Write serializes the package to bytes: Data is compressed according to
Meta.CompressType, sizes are computed and recorded in Meta and Head, the
Meta is protobuf-marshalled, and head+meta+data+attachment are concatenated.
*/
func (r *RpcDataPackage) Write() ([]byte, error) {
	doInit(r)
	var totalSize int32 = 0
	var dataSize int32 = 0
	var err error
	if r.Data != nil {
		// Compress the payload in place per the selected compress type.
		compressType := r.GetMeta().GetCompressType()
		if compressType == COMPRESS_GZIP {
			r.Data, err = GZIP(r.Data)
			if err != nil {
				return nil, err
			}
		} else if compressType == COMPRESS_SNAPPY {
			dst := make([]byte, snappy.MaxEncodedLen(len(r.Data)))
			r.Data = snappy.Encode(dst, r.Data)
		}
		dataSize = int32(len(r.Data))
		totalSize = totalSize + dataSize
	}
	var attachmentSize int32 = 0
	if r.Attachment != nil {
		attachmentSize = int32(len(r.Attachment))
		totalSize = totalSize + attachmentSize
	}
	r.Meta.AttachmentSize = proto.Int32(int32(attachmentSize))
	metaBytes, err := proto.Marshal(&r.Meta)
	if err != nil {
		return nil, err
	}
	if metaBytes == nil {
		return nil, ERR_META
	}
	rpcMetaSize := int32(len(metaBytes))
	totalSize = totalSize + rpcMetaSize
	r.Head.SetMetaSize(int32(rpcMetaSize))
	r.Head.SetMessageSize(int32(totalSize)) // set message body size
	// NOTE(review): the two direct field writes below repeat the setter
	// calls above — likely redundant; confirm against Header before removing.
	r.Head.MessageSize = int32(totalSize)
	r.Head.MetaSize = int32(rpcMetaSize)
	// Concatenate head + meta + data + attachment. (binary.Write on []byte
	// is a plain copy; endianness does not apply to byte slices.)
	buf := new(bytes.Buffer)
	headBytes, _ := r.Head.Write()
	binary.Write(buf, binary.BigEndian, headBytes)
	binary.Write(buf, binary.BigEndian, metaBytes)
	if r.Data != nil {
		binary.Write(buf, binary.BigEndian, r.Data)
	}
	if r.Attachment != nil {
		binary.Write(buf, binary.BigEndian, r.Attachment)
	}
	return buf.Bytes(), nil
}
/*
ReadIO reads one RPC data package from rw into r.

Wire layout: fixed-size Head, then Meta (protobuf), then optional Data and
Attachment. Data is decompressed according to Meta.CompressType.

Returns ERR_IGNORE_ERR for conditions the caller should treat as "skip this
package / close the connection" (unreadable head, zero-size heartbeat).

Fixes over the original: io.ReadFull replaces bare rw.Read (which may return
short reads on sockets, corrupting every field after the head), the body
read and meta unmarshal errors are no longer ignored, and snappy.Decode is
given a nil dst (it sizes the buffer itself) instead of a 1-byte slice.
*/
func (r *RpcDataPackage) ReadIO(rw io.ReadWriter) error {
	if rw == nil {
		return errors.New("bytes is nil")
	}
	doInit(r)

	// Read the fixed-size head in full.
	head := make([]byte, SIZE)
	if _, err := io.ReadFull(rw, head); err != nil {
		log.Println("Read head error", err)
		// only to close current connection
		return ERR_IGNORE_ERR
	}
	// unmarshal Head message
	r.Head.Read(head)

	// get RPC Meta size
	metaSize := r.Head.GetMetaSize()
	totalSize := r.Head.GetMessageSize()
	if totalSize <= 0 {
		// maybe heart beat data message, so do ignore here
		return ERR_IGNORE_ERR
	}

	// Read the remainder of the message (meta + data + attachment) in full.
	leftSize := totalSize
	body := make([]byte, leftSize)
	if _, err := io.ReadFull(rw, body); err != nil {
		return err
	}
	if err := proto.Unmarshal(body[0:metaSize], &r.Meta); err != nil {
		return err
	}

	attachmentSize := r.Meta.GetAttachmentSize()
	dataSize := leftSize - metaSize - attachmentSize
	dataOffset := metaSize
	if dataSize > 0 {
		dataOffset = dataSize + metaSize
		r.Data = body[metaSize:dataOffset]
		// Decompress the payload per the meta compress type.
		compressType := r.GetMeta().GetCompressType()
		if compressType == COMPRESS_GZIP {
			var err error
			r.Data, err = GUNZIP(r.Data)
			if err != nil {
				return err
			}
		} else if compressType == COMPRESS_SNAPPY {
			var err error
			r.Data, err = snappy.Decode(nil, r.Data)
			if err != nil {
				return err
			}
		}
	}
	// if need read Attachment
	if attachmentSize > 0 {
		r.Attachment = body[dataOffset:leftSize]
	}
	return nil
}
/*
Read parses a RPC package from the byte array b; b must not be nil.
*/
func (r *RpcDataPackage) Read(b []byte) error {
	if b == nil {
		return errors.New("b is nil")
	}
	return r.ReadIO(bytes.NewBuffer(b))
}
|
package cache
import (
"time"
"github.com/patrickmn/go-cache"
)
// cacheInstance is the lazily-created shared cache (5 minute default TTL,
// 30 second cleanup interval).
var cacheInstance *cache.Cache = nil

// getInstance returns the shared cache, creating it on first use.
//
// Fix: the original tested `cacheInstance != nil`, so the cache was never
// created and every use dereferenced nil.
// NOTE(review): this lazy init is not goroutine-safe; wrap in sync.Once if
// it can be reached from multiple goroutines.
func getInstance() *cache.Cache {
	if cacheInstance == nil {
		cacheInstance = cache.New(5*time.Minute, 30*time.Second)
	}
	return cacheInstance
}
func set(key string, value interface{}) {
cacheInstance.Set(key, value, 0)
}
|
package broker
import (
"bytes"
"fmt"
"log"
"net"
"net/rpc"
"os/exec"
"sync"
"time"
"github.com/shirou/gopsutil/cpu"
"github.com/shirou/gopsutil/mem"
"github.com/c12o16h1/shender/pkg/models"
)
const (
	MAX_CPU_LOAD      float64 = 60                    // Max acceptable load for CPU, percent
	MAX_MEMORY_USAGE  float64 = 75                    // Max acceptable usage of memory, percent
	MAIN_LOOP_TIMEOUT         = 10 * time.Millisecond // Timeout in main process loop to let CPU do more important things
	MAX_RENDERERS             = 10                    // Max amount of alive workers
	// Port range to run renderer workers
	MIN_RENDEDER_PORT int = 52500
	MAX_RENDEDER_PORT int = 57750
	ERR_INVALID_WORKER = models.Error("Invalid worker")
)

var (
	// busyPorts tracks renderer ports currently in use; guarded by the
	// mutex inside Crawl/handleJob.
	busyPorts = make(map[int]bool)
	// port is the last port handed out by nextPort (round-robin cursor).
	port = MIN_RENDEDER_PORT
)
/*
Crawl crawls website pages and caches the rendered results. It consumes jobs
from chJobs, spawns one renderer worker per job (bounded by MAX_RENDERERS via
the limiter channel), and emits results on chRes. It never returns.
*/
func Crawl(chJobs <-chan models.Job, chRes chan<- models.JobResult) error {
	var wg sync.WaitGroup
	var mtx sync.Mutex
	limiter := make(chan struct{}, MAX_RENDERERS)
	// (The original also started a debug goroutine that only slept forever;
	// it has been removed as a goroutine leak.)
	for {
		// NOTE(review): on low resources this only sleeps once and then
		// proceeds anyway — a `continue` may have been intended.
		if !enoughResources() {
			time.Sleep(MAIN_LOOP_TIMEOUT) // Sleep a bit, let CPU do other, more important loops
		}
		limiter <- struct{}{}         // would block if we already have enough renderers
		time.Sleep(MAIN_LOOP_TIMEOUT) // Sleep a bit, let CPU do other, more important loops
		job := <-chJobs
		mtx.Lock()
		port = nextPort(port)
		mtx.Unlock()
		// Add before starting the goroutine so a future Wait cannot race
		// with the worker's own Add (the original added inside handleJob).
		wg.Add(1)
		go spawnRenderer(port) // lifetime of rendeder is max 30 seconds, so it's safe
		go handleJob(&mtx, &wg, job, chRes, port, limiter)
	}
}

// handleJob renders one job via the RPC worker on the given port and sends
// the result on chRes. mtx guards busyPorts; wg tracks in-flight jobs.
// Fix: Mutex and WaitGroup are passed by POINTER — the original by-value
// parameters copied the locks (go vet copylocks), so Lock/Done operated on
// private copies and synchronized nothing.
func handleJob(mtx *sync.Mutex, wg *sync.WaitGroup, j models.Job, chRes chan<- models.JobResult, port int, limiter <-chan struct{}) {
	result := models.JobResult{
		Status: models.JobFailed,
		Job:    j,
	}
	defer func() {
		log.Print(port, ":", result.Job.Url, " : ", len(result.HTML))
		chRes <- result
		wg.Done()
		<-limiter // Drain from limiter channel, so allowing to spawn new workers and do other jobs
		// Free port
		mtx.Lock()
		delete(busyPorts, port)
		mtx.Unlock()
	}()
	time.Sleep(1 * time.Second) // wait for renderer to come up
	// Create worker client for this task
	c, err := rpc.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", port))
	if err != nil {
		log.Print(0, err)
		return
	}
	defer c.Close()
	// Check that worker is ok
	var ok string
	err = c.Call("Worker.Heartbeat", "", &ok)
	if err != nil || ok != models.OK {
		log.Print(1, err)
		return
	}
	// Do job
	url := "http://" + j.Url
	log.Print("ENQ:", port, ":", url)
	err = c.Call("Worker.Render", url, &result.HTML)
	if err != nil {
		log.Print(2, port, err)
		return
	}
	// Close worker, kill chrome etc
	c.Call("Worker.Close", 0, nil)
	result.Status = models.JobOk
}
// enoughResources reports whether current memory usage and CPU load are both
// under their configured ceilings, e.g. before spawning new workers or
// starting new jobs. Any sampling error counts as "not enough".
func enoughResources() bool {
	m, err := mem.VirtualMemory()
	if err != nil {
		return false
	}
	c, err := cpu.Percent(0, false)
	if err != nil {
		return false
	}
	return m.UsedPercent < MAX_MEMORY_USAGE && c[0] < MAX_CPU_LOAD
}
// nextPort picks the next renderer port after current (round robin, step 2),
// skipping ports that are reserved in busyPorts, already in use on the host,
// or whose associated chrome ports are in use. The chosen port is reserved
// in busyPorts. Falls back to MIN_RENDEDER_PORT when the range is exhausted.
// Callers must hold the mutex guarding busyPorts.
//
// Fixes over the original: the loop now tests p (the candidate) instead of
// the never-changing current, so port exhaustion terminates instead of
// spinning forever; and the chrome-port check uses an explicit flag — the
// original `continue` inside the inner range loop only advanced the inner
// loop, so busy chrome ports were never actually skipped.
func nextPort(current int) int {
	if current < MIN_RENDEDER_PORT {
		current = MIN_RENDEDER_PORT
	}
	p := current
	for p < MAX_RENDEDER_PORT {
		p += 2
		if _, reserved := busyPorts[p]; reserved {
			continue
		}
		// Check that the worker port itself is free
		if !isFreePort(p) {
			continue
		}
		// Check that the chrome ports are free
		chromeFree := true
		for _, cp := range chromePorts(p) {
			if !isFreePort(cp) {
				chromeFree = false
				break
			}
		}
		if !chromeFree {
			continue
		}
		busyPorts[p] = true
		return p
	}
	log.Print("Can't allocate PORT")
	return MIN_RENDEDER_PORT
}
// spawnRenderer launches a renderer worker process listening on port and
// blocks until it exits. On failure the error plus the worker's stderr is
// printed and the error returned.
func spawnRenderer(port int) error {
	var out, errOut bytes.Buffer
	cmd := exec.Command("./bin/render", "-port", fmt.Sprintf("%d", port))
	cmd.Stdout = &out
	cmd.Stderr = &errOut
	if err := cmd.Run(); err != nil {
		fmt.Println(fmt.Sprint(err) + ": " + errOut.String())
		return err
	}
	return nil
}
// chromePorts returns the two ports chrome may occupy for a renderer on the
// given port: port-10000 and port-9999.
func chromePorts(port int) []int {
	base := port - 10000
	return []int{base, base + 1}
}
// Checks that port is free
// True for free port
func isFreePort(p int) bool {
conn, _ := net.Dial("tcp", net.JoinHostPort("127.0.0.1", string(p)))
if conn != nil {
conn.Close()
return false
}
return true
}
// sampleIcomingQueue endlessly feeds a fixed set of demo jobs into chJobs,
// one every five seconds. Never returns.
func sampleIcomingQueue(chJobs chan models.Job) {
	jobs := []models.Job{
		{Url: "http://google.com"},
		{Url: "http://react.com"},
		{Url: "http://angular.io"},
	}
	for {
		for _, j := range jobs {
			time.Sleep(5 * time.Second)
			chJobs <- j
		}
	}
}
|
package domain
// UnbindRequest encapsulates the request payload information
// for an unbind request. All fields are identifiers supplied by the
// platform; none are optional.
type UnbindRequest struct {
	// BindingID is the ID value for the service binding
	// represented by this unbind request.
	BindingID string
	// InstanceID is the ID value for the service instance
	// to be unbound in this unbind request.
	InstanceID string
	// ServiceID is the ID value of the service provided in
	// the service catalog. This service was specified when
	// the service instance was provisioned.
	ServiceID string
	// PlanID is the ID value of the plan provided in the
	// service catalog. This plan was specified when the
	// service instance was provisioned.
	PlanID string
}
|
package constants
import "errors"
var (
	StatusActive   = 1          // record/listing is active
	StatusInActive = 0          // record/listing is inactive
	GetItemsLimit  = int64(100) // default page size for item listings
	// ERRPRODUCTUNAVAILABLE is returned when a requested product cannot be served.
	ERRPRODUCTUNAVAILABLE = errors.New("sorry this product is unavailable")
)
|
package main
import (
"bufio"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/jungju/circle_manager/modules"
)
// beegoAppProcess holds the running `bee run` process so it can be killed
// once resource generation is detected.
var beegoAppProcess *os.Process

// genBeegoAppResource starts the beego app, waits until the generated
// commentsRouter file appears (or the watcher times out), then kills the
// app. Returns an error when generation was not detected.
func genBeegoAppResource() error {
	fmt.Println("Starting app of beego")
	if err := beegoBuild(); err != nil {
		return err
	}
	fmt.Println("Started app of beego")
	commentRouterExistChan := make(chan bool)
	go waitCommentRouterFile(commentRouterExistChan)
	fmt.Println("Wait for generate of resources")
	commentRouterExist := <-commentRouterExistChan
	if commentRouterExist {
		fmt.Println("Genarated resources! Exit app of beego")
		beegoAppProcess.Kill()
		return nil
	}
	// "비정상 종료" is Korean for "abnormal termination"; the runtime
	// string is kept as-is.
	return errors.New("비정상 종료")
}
// beegoBuild launches `bee run` (dev runmode, doc generation enabled) for
// the app and stores the resulting process in beegoAppProcess.
func beegoBuild() error {
	fmt.Println("Executing app build")
	p, err := executer("beego run", "bee", []string{"run", "-runmode=dev", "-gendoc=true", "-e=./"}, false)
	beegoAppProcess = p
	return err
}
// dockerBuild runs `docker build -t <url> .` in the app directory and waits
// for it to finish.
func dockerBuild(url string) error {
	fmt.Println("Executing docker build. docker url: ", url)
	_, err := executer("docker build", "docker", []string{"build", "-t", url, "."}, true)
	return err
}

// dockerPush runs `docker push <url>` and waits for it to finish.
func dockerPush(url string) error {
	fmt.Println("Executing docker push. docker url: ", url)
	_, err := executer("docker push", "docker", []string{"push", url}, true)
	return err
}
// executeSys replaces the current process image via syscall.Exec, inheriting
// the current environment.
// NOTE(review): cmdType and cmdName are unused — the binary looked up is
// envs.AppDir, which looks suspicious (LookPath on a directory); confirm
// intent.
// NOTE(review): failures panic instead of being returned; the error result
// is only ever nil (and unreachable when Exec succeeds).
func executeSys(cmdType string, cmdName string, cmdArgs []string) error {
	binary, lookErr := exec.LookPath(envs.AppDir)
	if lookErr != nil {
		panic(lookErr)
	}
	env := os.Environ()
	execErr := syscall.Exec(binary, cmdArgs, env)
	if execErr != nil {
		panic(execErr)
	}
	return nil
}
// executer starts cmdName with cmdArgs in the app directory, streaming the
// command's stdout to our stdout prefixed with cmdType. When cmdWait is true
// it blocks until the command exits. Returns the underlying process so the
// caller can control it later (e.g. Kill).
func executer(cmdType string, cmdName string, cmdArgs []string, cmdWait bool) (*os.Process, error) {
	cmd := exec.Command(cmdName, cmdArgs...)
	cmd.Dir = envs.AppDir
	cmdReader, err := cmd.StdoutPipe()
	if err != nil {
		fmt.Fprintln(os.Stderr, "Error creating StdoutPipe for Cmd", err)
		return nil, err
	}
	// Relay the command's stdout line by line.
	scanner := bufio.NewScanner(cmdReader)
	go func() {
		for scanner.Scan() {
			fmt.Printf("%s out | %s\n", cmdType, scanner.Text())
		}
	}()
	if err := cmd.Start(); err != nil {
		// Return the Start error itself. The original called cmd.Output()
		// here, which always fails on an already-started command and
		// shadowed the real error.
		fmt.Fprintln(os.Stderr, "Error starting Cmd", err)
		return nil, err
	}
	if cmdWait {
		if err := cmd.Wait(); err != nil {
			fmt.Fprintln(os.Stderr, "Error waiting for Cmd", err)
			return nil, err
		}
	}
	return cmd.Process, nil
}
// waitCommentRouterFile polls the app's routers directory once per second
// for a generated "commentsRouter*" file, reporting the outcome on
// commentRouterExist: true when found, false after ~100 seconds.
func waitCommentRouterFile(commentRouterExist chan bool) {
	t := time.NewTicker(1 * time.Second)
	defer t.Stop()
	cnt := 0
	limit := 100
	for {
		select {
		case <-t.C:
			cnt++
			fmt.Println("Finding resource of beego")
			if cnt > limit {
				fmt.Println("Failed finding resource of beego")
				commentRouterExist <- false
				return
			}
			modules.SubDirectoryFiles(filepath.Join(envs.AppDir, "routers"), func(info os.FileInfo) error {
				// A file whose name starts with "commentsRouter" signals success.
				if strings.Index(info.Name(), "commentsRouter") == 0 {
					fmt.Println("Suceessed finding resource of beego. Find file name: ", info.Name())
					t.Stop()
					commentRouterExist <- true
				}
				return nil
			})
			// NOTE(review): after sending true the loop keeps running (no
			// return); with an unbuffered channel a second match would block
			// forever — confirm whether an explicit return was intended.
		}
	}
}
|
package decorators
import (
"fmt"
"sort"
"strings"
"github.com/itchyny/gojq"
"github.com/mitchellh/mapstructure"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/reference"
operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/codec"
)
const (
	// Format strings for decorator construction errors; %s carries the reason.
	newOperatorError      = "cannot create new Operator: %s"
	newComponentError     = "cannot create new Component: %s"
	componentLabelKeyError = "cannot generate component label key: %s"

	// ComponentLabelKeyPrefix is the key prefix used for labels marking operator component resources.
	ComponentLabelKeyPrefix = "operators.coreos.com/"
)

var (
	// csvGVK identifies ClusterServiceVersion resources.
	csvGVK = operatorsv1alpha1.SchemeGroupVersion.WithKind(operatorsv1alpha1.ClusterServiceVersionKind)
	// Pre-parsed jq programs for extracting status conditions (see init).
	componentConditionsJQ *gojq.Query
	csvConditionsJQ       *gojq.Query
)
// init compiles the jq programs used to pull conditions out of component
// resources and CSVs. A parse failure is a programmer error in the literal
// programs above, so it panics at startup rather than surfacing later.
func init() {
	var err error
	if componentConditionsJQ, err = gojq.Parse(".status.conditions"); err != nil {
		panic(fmt.Errorf("failed to parse component conditions jq: %s", err))
	}
	if csvConditionsJQ, err = gojq.Parse(".status | [{\"type\": .phase, \"status\": \"True\", \"reason\": .reason, \"message\": .message, \"lastUpdateTime\": .lastUpdateTime,\"lastTransitionTime\": .lastTransitionTime}]"); err != nil {
		panic(fmt.Errorf("failed to parse csv conditions jq: %s", err))
	}
}
// OperatorNames returns a list of operator names extracted from the given
// labels. Only keys carrying the component label prefix contribute a name.
func OperatorNames(labels map[string]string) []types.NamespacedName {
	var names []types.NamespacedName
	for key := range labels {
		if strings.HasPrefix(key, ComponentLabelKeyPrefix) {
			names = append(names, types.NamespacedName{
				Name: strings.TrimPrefix(key, ComponentLabelKeyPrefix),
			})
		}
	}
	return names
}
// OperatorFactory builds Operator decorators.
type OperatorFactory interface {
	// NewOperator returns an Operator decorator that wraps the given external Operator representation.
	// An error is returned if the decorator cannot be instantiated.
	NewOperator(external *operatorsv1.Operator) (*Operator, error)

	// NewPackageOperator returns an Operator decorator for a package and install namespace.
	NewPackageOperator(pkg, namespace string) (*Operator, error)
}
// schemedOperatorFactory is an OperatorFactory that instantiates Operator decorators with a shared scheme.
type schemedOperatorFactory struct {
	scheme *runtime.Scheme // injected into every Operator this factory creates
}
// NewOperator wraps a deep copy of external in an Operator decorator sharing
// the factory's scheme. A nil external is rejected.
func (s *schemedOperatorFactory) NewOperator(external *operatorsv1.Operator) (*Operator, error) {
	if external == nil {
		return nil, fmt.Errorf(newOperatorError, "cannot create operator with nil external type")
	}
	op := &Operator{
		Operator: external.DeepCopy(),
		scheme:   s.scheme,
	}
	return op, nil
}
// NewPackageOperator returns an Operator decorator for a package and install
// namespace. For cluster scope (NamespaceAll) the name is just the package;
// otherwise it is "<package>.<namespace>". Any over-long name is truncated
// to the 63-character label key limit by ComponentLabelKey (see
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set).
func (s *schemedOperatorFactory) NewPackageOperator(pkg, namespace string) (*Operator, error) {
	name := pkg
	if namespace != corev1.NamespaceAll {
		name = fmt.Sprintf("%s.%s", pkg, namespace)
	}
	o := &operatorsv1.Operator{}
	o.SetName(name)
	return s.NewOperator(o)
}
// NewSchemedOperatorFactory returns an OperatorFactory that supplies the
// given (non-nil) scheme to every Operator it creates.
func NewSchemedOperatorFactory(scheme *runtime.Scheme) (OperatorFactory, error) {
	if scheme == nil {
		return nil, fmt.Errorf("cannot create factory with nil scheme")
	}
	f := &schemedOperatorFactory{scheme: scheme}
	return f, nil
}
// Operator decorates an external Operator and provides convenience methods for managing it.
type Operator struct {
	*operatorsv1.Operator
	scheme            *runtime.Scheme
	componentLabelKey string // memoized result of ComponentLabelKey; empty until first call
}
// ComponentLabelKey returns the operator's completed component label key
// (prefix + operator name), computing and caching it on first use.
// Names longer than 63 characters are truncated to the label key name limit,
// with trailing '.', '_' and '-' removed so the result remains valid.
func (o *Operator) ComponentLabelKey() (string, error) {
	if o.componentLabelKey != "" {
		return o.componentLabelKey, nil
	}
	name := o.GetName()
	if name == "" {
		return "", fmt.Errorf(componentLabelKeyError, "empty name field")
	}
	if len(name) > 63 {
		// Truncate
		name = name[0:63]
		// Remove trailing illegal characters
		idx := len(name) - 1
		for ; idx >= 0; idx-- {
			lastChar := name[idx]
			if lastChar != '.' && lastChar != '_' && lastChar != '-' {
				break
			}
		}
		// Just being defensive. This is unlikely to happen since the operator name should
		// be compatible kubernetes naming constraints
		if idx < 0 {
			return "", fmt.Errorf(componentLabelKeyError, "unsupported name field")
		}
		// Update Label
		name = name[0 : idx+1]
	}
	o.componentLabelKey = ComponentLabelKeyPrefix + name
	return o.componentLabelKey, nil
}
// ComponentLabelSelector returns a LabelSelector that matches resources
// bearing this operator's component label (Exists on the key).
func (o *Operator) ComponentLabelSelector() (*metav1.LabelSelector, error) {
	key, err := o.ComponentLabelKey()
	if err != nil {
		return nil, err
	}
	req := metav1.LabelSelectorRequirement{
		Key:      key,
		Operator: metav1.LabelSelectorOpExists,
	}
	return &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{req},
	}, nil
}
// NonComponentLabelSelector returns a LabelSelector that matches resources that do not have this operator's component label.
func (o *Operator) NonComponentLabelSelector() (*metav1.LabelSelector, error) {
	key, err := o.ComponentLabelKey()
	if err != nil {
		return nil, err
	}
	// Resources missing the key are, by definition, not components.
	requirement := metav1.LabelSelectorRequirement{
		Key:      key,
		Operator: metav1.LabelSelectorOpDoesNotExist,
	}
	return &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{requirement},
	}, nil
}
// ComponentSelector returns a Selector that matches this operator's component label.
func (o *Operator) ComponentSelector() (labels.Selector, error) {
	ls, err := o.ComponentLabelSelector()
	if err != nil {
		return nil, err
	}
	return metav1.LabelSelectorAsSelector(ls)
}
// NonComponentSelector returns a Selector that matches resources that do not have this operator's component label.
func (o *Operator) NonComponentSelector() (labels.Selector, error) {
	ls, err := o.NonComponentLabelSelector()
	if err != nil {
		return nil, err
	}
	return metav1.LabelSelectorAsSelector(ls)
}
// ResetComponents resets the component selector and references in the operator's status.
func (o *Operator) ResetComponents() error {
	selector, err := o.ComponentLabelSelector()
	if err != nil {
		return err
	}
	// A fresh Components block drops any previously recorded refs.
	o.Status.Components = &operatorsv1.Components{LabelSelector: selector}
	return nil
}
// AdoptComponent adds the operator's component label to the given component, returning true if the
// component label was added and false if it already existed.
func (o *Operator) AdoptComponent(component runtime.Object) (adopted bool, err error) {
	labelKey, err := o.ComponentLabelKey()
	if err != nil {
		return false, err
	}
	accessor, err := meta.Accessor(component)
	if err != nil {
		return false, err
	}
	componentLabels := accessor.GetLabels()
	if componentLabels == nil {
		// Install an empty label map so the key below lands on the object.
		componentLabels = map[string]string{}
		accessor.SetLabels(componentLabels)
	}
	if _, labeled := componentLabels[labelKey]; labeled {
		return false, nil
	}
	componentLabels[labelKey] = ""
	return true, nil
}
// DisownComponent removes the operator's component label from the given component, returning true if the
// component label was removed and false if it wasn't present.
func (o *Operator) DisownComponent(component runtime.Object) (disowned bool, err error) {
	labelKey, err := o.ComponentLabelKey()
	if err != nil {
		return false, err
	}
	accessor, err := meta.Accessor(component)
	if err != nil {
		return false, err
	}
	componentLabels := accessor.GetLabels()
	if len(componentLabels) == 0 {
		// No labels at all — not a component.
		return false, nil
	}
	_, disowned = componentLabels[labelKey]
	delete(componentLabels, labelKey)
	accessor.SetLabels(componentLabels)
	return disowned, nil
}
// AddComponents adds the given components to the operator's status and returns an error
// if a component isn't associated with the operator by label.
// List type arguments are flattened to their nested elements before being added.
func (o *Operator) AddComponents(components ...runtime.Object) error {
	selector, err := o.ComponentSelector()
	if err != nil {
		return err
	}
	var refs []operatorsv1.RichReference
	for _, obj := range components {
		// Unpack nested components: if obj is a list, recurse over its elements.
		if nested, err := meta.ExtractList(obj); err == nil {
			if err = o.AddComponents(nested...); err != nil {
				return err
			}
			continue
		}
		component, err := NewComponent(obj, o.scheme)
		if err != nil {
			return err
		}
		// Refuse components whose labels do not select them as this operator's.
		if matches, err := component.Matches(selector); err != nil {
			return err
		} else if !matches {
			return fmt.Errorf("cannot add component %s/%s/%s to Operator %s: component labels not selected by %s", component.GetKind(), component.GetNamespace(), component.GetName(), o.GetName(), selector.String())
		}
		ref, err := component.Reference()
		if err != nil {
			return err
		}
		refs = append(refs, *ref)
	}
	// Lazily initialize the status Components block on first use.
	if o.Status.Components == nil {
		if err := o.ResetComponents(); err != nil {
			return err
		}
	}
	o.Status.Components.Refs = append(o.Status.Components.Refs, refs...)
	// Sort the component refs to so subsequent reconciles of the object do not change
	// the status and result in an update to the object.
	sort.SliceStable(o.Status.Components.Refs, func(i, j int) bool {
		if o.Status.Components.Refs[i].Kind != o.Status.Components.Refs[j].Kind {
			return o.Status.Components.Refs[i].Kind < o.Status.Components.Refs[j].Kind
		}
		if o.Status.Components.Refs[i].APIVersion != o.Status.Components.Refs[j].APIVersion {
			return o.Status.Components.Refs[i].APIVersion < o.Status.Components.Refs[j].APIVersion
		}
		if o.Status.Components.Refs[i].Namespace != o.Status.Components.Refs[j].Namespace {
			return o.Status.Components.Refs[i].Namespace < o.Status.Components.Refs[j].Namespace
		}
		return o.Status.Components.Refs[i].Name < o.Status.Components.Refs[j].Name
	})
	return nil
}
// SetComponents sets the component references in the operator's status to the given components.
// Existing refs are discarded before the new set is added.
func (o *Operator) SetComponents(components ...runtime.Object) error {
	err := o.ResetComponents()
	if err != nil {
		return err
	}
	return o.AddComponents(components...)
}
// Component wraps a component resource in unstructured form together with the
// scheme used to convert and reference it.
type Component struct {
	*unstructured.Unstructured
	// scheme is used for conversions and for building object references.
	scheme *runtime.Scheme
}
// NewComponent returns a new Component instance, converting the given object
// to unstructured form with the supplied scheme. Both arguments must be non-nil.
func NewComponent(component runtime.Object, scheme *runtime.Scheme) (*Component, error) {
	switch {
	case component == nil:
		return nil, fmt.Errorf(newComponentError, "nil component")
	case scheme == nil:
		return nil, fmt.Errorf(newComponentError, "nil scheme")
	}
	u := &unstructured.Unstructured{}
	if err := scheme.Convert(component, u, nil); err != nil {
		return nil, err
	}
	// GVK may have been lost from PartialObjectMetadata during conversion.
	gvk := component.GetObjectKind().GroupVersionKind()
	if gvk != schema.EmptyObjectKind.GroupVersionKind() {
		u.SetGroupVersionKind(gvk)
	}
	return &Component{Unstructured: u, scheme: scheme}, nil
}
// Matches reports whether the component's labels are selected by the given selector.
func (c *Component) Matches(selector labels.Selector) (matches bool, err error) {
	accessor, err := meta.Accessor(c)
	if err != nil {
		return false, err
	}
	return selector.Matches(labels.Set(accessor.GetLabels())), nil
}
// Reference builds a RichReference for the component: a truncated object
// reference plus conditions extracted from the component's content with a
// jq-style query (CSVs use a dedicated query).
func (c *Component) Reference() (ref *operatorsv1.RichReference, err error) {
	truncated, err := c.truncatedReference()
	if err != nil {
		return
	}
	ref = &operatorsv1.RichReference{
		ObjectReference: truncated,
	}
	// Pick the conditions query; CSVs expose their conditions differently.
	query := componentConditionsJQ
	switch c.GroupVersionKind() {
	case csvGVK:
		query = csvConditionsJQ
	}
	iter := query.Run(c.UnstructuredContent())
	// Drain the iterator keeping the last value; abort if the query errors.
	var out interface{}
	for {
		v, ok := iter.Next()
		if !ok {
			break
		}
		if err, ok = v.(error); ok {
			return
		}
		out = v
	}
	// Decode the raw query output into the typed conditions slice.
	var decoder *mapstructure.Decoder
	decoder, err = mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Metadata:   nil,
		DecodeHook: codec.MetaTimeHookFunc(),
		Result:     &ref.Conditions,
	})
	if err != nil {
		return
	}
	err = decoder.Decode(out)
	return
}
// truncatedReference returns an ObjectReference for the component reduced to
// its identifying fields (kind, apiVersion, namespace, name).
func (c *Component) truncatedReference() (ref *corev1.ObjectReference, err error) {
	full, err := reference.GetReference(c.scheme, c.Unstructured)
	if err != nil {
		return nil, err
	}
	// Keep only the identifying fields; drop UID, ResourceVersion, etc.
	return &corev1.ObjectReference{
		Kind:       full.Kind,
		APIVersion: full.APIVersion,
		Namespace:  full.Namespace,
		Name:       full.Name,
	}, nil
}
|
// Package main -
package main
import (
"bytes"
"fmt"
"sync"
"sync/atomic"
"time"
)
// main demonstrates two goroutines ("people in a hallway") trying to pass each
// other while moving on a shared cadence — the classic livelock illustration.
func main() {
	fmt.Println("vim-go")
	// cadence broadcasts every millisecond so all parties move in lockstep.
	cadence := sync.NewCond(&sync.Mutex{})
	go func() {
		for range time.Tick(1 * time.Millisecond) {
			cadence.Broadcast()
		}
	}()
	// takeStep blocks until the next cadence broadcast.
	takeStep := func() {
		cadence.L.Lock()
		cadence.Wait()
		cadence.L.Unlock()
	}
	// tryDir allows a person to try and move in a direction - returning success
	// or failure. Each direction is represented as a count of the number of
	// people trying to move in that direction.
	tryDir := func(dirName string, dir *int32, out *bytes.Buffer) bool {
		fmt.Fprintf(out, " %v", dirName)
		// Declare our intention to move in direction
		atomic.AddInt32(dir, 1)
		// simulate constant cadence amongst all parties
		takeStep()
		// Success only if we were the sole mover in this direction.
		if atomic.LoadInt32(dir) == 1 {
			fmt.Fprintf(out, ". Success!")
			return true
		}
		takeStep()
		// Step in direction was a failure, so decrement direction
		atomic.AddInt32(dir, -1)
		return false
	}
	var left, right int32
	tryLeft := func(out *bytes.Buffer) bool { return tryDir("left", &left, out) }
	tryRight := func(out *bytes.Buffer) bool { return tryDir("right", &right, out) }
	walk := func(walking *sync.WaitGroup, name string) {
		var out bytes.Buffer
		defer func() {
			fmt.Println(out.String())
		}()
		defer walking.Done()
		fmt.Fprintf(&out, "%v is trying to scoot:", name)
		// Bounded attempts so the livelock cannot hang the program forever.
		for i := 1; i < 5; i++ {
			// NOTE(review): this branch fires when a step SUCCEEDS, yet the
			// original comment claimed it fired "if they both fail" — confirm
			// which behavior was intended before relying on this demo.
			if tryLeft(&out) || tryRight(&out) {
				fmt.Println("Killing!")
				return
			}
		}
		fmt.Fprintf(&out, "\n%v tosses her hands up in exasperation!", name)
	}
	var peopleInHallway sync.WaitGroup
	peopleInHallway.Add(2)
	go walk(&peopleInHallway, "Alice")
	go walk(&peopleInHallway, "Barbara")
	peopleInHallway.Wait()
}
|
package concurrent
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestForeach verifies that Foreach applies the callback to every element of
// the slice and returns no error. The interleaved start/stop logs make any
// concurrent execution of the callbacks visible in verbose test output.
func TestForeach(t *testing.T) {
	slice := []int{1, 2, 3, 4, 5}
	require.NoError(t, Foreach(slice, func(i int, v interface{}) error {
		t.Logf("slice[%d]=%v start", i, v)
		// Sleep long enough for overlapping executions to show in the logs.
		time.Sleep(20 * time.Millisecond)
		t.Logf("slice[%d]=%v stop", i, v)
		return nil
	}))
}
|
// Copyright 2020 MongoDB Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"strings"
"testing"
"go.mongodb.org/ops-manager/opsmngr"
)
// TestStore_apiPath checks that apiPath selects the service-appropriate API
// prefix for both Ops Manager and Atlas ("cloud") configurations.
func TestStore_apiPath(t *testing.T) {
	assertContains := func(t *testing.T, got, want string) {
		t.Helper()
		if !strings.Contains(got, want) {
			t.Errorf("apiPath() = %s; want '%s'", got, want)
		}
	}
	t.Run("ops manager", func(t *testing.T) {
		s := &Store{service: "ops-manager"}
		assertContains(t, s.apiPath("localhost"), opsmngr.APIPublicV1Path)
	})
	t.Run("atlas", func(t *testing.T) {
		s := &Store{service: "cloud"}
		assertContains(t, s.apiPath("localhost"), atlasAPIPath)
	})
}
|
package main
import (
"log"
"net"
pb "../pb"
"../service"
"google.golang.org/grpc"
)
// main starts a gRPC server for MyService on port 8888 and blocks serving
// requests until the listener fails or the process terminates.
func main() {
	listenPort, err := net.Listen("tcp", ":8888")
	if err != nil {
		log.Fatalln(err)
	}
	server := grpc.NewServer()
	myService := &service.MyService{}
	// Register the concrete service implementation with the server.
	pb.RegisterMyServiceServer(server, myService)
	// Serve blocks; previously its error was silently discarded, hiding
	// listener failures. Report it before exiting.
	if err := server.Serve(listenPort); err != nil {
		log.Fatalln(err)
	}
}
|
package main
import (
"testing"
"time"
"github.com/abiosoft/semaphore"
"github.com/eclipse/paho.mqtt.golang"
)
// FakeMQTTMessage is a test double for the mqtt.Message interface that
// returns canned field values.
type FakeMQTTMessage struct {
	duplicate bool
	qos       byte
	retained  bool
	topic     string
	messageID uint16
	payload   []byte
}

// Duplicate returns the canned duplicate flag.
func (m FakeMQTTMessage) Duplicate() bool {
	return m.duplicate
}

// Qos returns the canned QoS level.
func (m FakeMQTTMessage) Qos() byte {
	return m.qos
}

// Retained returns the canned retained flag.
func (m FakeMQTTMessage) Retained() bool {
	return m.retained
}

// Topic returns the canned topic.
func (m FakeMQTTMessage) Topic() string {
	return m.topic
}

// MessageID returns the canned message id.
func (m FakeMQTTMessage) MessageID() uint16 {
	return m.messageID
}

// Payload returns the canned payload bytes.
func (m FakeMQTTMessage) Payload() []byte {
	return m.payload
}
// FakeMQTTToken is a no-op mqtt token: it never reports an error and all
// waits complete immediately.
type FakeMQTTToken struct {
}

// Error always returns nil.
func (t FakeMQTTToken) Error() error {
	return nil
}

// Wait completes immediately.
func (t FakeMQTTToken) Wait() bool {
	return true
}

// WaitTimeout completes immediately regardless of the timeout.
func (t FakeMQTTToken) WaitTimeout(d time.Duration) bool {
	return true
}
// FakeMQTT is a mock MQTT client for tests; connectedSemaphore guards the
// Connected flag against concurrent access.
type FakeMQTT struct {
	t                  *testing.T
	Connected          bool
	connectedSemaphore *semaphore.Semaphore
}

// NewMQTTMockedClient returns a FakeMQTT wired to the given test, starting
// in the disconnected state.
func NewMQTTMockedClient(t *testing.T) mqtt.Client {
	c := &FakeMQTT{t: t}
	c.Connected = false
	c.connectedSemaphore = semaphore.New(1)
	return c
}
// IsConnected reports the mock's connection state; the read is guarded by
// the semaphore so it is safe against concurrent Connect calls.
func (q *FakeMQTT) IsConnected() bool {
	q.connectedSemaphore.Acquire()
	defer q.connectedSemaphore.Release()
	return q.Connected
}
// Publish logs the message to the test log instead of sending it, and returns
// an immediately-successful token.
// NOTE(review): the payload.(string) assertion panics for non-string
// payloads — confirm all test callers publish strings.
func (q *FakeMQTT) Publish(topic string, qos byte, retain bool, payload interface{}) mqtt.Token {
	q.t.Logf("PUB: %s [qos:%d,retain:%t] - %s", topic, qos, retain, payload.(string))
	return FakeMQTTToken{}
}

// Subscribe is a no-op that returns an immediately-successful token.
func (q *FakeMQTT) Subscribe(topic string, qos byte, handler mqtt.MessageHandler) mqtt.Token {
	return FakeMQTTToken{}
}

// Unsubscribe is a no-op that returns an immediately-successful token.
func (q *FakeMQTT) Unsubscribe(topic ...string) mqtt.Token {
	return FakeMQTTToken{}
}

// SubscribeMultiple is a no-op that returns an immediately-successful token.
func (q *FakeMQTT) SubscribeMultiple(topics map[string]byte, handler mqtt.MessageHandler) mqtt.Token {
	return FakeMQTTToken{}
}
// AddRoute is a no-op.
func (q *FakeMQTT) AddRoute(topic string, handler mqtt.MessageHandler) {
}

// Connect marks the mock as connected (under the semaphore) and returns an
// immediately-successful token.
func (q *FakeMQTT) Connect() mqtt.Token {
	q.connectedSemaphore.Acquire()
	q.Connected = true
	q.connectedSemaphore.Release()
	return FakeMQTTToken{}
}

// Disconnect is a no-op; the Connected flag is deliberately left unchanged.
func (q *FakeMQTT) Disconnect(r uint) {
}

// OptionsReader returns a zero-value options reader.
func (q *FakeMQTT) OptionsReader() mqtt.ClientOptionsReader {
	return mqtt.ClientOptionsReader{}
}
|
package main
import "encoding/json"
import "log"
import "net/http"
import "github.com/gorilla/mux"
// pets is the in-memory "database" backing all API handlers.
var pets []Pet

// Pet describes a single pet and (optionally) its owner.
type Pet struct {
	Name   string  `json:"name"`
	Animal string  `json:"animal"`
	Weight float64 `json:"weight"`
	Age    int     `json:"age"`
	Owner  *Owner  `json:"owner"`
}

// Owner describes who a pet belongs to; OwnerPayment is never serialized ("-").
type Owner struct {
	Firstname    string `json:"firstname"`
	Surname      string `json:"surname"`
	OwnerPayment string `json:"-"`
}
// getPets responds with the full pet list as JSON.
func getPets(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	enc := json.NewEncoder(w)
	enc.Encode(pets)
}
// getPet responds with the pet whose name matches the {name} path parameter,
// or an empty Pet object when no pet matches.
func getPet(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	name := mux.Vars(r)["name"]
	for _, pet := range pets {
		if pet.Name != name {
			continue
		}
		json.NewEncoder(w).Encode(pet)
		return
	}
	json.NewEncoder(w).Encode(&Pet{})
}
// createPet decodes a Pet from the request body, appends it to the list, and
// echoes it back. Malformed JSON is now rejected with 400 Bad Request instead
// of being silently stored as a zero-value pet (the decode error was ignored).
func createPet(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	var pet Pet
	if err := json.NewDecoder(r.Body).Decode(&pet); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	pets = append(pets, pet)
	json.NewEncoder(w).Encode(pet)
}
// updatePet replaces the pet named by the {name} path parameter with the pet
// decoded from the request body. The body is decoded BEFORE the old entry is
// removed, so a malformed payload can no longer delete a pet and append an
// empty one (the original ignored the decode error after deleting).
// Responds with the updated pet, or the full list when no name matched.
func updatePet(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	params := mux.Vars(r)
	for index, item := range pets {
		if item.Name == params["name"] {
			var pet Pet
			if err := json.NewDecoder(r.Body).Decode(&pet); err != nil {
				http.Error(w, err.Error(), http.StatusBadRequest)
				return
			}
			// Remove the old entry, then append the replacement.
			pets = append(pets[:index], pets[index+1:]...)
			pets = append(pets, pet)
			json.NewEncoder(w).Encode(pet)
			return
		}
	}
	json.NewEncoder(w).Encode(pets)
}
// deletePet removes the pet named by the {name} path parameter, if present,
// and responds with the remaining pets.
func deletePet(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	name := mux.Vars(r)["name"]
	for i := range pets {
		if pets[i].Name == name {
			// Splice the matching element out of the slice.
			pets = append(pets[:i], pets[i+1:]...)
			break
		}
	}
	json.NewEncoder(w).Encode(pets)
}
// main seeds the in-memory pet list, registers the REST routes, and serves
// the API on port 8000.
func main() {
	// init router
	r := mux.NewRouter()
	// Seed data standing in for a real database.
	pets = append(pets, Pet{Name: "Burno", Animal: "puppy", Weight: 42, Age: 1, Owner: &Owner{Firstname: "Charlie", Surname: "Bucket", OwnerPayment: "Family"}})
	pets = append(pets, Pet{Name: "Sammy", Animal: "Dog", Weight: 50, Age: 4, Owner: &Owner{Firstname: "Jiminy", Surname: "Cricket", OwnerPayment: "Let your conscience be your guide"}})
	// Route handlers and endpoints.
	r.HandleFunc("/api/pets", getPets).Methods("GET")
	r.HandleFunc("/api/pets/{name}", getPet).Methods("GET")
	r.HandleFunc("/api/pets", createPet).Methods("POST")
	r.HandleFunc("/api/pets/{name}", updatePet).Methods("PUT")
	r.HandleFunc("/api/pets/{name}", deletePet).Methods("DELETE")
	log.Fatal(http.ListenAndServe(":8000", r))
}
|
package main
import (
"database/sql"
"fmt"
_ "github.com/go-sql-driver/mysql"
"os"
"os/exec"
"strings"
"time"
)
// main parses "killmyslave user pass [port]" from the command line and starts
// the replication watchdog loop. The else-after-return was flattened so the
// happy path stays left-aligned.
func main() {
	if len(os.Args) < 3 {
		fmt.Println("Usage: killmyslave user pass [port]")
		return
	}
	user := os.Args[1]
	pass := os.Args[2]
	port := "3306"
	if len(os.Args) >= 4 {
		port = os.Args[3]
	}
	start(user, pass, port)
}
// start polls SHOW SLAVE STATUS on the local MySQL instance every five
// seconds and stops the mysql service (then exits the loop) as soon as either
// replication thread is no longer running.
//
// Fixes: errors from sql.Open / Query / Columns / Scan previously only
// printed and execution continued, risking nil dereferences on rows/cols;
// each error now aborts the current poll iteration. Also `for i, _ :=` was
// reduced to the idiomatic `for i :=`.
func start(user string, pass string, port string) {
	for {
		repFailed := false
		func() {
			db, err := sql.Open("mysql", fmt.Sprint(user, ":", pass, "@tcp(127.0.0.1:", port, ")/information_schema"))
			if err != nil {
				fmt.Println(err)
				return
			}
			defer db.Close()
			rows, err := db.Query("SHOW SLAVE STATUS")
			if err != nil {
				fmt.Println(err)
				return
			}
			defer rows.Close()
			cols, err := rows.Columns()
			if err != nil {
				fmt.Println(err)
				return
			}
			// Scan every column of the (single) status row into strings.
			buff := make([]interface{}, len(cols))
			data := make([]string, len(cols))
			for i := range buff {
				buff[i] = &data[i]
			}
			for rows.Next() {
				if err := rows.Scan(buff...); err != nil {
					fmt.Println(err)
					return
				}
			}
			// Replication is healthy only when both threads report "yes".
			for k, col := range data {
				key := cols[k]
				if (key == "Slave_IO_Running" || key == "Slave_SQL_Running") && strings.ToLower(col) != "yes" {
					repFailed = true
				}
			}
		}()
		if repFailed {
			out, err := exec.Command("service", "mysql", "stop").Output()
			fmt.Println(string(out), err)
			break
		}
		time.Sleep(time.Second * 5)
	}
}
|
package confobject
import (
"github.com/smartystreets/assertions/should"
)
// Assertion matches the signature of should-style assertion helpers: it
// returns "" on success and a failure message otherwise.
type Assertion func(actual interface{}, expectedList ...interface{}) string

// Assert runs the given assertion against actual and reports (ok, message).
// ok is true exactly when the assertion produced an empty message.
// The redundant if/else (both branches returned the same tuple) was folded
// into a single return.
func Assert(actual interface{}, assert Assertion, expected ...interface{}) (bool, string) {
	result := so(actual, assert, expected...)
	return len(result) == 0, result
}

// so invokes the assertion; kept as a seam mirroring the convey-style API.
func so(actual interface{}, assert func(interface{}, ...interface{}) string, expected ...interface{}) string {
	return assert(actual, expected...)
}
var (
	// configAssertions maps assertion names, as referenced from configuration,
	// to their should-package implementations.
	configAssertions map[string]Assertion
)

// init populates the assertion lookup table. The commented-out entries exist
// in the should package but are deliberately not exposed here.
func init() {
	configAssertions = map[string]Assertion{
		"Equal":                  should.Equal,
		"NotEqual":               should.NotEqual,
		"AlmostEqual":            should.AlmostEqual,
		"NotAlmostEqual":         should.NotAlmostEqual,
		"BeNil":                  should.BeNil,
		"NotBeNil":               should.NotBeNil,
		"BeTrue":                 should.BeTrue,
		"BeFalse":                should.BeFalse,
		"BeZeroValue":            should.BeZeroValue,
		"BeGreaterThan":          should.BeGreaterThan,
		"BeGreaterThanOrEqualTo": should.BeGreaterThanOrEqualTo,
		"BeLessThan":             should.BeLessThan,
		"BeLessThanOrEqualTo":    should.BeLessThanOrEqualTo,
		"Contain":                should.Contain,
		"NotContain":             should.NotContain,
		// "ContainKey":    should.ContainKey,
		// "NotContainKey": should.NotContainKey,
		"BeIn":       should.BeIn,
		"NotBeIn":    should.NotBeIn,
		"BeEmpty":    should.BeEmpty,
		"NotBeEmpty": should.NotBeEmpty,
		"BeBlank":    should.BeBlank,
		"NotBeBlank": should.NotBeBlank,
	}
}
|
package utils
import (
"reflect"
"testing"
)
// AssertArraysEqual fails the test when expected and result differ.
// reflect.DeepEqual already returns false for slices of differing lengths,
// so the separate len() comparison was redundant and has been removed.
func AssertArraysEqual(t *testing.T, expected []int, result []int) {
	if !reflect.DeepEqual(expected, result) {
		fail(t, "Arrays were not equal", expected, result)
	}
}
// AssertEqual fails the test unless exactly two values are passed and they
// are equal.
//
// Fix: reflect.TypeOf(nil) returns a nil Type, and the original called
// Kind() on it unconditionally, panicking whenever either value was an
// untyped nil. Nil types are now handled explicitly: both nil compares
// equal; one nil is reported as a type mismatch.
func AssertEqual(t *testing.T, values ...interface{}) {
	if len(values) != 2 {
		t.Error("Only expecting two values.")
		return
	}
	expected := values[0]
	result := values[1]
	expectedType := reflect.TypeOf(expected)
	resultType := reflect.TypeOf(result)
	if expectedType == nil || resultType == nil {
		// At least one untyped nil: equal only when both are nil.
		if expectedType != resultType {
			fail(t, "Items under test are different types.", expectedType, resultType)
		}
		return
	}
	if expectedType.Kind() != resultType.Kind() {
		fail(t, "Items under test are different types.", expectedType, resultType)
		return
	}
	if expected != result {
		fail(t, "Items were not equal", expected, result)
	}
}
// fail reports an assertion failure with the expected and actual values on
// separate lines for readability.
func fail(t *testing.T, msg string, expected interface{}, result interface{}) {
	t.Error(msg, "\nExp:", expected, "\nAct:", result)
}
|
package main
import "fmt"
// Node is a flat tree node whose children are referenced by index.
type Node struct {
	Val   int
	Left  int
	Right int
}

// TreeNode is a binary tree node with pointer children.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// preorderTraversalRecusive prints the tree in preorder (root, left, right).
// NOTE(review): the root.Val == 0 guard treats any node whose value is zero
// as an empty tree and silently prunes its subtrees — confirm that zero is
// really meant as a sentinel, since it skips legitimate 0-valued nodes.
func preorderTraversalRecusive(root *TreeNode) {
	if root == nil || root.Val == 0 {
		// Original note (translated): the tree has no nodes.
		return
	}
	fmt.Printf("%v ", root.Val)
	preorderTraversalRecusive(root.Left)
	preorderTraversalRecusive(root.Right)
}
// preorderTraversal returns the preorder (root, left, right) sequence of the
// tree without recursion, using an explicit stack.
func preorderTraversal(root *TreeNode) []int {
	if root == nil {
		return nil
	}
	result := make([]int, 0)
	stack := make([]*TreeNode, 0)
	node := root
	for node != nil || len(stack) > 0 {
		for node != nil {
			// Visit before descending into the left subtree.
			result = append(result, node.Val)
			stack = append(stack, node)
			node = node.Left
		}
		top := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		node = top.Right
	}
	return result
}
// inorderTraversal returns the inorder (left, root, right) sequence of the
// tree without recursion, using an explicit stack.
func inorderTraversal(root *TreeNode) []int {
	if root == nil {
		return nil
	}
	result := make([]int, 0)
	stack := make([]*TreeNode, 0)
	node := root
	for node != nil || len(stack) > 0 {
		// Push the entire left spine.
		for node != nil {
			stack = append(stack, node)
			node = node.Left
		}
		top := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		result = append(result, top.Val)
		node = top.Right
	}
	return result
}
// postorderTraversal returns the postorder (left, right, root) sequence of
// the tree without recursion.
func postorderTraversal(root *TreeNode) []int {
	// lastVisit marks whether a node's right subtree has already been emitted.
	if root == nil {
		return nil
	}
	result := make([]int, 0)
	stack := make([]*TreeNode, 0)
	var lastVisit *TreeNode
	for root != nil || len(stack) > 0 {
		for root != nil {
			stack = append(stack, root)
			root = root.Left
		}
		// Peek at the top of the stack without popping.
		node := stack[len(stack)-1]
		// Emit the node only once its right subtree is absent or already done.
		if node.Right == nil || node.Right == lastVisit {
			stack = stack[:len(stack)-1] // pop
			result = append(result, node.Val)
			// Remember that this node has been emitted.
			lastVisit = node
		} else {
			root = node.Right
		}
	}
	return result
}
// DFSPreorderTraversal returns the preorder sequence via recursive
// depth-first search.
func DFSPreorderTraversal(root *TreeNode) []int {
	result := make([]int, 0)
	dfs(root, &result)
	return result
}

// dfs appends node's value, then recurses into the left and right subtrees.
func dfs(node *TreeNode, result *[]int) {
	if node == nil {
		return
	}
	*result = append(*result, node.Val)
	dfs(node.Left, result)
	dfs(node.Right, result)
}
// BFSPreorderTraversal returns the node values level by level
// (breadth-first), one slice per level.
func BFSPreorderTraversal(root *TreeNode) [][]int {
	result := make([][]int, 0)
	if root == nil {
		return result
	}
	queue := []*TreeNode{root}
	for len(queue) > 0 {
		// Snapshot the current level's size before enqueuing children.
		levelSize := len(queue)
		level := make([]int, 0)
		for i := 0; i < levelSize; i++ {
			node := queue[0]
			queue = queue[1:]
			level = append(level, node.Val)
			if node.Left != nil {
				queue = append(queue, node.Left)
			}
			if node.Right != nil {
				queue = append(queue, node.Right)
			}
		}
		result = append(result, level)
	}
	return result
}
// main exercises each traversal on a small sample tree and prints the results.
func main() {
	// Recursive preorder traversal.
	t1 := TreeNode{10, &TreeNode{5, &TreeNode{2, nil, nil}, nil}, &TreeNode{20, nil, nil}}
	fmt.Printf("递归前序遍历:")
	preorderTraversalRecusive(&t1)
	fmt.Printf("\n*************************\n")
	// Iterative preorder traversal.
	t2 := TreeNode{10, &TreeNode{5, &TreeNode{2, nil, nil}, nil}, &TreeNode{20, nil, nil}}
	fmt.Println("非递归前序遍历:", preorderTraversal(&t2))
	// Iterative inorder traversal.
	fmt.Println("非递归中序遍历:", inorderTraversal(&t2))
	// Iterative postorder traversal.
	fmt.Println("非递归后序遍历:", postorderTraversal(&t2))
	// Depth-first traversal.
	fmt.Println("深度优先遍历:", DFSPreorderTraversal(&t2))
	// Breadth-first traversal (per level).
	fmt.Println("广度优先遍历:", BFSPreorderTraversal(&t2))
}
|
package main
import (
"database/sql"
"fmt"
"net"
"os"
"os/signal"
"syscall"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
kitoc "github.com/go-kit/kit/tracing/opencensus"
kitgrpc "github.com/go-kit/kit/transport/grpc"
"github.com/oklog/oklog/pkg/group"
"google.golang.org/grpc"
"github.com/shijuvar/gokit-examples/services/account"
"github.com/shijuvar/gokit-examples/services/account/cockroachdb"
accountsvc "github.com/shijuvar/gokit-examples/services/account/implementation"
"github.com/shijuvar/gokit-examples/services/account/transport"
grpctransport "github.com/shijuvar/gokit-examples/services/account/transport/grpc"
"github.com/shijuvar/gokit-examples/services/account/transport/pb"
)
const (
	// port is the TCP address the gRPC server listens on.
	port = ":50051"
	// dbsource is the CockroachDB connection string for the orders database.
	dbsource = "postgresql://shijuvar@localhost:26257/ordersdb?sslmode=disable"
)
// main wires up and runs the account gRPC service: structured logging, a
// CockroachDB-backed repository, go-kit endpoints/transport, and a run-group
// that ties server lifetime to OS signals.
//
// Fix: the original discarded the error from net.Listen ("grpcListener, _ ="),
// so a busy or unbindable port produced a nil listener and a panic inside
// grpcServer.Serve. The error is now checked and the process exits cleanly.
func main() {
	// initialize our structured logger for the service
	var logger log.Logger
	{
		logger = log.NewLogfmtLogger(os.Stderr)
		logger = log.NewSyncLogger(logger)
		logger = level.NewFilter(logger, level.AllowDebug())
		logger = log.With(logger,
			"svc", "account",
			"ts", log.DefaultTimestampUTC,
			"clr", log.DefaultCaller,
		)
	}
	level.Info(logger).Log("msg", "service started")
	defer level.Info(logger).Log("msg", "service ended")
	var db *sql.DB
	{
		var err error
		// Connect to the "ordersdb" database
		db, err = sql.Open("postgres", dbsource)
		if err != nil {
			level.Error(logger).Log("exit", err)
			os.Exit(-1)
		}
	}
	// Create Account Service
	var svc account.Service
	{
		repository, err := cockroachdb.New(db, logger)
		if err != nil {
			level.Error(logger).Log("exit", err)
			os.Exit(-1)
		}
		svc = accountsvc.NewService(repository, logger)
	}
	var endpoints transport.Endpoints
	{
		endpoints = transport.MakeEndpoints(svc)
	}
	// set-up grpc transport
	var (
		ocTracing      = kitoc.GRPCServerTrace()
		serverOptions  = []kitgrpc.ServerOption{ocTracing}
		accountService = grpctransport.NewGRPCServer(endpoints, serverOptions, logger)
		grpcServer     = grpc.NewServer()
	)
	// Fail fast if the port cannot be bound.
	grpcListener, err := net.Listen("tcp", port)
	if err != nil {
		level.Error(logger).Log("exit", err)
		os.Exit(-1)
	}
	var g group.Group
	{
		/*
			Add an actor (function) to the group.
			Each actor must be pre-emptable by an interrupt function.
			That is, if interrupt is invoked, execute should return.
			Also, it must be safe to call interrupt even after execute has returned.
			The first actor (function) to return interrupts all running actors.
			The error is passed to the interrupt functions, and is returned by Run.
		*/
		g.Add(func() error {
			logger.Log("transport", "gRPC", "addr", port)
			pb.RegisterAccountServer(grpcServer, accountService)
			return grpcServer.Serve(grpcListener)
		}, func(error) {
			grpcListener.Close()
		})
	}
	{
		cancelInterrupt := make(chan struct{})
		g.Add(func() error {
			c := make(chan os.Signal, 1)
			signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
			select {
			case sig := <-c:
				return fmt.Errorf("received signal %s", sig)
			case <-cancelInterrupt:
				return nil
			}
		}, func(error) {
			close(cancelInterrupt)
		})
	}
	/*
		Run all actors (functions) concurrently. When the first actor returns,
		all others are interrupted. Run only returns when all actors have exited.
		Run returns the error returned by the first exiting actor
	*/
	level.Error(logger).Log("exit", g.Run())
}
|
package dao
import (
"fmt"
"github.com/xormplus/xorm"
"go.uber.org/zap"
"mix/test/codes"
entity "mix/test/entity/core/transaction"
mapper "mix/test/mapper/core/transaction"
"mix/test/utils/status"
)
// CreateBalance inserts item via the mapper and returns the auto-generated
// row id. Errors are logged before being returned.
func (p *Dao) CreateBalance(logger *zap.Logger, session *xorm.Session, item *entity.Balance) (id int64, err error) {
	res, createErr := mapper.CreateBalance(session, item)
	if createErr != nil {
		logger.Error("Call mapper.CreateBalance error", zap.Error(createErr))
		return 0, createErr
	}
	insertID, idErr := res.LastInsertId()
	if idErr != nil {
		logger.Error("Get id error", zap.Error(idErr))
	}
	return insertID, idErr
}
// GetBalance fetches the balance row with the given id; a missing row is not
// an error (item is nil).
func (p *Dao) GetBalance(logger *zap.Logger, session *xorm.Session, id int64) (item *entity.Balance, err error) {
	if item, err = mapper.GetBalance(session, id); err != nil {
		logger.Error("Call mapper.GetBalance error", zap.Error(err))
	}
	return
}
// MustGetBalance fetches the balance row with the given id and turns a
// missing row into a BalanceNotFound error.
func (p *Dao) MustGetBalance(logger *zap.Logger, session *xorm.Session, id int64) (item *entity.Balance, err error) {
	item, err = p.GetBalance(logger, session, id)
	if err != nil || item != nil {
		return
	}
	// No row found: report it as a coded error.
	err = status.Code(codes.BalanceNotFound)
	logger.Error(
		"Get balance error",
		zap.Error(err),
		zap.Int64("id", id),
	)
	return
}
// GetBalanceList fetches all balance rows.
func (p *Dao) GetBalanceList(logger *zap.Logger, session *xorm.Session) (items []*entity.Balance, err error) {
	if items, err = mapper.GetBalanceList(session); err != nil {
		logger.Error("Call mapper.GetBalanceList error", zap.Error(err))
	}
	return
}
// RemoveBalance deletes the balance row with the given id; deleting a
// non-existent row is not an error.
func (p *Dao) RemoveBalance(logger *zap.Logger, session *xorm.Session, id int64) (err error) {
	if _, err = mapper.RemoveBalance(session, id); err != nil {
		logger.Error("Call mapper.RemoveBalance error", zap.Error(err))
	}
	return
}
// MustRemoveBalance deletes the balance row with the given id and returns an
// error unless exactly one row was affected.
// Fix: the affected-count error message was copy-pasted from the update path
// ("update affected error") — it now says "remove affected error".
func (p *Dao) MustRemoveBalance(logger *zap.Logger, session *xorm.Session, id int64) (err error) {
	res, err := mapper.RemoveBalance(session, id)
	if err != nil {
		logger.Error("Call mapper.RemoveBalance error", zap.Error(err))
		return
	}
	affected, err := res.RowsAffected()
	if err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return
	}
	if affected != 1 {
		err = fmt.Errorf("remove affected error")
		logger.Error("Call mapper.RemoveBalance error",
			zap.Int64("affected", affected),
			zap.Int64("id",
				id),
			zap.Error(err))
		return
	}
	return
}
// UpdateBalance updates item without checking how many rows were affected.
func (p *Dao) UpdateBalance(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.Balance) (err error) {
	if _, err = mapper.UpdateBalance(engine, session, item); err != nil {
		logger.Error("Call mapper.UpdateBalance error", zap.Error(err))
	}
	return
}
// MustUpdateBalance updates item and returns an error unless exactly one row
// was affected.
func (p *Dao) MustUpdateBalance(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.Balance) (err error) {
	res, updateErr := mapper.UpdateBalance(engine, session, item)
	if updateErr != nil {
		logger.Error("Call mapper.UpdateBalance error", zap.Error(updateErr))
		return updateErr
	}
	affected, affErr := res.RowsAffected()
	if affErr != nil {
		logger.Error("Get affected error", zap.Error(affErr))
		return affErr
	}
	if affected == 1 {
		return nil
	}
	err = fmt.Errorf("update affected error")
	logger.Error("Call mapper.UpdateBalance error",
		zap.Int64("affected", affected),
		zap.Int64("item.Id", item.Id),
		zap.Error(err))
	return err
}
// GetBalanceByAddressId fetches the balance row for the given address id; a
// missing row is not an error (item is nil).
func (p *Dao) GetBalanceByAddressId(logger *zap.Logger, session *xorm.Session, addressId int64) (item *entity.Balance, err error) {
	if item, err = mapper.GetBalanceByAddressId(session, addressId); err != nil {
		logger.Error("Call mapper.GetBalanceByAddressId error", zap.Error(err))
	}
	return
}
// MustGetBalanceByAddressId fetches the balance row for the given address id
// and turns a missing row into a BalanceNotFound error.
func (p *Dao) MustGetBalanceByAddressId(logger *zap.Logger, session *xorm.Session, addressId int64) (item *entity.Balance, err error) {
	item, err = p.GetBalanceByAddressId(logger, session, addressId)
	if err != nil || item != nil {
		return
	}
	// No row found: report it as a coded error.
	err = status.Code(codes.BalanceNotFound)
	logger.Error(
		"Get balance error",
		zap.Error(err),
		zap.Int64("addressId", addressId),
	)
	return
}
// UpdateBalanceByAddressId updates item keyed by its address id without
// checking how many rows were affected.
func (p *Dao) UpdateBalanceByAddressId(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.Balance) (err error) {
	if _, err = mapper.UpdateBalanceByAddressId(engine, session, item); err != nil {
		logger.Error("Call mapper.UpdateBalanceByAddressId error", zap.Error(err))
	}
	return
}
// MustUpdateBalanceByAddressId updates item keyed by its address id and
// returns an error unless exactly one row was affected.
func (p *Dao) MustUpdateBalanceByAddressId(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.Balance) (err error) {
	res, updateErr := mapper.UpdateBalanceByAddressId(engine, session, item)
	if updateErr != nil {
		logger.Error("Call mapper.UpdateBalanceByAddressId error", zap.Error(updateErr))
		return updateErr
	}
	affected, affErr := res.RowsAffected()
	if affErr != nil {
		logger.Error("Get affected error", zap.Error(affErr))
		return affErr
	}
	if affected == 1 {
		return nil
	}
	err = fmt.Errorf("update affected error")
	logger.Error("Call mapper.UpdateBalanceByAddressId error",
		zap.Int64("affected", affected),
		zap.Int64("item.AddressId", item.AddressId),
		zap.Error(err))
	return err
}
// RemoveBalanceByAddressId deletes the balance row for the given address id;
// deleting a non-existent row is not an error.
func (p *Dao) RemoveBalanceByAddressId(logger *zap.Logger, session *xorm.Session, addressId int64) (err error) {
	if _, err = mapper.RemoveBalanceByAddressId(session, addressId); err != nil {
		logger.Error("Call mapper.RemoveBalanceByAddressId error", zap.Error(err))
	}
	return
}
// MustRemoveBalanceByAddressId deletes the balance row for the given address
// id and returns an error unless exactly one row was affected.
// Fix: the affected-count error message was copy-pasted from the update path
// ("update affected error") — it now says "remove affected error".
func (p *Dao) MustRemoveBalanceByAddressId(logger *zap.Logger, session *xorm.Session, addressId int64) (err error) {
	res, err := mapper.RemoveBalanceByAddressId(session, addressId)
	if err != nil {
		logger.Error("Call mapper.RemoveBalanceByAddressId error", zap.Error(err))
		return
	}
	affected, err := res.RowsAffected()
	if err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return
	}
	if affected != 1 {
		err = fmt.Errorf("remove affected error")
		logger.Error("Call mapper.RemoveBalanceByAddressId error",
			zap.Int64("affected", affected),
			zap.Int64("addressId",
				addressId),
			zap.Error(err))
		return
	}
	return
}
|
package totp
import (
"encoding/base32"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/authelia/authelia/v4/internal/configuration/schema"
)
// TestTOTPGenerateCustom exercises GenerateCustom across hash algorithms,
// secret sizes, and caller-supplied base32 secrets, including the failure
// path for a secret that is not valid base32.
func TestTOTPGenerateCustom(t *testing.T) {
	// Each case either succeeds (err == "") or must fail with exactly err.
	testCases := []struct {
		desc string
		username, algorithm, secret string
		digits, period, secretSize uint
		err string
	}{
		{
			desc: "ShouldGenerateSHA1",
			username: "john",
			algorithm: "SHA1",
			digits: 6,
			period: 30,
			secretSize: 32,
		},
		{
			desc: "ShouldGenerateLongSecret",
			username: "john",
			algorithm: "SHA1",
			digits: 6,
			period: 30,
			secretSize: 42,
		},
		{
			desc: "ShouldGenerateSHA256",
			username: "john",
			algorithm: "SHA256",
			digits: 6,
			period: 30,
			secretSize: 32,
		},
		{
			desc: "ShouldGenerateSHA512",
			username: "john",
			algorithm: "SHA512",
			digits: 6,
			period: 30,
			secretSize: 32,
		},
		{
			// Caller supplies a valid base32 secret; secretSize is ignored
			// for length checks below in favour of the decoded secret.
			desc: "ShouldGenerateWithSecret",
			username: "john",
			algorithm: "SHA512",
			secret: "ONTGOYLTMZQXGZDBONSGC43EMFZWMZ3BONTWMYLTMRQXGZBSGMYTEMZRMFYXGZDBONSA",
			digits: 6,
			period: 30,
			secretSize: 32,
		},
		{
			// Invalid base32 input must surface the decoder's error verbatim.
			desc: "ShouldGenerateWithBadSecretB32Data",
			username: "john",
			algorithm: "SHA512",
			secret: "@#UNH$IK!J@N#EIKJ@U!NIJKUN@#WIK",
			digits: 6,
			period: 30,
			secretSize: 32,
			err: "totp generate failed: error decoding base32 string: illegal base32 data at input byte 0",
		},
		{
			// A short (but valid) secret with secretSize 0 is still accepted.
			desc: "ShouldGenerateWithBadSecretLength",
			username: "john",
			algorithm: "SHA512",
			secret: "ONTGOYLTMZQXGZD",
			digits: 6,
			period: 30,
			secretSize: 0,
		},
	}
	// Provider defaults; GenerateCustom overrides them per call.
	totp := NewTimeBasedProvider(schema.TOTP{
		Issuer: "Authelia",
		Algorithm: "SHA1",
		Digits: 6,
		Period: 30,
		SecretSize: 32,
	})
	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			c, err := totp.GenerateCustom(tc.username, tc.algorithm, tc.secret, tc.digits, tc.period, tc.secretSize)
			if tc.err == "" {
				assert.NoError(t, err)
				require.NotNil(t, c)
				assert.Equal(t, tc.period, c.Period)
				assert.Equal(t, tc.digits, c.Digits)
				assert.Equal(t, tc.algorithm, c.Algorithm)
				// When the case supplied its own secret, the expected decoded
				// length comes from that secret rather than from secretSize.
				expectedSecretLen := int(tc.secretSize)
				if tc.secret != "" {
					expectedSecretLen = base32.StdEncoding.WithPadding(base32.NoPadding).DecodedLen(len(tc.secret))
				}
				// The generated config's secret must be unpadded base32 that
				// decodes to exactly the expected number of bytes.
				secret := make([]byte, expectedSecretLen)
				n, err := base32.StdEncoding.WithPadding(base32.NoPadding).Decode(secret, c.Secret)
				assert.NoError(t, err)
				assert.Len(t, secret, expectedSecretLen)
				assert.Equal(t, expectedSecretLen, n)
			} else {
				// Failure cases return a nil config and the exact error text.
				assert.Nil(t, c)
				assert.EqualError(t, err, tc.err)
			}
		})
	}
}
// TestTOTPGenerate verifies that Generate honours the provider configuration:
// issuer, digits, period, algorithm, skew, secret size, and a CreatedAt
// timestamp within a second of now.
func TestTOTPGenerate(t *testing.T) {
	skew := uint(2)
	provider := NewTimeBasedProvider(schema.TOTP{
		Issuer:     "Authelia",
		Algorithm:  "SHA256",
		Digits:     8,
		Period:     60,
		Skew:       &skew,
		SecretSize: 32,
	})
	assert.Equal(t, uint(2), provider.skew)

	c, err := provider.Generate("john")
	assert.NoError(t, err)

	assert.Equal(t, "Authelia", c.Issuer)
	assert.Less(t, time.Since(c.CreatedAt), time.Second)
	assert.Greater(t, time.Since(c.CreatedAt), time.Second*-1)
	assert.Equal(t, uint(8), c.Digits)
	assert.Equal(t, uint(60), c.Period)
	assert.Equal(t, "SHA256", c.Algorithm)

	// The secret must be unpadded base32 that decodes to 32 bytes.
	decoded := make([]byte, base32.StdEncoding.WithPadding(base32.NoPadding).DecodedLen(len(c.Secret)))
	_, err = base32.StdEncoding.WithPadding(base32.NoPadding).Decode(decoded, c.Secret)
	assert.NoError(t, err)
	assert.Len(t, decoded, 32)
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/6/22 8:48 上午
# @File : lt_二分_搜索插入位置.go
# @Description :
# @Attention :
*/
package v2
// searchInsert returns the index of target in the sorted slice nums, or the
// index at which target would be inserted to keep nums sorted (the classic
// lower bound). It runs in O(log n) and returns 0 for an empty slice.
func searchInsert(nums []int, target int) int {
	// Guard: the original dereferenced nums[start] unconditionally and
	// panicked on an empty input.
	if len(nums) == 0 {
		return 0
	}
	// Invariant: the answer lies in [lo, hi].
	lo, hi := 0, len(nums)
	for lo < hi {
		mid := lo + (hi-lo)/2
		if nums[mid] < target {
			lo = mid + 1
		} else {
			hi = mid
		}
	}
	return lo
}
|
package azure
// Metadata contains Azure metadata (e.g. for uninstalling the cluster).
type Metadata struct {
	// ARMEndpoint is the Azure Resource Manager endpoint URL
	// (presumably only set for non-public clouds — confirm with callers).
	ARMEndpoint string `json:"armEndpoint"`
	// CloudName identifies which Azure cloud environment the cluster uses.
	CloudName CloudEnvironment `json:"cloudName"`
	// Region is the Azure region the cluster was installed to.
	Region string `json:"region"`
	// ResourceGroupName is the resource group holding the cluster resources.
	ResourceGroupName string `json:"resourceGroupName"`
	// BaseDomainResourceGroupName is the resource group containing the
	// base-domain DNS zone.
	BaseDomainResourceGroupName string `json:"baseDomainResourceGroupName"`
}
|
package graphql_test
import (
"testing"
"github.com/graphql-go/graphql"
"github.com/graphql-go/graphql/gqlerrors"
"github.com/graphql-go/graphql/language/ast"
"github.com/graphql-go/graphql/language/location"
"github.com/graphql-go/graphql/language/parser"
"github.com/graphql-go/graphql/language/source"
"github.com/graphql-go/graphql/testutil"
)
// expectValid parses queryString and asserts that it validates cleanly
// against schema, failing the test on any parse or validation error.
func expectValid(t *testing.T, schema *graphql.Schema, queryString string) {
	// Renamed from "source" to avoid shadowing the imported source package.
	src := source.NewSource(&source.Source{
		Body: []byte(queryString),
		Name: "GraphQL request",
	})

	doc, err := parser.Parse(parser.ParseParams{Source: src})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	result := graphql.ValidateDocument(schema, doc, nil)
	if !result.IsValid || len(result.Errors) > 0 {
		t.Fatalf("Unexpected error: %v", result.Errors)
	}
}
// TestValidator_SupportsFullValidation_ValidatesQueries checks that a
// well-formed query with inline fragments passes full validation against the
// shared test schema.
func TestValidator_SupportsFullValidation_ValidatesQueries(t *testing.T) {
	expectValid(t, testutil.TestSchema, `
      query {
        catOrDog {
          ... on Cat {
            furColor
          }
          ... on Dog {
            isHousetrained
          }
        }
      }
    `)
}
// NOTE: experimental
// TestValidator_SupportsFullValidation_ValidatesUsingACustomTypeInfo checks
// that VisitUsingRules consults the supplied TypeInfo: with a FieldDefFn that
// never resolves a field, every field in an otherwise valid query is reported
// as unknown. Each suggestion names the real field, since it does exist on
// the actual schema.
func TestValidator_SupportsFullValidation_ValidatesUsingACustomTypeInfo(t *testing.T) {
	// This TypeInfo will never return a valid field.
	typeInfo := graphql.NewTypeInfo(&graphql.TypeInfoConfig{
		Schema: testutil.TestSchema,
		FieldDefFn: func(schema *graphql.Schema, parentType graphql.Type, fieldAST *ast.Field) *graphql.FieldDefinition {
			return nil
		},
	})
	// NOTE(review): this local shadows the imported ast package for the rest
	// of the function.
	ast := testutil.TestParse(t, `
      query {
        catOrDog {
          ... on Cat {
            furColor
          }
          ... on Dog {
            isHousetrained
          }
        }
      }
    `)
	errors := graphql.VisitUsingRules(testutil.TestSchema, typeInfo, ast, graphql.SpecifiedRules)
	// Expected locations correspond to the query literal above.
	expectedErrors := []gqlerrors.FormattedError{
		{
			Message: `Cannot query field "catOrDog" on type "QueryRoot". Did you mean "catOrDog"?`,
			Locations: []location.SourceLocation{
				{Line: 3, Column: 9},
			},
		},
		{
			Message: `Cannot query field "furColor" on type "Cat". Did you mean "furColor"?`,
			Locations: []location.SourceLocation{
				{Line: 5, Column: 13},
			},
		},
		{
			Message: `Cannot query field "isHousetrained" on type "Dog". Did you mean "isHousetrained"?`,
			Locations: []location.SourceLocation{
				{Line: 8, Column: 13},
			},
		},
	}
	if !testutil.EqualFormattedErrors(expectedErrors, errors) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expectedErrors, errors))
	}
}
|
package main
import "fmt"
// rect2 is a rectangle with integer width and height.
type rect2 struct {
	width, height int
}

// area returns the rectangle's area.
func (r *rect2) area() int {
	return r.width * r.height
}

// perim returns the rectangle's perimeter.
// Changed to a pointer receiver for consistency: the original mixed a
// pointer receiver on area with a value receiver here on the same type.
func (r *rect2) perim() int {
	return 2*r.width + 2*r.height
}
func main() {
r := rect2{width:10, height:5}
fmt.Println("area ",r.area())
fmt.Println("perim ",r.perim())
rp := &r
fmt.Println("area ",rp.area())
fmt.Println("perim ",rp.perim())
} |
package noopencryptor
// NoopEncryptor is a pass-through encryptor: it performs no cryptography and
// simply converts between the []byte and string representations.
type NoopEncryptor struct{}

// New returns a ready-to-use NoopEncryptor.
func New() NoopEncryptor {
	return NoopEncryptor{}
}

// Encrypt returns the plaintext unchanged, as a string.
func (NoopEncryptor) Encrypt(plaintext []byte) (string, error) {
	return string(plaintext), nil
}

// Decrypt returns the ciphertext unchanged, as a byte slice.
func (NoopEncryptor) Decrypt(ciphertext string) ([]byte, error) {
	return []byte(ciphertext), nil
}
|
package main
import (
	"log"
	"net/http"
	"sync"
)
// main serves /hello on both a plaintext listener (:1024) and a TLS listener
// (:1025), and blocks until both servers have stopped.
func main() {
	http.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {})

	var servers sync.WaitGroup

	servers.Add(1)
	go func() {
		defer servers.Done()
		// ListenAndServe always returns a non-nil error; the original
		// discarded it, silently hiding bind failures.
		if err := http.ListenAndServe(":1024", nil); err != nil {
			log.Println("http server:", err)
		}
	}()

	servers.Add(1)
	go func() {
		defer servers.Done()
		// Likewise for the TLS listener (e.g. missing cert.pem/key.pem).
		if err := http.ListenAndServeTLS(":1025", "cert.pem", "key.pem", nil); err != nil {
			log.Println("https server:", err)
		}
	}()

	servers.Wait()
}
package comment
import (
"github.com/labstack/echo"
// HOFSTADTER_START import
// HOFSTADTER_END import
)
// HOFSTADTER_START const
// HOFSTADTER_END const
// HOFSTADTER_START var
// HOFSTADTER_END var
// HOFSTADTER_START init
// HOFSTADTER_END init
// InitRouter registers the comment CRUD routes on the given echo group.
func InitRouter(G *echo.Group) (err error) {
	// HOFSTADTER_START router-pre
	// HOFSTADTER_END router-pre
	// Fix: the group was declared as commentGroup but used below as
	// commentsGroup, which does not compile; use one name consistently.
	commentsGroup := G.Group("/comments")
	// HOFSTADTER_START router-start
	// HOFSTADTER_END router-start
	// names: server | comment
	// routes NOT SAME NAME
	// methods
	commentsGroup.GET("", Handle_LIST_Comment)
	commentsGroup.POST("", Handle_POST_Comment)
	commentsGroup.GET("/:comment-uuid", Handle_GET_Comment)
	commentsGroup.DELETE("/:comment-uuid", Handle_DELETE_Comment)
	commentsGroup.PUT("/:comment-uuid", Handle_PUT_Comment)
	// HOFSTADTER_START router-end
	// HOFSTADTER_END router-end
	return nil
}
// HOFSTADTER_BELOW
|
package main
import (
"encoding/json"
"log"
"net/http"
"github.com/gorilla/mux"
)
// User mirrors a row of the t_user table: a player id and their score.
type User struct {
	UserID string
	Score int
}
func getUserAPI(w http.ResponseWriter, r *http.Request) {
user, err := getUser(mux.Vars(r)["user"])
if err != nil {
user = &User{
UserID: mux.Vars(r)["user"],
Score: 0,
}
db.Exec("insert into t_user (user_id, score) values (?, ?)", user.UserID, user.Score)
}
err = json.NewEncoder(w).Encode(user)
if err != nil {
log.Println("Failed to encode: ", err)
}
}
// getUser loads the user row for userID from t_user, returning an error when
// no row matches or scanning fails.
func getUser(userID string) (*User, error) {
	row := db.QueryRow("select user_id, score from t_user where user_id = ?", userID)

	var u User
	if err := row.Scan(&u.UserID, &u.Score); err != nil {
		return nil, err
	}

	return &u, nil
}
|
// Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package crypto
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestSignVerify checks that an EC283 signature verifies against the signed
// message and fails against a different message.
func TestSignVerify(t *testing.T) {
	require := require.New(t)

	pub, pri, err := EC283.NewKeyPair()
	require.NoError(err)

	msg := []byte("hello iotex message")
	sig := EC283.Sign(pri, msg)
	require.True(EC283.Verify(pub, msg, sig))

	tampered := []byte("wrong message")
	require.False(EC283.Verify(pub, tampered, sig))
}
// TestPubKeyGeneration checks that NewPubKey derives the same public key that
// NewKeyPair produced, and that signatures verify under the derived key.
func TestPubKeyGeneration(t *testing.T) {
	require := require.New(t)

	expectedPuk, pri, err := EC283.NewKeyPair()
	require.NoError(err)

	actualPuk, err := EC283.NewPubKey(pri)
	require.NoError(err)
	require.Equal(expectedPuk, actualPuk)

	msg := []byte("hello iotex message")
	sig := EC283.Sign(pri, msg)
	require.True(EC283.Verify(actualPuk, msg, sig))

	tampered := []byte("wrong message")
	require.False(EC283.Verify(actualPuk, tampered, sig))
}
|
//go:generate mockgen -source interface.go -destination product_mock.go -package product
package product
import "github.com/markus-azer/products-service/pkg/entity"
// messagesReader is the read side of the messages repository.
// Currently empty; it exists so MessagesRepository mirrors the reader/writer
// split used by the other repositories in this package.
type messagesReader interface {
}

// messagesWriter is the write side of the messages repository.
type messagesWriter interface {
	SendMessage(m *entity.Message)
	SendMessages(messages []*entity.Message)
}

// MessagesRepository combines message reading and writing.
type MessagesRepository interface {
	messagesReader
	messagesWriter
}

// storeReader is the read side of the product store.
type storeReader interface {
	FindOneByID(id entity.ID) (*entity.Product, error)
}

// storeWriter is the write side of the product store.
type storeWriter interface {
	StoreCommand(c *entity.Command) (*entity.ID, error)
	Create(p *entity.Product) (*entity.ID, error)
	UpdateOne(id entity.ID, p *entity.Product, v entity.Version) (int, error)
	UpdateOneP(id entity.ID, p *entity.UpdateProduct, v entity.Version) (int, error)
	DeleteOne(id entity.ID, v entity.Version) (int, error)
}

// StoreRepository combines product store reading and writing.
type StoreRepository interface {
	storeReader
	storeWriter
}

// reader is the read side of the product use case.
// Currently empty; kept for symmetry with writer.
type reader interface {
}

// writer is the write side of the product use case.
type writer interface {
	Create(createProductDTO CreateProductDTO) (*entity.ID, *entity.Version, error)
	UpdateOne(id entity.ID, v int32, updateProductDTO UpdateProductDTO) (*int32, *entity.Error)
	Delete(id entity.ID, version int32) *entity.Error
}

// UseCase combines the product use-case reader and writer.
type UseCase interface {
	reader
	writer
}
|
package repository
import (
"fmt"
"github.com/jmoiron/sqlx"
"github.com/rs/zerolog/log"
"sitemap/models/entity"
"time"
)
// DbCompanyRepo is a sqlx-backed repository over the companies table.
type DbCompanyRepo struct {
	Conn *sqlx.DB
}

// NewSQLCompanyRepo wraps the given database handle in a DbCompanyRepo.
func NewSQLCompanyRepo(Conn *sqlx.DB) *DbCompanyRepo {
	return &DbCompanyRepo{Conn: Conn}
}
// Count returns the number of companies that have either a description or a
// site — i.e. those eligible for the sitemap.
func (l *DbCompanyRepo) Count() (int, error) {
	t := time.Now()
	query := "SELECT COUNT(1) FROM COMPANIES where (companies.description is not null) or (companies.site is not null) "

	var count int
	err := l.Conn.Get(&count, query)
	// time.Since is the idiomatic replacement for time.Now().Sub(t).
	log.Debug().Msg(fmt.Sprintf("query: %s , %d ms", query, time.Since(t).Milliseconds()))
	if err != nil {
		return 0, err
	}

	return count, nil
}
// ObjectsForSitemap returns one page of (id, updated_at) rows for companies
// that have a description or a site, ordered by updated_at.
// page is 1-based; limit is the page size.
func (l *DbCompanyRepo) ObjectsForSitemap(page int, limit int) (*[]entity.ResQuery, error) {
	t := time.Now()
	offset := (page - 1) * limit
	// limit and offset are ints rendered with %d, so this Sprintf cannot
	// inject SQL; bound parameters would still be preferable.
	query := fmt.Sprintf("select id, updated_at from companies where (companies.description is not null) or (companies.site is not null) order by updated_at limit %d OFFSET %d", limit, offset)

	result := &[]entity.ResQuery{}
	err := l.Conn.Select(result, query)
	// time.Since is the idiomatic replacement for time.Now().Sub(t).
	log.Debug().Msg(fmt.Sprintf("query: %s , %d ms", query, time.Since(t).Milliseconds()))
	if err != nil {
		return nil, err
	}

	return result, nil
}
|
package mysql
import "fmt"
// Config holds the settings required to build a MySQL connection string.
type Config struct {
	Host string
	Port int
	User string
	Pass string
	DB   string
}

// DSN formats the configuration as a go-sql-driver/mysql data source name
// with parseTime, interpolateParams and allowNativePasswords enabled.
func (c Config) DSN() string {
	const layout = "%s:%s@tcp(%s:%d)/%s?parseTime=true&interpolateParams=true&allowNativePasswords=true"
	return fmt.Sprintf(layout, c.User, c.Pass, c.Host, c.Port, c.DB)
}
|
package main
import (
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/postgres"
)
// main opens a GORM connection to the local "deneme" Postgres database and
// verifies it with a ping, panicking on any failure.
func main() {
	// NOTE(review): credentials are hard-coded in source; they should come
	// from configuration or the environment.
	db, err := gorm.Open("postgres", "user=postgres password=tp2u6jQbdM dbname=deneme sslmode=disable")
	if err != nil {
		panic(err.Error())
	}
	defer db.Close()
	// db.DB() exposes the underlying *sql.DB for the ping below.
	dbase := db.DB()
	// NOTE(review): the deferred db.Close above likely closes the underlying
	// handle as well, making this second deferred Close redundant — confirm
	// against the gorm version in use.
	defer dbase.Close()
	err = dbase.Ping()
	if err != nil {
		panic(err.Error())
	}
	// Prints "database connection successful" (Turkish).
	println("Database Bağlantısı Başarılı")
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.