text stringlengths 11 4.05M |
|---|
package infrastructure
import (
"cleanarchitecture/adapter/controller"
"cleanarchitecture/adapter/interfaces"
"github.com/jinzhu/gorm"
"github.com/labstack/echo"
"github.com/spf13/viper"
)
// CustomContext wraps echo.Context so handlers can be written against the
// application's own interfaces.IContext abstraction (see the POST helper in
// Router); extra request-scoped behavior can be attached here without
// touching echo itself.
type CustomContext struct {
echo.Context
}
// Router wires up the HTTP server: it installs a middleware that wraps every
// request in a CustomContext, registers the user routes, and then blocks
// serving on the configured address. It panics if the server fails to start.
func Router(dbConn *gorm.DB) {
	srv := echo.New()
	appLogger := &Logger{}

	// Wrap every incoming context so downstream handlers always receive a
	// *CustomContext and can be safely type-asserted below.
	srv.Use(func(next echo.HandlerFunc) echo.HandlerFunc {
		return func(c echo.Context) error {
			return next(&CustomContext{c})
		}
	})

	type ControllerFunc func(c interfaces.IContext) error

	// registerPost adapts a ControllerFunc to an echo handler.
	registerPost := func(path string, handler ControllerFunc) *echo.Route {
		return srv.POST(path, func(c echo.Context) error {
			return handler(c.(*CustomContext))
		})
	}

	userController := controller.NewUserController(dbConn, appLogger)
	registerPost("/users", userController.Create)

	if err := srv.Start(viper.GetString(`server.address`)); err != nil {
		panic(err)
	}
}
|
package gotest
import (
"testing"
)
//单元测试,debug 的时候注意打好断点,F11
// Test_Division verifies the happy path of Division: 6 / 2 must yield 3 with
// a nil error. (When debugging, set breakpoints and step with F11.)
func Test_Division(t *testing.T) {
	if i, e := Division(6, 2); i != 3 || e != nil {
		t.Error("Division除法测试不通过!")
	} else {
		// Log some details for the passing case.
		t.Log("Division 测试通过!") // fixed typo: was "Dvicesion"
		t.Log("i:", i)
	}
}
//性能测试
// Benchmark_Division measures the cost of a single Division call.
func Benchmark_Division(b *testing.B) {
	for n := 0; n < b.N; n++ {
		_, _ = Division(4, 5)
	}
}
// Benchmark_TimeConsuming shows how to exclude setup work from the measured
// time: stop the timer, perform initialization, then restart it so setup cost
// does not pollute the benchmark of the function itself.
func Benchmark_TimeConsuming(b *testing.B) {
	b.StopTimer() // pause timing while doing setup
	// ... expensive initialization (reading files, DB connections, ...) here
	b.StartTimer() // resume timing
	// alternatively, reset the timer in one call:
	// b.ResetTimer()
	for n := 0; n < b.N; n++ {
		_, _ = Division(4, 5)
	}
}
|
package handlers
import (
"fmt"
"net/http"
"net/http/httputil"
)
type Headers struct {
}
func (p *Headers) ServeHTTP(w http.ResponseWriter, r *http.Request) {
dump, err := httputil.DumpRequest(r, true)
if err != nil {
w.Write([]byte(err.Error()))
}
fmt.Fprintf(w, "%q", dump)
// w.Write(dump)
}
|
package main
import "fmt"
func main() {
a:="store in a"
//b:="store in b" 没引用会一直报错
fmt.Println(a)
}
|
package main
import (
"fmt"
"log"
"net"
"os"
"path/filepath"
"runtime/debug"
"syscall"
"time"
"golang.org/x/sys/unix"
)
// Unix domain socket paths used for the hot-restart handshake between the old
// and the new process. The relative names are resolved next to the executable
// in init so both binaries agree on the paths.
// NOTE(review): ReconfigureDomainSocket and TransferListenDomainSocket share
// the same file name "listener.sock" — confirm this is intentional and the
// two handshakes never run concurrently on the same path.
var (
ReconfigureDomainSocket = "listener.sock"
TransferListenDomainSocket = "listener.sock"
TransferConnDomainSocket = "conn.sock"
)
// init anchors the socket paths in the executable's directory.
func init() {
absPath, _ := filepath.Abs(os.Args[0])
execPath := filepath.Dir(absPath)
ReconfigureDomainSocket = filepath.Join(execPath, ReconfigureDomainSocket)
TransferListenDomainSocket = filepath.Join(execPath, TransferListenDomainSocket)
TransferConnDomainSocket = filepath.Join(execPath, TransferConnDomainSocket)
}
// ReconfigureHandler runs in the OLD process: it listens on the reconfigure
// socket, and every time a freshly started process connects it acks with one
// byte and then begins handing over its listeners via reconfigure().
// Runs until Accept fails; panics are caught and logged.
func ReconfigureHandler() {
defer func() {
if r := recover(); r != nil {
log.Printf("[ERROR] [transfer] [ReconfigureHandler] panic %v\n%s\n", r, string(debug.Stack()))
// todo restart goroutine
}
}()
// Remove a stale socket file left over from a previous run, then listen.
syscall.Unlink(ReconfigureDomainSocket)
l, err := net.Listen("unix", ReconfigureDomainSocket)
if err != nil {
log.Printf("[ERROR] [transfer] [ReconfigureHandler] net listen error: %v\n", err)
return
}
defer l.Close()
log.Printf("[INFO] [transfer] [ReconfigureHandler] start\n")
ul := l.(*net.UnixListener)
for {
// Each accepted connection is a new process probing for an old one.
uc, err := ul.AcceptUnix()
if err != nil {
log.Printf("[ERROR] [transfer] [ReconfigureHandler] Accept Unix Connnection error :%v\n", err)
return
}
log.Printf("[INFO] [transfer] [ReconfigureHandler] Accept new process coming\n")
// Ack with a single byte so the new process knows we exist.
_, err = uc.Write([]byte{0})
if err != nil {
log.Printf("[ERROR] [transfer] [ReconfigureHandler] ack to new proces error: %v\n", err)
continue
}
uc.Close()
// Hand over the listeners and eventually exit this process.
reconfigure()
}
}
func reconfigure() {
// todo set process stat,
// transfer listen fd
var listenSockConn net.Conn
var err error
var n int
var buf [1]byte
if listenSockConn, err = sendInheritListeners(); err != nil {
return
}
// Wait new Process ack
listenSockConn.SetReadDeadline(time.Now().Add(10 * time.Minute))
n, err = listenSockConn.Read(buf[:])
if n != 1 {
log.Printf("[ERROR] [transfer] [reconfigure] new process start failed\n")
return
}
// Wait for new mosn start
time.Sleep(3 * time.Second)
// Stop accepting requests
StopAccept()
// Wait for all connections to be finished
WaitConnectionsDone(30 * time.Second)
log.Printf("[INFO] [transfer] [reconfigure] new process started, old process exit!\n")
os.Exit(0)
}
// sendInheritListeners passes the fds of all active listeners to the new
// process over TransferListenDomainSocket using SCM_RIGHTS, and returns the
// unix connection so the caller can wait for the new process's ack on it.
func sendInheritListeners() (net.Conn, error) {
	lf, err := connhandler.listListenerFiles()
	if err != nil {
		return nil, fmt.Errorf("ListListenersFile() error: %v\n", err)
	}
	var files []*os.File
	files = append(files, lf...)
	fds := make([]int, len(files))
	for i, f := range files {
		fds[i] = int(f.Fd())
		// Keep the dup'ed files open until the fds have been sent.
		defer f.Close()
	}
	// Retry for up to ~10s: the new process may not have created the
	// transfer socket yet.
	var unixConn net.Conn
	for i := 0; i < 10; i++ {
		unixConn, err = net.DialTimeout("unix", TransferListenDomainSocket, 1*time.Second)
		if err == nil {
			break
		}
		time.Sleep(1 * time.Second)
	}
	if err != nil {
		log.Printf("[ERROR] [transfer] [sendInheritListeners] Dial unix failed %v\n", err)
		return nil, err
	}
	uc := unixConn.(*net.UnixConn)
	buf := make([]byte, 1)
	rights := syscall.UnixRights(fds...)
	n, oobn, err := uc.WriteMsgUnix(buf, rights, nil)
	if err != nil {
		log.Printf("[ERROR] [transfer] [sendInheritListeners] WriteMsgUnix error: %v\n", err)
		return nil, err
	}
	if n != len(buf) || oobn != len(rights) {
		log.Printf("[ERROR] [transfer] [sendInheritListeners] WriteMsgUnix = %d, %d; want 1, %d\n", n, oobn, len(rights))
		// Bug fix: err is nil on this path; return an explicit error
		// instead of a nil one.
		return nil, fmt.Errorf("WriteMsgUnix short write: %d, %d; want 1, %d", n, oobn, len(rights))
	}
	return uc, nil
}
// GetInheritListeners runs in the NEW process: it receives the listener fds
// from the old process over TransferListenDomainSocket and rebuilds
// net.Listeners from them. It returns (nil, nil, nil) when there is no old
// process to inherit from.
func GetInheritListeners() ([]net.Listener, net.Conn, error) {
	if !isReconfigure() { // check whether an old process exists
		return nil, nil, nil
	}
	syscall.Unlink(TransferListenDomainSocket)
	l, err := net.Listen("unix", TransferListenDomainSocket)
	if err != nil {
		log.Printf("[ERROR] InheritListeners net listen error: %v", err)
		return nil, nil, err
	}
	defer l.Close()
	log.Printf("[INFO] Get InheritListeners start")
	ul := l.(*net.UnixListener)
	// Give the old process 10 seconds to connect and send the fds.
	ul.SetDeadline(time.Now().Add(time.Second * 10))
	uc, err := ul.AcceptUnix()
	if err != nil {
		log.Printf("[ERROR] InheritListeners Accept error :%v", err)
		return nil, nil, err
	}
	log.Printf("[INFO] Get InheritListeners Accept")
	buf := make([]byte, 1)
	oob := make([]byte, 1024)
	_, oobn, _, _, err := uc.ReadMsgUnix(buf, oob)
	if err != nil {
		return nil, nil, err
	}
	scms, err := unix.ParseSocketControlMessage(oob[0:oobn])
	if err != nil {
		log.Printf("[ERROR] ParseSocketControlMessage: %v", err)
		return nil, nil, err
	}
	if len(scms) != 1 {
		log.Printf("[ERROR] expected 1 SocketControlMessage; got scms = %#v", scms)
		// Bug fix: err is nil here; return an explicit error.
		return nil, nil, fmt.Errorf("expected 1 SocketControlMessage; got %d", len(scms))
	}
	gotFds, err := unix.ParseUnixRights(&scms[0])
	if err != nil {
		log.Printf("[ERROR] unix.ParseUnixRights: %v", err)
		return nil, nil, err
	}
	var listeners []net.Listener
	for i := 0; i < len(gotFds); i++ {
		fd := uintptr(gotFds[i])
		file := os.NewFile(fd, "")
		if file == nil {
			log.Printf("[ERROR] create new file from fd %d failed", fd)
			// Bug fix: err is nil here; return an explicit error.
			return nil, nil, fmt.Errorf("create new file from fd %d failed", fd)
		}
		// net.FileListener dups the fd, so the file can be closed when
		// this function returns.
		defer file.Close()
		fileListener, err := net.FileListener(file)
		if err != nil {
			log.Printf("[ERROR] recover listener from fd %d failed: %s", fd, err)
			return nil, nil, err
		}
		// for tcp or unix listener
		listeners = append(listeners, fileListener)
	}
	return listeners, uc, nil
}
// isReconfigure probes the old process's reconfigure socket. A successful
// dial followed by a one-byte ack means a live old process exists and a
// hot-restart handover should be performed.
func isReconfigure() bool {
	conn, err := net.DialTimeout("unix", ReconfigureDomainSocket, 1*time.Second)
	if err != nil {
		log.Printf("[INFO] [transfer] [isReconfigure] not reconfigure: %v\n", err)
		return false
	}
	defer conn.Close()
	ack := make([]byte, 1)
	n, _ := conn.(*net.UnixConn).Read(ack)
	return n == 1
}
|
// Copyright 2018 xgfone
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"net/http"
"github.com/xgfone/ship"
)
// setupRouter builds the ship router: a default "/router" endpoint plus two
// virtual hosts that answer the same path with their own bodies.
func setupRouter() *ship.Ship {
	root := ship.New()
	root.Route("/router").GET(func(c *ship.Context) error {
		return c.String(200, "default")
	})

	vh1 := root.VHost("host1.example.com")
	vh1.Route("/router").GET(func(c *ship.Context) error {
		return c.String(200, "vhost1")
	})

	vh2 := root.VHost("host2.example.com")
	vh2.Route("/router").GET(func(c *ship.Context) error {
		return c.String(200, "vhost2")
	})
	return root
}
// main serves the router on :8080. The error from ListenAndServe was
// previously discarded; it now aborts with a panic so startup failures such
// as a busy port are no longer silent.
func main() {
	router := setupRouter()
	if err := http.ListenAndServe(":8080", router); err != nil {
		panic(err)
	}
}
|
package gohub_test
import (
"http"
"url"
. "launchpad.net/gocheck"
"fmt"
"os"
"testing"
"time"
)
// Test hooks gocheck's suite runner into the standard testing package.
// NOTE(review): this file uses pre-Go1 era imports ("http", "url") and raw
// int64 nanosecond timeouts; it predates time.Duration.
func Test(t *testing.T) {
TestingT(t)
}
// HTTPSuite is a gocheck suite backed by the shared fake HTTP server below.
type HTTPSuite struct {}
// Shared fake server; 5e9 ns == 5 seconds.
var testServer = NewTestHTTPServer("http://localhost:4444", 5e9)
// SetUpSuite starts the fake server once for the whole suite.
func (s *HTTPSuite) SetUpSuite(c *C) {
testServer.Start()
}
// TearDownTest drops any requests left over from the previous test so they
// cannot leak into the next one.
func (s *HTTPSuite) TearDownTest(c *C) {
testServer.FlushRequests()
}
// TestHTTPServer is a scriptable in-process HTTP server for tests: its
// handler pushes received requests onto `request` and pops canned replies
// from `response`.
type TestHTTPServer struct {
URL string
Timeout int64
started bool
request chan *http.Request
response chan *testResponse
pending chan bool
}
// testResponse is one canned reply: status code, optional headers, and body.
type testResponse struct {
Status int
Headers map[string]string
Body string
}
// NewTestHTTPServer returns an unstarted server for the given URL with the
// given timeout in nanoseconds.
func NewTestHTTPServer(url string, timeout int64) *TestHTTPServer {
return &TestHTTPServer{URL: url, Timeout: timeout}
}
// Start launches the fake server (idempotent): it allocates the queues,
// serves on the URL's host in a goroutine, primes one canned 202 response,
// and then polls with GET until the server answers — consuming that probe's
// request and response so tests start from a clean slate.
func (s *TestHTTPServer) Start() {
if s.started {
return
}
s.started = true
s.request = make(chan *http.Request, 64)
s.response = make(chan *testResponse, 64)
s.pending = make(chan bool, 64)
url,_ := url.Parse(s.URL)
go http.ListenAndServe(url.Host, s)
// The probe loop below will consume this canned response.
s.PrepareResponse(202, nil, "Nothing.")
fmt.Fprintf(os.Stderr, "\nWaiting for the fake server to be up...")
for {
resp, err := http.Get(s.URL)
if err == nil && resp.StatusCode == 202 {
break
}
// 1e8 ns == 100ms between probes.
time.Sleep(1e8)
}
fmt.Fprintf(os.Stderr, "Done\n")
// Drain the probe request recorded by ServeHTTP.
s.WaitRequest()
}
// FlushRequests drains the request channel without blocking, discarding
// every buffered request.
func (s *TestHTTPServer) FlushRequests() {
	for {
		select {
		case <-s.request:
			// discard and keep draining
		default:
			return
		}
	}
}
// ServeHTTP records the incoming request and replies with the next prepared
// response; if the test provides none within Timeout ns, it answers 500 and
// logs to stderr.
func (s *TestHTTPServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
s.request <- req
var resp *testResponse
select {
case resp = <-s.response:
case <-time.After(s.Timeout):
fmt.Fprintf(os.Stderr, "ERROR: Timeout waiting for the test to provide any response\n")
resp = &testResponse{500, nil, ""}
}
if resp.Headers != nil {
h := w.Header()
for k,v := range resp.Headers {
h.Set(k, v)
}
}
// Status 0 means "use the default 200".
if resp.Status != 0 {
w.WriteHeader(resp.Status)
}
w.Write([]byte(resp.Body))
}
// WaitRequest blocks until the server has received a request (returned with
// its form already parsed) or panics after Timeout ns.
func (s *TestHTTPServer) WaitRequest() *http.Request {
select {
case req := <-s.request:
req.ParseForm()
return req
case <-time.After(s.Timeout):
panic("timeout waiting for request")
}
// Unreachable; kept for old compilers that required a final statement.
panic("unreached")
}
// PrepareResponse queues one canned response for a future request.
func (s *TestHTTPServer) PrepareResponse(status int, headers map[string]string, body string) {
s.response <- &testResponse{status, headers, body}
}
|
package cryptpass
import (
	"bufio"
	"crypto/aes"
	"crypto/cipher"
	"encoding/base64"
	"errors"
	"io"
	"os"
)
// Internal base64-encoded constants XORed with the key-file contents to
// recover the real AES key and IV (see getBytes); neither half alone is
// the secret.
const (
ikey = "WueIirKvsQpSc6x3ZSHd5g=="
iiv = "1PNr7RSgUy2ITtD/iEJGOg=="
)
// Package-level configuration and errors.
var (
	// PassPath is the location of the key file read by readKeyIV.
	PassPath = "/etc/cryptpass.key"
	// ErrLengthNotMatch reports an XOR over slices of unequal length.
	ErrLengthNotMatch = errors.New("length not match")
)

// Lazily-loaded crypto state and the decrypt cache.
var (
	masterKey    []byte            // AES key, loaded from PassPath on first use
	masterIV     []byte            // CFB IV, loaded from PassPath on first use
	cachedPasswd map[string]string // cache used by AutoPass/SafePass
)

func init() {
	cachedPasswd = map[string]string{}
}

// xorBytes XORs two equal-length byte slices element by element; it fails
// with ErrLengthNotMatch when the lengths differ.
func xorBytes(b1 []byte, b2 []byte) ([]byte, error) {
	if len(b1) != len(b2) {
		return nil, ErrLengthNotMatch
	}
	out := make([]byte, len(b1))
	for i, b := range b1 {
		out[i] = b ^ b2[i]
	}
	return out, nil
}
// getBytes reads one base64 line from reader and XORs its decoded bytes with
// the decoded `internal` constant, recovering one secret (key or IV) from
// the key file. The base64 decoder ignores the trailing newline.
//
// Bug fix: a final line without a trailing '\n' used to be rejected because
// ReadString returns io.EOF together with the data; that case is now
// accepted.
func getBytes(reader *bufio.Reader, internal string) ([]byte, error) {
	ibyte, err := base64.StdEncoding.DecodeString(internal)
	if err != nil {
		return nil, err
	}
	line, err := reader.ReadString('\n')
	if err != nil && (err != io.EOF || line == "") {
		return nil, err
	}
	fbyte, err := base64.StdEncoding.DecodeString(line)
	if err != nil {
		return nil, err
	}
	tbyte, err := xorBytes(ibyte, fbyte)
	if err != nil {
		return nil, err
	}
	return tbyte, nil
}
// readKeyIV loads masterKey and masterIV from the two base64 lines of the
// file at PassPath, de-obfuscating each with the built-in ikey/iiv constants
// (first line -> key, second line -> IV).
func readKeyIV() error {
file, err := os.Open(PassPath)
if err != nil {
return err
}
defer file.Close()
reader := bufio.NewReader(file)
masterKey, err = getBytes(reader, ikey)
if err != nil {
return err
}
masterIV, err = getBytes(reader, iiv)
if err != nil {
return err
}
return nil
}
// EncryptPass encrypts s with AES-CFB using the key/IV from PassPath
// (lazily loaded) and returns the ciphertext base64-encoded.
// NOTE(review): the same static IV is reused for every password, which
// weakens CFB; acceptable only for obfuscation-level secrecy — confirm the
// threat model.
func EncryptPass(s string) (string, error) {
if masterKey == nil || masterIV == nil {
err := readKeyIV()
if err != nil {
return "", err
}
}
c, err := aes.NewCipher(masterKey)
if err != nil {
return "", err
}
stream := cipher.NewCFBEncrypter(c, masterIV)
buf := make([]byte, len(s))
stream.XORKeyStream(buf, []byte(s))
return base64.StdEncoding.EncodeToString(buf), nil
}
// DecryptPass is the inverse of EncryptPass: it base64-decodes s and
// decrypts it with the same AES-CFB key/IV, lazily loading them first.
func DecryptPass(s string) (string, error) {
if masterKey == nil || masterIV == nil {
err := readKeyIV()
if err != nil {
return "", err
}
}
src, err := base64.StdEncoding.DecodeString(s)
if err != nil {
return "", err
}
c, err := aes.NewCipher(masterKey)
if err != nil {
return "", err
}
stream := cipher.NewCFBDecrypter(c, masterIV)
buf := make([]byte, len(src))
stream.XORKeyStream(buf, src)
return string(buf), nil
}
// AutoPass will try to decrypt s to the real password, using the cache to
// speed up repeated lookups.
// CAUTION: if decryption fails it returns the original string, and that
// original string is not added to the cache.
func AutoPass(s string) string {
	if cached, ok := cachedPasswd[s]; ok {
		return cached
	}
	plain, err := DecryptPass(s)
	if err != nil {
		return s
	}
	cachedPasswd[s] = plain
	return plain
}

// SafePass behaves like AutoPass but only treats strings carrying the ".[~"
// marker prefix as encrypted; anything else is returned verbatim.
func SafePass(s string) string {
	if len(s) < 4 || s[:3] != ".[~" {
		return s
	}
	key := s[3:]
	if cached, ok := cachedPasswd[key]; ok {
		return cached
	}
	plain, err := DecryptPass(key)
	if err != nil {
		return s
	}
	cachedPasswd[key] = plain
	return plain
}
|
package model
import (
"encoding/json"
"github.com/caos/logging"
es_models "github.com/caos/zitadel/internal/eventstore/models"
"github.com/caos/zitadel/internal/project/model"
)
// Application is the eventstore representation of a project application;
// State and OIDCConfig are excluded from the event payload ("-").
type Application struct {
es_models.ObjectRoot
AppID string `json:"appId"`
State int32 `json:"-"`
Name string `json:"name,omitempty"`
Type int32 `json:"appType,omitempty"`
OIDCConfig *OIDCConfig `json:"-"`
}
// ApplicationID is the minimal payload identifying an application in events.
type ApplicationID struct {
es_models.ObjectRoot
AppID string `json:"appId"`
}
// GetApplication returns the index and pointer of the application with the
// given id, or (-1, nil) when it is not present.
func GetApplication(apps []*Application, id string) (int, *Application) {
	for idx := range apps {
		if apps[idx].AppID == id {
			return idx, apps[idx]
		}
	}
	return -1, nil
}
// Changes computes the event payload for an application update: the appId is
// always present, plus the name when it was actually modified and non-empty.
func (a *Application) Changes(changed *Application) map[string]interface{} {
	diff := map[string]interface{}{"appId": a.AppID}
	if changed.Name != "" && changed.Name != a.Name {
		diff["name"] = changed.Name
	}
	return diff
}
// AppsToModel converts a slice of eventstore applications to domain models.
func AppsToModel(apps []*Application) []*model.Application {
	out := make([]*model.Application, len(apps))
	for i := range apps {
		out[i] = AppToModel(apps[i])
	}
	return out
}

// AppsFromModel converts a slice of domain models to eventstore applications.
func AppsFromModel(apps []*model.Application) []*Application {
	out := make([]*Application, len(apps))
	for i := range apps {
		out[i] = AppFromModel(apps[i])
	}
	return out
}

// AppFromModel maps one domain application onto its eventstore
// representation, including the optional OIDC config.
func AppFromModel(app *model.Application) *Application {
	out := &Application{
		ObjectRoot: app.ObjectRoot,
		AppID:      app.AppID,
		Name:       app.Name,
		State:      int32(app.State),
		Type:       int32(app.Type),
	}
	if cfg := app.OIDCConfig; cfg != nil {
		out.OIDCConfig = OIDCConfigFromModel(cfg)
	}
	return out
}

// AppToModel maps one eventstore application onto the domain model,
// including the optional OIDC config.
func AppToModel(app *Application) *model.Application {
	out := &model.Application{
		ObjectRoot: app.ObjectRoot,
		AppID:      app.AppID,
		Name:       app.Name,
		State:      model.AppState(app.State),
		Type:       model.AppType(app.Type),
	}
	if cfg := app.OIDCConfig; cfg != nil {
		out.OIDCConfig = OIDCConfigToModel(cfg)
	}
	return out
}
// appendAddAppEvent applies an application-added event: it decodes the event
// payload into a fresh Application, stamps its creation date, and appends it
// to the project.
func (p *Project) appendAddAppEvent(event *es_models.Event) error {
app := new(Application)
err := app.setData(event)
if err != nil {
return err
}
app.ObjectRoot.CreationDate = event.CreationDate
p.Applications = append(p.Applications, app)
return nil
}
// appendChangeAppEvent applies an application-changed event: it decodes the
// payload once to discover the appId, then merges the payload into the
// matching existing application.
func (p *Project) appendChangeAppEvent(event *es_models.Event) error {
	app := new(Application)
	if err := app.setData(event); err != nil {
		return err
	}
	if i, a := GetApplication(p.Applications, app.AppID); a != nil {
		// Bug fix: the error from this setData call was previously
		// silently discarded.
		return p.Applications[i].setData(event)
	}
	return nil
}
// appendRemoveAppEvent applies an application-removed event by swap-deleting
// the matching application (element order is not preserved).
func (p *Project) appendRemoveAppEvent(event *es_models.Event) error {
	app := new(Application)
	if err := app.setData(event); err != nil {
		return err
	}
	if idx, found := GetApplication(p.Applications, app.AppID); found != nil {
		last := len(p.Applications) - 1
		p.Applications[idx] = p.Applications[last]
		p.Applications[last] = nil // release the pointer for GC
		p.Applications = p.Applications[:last]
	}
	return nil
}

// appendAppStateEvent applies a state-transition event (activate/deactivate)
// to the matching application.
func (p *Project) appendAppStateEvent(event *es_models.Event, state model.AppState) error {
	app := new(Application)
	if err := app.setData(event); err != nil {
		return err
	}
	if idx, found := GetApplication(p.Applications, app.AppID); found != nil {
		found.State = int32(state)
		p.Applications[idx] = found
	}
	return nil
}
// setData records the event on the ObjectRoot and unmarshals the event's
// JSON payload into the application itself.
func (a *Application) setData(event *es_models.Event) error {
a.ObjectRoot.AppendEvent(event)
if err := json.Unmarshal(event.Data, a); err != nil {
logging.Log("EVEN-8die3").WithError(err).Error("could not unmarshal event data")
return err
}
return nil
}
|
package eventchannel
import (
"bytes"
"compress/gzip"
"sync"
"time"
"github.com/benbjohnson/clock"
"github.com/golang/glog"
)
// Metrics tracks the current batch: bufferSize counts the uncompressed bytes
// written so far (see buffer), eventCount the number of events.
type Metrics struct {
bufferSize int64
eventCount int64
}
// Limit is the flush policy: flush when either size/count threshold is hit
// or maxTime elapses, whichever comes first.
type Limit struct {
maxByteSize int64
maxEventCount int64
maxTime time.Duration
}
// EventChannel batches events into a gzip buffer and ships each finished
// batch through `send`. The gzip writer and buffer are guarded by
// muxGzBuffer; ch carries incoming events, endCh signals shutdown.
type EventChannel struct {
gz *gzip.Writer
buff *bytes.Buffer
ch chan []byte
endCh chan int
metrics Metrics
muxGzBuffer sync.RWMutex
send Sender
limit Limit
clock clock.Clock
}
// NewEventChannel builds a channel with the given sender, clock and flush
// limits, and starts its background event loop (see start).
func NewEventChannel(sender Sender, clock clock.Clock, maxByteSize, maxEventCount int64, maxTime time.Duration) *EventChannel {
b := &bytes.Buffer{}
gzw := gzip.NewWriter(b)
c := EventChannel{
gz: gzw,
buff: b,
ch: make(chan []byte),
endCh: make(chan int),
metrics: Metrics{},
send: sender,
limit: Limit{maxByteSize, maxEventCount, maxTime},
clock: clock,
}
go c.start()
return &c
}
// Push hands one event to the background loop; blocks until it is accepted.
func (c *EventChannel) Push(event []byte) {
c.ch <- event
}
// Close signals the background loop to flush the pending batch and stop.
func (c *EventChannel) Close() {
c.endCh <- 1
}
// buffer compresses one event into the gzip stream under the write lock and
// updates the metrics; a compression failure drops the event with a warning.
// Note: bufferSize tracks the UNcompressed length.
func (c *EventChannel) buffer(event []byte) {
c.muxGzBuffer.Lock()
defer c.muxGzBuffer.Unlock()
_, err := c.gz.Write(event)
if err != nil {
glog.Warning("[pubstack] fail to compress, skip the event")
return
}
c.metrics.eventCount++
c.metrics.bufferSize += int64(len(event))
}
// isBufferFull reports whether either flush threshold has been reached.
func (c *EventChannel) isBufferFull() bool {
c.muxGzBuffer.RLock()
defer c.muxGzBuffer.RUnlock()
return c.metrics.eventCount >= c.limit.maxEventCount || c.metrics.bufferSize >= c.limit.maxByteSize
}
// reset re-arms the gzip writer over the (then emptied) buffer and zeroes
// the metrics; callers must hold the write lock.
func (c *EventChannel) reset() {
// reset buffer
c.gz.Reset(c.buff)
c.buff.Reset()
// reset metrics
c.metrics.eventCount = 0
c.metrics.bufferSize = 0
}
// flush finalizes the gzip stream, copies the compressed batch out of the
// shared buffer, resets all state (deferred), and ships the copy
// asynchronously so the event loop never blocks on the sender. A no-op when
// the batch is empty.
func (c *EventChannel) flush() {
c.muxGzBuffer.Lock()
defer c.muxGzBuffer.Unlock()
if c.metrics.eventCount == 0 || c.metrics.bufferSize == 0 {
return
}
// reset buffers and writers
defer c.reset()
// finish writing gzip header
err := c.gz.Close()
if err != nil {
glog.Warning("[pubstack] fail to close gzipped buffer")
return
}
// copy the current buffer to send the payload in a new thread
payload := make([]byte, c.buff.Len())
_, err = c.buff.Read(payload)
if err != nil {
glog.Warning("[pubstack] fail to copy the buffer")
return
}
// send events (async)
go c.send(payload)
}
// start is the channel's event loop: it buffers incoming events, flushes
// when the batch limits are hit or the ticker fires, and performs a final
// flush on Close.
func (c *EventChannel) start() {
	ticker := c.clock.Ticker(c.limit.maxTime)
	defer ticker.Stop() // bug fix: the ticker previously leaked after Close
	for {
		select {
		case <-c.endCh:
			c.flush()
			return
		// event is received
		case event := <-c.ch:
			c.buffer(event)
			if c.isBufferFull() {
				c.flush()
			}
		// time between 2 flushes has passed
		case <-ticker.C:
			c.flush()
		}
	}
}
|
package main
import (
"fmt"
"sort"
)
// Greedy "luck balance"-style solver: n contests each have a luck value and
// an importance flag; at most k important contests may be lost. Losing adds
// luck, winning subtracts it, and unimportant contests are always lost. The
// k most lucky important contests are lost (added), the rest won
// (subtracted), and all minor luck is added.
// NOTE(review): the two fmt.Println debug lines look like leftovers — they
// pollute the expected judge output; confirm whether they should stay.
func main() {
var n, k int
fmt.Scan(&n, &k)
var major []int
var minor []int
for i := 0; i < n; i++ {
var luck, importance int
fmt.Scan(&luck, &importance)
if importance == 1 {
major = append(major, luck)
} else {
minor = append(minor, luck)
}
}
// Ascending sort: the top k entries (largest luck) are the ones to lose.
sort.Ints(major)
fmt.Println("major =", major)
fmt.Println("minor =", minor)
maxluck := 0
for i := len(major) - 1; i >= 0; i-- {
if i >= len(major)-k {
maxluck += major[i]
} else {
maxluck -= major[i]
}
}
for i := 0; i < len(minor); i++ {
maxluck += minor[i]
}
fmt.Println(maxluck)
}
|
package builder
import (
"strings"
"github.com/chenwj93/utils"
)
// Where accumulates SQL conditions: `where` collects " and ..." fragments,
// sqlRet the rendered clause, paramWhere the bind values in order.
// NOTE(review): paramIn appears unused in this view — confirm against the
// rest of the package.
type Where struct {
where string
sqlRet string
paramWhere []interface{}
paramIn []interface{}
}
// GetWhere renders the accumulated conditions into sqlRet, stripping the
// leading " and" (4 characters) from the first fragment.
func (t *Where) GetWhere() *Where {
if strings.TrimSpace(t.where) != utils.EMPTY_STRING {
t.sqlRet += " where " + t.where[4:]
}
return t
}
// GetParamWhere returns the bind values in the order they were appended.
func (t *Where) GetParamWhere() []interface{} {
return t.paramWhere
}
// ToString returns the rendered SQL and clears sqlRet (one-shot: a second
// call returns an empty string until GetWhere runs again).
func (t *Where) ToString() string {
var s = t.sqlRet
t.sqlRet = utils.EMPTY_STRING
return s
}
// ifCheckNil on the builders below: when supplied and true, the condition is
// skipped for empty values; omitted means false (always add for a non-empty
// key).

// Eq appends "and key = ?".
func (t *Where) Eq(key string, v interface{}, ifCheckNil ...bool) *Where {
	if !privateCheckParam(key, v, ifCheckNil) {
		return t
	}
	t.where += " and " + key + " = ?"
	t.paramWhere = append(t.paramWhere, v)
	return t
}

// Like appends "and key like ?" with v wrapped in '%' on both sides.
func (t *Where) Like(key string, v interface{}, ifCheckNil ...bool) *Where {
	if !privateCheckParam(key, v, ifCheckNil) {
		return t
	}
	t.where += " and " + key + " like ?"
	t.paramWhere = append(t.paramWhere, utils.Wrap(v, "%"))
	return t
}

// LL appends "and key like ?" matching a suffix ('%' prepended).
func (t *Where) LL(key string, v interface{}, ifCheckNil ...bool) *Where {
	if !privateCheckParam(key, v, ifCheckNil) {
		return t
	}
	t.where += " and " + key + " like ?"
	t.paramWhere = append(t.paramWhere, "%"+utils.ParseString(v))
	return t
}

// RL appends "and key like ?" matching a prefix ('%' appended).
func (t *Where) RL(key string, v interface{}, ifCheckNil ...bool) *Where {
	if !privateCheckParam(key, v, ifCheckNil) {
		return t
	}
	t.where += " and " + key + " like ?"
	t.paramWhere = append(t.paramWhere, utils.ParseString(v)+"%")
	return t
}

// Gt appends "and key > ?".
func (t *Where) Gt(key string, v interface{}, ifCheckNil ...bool) *Where {
	if !privateCheckParam(key, v, ifCheckNil) {
		return t
	}
	t.where += " and " + key + " > ?"
	t.paramWhere = append(t.paramWhere, v)
	return t
}

// GtAndEq appends "and key >= ?".
func (t *Where) GtAndEq(key string, v interface{}, ifCheckNil ...bool) *Where {
	if !privateCheckParam(key, v, ifCheckNil) {
		return t
	}
	t.where += " and " + key + " >= ?"
	t.paramWhere = append(t.paramWhere, v)
	return t
}

// Lt appends "and key < ?".
func (t *Where) Lt(key string, v interface{}, ifCheckNil ...bool) *Where {
	if !privateCheckParam(key, v, ifCheckNil) {
		return t
	}
	t.where += " and " + key + " < ?"
	t.paramWhere = append(t.paramWhere, v)
	return t
}

// LtAndEq appends "and key <= ?".
func (t *Where) LtAndEq(key string, v interface{}, ifCheckNil ...bool) *Where {
	if !privateCheckParam(key, v, ifCheckNil) {
		return t
	}
	t.where += " and " + key + " <= ?"
	t.paramWhere = append(t.paramWhere, v)
	return t
}
// In appends "and col in (?, ?, ...)" with one placeholder per value. With
// no values the always-false guard "and 1 = 0" is emitted instead, matching
// the SQL semantics of an empty IN list.
func (t *Where) In(col string, args ...interface{}) *Where {
	if len(args) == 0 {
		t.where += " and 1 = 0 "
		return t
	}
	placeholders := strings.Repeat("?, ", len(args))
	placeholders = placeholders[:len(placeholders)-2] // drop trailing ", "
	t.where += " and " + col + " in (" + placeholders + ")"
	t.paramWhere = append(t.paramWhere, args...)
	return t
}

// Custom appends a raw SQL fragment with its bind values; empty fragments
// are ignored.
func (t *Where) Custom(s string, v ...interface{}) *Where {
	if s == "" {
		return t
	}
	t.where += " " + s
	t.paramWhere = append(t.paramWhere, v...)
	return t
}
// privateCheckParam reports whether a condition should be added: the key
// must be non-empty, and when the optional nil check is requested, the value
// must be non-empty as well.
func privateCheckParam(key string, v interface{}, ifCheckNil []bool) bool {
	if key == "" {
		return false
	}
	if len(ifCheckNil) == 0 || !ifCheckNil[0] {
		return true
	}
	return !utils.IsEmpty(v)
}
|
package goz
import (
"reflect"
"testing"
)
// TestNewRoute checks that a fresh Route carries the given ID and properly
// typed children/handlers/variableMap maps.
func TestNewRoute(t *testing.T) {
	const wantID = "/hello"
	r := NewRoute(wantID)
	if r.ID != wantID {
		t.Errorf("Expected route.ID to be %s, but got %s instead.", wantID, r.ID)
	}
	checks := []struct{ want, got string }{
		{"map[string]*goz.Route", reflect.TypeOf(r.children).String()},
		{"map[string]goz.GoAppHandlerFunc", reflect.TypeOf(r.handlers).String()},
		{"map[string]map[string]string", reflect.TypeOf(r.variableMap).String()},
	}
	for _, c := range checks {
		if c.want != c.got {
			t.Errorf("Expected route.children to be %s, but got %s instead.", c.want, c.got)
		}
	}
}

// TestRouteAddStaticRoute checks that AddRoute creates a child carrying the
// new ID and links it under the parent.
func TestRouteAddStaticRoute(t *testing.T) {
	parent := NewRoute("")
	const id = "test"
	child := parent.AddRoute(id)
	if child.ID != id {
		t.Errorf("Expected new route.ID to be %s, but got %s instead.", id, child.ID)
	}
	if parent.children[id] != child {
		t.Errorf("Expected route to have newRoute as child")
	}
}

// TestRouteAddDuplicateStaticRoute checks that adding the same static route
// twice yields nil the second time.
func TestRouteAddDuplicateStaticRoute(t *testing.T) {
	parent := NewRoute("")
	const id = "test"
	parent.AddRoute(id)
	var want *Route
	if got := parent.AddRoute(id); got != want {
		t.Errorf("Adding duplicate route should return %s, but got %s instead.", want, got)
	}
}

// TestRouteAddDynamicRoute checks that a ":param"-style route is normalized
// to the ":var" ID and linked under that key.
func TestRouteAddDynamicRoute(t *testing.T) {
	parent := NewRoute("")
	child := parent.AddRoute(":test")
	const want = ":var"
	if child.ID != want {
		t.Errorf("Expected new route.ID to be %s, but got %s instead.", want, child.ID)
	}
	if parent.children[want] != child {
		t.Errorf("Expected route to have newRoute as child")
	}
}

// TestRouteAddDuplicateDynamicRoute mirrors the static duplicate case for
// dynamic routes.
func TestRouteAddDuplicateDynamicRoute(t *testing.T) {
	parent := NewRoute("")
	parent.AddRoute(":test")
	var want *Route
	if got := parent.AddRoute(":test"); got != want {
		t.Errorf("Adding duplicate route should return %s, but got %s instead.", want, got)
	}
}

// TestRouteGetValidRoute checks that GetRoute finds a previously added child.
func TestRouteGetValidRoute(t *testing.T) {
	parent := NewRoute("/hello")
	child := parent.AddRoute("world")
	if parent.GetRoute("world") != child {
		t.Errorf("GetRoute did not return the correct route")
	}
}

// TestRouteGetInvalidRoute checks that GetRoute yields nil for an unknown ID.
func TestRouteGetInvalidRoute(t *testing.T) {
	parent := NewRoute("/hello")
	parent.AddRoute("world")
	var want *Route
	if parent.GetRoute("shouldnotexist") != want {
		t.Errorf("GetRoute should return empty *Route when route does not exist")
	}
}
// TestRouteSethandler checks that Handler returns the function stored with
// SetHandler. Function values are not comparable with ==, and comparing
// reflect.Value structs directly is unreliable; compare the underlying code
// pointers via reflect.Value.Pointer instead.
func TestRouteSethandler(t *testing.T) {
	routeID := "/hello"
	route := NewRoute(routeID)
	handlerMethod := "GET"
	var expectedHandler GoAppHandlerFunc
	expectedHandler = func(response ResponseWriter, request *Request) error {
		return nil
	}
	route.SetHandler(handlerMethod, expectedHandler)
	handler := route.Handler(handlerMethod)
	if reflect.ValueOf(expectedHandler).Pointer() != reflect.ValueOf(handler).Pointer() {
		t.Errorf("Handler Getter did not return what was set SetHandler")
	}
}
// TestRouteGetHandlerDoesNotExist checks that Handler is nil for a method
// that was never registered.
func TestRouteGetHandlerDoesNotExist(t *testing.T) {
	r := NewRoute("/hello")
	if h := r.Handler("GET"); h != nil {
		t.Errorf("Route should return nil handler when handler does not exist.")
	}
}
// TestRouteSetVariableMap checks that VariableMap returns the exact map
// stored with SetVariableMap. Maps are not comparable with ==, and comparing
// reflect.Value structs directly is unreliable; compare map identity via
// reflect.Value.Pointer instead.
func TestRouteSetVariableMap(t *testing.T) {
	routeID := "/hello"
	route := NewRoute(routeID)
	handlerMethod := "GET"
	expectedVariableMap := make(map[string]string)
	route.SetVariableMap(handlerMethod, expectedVariableMap)
	variableMap := route.VariableMap(handlerMethod)
	if reflect.ValueOf(expectedVariableMap).Pointer() != reflect.ValueOf(variableMap).Pointer() {
		t.Errorf("VariableMap Getter did not return what was set by SetVariableMap")
	}
}
// TestRouteGetVariableMapDoesNotExist checks that VariableMap is nil for a
// method that was never registered.
func TestRouteGetVariableMapDoesNotExist(t *testing.T) {
	r := NewRoute("/hello")
	if m := r.VariableMap("GET"); m != nil {
		t.Errorf("Route should return nil variableMap when variableMap does not exist.")
	}
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package iw
import (
"context"
"sync"
"time"
"chromiumos/tast/dut"
"chromiumos/tast/errors"
"chromiumos/tast/testing"
)
// EventLogger captures events on a WiFi interface with "iw event".
type EventLogger struct {
// lock guards events; done is closed when the collector goroutine ends.
lock sync.RWMutex
done chan struct{}
events []*Event
watcher *EventWatcher
}
// NewEventLogger creates and starts a new EventLogger.
// Note that the logger may not be ready right after this function returned due to race condition,
// and it probably won't be fixed. Choose other solution if possible.
func NewEventLogger(ctx context.Context, dut *dut.DUT, ops ...EventWatcherOption) (*EventLogger, error) {
e := &EventLogger{
done: make(chan struct{}),
}
ew, err := NewEventWatcher(ctx, dut, ops...)
if err != nil {
return nil, errors.Wrap(err, "failed to create an event watcher")
}
e.watcher = ew
// Collector goroutine: drains the watcher into e.events until the
// watcher is closed (Stop) or fails; done is closed on exit so Stop
// can wait for it.
go func() {
defer close(e.done)
for {
ev, err := e.watcher.Wait(ctx)
if err != nil {
if err != ErrWatcherClosed {
testing.ContextLog(ctx, "Unexpected error from EventWatcher: ", err)
}
return
}
// Append under the write lock in a closure so the unlock is
// guaranteed before the next Wait.
func() {
e.lock.Lock()
defer e.lock.Unlock()
e.events = append(e.events, ev)
}()
}
}()
return e, nil
}
// Stop the EventLogger. It closes the underlying watcher and then blocks
// until the collector goroutine has drained and exited.
func (e *EventLogger) Stop() error {
e.watcher.Stop()
<-e.done // Wait for the bg routine to end.
return nil
}
// Events returns the captured events till now.
// Caller should not modify the returned slice.
func (e *EventLogger) Events() []*Event {
e.lock.RLock()
defer e.lock.RUnlock()
// The logger only appends so it's ok to just return the slice.
return e.events
}
// EventsByType returns the captured events whose type matches any of the
// given EventTypes, preserving capture order.
func (e *EventLogger) EventsByType(ets ...EventType) []*Event {
	e.lock.RLock()
	defer e.lock.RUnlock()
	var matched []*Event
	for _, ev := range e.events {
		for _, want := range ets {
			if ev.Type != want {
				continue
			}
			matched = append(matched, ev)
			break
		}
	}
	return matched
}

// DisconnectTime finds the first disconnect event and returns its timestamp.
func (e *EventLogger) DisconnectTime() (time.Time, error) {
	if evs := e.EventsByType(EventTypeDisconnect); len(evs) > 0 {
		return evs[0].Timestamp, nil
	}
	return time.Time{}, errors.New("disconnect event not found")
}

// ConnectedTime finds the first connected event and returns its timestamp.
func (e *EventLogger) ConnectedTime() (time.Time, error) {
	if evs := e.EventsByType(EventTypeConnected); len(evs) > 0 {
		return evs[0].Timestamp, nil
	}
	return time.Time{}, errors.New("connected event not found")
}
|
// This file demonstrate how to sort values in Go.
package tips
import (
"sort"
"testing"
)
// person is a minimal record used to demonstrate sort.Interface.
type person struct {
	name string
	age  int
}

// byAge orders a []person by ascending age via sort.Interface.
type byAge []person

func (a byAge) Len() int           { return len(a) }
func (a byAge) Less(i, j int) bool { return a[i].age < a[j].age }
func (a byAge) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// TestSortByAge sorts three people by age and verifies the resulting order.
func TestSortByAge(t *testing.T) {
	people := []person{
		{"Bob", 13},
		{"Alice", 8},
		{"Charlie", 20},
	}
	sort.Sort(byAge(people))
	if len(people) != 3 {
		t.Errorf("Unexpected length of people: %d", len(people))
		return
	}
	// Confirm the content of people is sorted.
	want := []person{{"Alice", 8}, {"Bob", 13}, {"Charlie", 20}}
	for i := range want {
		if people[i] != want[i] {
			t.Errorf("people is not sorted correctly: %v", people)
			return
		}
	}
}
|
package main
/*
type cal struct{}
func (cal) hello() {
fmt.Println("Hello word")
}
*/
// main registers the API routes behind auth middleware and starts the server
// on port 3000.
// NOTE(review): "/valida" is registered twice (with and without middleware);
// confirm which registration is intended to win.
// NOTE(review): the bare `CheckAut` on /login looks like a typo for a
// CheckAuth-related handler — verify against the server package.
func main() {
server := NewServer(":3000")
server.Handle("POST", "/login", CheckAut)
server.Handle("POST", "/valida", ValidaToken)
server.Handle("POST", "/valida", server.AddMiddleware(prueba, CheckAuth()))
server.Handle("GET", "/Empleados", server.AddMiddleware(GetEmpleados, CheckAuth()))
server.Handle("POST", "/Empleado", server.AddMiddleware(RegistroEmpleado, CheckAuth()))
server.Handle("POST", "/borrarEmpleado", server.AddMiddleware(BorrarEmpleado, CheckAuth()))
server.Handle("POST", "/getEmpleado", server.AddMiddleware(GetEmpleado, CheckAuth()))
server.Handle("POST", "/registroEntrada", server.AddMiddleware(RegistraEntrada, CheckAuth()))
server.Handle("POST", "/check", server.AddMiddleware(checo, CheckAuth()))
server.Listen()
}
|
package desync
import "fmt"
// Store is the minimal read interface of a chunk store: it fetches a chunk's
// bytes by ID and, via fmt.Stringer, describes itself for logging.
type Store interface {
// GetChunk returns the data of the chunk with the given ID.
GetChunk(id ChunkID) ([]byte, error)
fmt.Stringer
}
|
package shoppingCartController
import (
"github.com/gin-gonic/gin"
"hd-mall-ed/packages/client/models/shoppingCartModel"
"hd-mall-ed/packages/common/pkg/app"
"hd-mall-ed/packages/common/pkg/e"
"strconv"
)
// GetList returns the current user's shopping-cart entries filtered by the
// "type" query parameter.
// NOTE(review): the Atoi error is ignored, so a missing or non-numeric type
// silently becomes 0 — confirm whether that default is intended.
func GetList(c *gin.Context) {
api := app.ApiFunction{C: c}
// The type parameter selects which cart entries to list.
model := &shoppingCartModel.ShoppingCart{}
typeString := c.Query("type")
typeNumber, _ := strconv.Atoi(typeString)
model.Type = typeNumber
model.UserId = uint(api.GetUserId())
list, err := model.GetList()
if err != nil {
api.ResFail(e.Fail)
return
}
api.Response(list)
}
// GetDetailByID loads one shopping-cart entry by its primary key from the
// "id" query parameter. It responds with e.Fail when the parameter is
// missing, not a number, or the lookup fails.
func GetDetailByID(c *gin.Context) {
	api := app.ApiFunction{C: c}
	model := &shoppingCartModel.ShoppingCart{}
	idString := c.DefaultQuery("id", "")
	if idString == "" {
		api.ResFail(e.Fail)
		return
	}
	// Bug fix: a non-numeric id was previously silently treated as 0.
	id, err := strconv.Atoi(idString)
	if err != nil {
		api.ResFail(e.Fail)
		return
	}
	model.ID = uint(id)
	if err := model.GetDetailById(); err != nil {
		api.ResFail(e.Fail)
		return
	}
	api.Response(model)
}
// GetDetailByTempOrderId returns all cart entries attached to the temporary
// order identified by the "temp_order_id" query parameter.
func GetDetailByTempOrderId(c *gin.Context) {
	api := app.ApiFunction{C: c}
	tempOrderIdString := c.DefaultQuery("temp_order_id", "")
	if tempOrderIdString == "" {
		api.ResFail(e.Fail)
		return
	}
	// Reject non-numeric ids instead of silently treating them as 0
	// (the Atoi error used to be discarded).
	id, err := strconv.Atoi(tempOrderIdString)
	if err != nil {
		api.ResFail(e.Fail)
		return
	}
	model := &shoppingCartModel.ShoppingCart{}
	model.TempOrderId = uint(id)
	list, err := model.GetListByTempOrderId()
	if err != nil {
		api.ResFail(e.Fail)
		return
	}
	api.Response(list)
}
|
package word_test
import (
"testing"
"github.com/edipermadi/arabic/pkg/word"
"github.com/edipermadi/unicode"
"github.com/stretchr/testify/require"
)
// TestWord_Parse checks that Cleanup strips the vowel marks from كَتَبَ,
// leaving only the three base letters kaf, teh, beh.
func TestWord_Parse(t *testing.T) {
	src := word.New("كَتَبَ")
	dst := src.Cleanup()
	require.Equal(t, []rune{unicode.ArabicLetterKaf, unicode.ArabicLetterTeh, unicode.ArabicLetterBeh}, dst.Runes())
	t.Logf("cleaned = %s", dst.String())
}
|
package wechat_brain
import (
"bytes"
"io/ioutil"
"log"
"net/http"
"os/exec"
"strconv"
"time"
"github.com/coreos/goproxy"
)
var (
	// _spider is the package-wide MITM proxy instance.
	_spider = newSpider()
	// Mode is the run mode passed to Run.
	Mode int
	// AutoMatic enables automatic answer clicking (via adb) when set to 1.
	AutoMatic int
)
// spider wraps the goproxy MITM server used to intercept quiz traffic.
type spider struct {
	proxy *goproxy.ProxyHttpServer
}
// Run stores the run parameters in package globals, installs the proxy
// hooks and starts serving on the given port (blocks until a fatal error).
func Run(port string, mode, automatic int) {
	Mode = mode
	AutoMatic = automatic
	_spider.Init()
	_spider.Run(port)
}
// Close releases the in-memory question database.
func Close() {
	memoryDb.Close()
}
// newSpider builds a proxy server that man-in-the-middles every CONNECT.
func newSpider() *spider {
	proxy := goproxy.NewProxyHttpServer()
	proxy.OnRequest().HandleConnect(goproxy.AlwaysMitm)
	return &spider{proxy: proxy}
}
// Run serves the proxy on the given port; it blocks forever and terminates
// the whole process via log.Fatal if the listener fails.
func (s *spider) Run(port string) {
	log.Println("server will at port:" + port)
	log.Fatal(http.ListenAndServe(":"+port, s.proxy))
}
// Init installs the proxy's request and response hooks.
// Requests:
//   - quiz-fetch and answer-choose requests have their bodies copied to the
//     local handlers, then re-wrapped so the upstream request is unchanged;
//   - any request to abc.com is answered locally with the proxy CA
//     certificate as a file download (so the device can install it).
// Responses:
//   - quiz-fetch responses are rewritten by handleQuestionResp and, when
//     AutoMatic == 1, trigger an automatic answer click;
//   - choose responses are copied to handleChooseResponse;
//   - fightResult responses trigger the "next match" click when AutoMatic == 1.
func (s *spider) Init() {
	requestHandleFunc := func(request *http.Request, ctx *goproxy.ProxyCtx) (req *http.Request, resp *http.Response) {
		req = request
		if ctx.Req.URL.Path == `/question/bat/findQuiz` || ctx.Req.URL.Path == `/question/fight/findQuiz` {
			// read the body, then restore it so the upstream still receives it
			bs, _ := ioutil.ReadAll(req.Body)
			req.Body = ioutil.NopCloser(bytes.NewReader(bs))
			handleQuestionReq(bs)
		} else if ctx.Req.URL.Path == `/question/bat/choose` || ctx.Req.URL.Path == `/question/fight/choose` {
			bs, _ := ioutil.ReadAll(req.Body)
			req.Body = ioutil.NopCloser(bytes.NewReader(bs))
			handleChooseReq(bs)
		} else if ctx.Req.URL.Host == `abc.com` {
			// serve the CA certificate directly instead of proxying upstream
			resp = new(http.Response)
			resp.StatusCode = 200
			resp.Header = make(http.Header)
			resp.Header.Add("Content-Disposition", "attachment; filename=ca.crt")
			resp.Header.Add("Content-Type", "application/octet-stream")
			resp.Body = ioutil.NopCloser(bytes.NewReader(goproxy.CA_CERT))
		}
		return
	}
	responseHandleFunc := func(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
		if resp == nil {
			return resp
		}
		if ctx.Req.URL.Path == "/question/bat/findQuiz" || ctx.Req.URL.Path == "/question/fight/findQuiz" {
			bs, _ := ioutil.ReadAll(resp.Body)
			bsNew, ansPos := handleQuestionResp(bs)
			// the (possibly modified) body is forwarded to the client
			resp.Body = ioutil.NopCloser(bytes.NewReader(bsNew))
			if AutoMatic == 1 {
				go clickProcess(ansPos)
			} // click answer
		} else if ctx.Req.URL.Path == "/question/bat/choose" || ctx.Req.URL.Path == "/question/fight/choose" {
			bs, _ := ioutil.ReadAll(resp.Body)
			resp.Body = ioutil.NopCloser(bytes.NewReader(bs))
			go handleChooseResponse(bs)
		} else if ctx.Req.URL.Path == "/question/bat/fightResult" || ctx.Req.URL.Path == "/question/fight/fightResult" {
			if AutoMatic == 1 {
				go clickProcess(-1)
			} // go to next match
		}
		return resp
	}
	s.proxy.OnResponse().DoFunc(responseHandleFunc)
	s.proxy.OnRequest().DoFunc(requestHandleFunc)
}
// clickProcess drives the Android UI through adb.  For ansPos >= 0 it taps
// the answer item at that position; for a negative value it taps "continue"
// and then the bottom entry of the ranked-match list to start a new match.
// The pixel coordinates are hard-coded for one particular screen size.
func clickProcess(ansPos int) {
	var screanCenterX = 550    // x coordinate of the screen centre
	var firstItemY = 1280      // y coordinate of the centre of the first answer item
	var qualifyingItemY = 2000 // y coordinate of the last entry in the ranked-match list
	if ansPos >= 0 {
		log.Printf("【点击】正在点击选项:%d", ansPos)
		time.Sleep(time.Millisecond * 3800) // wait for the answer screen to settle
		// assumes 200px item spacing and that ansPos is 1-based — TODO confirm
		go clickAction(screanCenterX, firstItemY+200*(ansPos-1)) // process click
	} else {
		// go to next match
		log.Printf("【点击】将点击继续挑战按钮...")
		time.Sleep(time.Millisecond * 7500)
		go clickAction(screanCenterX, firstItemY+400) // the "continue" button sits at the third item slot
		log.Printf("【点击】将点击排位列表底部一项,进行比赛匹配...")
		time.Sleep(time.Millisecond * 2000)
		go clickAction(screanCenterX, qualifyingItemY)
	}
}
// clickAction taps the screen at (posX, posY) through adb.  A zero-length
// swipe is used as a tap.  The process is aborted if adb is unreachable.
func clickAction(posX int, posY int) {
	x, y := strconv.Itoa(posX), strconv.Itoa(posY)
	if _, err := exec.Command("adb", "shell", "input", "swipe", x, y, x, y).Output(); err != nil {
		log.Fatal("error: check adb connection.")
	}
}
// orPanic aborts by panicking when err is non-nil; a nil err is a no-op.
func orPanic(err error) {
	if err == nil {
		return
	}
	panic(err)
}
|
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package iouringfs
import (
"fmt"
"unsafe"
"gvisor.dev/gvisor/pkg/atomicbitops"
)
// atomicUint32AtOffset reinterprets the 4 bytes of buf starting at offset as
// an atomic uint32.  It panics if the 4-byte window would overrun the slice
// or if offset is not 4-byte aligned.  The returned pointer aliases buf.
func atomicUint32AtOffset(buf []byte, offset int) *atomicbitops.Uint32 {
	const sizeOfUint32 int = 4
	// Bounds check (also rejects negative offsets).
	if offset+sizeOfUint32 > len(buf) || offset < 0 {
		panic(fmt.Sprintf("cast at offset %d for slice of len %d would result in overrun", offset, len(buf)))
	}
	// Alignment check: an unaligned atomic access would be invalid.
	if offset%sizeOfUint32 != 0 {
		panic(fmt.Sprintf("cast at offset %d would produce unaligned pointer", offset))
	}
	// Safe: bounds and alignment were verified above.
	return (*atomicbitops.Uint32)(unsafe.Pointer(&buf[offset]))
}
|
package c78
//给定一组不含重复元素的整数数组 nums,返回该数组所有可能的子集(幂集)。
//
// 说明:解集不能包含重复的子集。
//
// 示例:
//
// 输入: nums = [1,2,3]
//输出:
//[
// [3],
// [1],
// [2],
// [1,2,3],
// [1,3],
// [2,3],
// [1,2],
// []
//]
// Related Topics 位运算 数组 回溯算法
//leetcode submit region begin(Prohibit modification and deletion)
/*
Approach: iterative subset expansion.
Start with the empty set; for each element of nums, extend every subset
collected so far with that element.  Each new subset is copied into fresh
storage so later appends cannot alias and overwrite earlier results.
*/
// subsets returns the power set of nums (nums contains no duplicates).
//
// BUG FIX: the previous version appended TWO variants per existing subset
// ([...res, num] and [num, ...res]), which produced duplicate subsets —
// e.g. nums=[1] yielded [[], [1], [1]] and nums of length n yielded 3^n
// entries instead of 2^n.  It also risked slice-aliasing corruption via
// append(res, ...) on shared backing arrays.
func subsets(nums []int) [][]int {
	result := make([][]int, 0, 1)
	result = append(result, []int{})
	for _, num := range nums {
		// range evaluates result once here, so the subsets appended inside
		// the inner loop are not revisited during this pass.
		for _, res := range result {
			sub := make([]int, len(res), len(res)+1)
			copy(sub, res)
			result = append(result, append(sub, num))
		}
	}
	return result
}
// subsets2 returns the power set of nums.  For each element, every subset
// gathered so far is duplicated with the element prepended; fresh storage is
// allocated for each new subset, so no backing arrays are shared.
func subsets2(nums []int) [][]int {
	res := [][]int{{}}
	for i := 0; i < len(nums); i++ {
		// snapshot the length so newly appended subsets are not revisited
		n := len(res)
		for j := 0; j < n; j++ {
			sub := make([]int, 0, len(res[j])+1)
			sub = append(sub, nums[i])
			sub = append(sub, res[j]...)
			res = append(res, sub)
		}
	}
	return res
}
//leetcode submit region end(Prohibit modification and deletion)
|
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package transactionrecord_test
import (
"crypto/rand"
"os"
"testing"
"golang.org/x/crypto/ed25519"
"github.com/bitmark-inc/bitmarkd/account"
"github.com/bitmark-inc/bitmarkd/merkle"
"github.com/bitmark-inc/bitmarkd/transactionrecord"
"github.com/bitmark-inc/bitmarkd/util"
"github.com/bitmark-inc/logger"
)
// removeFiles deletes every file created by a test run (currently only the
// log file that setup's logger.Initialise writes).
func removeFiles() {
	os.RemoveAll("test.log")
}
// setup configures logging for a test: it clears any stale log file and
// initialises the logger to write ./test.log (size 50000, 10 rotations).
// NOTE(review): any return value of logger.Initialise is ignored here —
// confirm that is acceptable for tests.
func setup(t *testing.T) {
	removeFiles()
	logger.Initialise(logger.Configuration{
		Directory: ".",
		File:      "test.log",
		Size:      50000,
		Count:     10,
	})
}
// teardown shuts the logger down and removes the log file it wrote.
func teardown(t *testing.T) {
	logger.Finalise()
	removeFiles()
}
// TestGenerateKeypair is a helper disguised as a test: flip `generate` to
// true to print a fresh ed25519 key pair (via deliberately failing
// assertions) for pasting into the fixtures below.  With generate == false
// it does nothing.
func TestGenerateKeypair(t *testing.T) {
	generate := false
	// generate = true // (uncomment to get a new key pair)
	if generate {
		// display key pair and fail the test
		// use the displayed values to modify data below
		publicKey, privateKey, err := ed25519.GenerateKey(rand.Reader)
		if nil != err {
			t.Errorf("key pair generation error: %s", err)
			return
		}
		t.Errorf("*** GENERATED:\n%s", util.FormatBytes("publicKey", publicKey))
		t.Errorf("*** GENERATED:\n%s", util.FormatBytes("privateKey", privateKey))
		return
	}
}
// keyPair holds a raw ed25519 key pair used as a test fixture.
type keyPair struct {
	publicKey  []byte // 32-byte ed25519 public key
	privateKey []byte // 64-byte ed25519 private key (seed + public half)
}
// public/private keys from above generate
// (in each fixture the last 32 bytes of privateKey repeat publicKey, the
// standard ed25519 private-key layout)
var proofedBy = keyPair{
	publicKey: []byte{
		0x55, 0xb2, 0x98, 0x88, 0x17, 0xf7, 0xea, 0xec,
		0x37, 0x74, 0x1b, 0x82, 0x44, 0x71, 0x63, 0xca,
		0xaa, 0x5a, 0x9d, 0xb2, 0xb6, 0xf0, 0xce, 0x72,
		0x26, 0x26, 0x33, 0x8e, 0x5e, 0x3f, 0xd7, 0xf7,
	},
	privateKey: []byte{
		0x95, 0xb5, 0xa8, 0x0b, 0x4c, 0xdb, 0xe6, 0x1c,
		0x0f, 0x3f, 0x72, 0xcc, 0x15, 0x2d, 0x4a, 0x4f,
		0x29, 0xbc, 0xfd, 0x39, 0xc9, 0xa6, 0x7e, 0x2c,
		0x7b, 0xc6, 0xe0, 0xe1, 0x4e, 0xc7, 0xc7, 0xba,
		0x55, 0xb2, 0x98, 0x88, 0x17, 0xf7, 0xea, 0xec,
		0x37, 0x74, 0x1b, 0x82, 0x44, 0x71, 0x63, 0xca,
		0xaa, 0x5a, 0x9d, 0xb2, 0xb6, 0xf0, 0xce, 0x72,
		0x26, 0x26, 0x33, 0x8e, 0x5e, 0x3f, 0xd7, 0xf7,
	},
}
// registrant key pair fixture
var registrant = keyPair{
	publicKey: []byte{
		0x7a, 0x81, 0x92, 0x56, 0x5e, 0x6c, 0xa2, 0x35,
		0x80, 0xe1, 0x81, 0x59, 0xef, 0x30, 0x73, 0xf6,
		0xe2, 0xfb, 0x8e, 0x7e, 0x9d, 0x31, 0x49, 0x7e,
		0x79, 0xd7, 0x73, 0x1b, 0xa3, 0x74, 0x11, 0x01,
	},
	privateKey: []byte{
		0x66, 0xf5, 0x28, 0xd0, 0x2a, 0x64, 0x97, 0x3a,
		0x2d, 0xa6, 0x5d, 0xb0, 0x53, 0xea, 0xd0, 0xfd,
		0x94, 0xca, 0x93, 0xeb, 0x9f, 0x74, 0x02, 0x3e,
		0xbe, 0xdb, 0x2e, 0x57, 0xb2, 0x79, 0xfd, 0xf3,
		0x7a, 0x81, 0x92, 0x56, 0x5e, 0x6c, 0xa2, 0x35,
		0x80, 0xe1, 0x81, 0x59, 0xef, 0x30, 0x73, 0xf6,
		0xe2, 0xfb, 0x8e, 0x7e, 0x9d, 0x31, 0x49, 0x7e,
		0x79, 0xd7, 0x73, 0x1b, 0xa3, 0x74, 0x11, 0x01,
	},
}
// issuer key pair fixture
var issuer = keyPair{
	publicKey: []byte{
		0x9f, 0xc4, 0x86, 0xa2, 0x53, 0x4f, 0x17, 0xe3,
		0x67, 0x07, 0xfa, 0x4b, 0x95, 0x3e, 0x3b, 0x34,
		0x00, 0xe2, 0x72, 0x9f, 0x65, 0x61, 0x16, 0xdd,
		0x7b, 0x01, 0x8d, 0xf3, 0x46, 0x98, 0xbd, 0xc2,
	},
	privateKey: []byte{
		0xf3, 0xf7, 0xa1, 0xfc, 0x33, 0x10, 0x71, 0xc2,
		0xb1, 0xcb, 0xbe, 0x4f, 0x3a, 0xee, 0x23, 0x5a,
		0xae, 0xcc, 0xd8, 0x5d, 0x2a, 0x80, 0x4c, 0x44,
		0xb5, 0xc6, 0x03, 0xb4, 0xca, 0x4d, 0x9e, 0xc0,
		0x9f, 0xc4, 0x86, 0xa2, 0x53, 0x4f, 0x17, 0xe3,
		0x67, 0x07, 0xfa, 0x4b, 0x95, 0x3e, 0x3b, 0x34,
		0x00, 0xe2, 0x72, 0x9f, 0x65, 0x61, 0x16, 0xdd,
		0x7b, 0x01, 0x8d, 0xf3, 0x46, 0x98, 0xbd, 0xc2,
	},
}
// first owner key pair fixture
var ownerOne = keyPair{
	publicKey: []byte{
		0x27, 0x64, 0x0e, 0x4a, 0xab, 0x92, 0xd8, 0x7b,
		0x4a, 0x6a, 0x2f, 0x30, 0xb8, 0x81, 0xf4, 0x49,
		0x29, 0xf8, 0x66, 0x04, 0x3a, 0x84, 0x1c, 0x38,
		0x14, 0xb1, 0x66, 0xb8, 0x89, 0x44, 0xb0, 0x92,
	},
	privateKey: []byte{
		0xc7, 0xae, 0x9f, 0x22, 0x32, 0x0e, 0xda, 0x65,
		0x02, 0x89, 0xf2, 0x64, 0x7b, 0xc3, 0xa4, 0x4f,
		0xfa, 0xe0, 0x55, 0x79, 0xcb, 0x6a, 0x42, 0x20,
		0x90, 0xb4, 0x59, 0xb3, 0x17, 0xed, 0xf4, 0xa1,
		0x27, 0x64, 0x0e, 0x4a, 0xab, 0x92, 0xd8, 0x7b,
		0x4a, 0x6a, 0x2f, 0x30, 0xb8, 0x81, 0xf4, 0x49,
		0x29, 0xf8, 0x66, 0x04, 0x3a, 0x84, 0x1c, 0x38,
		0x14, 0xb1, 0x66, 0xb8, 0x89, 0x44, 0xb0, 0x92,
	},
}
// second owner key pair fixture
var ownerTwo = keyPair{
	publicKey: []byte{
		0xa1, 0x36, 0x32, 0xd5, 0x42, 0x5a, 0xed, 0x3a,
		0x6b, 0x62, 0xe2, 0xbb, 0x6d, 0xe4, 0xc9, 0x59,
		0x48, 0x41, 0xc1, 0x5b, 0x70, 0x15, 0x69, 0xec,
		0x99, 0x99, 0xdc, 0x20, 0x1c, 0x35, 0xf7, 0xb3,
	},
	privateKey: []byte{
		0x8f, 0x83, 0x3e, 0x58, 0x30, 0xde, 0x63, 0x77,
		0x89, 0x4a, 0x8d, 0xf2, 0xd4, 0x4b, 0x17, 0x88,
		0x39, 0x1d, 0xcd, 0xb8, 0xfa, 0x57, 0x22, 0x73,
		0xd6, 0x2e, 0x9f, 0xcb, 0x37, 0x20, 0x2a, 0xb9,
		0xa1, 0x36, 0x32, 0xd5, 0x42, 0x5a, 0xed, 0x3a,
		0x6b, 0x62, 0xe2, 0xbb, 0x6d, 0xe4, 0xc9, 0x59,
		0x48, 0x41, 0xc1, 0x5b, 0x70, 0x15, 0x69, 0xec,
		0x99, 0x99, 0xdc, 0x20, 0x1c, 0x35, 0xf7, 0xb3,
	},
}
// only used for public key since the private key is currently unknown
var theZeroKey = keyPair{
	publicKey: []byte{
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	},
	privateKey: []byte{
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	},
}
// makeAccount wraps a raw public key in a testnet ed25519 account.
func makeAccount(publicKey []byte) *account.Account {
	ed := &account.ED25519Account{
		Test:      true,
		PublicKey: publicKey,
	}
	return &account.Account{AccountInterface: ed}
}
// asset id is converted from little endian by fmt.Sscan
// but merkle digests are big endian so provide a little endian routine
// merkleDigestFromLE parses little-endian hex text directly into link.
func merkleDigestFromLE(s string, link *merkle.Digest) error {
	// convert little endian hex text into a digest
	return link.UnmarshalText([]byte(s))
}
// checkPackedData feeds every strict prefix of packed (lengths 0..len-1) to
// Unpack and fails the test if any truncated record unpacks successfully —
// i.e. it ensures truncation is always detected as an error.
func checkPackedData(t *testing.T, title string, packed transactionrecord.Packed) {
loop:
	for i := 0; i < len(packed); i += 1 {
		// test the unpacker with bad records
		// one less than whole record to avoid any success
		//p := append(transactionrecord.Packed{}, packed[:i]...)
		// copy into fresh storage so Unpack cannot read past the prefix
		p := make(transactionrecord.Packed, i)
		copy(p, packed[:i])
		unpacked, n, err := p.Unpack(true)
		if nil != err {
			continue loop
		}
		t.Errorf("unpack[%d](%s): unexpected success: record[:%d]: %+v", i, title, n, unpacked)
	}
}
|
package main
import (
"encoding/json"
"errors"
"io/ioutil"
"log"
"os"
"vrcdb/httpServer"
)
// JsonConfig mirrors config.json: the HTTP listen port plus the MongoDB
// connection credentials.
type JsonConfig struct {
	HttpPort uint16 `json:"http_port"`
	MongoDB struct {
		Host     string `json:"host"`
		Username string `json:"username"`
		Password string `json:"password"`
	} `json:"mongodb"`
}
// ReadConfig loads and validates config.json from the working directory.
// All three MongoDB fields are mandatory.
func ReadConfig() (*JsonConfig, error) {
	configFile, err := os.Open("config.json")
	if err != nil {
		return nil, errors.New("Failed to open file: " + err.Error())
	}
	defer configFile.Close()
	configBytes, err := ioutil.ReadAll(configFile)
	if err != nil {
		return nil, errors.New("Failed to read file: " + err.Error())
	}
	config := new(JsonConfig)
	if err = json.Unmarshal(configBytes, config); err != nil {
		return nil, errors.New("Failed to parse file: " + err.Error())
	}
	switch {
	case config.MongoDB.Host == "":
		return nil, errors.New("config does not contain mongodb.host")
	case config.MongoDB.Username == "":
		return nil, errors.New("config does not contain mongodb.username")
	case config.MongoDB.Password == "":
		return nil, errors.New("config does not contain mongodb.password")
	}
	return config, nil
}
// main loads config.json, initialises the HTTP layer, connects to MongoDB
// and then serves HTTP until the server returns.
func main() {
	config, err := ReadConfig()
	if err != nil {
		log.Fatal("Failed to open json: ", err)
	}
	httpServer.Init()
	// dbInit reports success as a bool; bail out (closing nothing) on failure.
	if !dbInit(config.MongoDB.Host, config.MongoDB.Username, config.MongoDB.Password) {
		return
	}
	defer dbClose()
	httpServer.Run(config.HttpPort)
}
|
package db
import (
"context"
"log"
"time"
"NokiaAssesmentGo/utils"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
var client *mongo.Client
// CreatePersonEndpoint inserts a person document into the "people"
// collection (5-second timeout).
// NOTE(review): on insert failure the error is only logged and a nil result
// is returned — callers must tolerate a nil *mongo.InsertOneResult.
func CreatePersonEndpoint(person utils.Person) *mongo.InsertOneResult {
	collection := client.Database("NokiaAssesmentGo").Collection("people")
	ctx, CancelFunc := context.WithTimeout(context.Background(), 5*time.Second)
	defer CancelFunc()
	result, err := collection.InsertOne(ctx, person)
	if err != nil {
		log.Println("Insert Person failed: ", err)
	}
	return result
}
// GetPersonFromDB loads every person document from the "people" collection
// and stores each one in the in-process cache keyed by its hex object id.
// Errors are logged rather than returned (the signature has no error).
func GetPersonFromDB() {
	collection := client.Database("NokiaAssesmentGo").Collection("people")
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	cursor, err := collection.Find(ctx, bson.M{})
	if err != nil {
		// previously the error was silently swallowed
		log.Println("Find people failed: ", err)
		return
	}
	defer cursor.Close(ctx)
	for cursor.Next(ctx) {
		var person utils.Person
		// Skip documents that fail to decode instead of caching zero values
		// (the Decode error used to be ignored).
		if err := cursor.Decode(&person); err != nil {
			log.Println("Decode person failed: ", err)
			continue
		}
		utils.StoreInCache(person.ID.Hex(), &person)
	}
	if err := cursor.Err(); err != nil {
		log.Println("Cursor iteration failed: ", err)
	}
}
// ConnectToDB initialises the package-level mongo client against the local
// MongoDB instance.  The connection error — previously discarded with a
// blank identifier — is now logged so startup failures are visible.
func ConnectToDB() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	clientOptions := options.Client().ApplyURI("mongodb://localhost:27017")
	var err error
	client, err = mongo.Connect(ctx, clientOptions)
	if err != nil {
		log.Println("Mongo connect failed: ", err)
	}
}
|
/*
Copyright 2021 The KodeRover Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"github.com/koderover/zadig/lib/setting"
"github.com/koderover/zadig/lib/tool/xlog"
)
const (
	// Role scope constants (see the roleType parameter of ListRolePermissions).
	SystemType  = 1
	ProjectType = 2
	// AllUsersRoleName names the all-users role.
	AllUsersRoleName = "all-users"
)
// UserInfo is the poetry service's representation of a user account.
type UserInfo struct {
	ID             int        `json:"id"`
	Name           string     `json:"name"`
	Email          string     `json:"email"`
	Password       string     `json:"password"`
	Phone          string     `json:"phone"`
	IsAdmin        bool       `json:"isAdmin"`
	IsSuperUser    bool       `json:"isSuperUser"`
	IsTeamLeader   bool       `json:"isTeamLeader"`
	OrganizationID int        `json:"organization_id"`
	Directory      string     `json:"directory"`
	LastLoginAt    int64      `json:"lastLogin"`
	CreatedAt      int64      `json:"created_at"`
	UpdatedAt      int64      `json:"updated_at"`
	Teams          []TeamInfo `json:"teams"`
}
// TeamInfo describes one team within an organization.
type TeamInfo struct {
	ID           int    `json:"id"`
	OrgID        int    `json:"orgId"`
	Name         string `json:"name"`
	Desc         string `json:"desc"`
	IsTeamLeader bool   `json:"isTeamLeader"`
	//Users []*User `json:"leaders"`
	CreatedAt int64 `json:"created_at"`
	UpdatedAt int64 `json:"updated_at"`
}
// Organization describes an organization record.
type Organization struct {
	ID        int    `json:"id"`
	Name      string `json:"name"`
	Token     string `json:"token"`
	Website   string `json:"website"`
	CreatedAt int64  `json:"created_at"`
	UpdatedAt int64  `json:"updated_at"`
}
// UserViewReponseModel bundles a user with their teams and organization.
// NOTE(review): "Reponse" is a typo for "Response", but the name is exported
// and may be referenced elsewhere, so it is left unchanged.
type UserViewReponseModel struct {
	User         *UserInfo     `json:"info"`
	Teams        []*TeamInfo   `json:"teams"`
	Organization *Organization `json:"organization"`
}
// PoetryClient calls the poetry API server, authenticating with ApiRootKey.
type PoetryClient struct {
	PoetryAPIServer string
	ApiRootKey      string
}
// SendRequest performs an HTTP call against the poetry server and returns
// the response body as a string.  data may be nil, a string, a []byte or any
// JSON-marshalable value (see GetRequestBody).
func (p *PoetryClient) SendRequest(url, method string, data interface{}, header http.Header) (string, error) {
	body, err := p.Do(url, method, GetRequestBody(data), header)
	if err != nil {
		return "", err
	}
	return string(body), nil
}
// GetRequestBody converts an arbitrary payload into an io.Reader suitable
// for use as an HTTP request body: nil stays nil, strings and byte slices
// are used verbatim, and anything else is JSON-encoded (returning nil if
// marshaling fails).
func GetRequestBody(body interface{}) io.Reader {
	switch v := body.(type) {
	case nil:
		return nil
	case string:
		return bytes.NewReader([]byte(v))
	case []byte:
		return bytes.NewReader(v)
	default:
		rawData, err := json.Marshal(v)
		if err != nil {
			return nil
		}
		return bytes.NewReader(rawData)
	}
}
// Do issues an HTTP request against the poetry API server.  A url that does
// not already start with p.PoetryAPIServer is prefixed with it; the given
// header replaces the request headers wholesale.  A 2xx response yields the
// body; any other status yields an error embedding url, body, headers and
// status code.
func (p *PoetryClient) Do(url, method string, reader io.Reader, header http.Header) ([]byte, error) {
	if !strings.HasPrefix(url, p.PoetryAPIServer) {
		url = p.PoetryAPIServer + url
	}
	req, err := http.NewRequest(method, url, reader)
	if err != nil {
		return nil, err
	}
	// wholesale replacement: any headers set by NewRequest are dropped
	req.Header = header
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer func() { _ = resp.Body.Close() }()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode/100 == 2 {
		return body, nil
	}
	return nil, fmt.Errorf("response status error: %s %s %v %d", url, string(body), resp.Header, resp.StatusCode)
}
// Deserialize unmarshals JSON data into v.
func (p *PoetryClient) Deserialize(data []byte, v interface{}) error {
	return json.Unmarshal(data, v)
}
// GetUserPermissionUUIDs collects the permission UUIDs granted to roleID
// within the given product (always queried with ProjectType scope).
func (p *PoetryClient) GetUserPermissionUUIDs(roleID int64, productName string, log *xlog.Logger) ([]string, error) {
	rolePermissions, err := p.ListRolePermissions(roleID, ProjectType, productName, log)
	if err != nil {
		log.Errorf("GetUserPermission ListRolePermissions error: %v", err)
		return []string{}, fmt.Errorf("GetUserPermission ListRolePermissions error: %v", err)
	}
	uuids := make([]string, 0, len(rolePermissions))
	for _, rp := range rolePermissions {
		uuids = append(uuids, rp.PermissionUUID)
	}
	return uuids, nil
}
// RolePermission links a role to one permission UUID.
type RolePermission struct {
	RoleID         int64  `json:"roleId"`
	PermissionUUID string `json:"permissionUUID"`
}
// RolePermissionModels is the response wrapper used when deserializing the
// rolePermission endpoint's payload.
type RolePermissionModels struct {
	RolePermissions []*RolePermission `json:"RolePermissions"`
}
// ListRolePermissions fetches the role→permission mappings for roleID from
// the poetry directory service, authenticating with the client's root key.
func (p *PoetryClient) ListRolePermissions(roleID int64, roleType int, productName string, log *xlog.Logger) ([]*RolePermission, error) {
	header := http.Header{}
	header.Set(setting.Auth, fmt.Sprintf("%s%s", setting.AuthPrefix, p.ApiRootKey))
	responseBody, err := p.SendRequest(fmt.Sprintf("%s/directory/rolePermission?roleType=%d&roleId=%d&productName=%s", p.PoetryAPIServer, roleType, roleID, productName), "GET", "", header)
	if err != nil {
		log.Errorf("ListRolePermissions SendRequest error: %v", err)
		return nil, fmt.Errorf("ListRolePermissions SendRequest error: %v", err)
	}
	// The local used to be named "RolePermissionModels", identical to its own
	// type and shadowing it — renamed for clarity.
	var models RolePermissionModels
	err = p.Deserialize([]byte(responseBody), &models.RolePermissions)
	if err != nil {
		log.Errorf("ListRolePermissions Deserialize error: %v", err)
		return nil, fmt.Errorf("ListRolePermissions Deserialize error: %v", err)
	}
	return models.RolePermissions, nil
}
|
// Package matcher provides matching positioning messages to a set of filtering rules
// and pereating messages to a topic in case of they are matched.
package matcher
import (
"encoding/json"
"errors"
"github.com/lvl484/positioning-filter/position"
"github.com/lvl484/positioning-filter/repository"
)
const (
	// ErrBadFilterType is the message used for filters with an unknown type.
	ErrBadFilterType = "Bad type of filter"
	// NOTE(review): these are named "latitude" but hold ±180, which is the
	// longitude range; the naming is used consistently throughout this file,
	// so it is documented rather than changed — confirm the intended axis.
	criticalLeftLatitude  float32 = -180
	criticalRightLatitude float32 = 180
)
// matcher decides whether one position satisfies one filter.
type matcher func(position.Position, *repository.Filter) (bool, error)
// matcherFilters implements Matcher on top of a filter repository.
type matcherFilters struct {
	filters repository.Filters
}
// Match reports whether pos satisfies at least one of the filters stored
// for pos.UserID.  Any repository or matching error aborts the check.
func (m matcherFilters) Match(pos position.Position) (bool, error) {
	userFilters, err := m.filters.AllByUser(pos.UserID)
	if err != nil {
		return false, err
	}
	for _, f := range userFilters {
		matchFn, err := matcherByType(f.Type)
		if err != nil {
			return false, err
		}
		ok, err := matchFn(pos, f)
		if err != nil {
			return false, err
		}
		if ok {
			return true, nil
		}
	}
	return false, nil
}
// NewMatcher returns a Matcher backed by the given filter repository.
func NewMatcher(filters repository.Filters) Matcher {
	return matcherFilters{filters: filters}
}
// matcherByType resolves a filter-type name to its matching function; an
// unknown type yields ErrBadFilterType.
func matcherByType(matcherType string) (matcher, error) {
	if matcherType == "round" {
		return matchRound, nil
	}
	if matcherType == "rectangular" {
		return matchRectangular, nil
	}
	return nil, errors.New(ErrBadFilterType)
}
// matchRectangular reports whether pos lies strictly inside the rectangle in
// the filter's JSON configuration; filter.Reversed inverts the result.
// Rectangles that wrap the ±180° line (left edge numerically greater than
// right edge) are first shifted into a contiguous range together with the
// position (see moveRectangularFilter / movePosition).
func matchRectangular(pos position.Position, filter *repository.Filter) (bool, error) {
	var rfilter repository.RectangularFilter
	if err := json.Unmarshal(filter.Configuration, &rfilter); err != nil {
		return false, err
	}
	if isConflict(rfilter.TopLeftLatitude, rfilter.BottomRightLatitude) {
		delta := moveRectangularFilter(&rfilter)
		movePosition(&pos, delta)
	}
	// Strict inequalities: points exactly on the border do not match.
	matched := rfilter.BottomRightLatitude > pos.Latitude &&
		rfilter.TopLeftLatitude < pos.Latitude &&
		rfilter.BottomRightLongitude < pos.Longitude &&
		rfilter.TopLeftLongitude > pos.Longitude
	return xor(matched, filter.Reversed), nil
}
// matchRound has a known issue with matching across the ±180° line:
// it returns false when the position and the filter centre are on different
// sides of it.  Otherwise it reports whether pos is within rfilter.Radius of
// the centre (plain planar distance, no wrap-around handling);
// filter.Reversed inverts the result.
func matchRound(pos position.Position, filter *repository.Filter) (bool, error) {
	var rfilter repository.RoundFilter
	if err := json.Unmarshal(filter.Configuration, &rfilter); err != nil {
		return false, err
	}
	// squared-distance comparison avoids a sqrt
	matched := (pos.Latitude-rfilter.CenterLatitude)*(pos.Latitude-rfilter.CenterLatitude)+
		(pos.Longitude-rfilter.CentreLongitude)*(pos.Longitude-rfilter.CentreLongitude) <=
		(rfilter.Radius * rfilter.Radius)
	return xor(matched, filter.Reversed), nil
}
// xor reports whether exactly one of a and b is true.
func xor(a, b bool) bool {
	return a != b
}
// isConflict reports whether the filter wraps around the ±180° line (left
// edge numerically greater than right edge), which would break the naive
// range comparison in matchRectangular.
func isConflict(leftLatitude, rightLatitude float32) bool {
	return leftLatitude > rightLatitude
}
// moveRectangularFilter shifts a wrapping rectangle into a contiguous range:
// both edges are moved left by delta and the right edge is re-wrapped by
// +360 so it lands back on criticalRightLatitude (+180).  The delta is
// returned so the caller can shift the position by the same amount.
// NOTE(review): delta is computed as criticalRightLatitude + BottomRight
// (a sum, not a difference) — verify this is the intended shift against
// matchRectangular's wrap-around handling.
func moveRectangularFilter(r *repository.RectangularFilter) float32 {
	delta := criticalRightLatitude + r.BottomRightLatitude
	r.BottomRightLatitude -= delta
	// move out from overlapping with degrees border from 180 to -180
	r.BottomRightLatitude += 360
	r.TopLeftLatitude -= delta
	return delta
}
// movePosition shifts the position by the same delta that was applied to a
// wrapping filter, re-wrapping by +360 if it falls at or below -180.
func movePosition(p *position.Position, delta float32) {
	p.Latitude -= delta
	if p.Latitude <= criticalLeftLatitude {
		// move out from overlapping with degrees border from 180 to -180
		p.Latitude += 360
	}
}
|
package array_memory
import (
"testing"
"math/big"
)
func TestSetGet(t *testing.T) {
mySummer := NewMemArray()
myArray := [][2]*big.Int{ { big.NewInt(1), big.NewInt(1)}, {big.NewInt(1) , big.NewInt(2)}, {big.NewInt(3), big.NewInt(4)} }
result := mySummer.SumArray(myArray)
res1 := result[0].Uint64()
if 5 != res1 {
t.Fatalf("Result should be 5 not %d", res1)
}
res2 := result[1].Uint64()
if 7 != res2 {
t.Fatalf("Result should be 7 not %d", res2)
}
}; |
package plex
import (
"net/http"
)
// PlexAuthTransport is an http.RoundTripper that decorates every request
// with the standard X-Plex-* identification headers and, when set, the
// X-Plex-Token.  (The previous comment described HTTP Basic Authentication,
// which this type does not use.)
type PlexAuthTransport struct {
	XPlexToken string // X-Plex-Token
	// Transport is the underlying HTTP transport to use when making requests.
	// It will default to http.DefaultTransport if nil.
	Transport http.RoundTripper
}
// RoundTrip implements http.RoundTripper: it stamps the X-Plex-* headers
// (and the token, if any) onto a copy of the request and forwards it.
func (t *PlexAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// Work on a clone so the caller's request stays untouched
	// (per the RoundTripper contract).
	r := cloneRequest(req)
	r.Header.Set("X-Plex-Product", XPlexProduct)
	r.Header.Set("X-Plex-Version", XPlexVersion)
	r.Header.Set("X-Plex-Client-Identifier", XPlexClientIdentifier)
	if t.XPlexToken != "" {
		r.Header.Set("X-Plex-Token", t.XPlexToken)
	}
	return t.transport().RoundTrip(r)
}
// Client returns an *http.Client whose requests carry the X-Plex-* headers
// (and token, when set) that this transport adds.
func (t *PlexAuthTransport) Client() *http.Client {
	return &http.Client{Transport: t}
}
// transport returns the configured underlying transport, falling back to
// http.DefaultTransport when none is set.
func (t *PlexAuthTransport) transport() http.RoundTripper {
	if t.Transport == nil {
		return http.DefaultTransport
	}
	return t.Transport
}
// cloneRequest returns a copy of r: a shallow copy of the struct with a
// deep-copied Header map, so header mutations on the clone never leak back
// into the original request.
func cloneRequest(r *http.Request) *http.Request {
	clone := new(http.Request)
	*clone = *r
	clone.Header = make(http.Header, len(r.Header))
	for name, values := range r.Header {
		// append onto a nil slice copies the values into fresh storage
		clone.Header[name] = append([]string(nil), values...)
	}
	return clone
}
|
package crawler
import "log"
import "os"
import "strings"
import "net/url"
import "net/http"
import "github.com/ernesto-jimenez/emit_urls/url_extractor"
// Crawl starts crawling at initial_url, restricted to that URL's host, and
// emits each successfully fetched page on channel (which is closed when the
// crawl finishes).  Log output goes to logfile; "/dev/stdout" and
// "/dev/stderr" are special-cased to the process streams.
func Crawl(initial_url string, channel chan FoundURL, logfile string) {
	parsedUrl, err := url.Parse(initial_url)
	if err != nil {
		// An unparsable start URL previously caused a nil-pointer panic
		// when parsedUrl.Host was read below.
		log.Printf("invalid initial url %q: %v", initial_url, err)
		close(channel)
		return
	}
	var output *os.File
	switch logfile {
	case "/dev/stdout":
		output = os.Stdout
	case "/dev/stderr":
		output = os.Stderr
	default:
		// BUG FIX: O_CREATE alone opens the file read-only (O_RDONLY is 0),
		// so every subsequent log write failed; O_WRONLY|O_APPEND is required.
		output, err = os.OpenFile(logfile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
		if err != nil {
			// fall back rather than logging into a nil *os.File
			output = os.Stderr
		}
	}
	log.SetOutput(output)
	host := parsedUrl.Host
	crawl(initial_url, channel, host, make(map[string]bool), []string{})
}
// crawl fetches one url, reports it on channel if it returned 200, queues
// any same-host links found in HTML pages, then recurses into the queue.
// The channel is closed when the queue empties.
// NOTE: the deferred Body.Close calls only run once the whole recursive
// crawl unwinds (pre-existing behavior, kept as-is).
func crawl(url string, channel chan FoundURL, host string,
	visitedURLs map[string]bool, queue []string) {
	visitedURLs[url] = true
	resp, err := http.Get(url)
	if err != nil {
		// BUG FIX: previously resp.Body.Close() was deferred unconditionally;
		// on a failed Get resp is nil and the deferred call panicked.
		log.Printf("%v", err)
	} else {
		defer resp.Body.Close()
		contentType := resp.Header.Get("Content-Type")
		crawled := FoundURL{StatusCode: resp.StatusCode, Url: url, contentType: contentType}
		log.Printf("%v\n", crawled)
		if resp.StatusCode == 200 {
			channel <- crawled
			if strings.Contains(crawled.contentType, "text/html") {
				urls := url_extractor.ExtractURLs(url, resp.Body)
				for i := 0; i < len(urls); i++ {
					next_url := urls[i].String()
					if !visitedURLs[next_url] && urls[i].Host == host {
						// mark before queuing so the same link is never queued twice
						visitedURLs[next_url] = true
						queue = append(queue, next_url)
					}
				}
				log.Printf("%v urls found\n", len(urls))
			}
		}
	}
	if len(queue) > 0 {
		log.Printf("Queue: %v items\n", len(queue))
		crawl(queue[0], channel, host, visitedURLs, queue[1:])
	} else {
		close(channel)
	}
}
// FoundURL describes one fetched page: its URL, the HTTP status code it
// returned, and (unexported) the value of its Content-Type header.
type FoundURL struct {
	Url         string
	StatusCode  int
	contentType string
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rangefeed
import (
"context"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/errors"
)
// A runnable can be run as an async task.
type runnable interface {
	// Run executes the runnable. Cannot be called multiple times.
	Run(context.Context)
	// Cancel must be called if the runnable is never Run, so it can release
	// any resources it holds.
	Cancel()
}
// initResolvedTSScan scans over all keys using the provided iterator and
// informs the rangefeed Processor of any intents. This allows the Processor to
// backfill its unresolvedIntentQueue with any intents that were written before
// the Processor was started and hooked up to a stream of logical operations.
// The Processor can initialize its resolvedTimestamp once the scan completes
// because it knows it is now tracking all intents in its key range.
//
// MVCCIterator Contract:
// The provided MVCCIterator must observe all intents in the Processor's keyspan.
// An important implication of this is that if the iterator is a
// TimeBoundIterator, its MinTimestamp cannot be above the keyspan's largest
// known resolved timestamp, if one has ever been recorded. If one has never
// been recorded, the TimeBoundIterator cannot have any lower bound.
//
type initResolvedTSScan struct {
	p  *Processor                 // processor to inform of discovered intents
	it storage.SimpleMVCCIterator // iterator over the processor's span; closed by Cancel
}
// newInitResolvedTSScan constructs the initial resolved-timestamp scan task
// over the given iterator (see the MVCCIterator contract above).
func newInitResolvedTSScan(p *Processor, it storage.SimpleMVCCIterator) runnable {
	return &initResolvedTSScan{p: p, it: it}
}
// Run performs the scan.  On failure the error is logged and the Processor
// is stopped with it; on success the Processor is told its resolved
// timestamp can be initialized.  Cancel (closing the iterator) always fires
// via the defer.
func (s *initResolvedTSScan) Run(ctx context.Context) {
	defer s.Cancel()
	if err := s.iterateAndConsume(ctx); err != nil {
		err = errors.Wrap(err, "initial resolved timestamp scan failed")
		log.Errorf(ctx, "%v", err)
		s.p.StopWithErr(roachpb.NewError(err))
	} else {
		// Inform the processor that its resolved timestamp can be initialized.
		s.p.setResolvedTSInitialized()
	}
}
// iterateAndConsume walks the processor's keyspan with NextKey, inspecting
// only MVCCMetadata keys (always a key's first version), and forwards an
// MVCCWriteIntentOp event to the processor for every intent it finds.
func (s *initResolvedTSScan) iterateAndConsume(ctx context.Context) error {
	startKey := storage.MakeMVCCMetadataKey(s.p.Span.Key.AsRawKey())
	endKey := storage.MakeMVCCMetadataKey(s.p.Span.EndKey.AsRawKey())
	// Iterate through all keys using NextKey. This will look at the first MVCC
	// version for each key. We're only looking for MVCCMetadata versions, which
	// will always be the first version of a key if it exists, so its fine that
	// we skip over all other versions of keys.
	var meta enginepb.MVCCMetadata
	for s.it.SeekGE(startKey); ; s.it.NextKey() {
		if ok, err := s.it.Valid(); err != nil {
			return err
		} else if !ok || !s.it.UnsafeKey().Less(endKey) {
			// exhausted, or walked past the end of the processor's span
			break
		}
		// If the key is not a metadata key, ignore it.
		unsafeKey := s.it.UnsafeKey()
		if unsafeKey.IsValue() {
			continue
		}
		// Found a metadata key. Unmarshal.
		if err := protoutil.Unmarshal(s.it.UnsafeValue(), &meta); err != nil {
			return errors.Wrapf(err, "unmarshaling mvcc meta: %v", unsafeKey)
		}
		// If this is an intent, inform the Processor.
		if meta.Txn != nil {
			var ops [1]enginepb.MVCCLogicalOp
			ops[0].SetValue(&enginepb.MVCCWriteIntentOp{
				TxnID:           meta.Txn.ID,
				TxnKey:          meta.Txn.Key,
				TxnMinTimestamp: meta.Txn.MinTimestamp,
				Timestamp:       meta.Txn.WriteTimestamp,
			})
			s.p.sendEvent(event{ops: ops[:]}, 0 /* timeout */)
		}
	}
	return nil
}
// Cancel closes the underlying iterator, releasing its resources.
func (s *initResolvedTSScan) Cancel() {
	s.it.Close()
}
// TxnPusher is capable of pushing transactions to a new timestamp and
// cleaning up the intents of transactions that are found to be committed.
// It is used by txnPushAttempt to unblock the resolved timestamp.
type TxnPusher interface {
	// PushTxns attempts to push the specified transactions to a new
	// timestamp. It returns the resulting transaction protos.
	PushTxns(context.Context, []enginepb.TxnMeta, hlc.Timestamp) ([]*roachpb.Transaction, error)
	// ResolveIntents resolves the specified intents.
	ResolveIntents(ctx context.Context, intents []roachpb.LockUpdate) error
}
// txnPushAttempt pushes all old transactions that have unresolved intents on
// the range which are blocking the resolved timestamp from moving forward. It
// does so in two steps.
// 1. it pushes all old transactions to the current timestamp and gathers
//    up the transactions' authoritative transaction records.
// 2. for each transaction that is pushed, it checks the transaction's current
//    status and reacts accordingly:
//    - PENDING:   inform the Processor that the transaction's timestamp has
//                 increased so that the transaction's intents no longer need
//                 to block the resolved timestamp. Even though the intents
//                 may still be at an older timestamp, we know that they can't
//                 commit at that timestamp.
//    - COMMITTED: launch async processes to resolve the transaction's intents
//                 so they will be resolved sometime soon and unblock the
//                 resolved timestamp.
//    - ABORTED:   inform the Processor to stop caring about the transaction.
//                 It will never commit and its intents can be safely ignored.
type txnPushAttempt struct {
	p     *Processor         // processor notified with the push results
	txns  []enginepb.TxnMeta // transactions to push
	ts    hlc.Timestamp      // timestamp to push the transactions to
	doneC chan struct{}      // closed when the attempt finishes
}
// newTxnPushAttempt constructs a runnable that pushes txns to ts and closes
// doneC once the attempt completes.
func newTxnPushAttempt(
	p *Processor, txns []enginepb.TxnMeta, ts hlc.Timestamp, doneC chan struct{},
) runnable {
	attempt := txnPushAttempt{
		p:     p,
		txns:  txns,
		ts:    ts,
		doneC: doneC,
	}
	return &attempt
}
// Run executes the push attempt and logs (without propagating) any failure.
// Cancel is deferred so doneC is always closed exactly once.
func (a *txnPushAttempt) Run(ctx context.Context) {
	defer a.Cancel()
	err := a.pushOldTxns(ctx)
	if err != nil {
		log.Errorf(ctx, "pushing old intents failed: %v", err)
	}
}
// pushOldTxns pushes the attempt's transactions to its timestamp, informs the
// processor of each transaction's post-push status via logical ops, and kicks
// off intent resolution for transactions found committed or aborted.
func (a *txnPushAttempt) pushOldTxns(ctx context.Context) error {
	// Push all transactions using the TxnPusher to the current time.
	// This may cause transaction restarts, but span refreshing should
	// prevent a restart for any transaction that has not been written
	// over at a larger timestamp.
	pushedTxns, err := a.p.TxnPusher.PushTxns(ctx, a.txns, a.ts)
	if err != nil {
		return err
	}
	if len(pushedTxns) != len(a.txns) {
		// We expect results for all txns. In particular, if no txns have been pushed, we'd
		// crash later cause we'd be creating an invalid empty event.
		return errors.AssertionFailedf("tried to push %d transactions, got response for %d",
			len(a.txns), len(pushedTxns))
	}
	// Inform the Processor of the results of the push for each transaction.
	ops := make([]enginepb.MVCCLogicalOp, len(pushedTxns))
	var intentsToCleanup []roachpb.LockUpdate
	for i, txn := range pushedTxns {
		switch txn.Status {
		case roachpb.PENDING, roachpb.STAGING:
			// The transaction is still in progress but its timestamp was moved
			// forward to the current time. Inform the Processor that it can
			// forward the txn's timestamp in its unresolvedIntentQueue.
			ops[i].SetValue(&enginepb.MVCCUpdateIntentOp{
				TxnID:     txn.ID,
				Timestamp: txn.WriteTimestamp,
			})
		case roachpb.COMMITTED:
			// The transaction is committed and its timestamp may have moved
			// forward since we last saw an intent. Inform the Processor
			// immediately in case this is the transaction that is holding back
			// the resolved timestamp. However, we still need to wait for the
			// transaction's intents to actually be resolved.
			ops[i].SetValue(&enginepb.MVCCUpdateIntentOp{
				TxnID:     txn.ID,
				Timestamp: txn.WriteTimestamp,
			})
			// Clean up the transaction's intents, which should eventually cause all
			// unresolved intents for this transaction on the rangefeed's range to be
			// resolved. We'll have to wait until the intents are resolved before the
			// resolved timestamp can advance past the transaction's commit timestamp,
			// so the best we can do is help speed up the resolution.
			intentsToCleanup = append(intentsToCleanup, txn.LocksAsLockUpdates()...)
		case roachpb.ABORTED:
			// The transaction is aborted, so it doesn't need to be tracked
			// anymore nor does it need to prevent the resolved timestamp from
			// advancing. Inform the Processor that it can remove the txn from
			// its unresolvedIntentQueue.
			//
			// NOTE: the unresolvedIntentQueue will ignore MVCCAbortTxn operations
			// before it has been initialized. This is not a concern here though
			// because we never launch txnPushAttempt tasks before the queue has
			// been initialized.
			ops[i].SetValue(&enginepb.MVCCAbortTxnOp{
				TxnID: txn.ID,
			})
			// If the txn happens to have its LockSpans populated, then lets clean up
			// the intents as an optimization helping others. If we aborted the txn,
			// then it won't have this field populated. If, however, we ran into a
			// transaction that its coordinator tried to rollback but didn't follow up
			// with garbage collection, then LockSpans will be populated.
			intentsToCleanup = append(intentsToCleanup, txn.LocksAsLockUpdates()...)
		}
	}
	// Inform the processor of all logical ops.
	a.p.sendEvent(event{ops: ops}, 0 /* timeout */)
	// Resolve intents, if necessary.
	return a.p.TxnPusher.ResolveIntents(ctx, intentsToCleanup)
}
// Cancel signals completion of the attempt by closing doneC.
// It must be called exactly once; Run defers it.
func (a *txnPushAttempt) Cancel() {
	close(a.doneC)
}
|
package group
import (
"context"
"sync"
"github.com/upfluence/pkg/multierror"
)
// waitGroup is a Group implementation built on sync.WaitGroup: runners share
// a cancellable context, and their errors are collected for Wait.
type waitGroup struct {
	ctx  context.Context    // shared context handed to every runner
	fn   context.CancelFunc // cancels ctx; invoked by Wait
	mu   sync.Mutex         // guards errs
	errs []error            // errors returned by runners
	wg   sync.WaitGroup     // tracks in-flight runners
}
// WaitGroup returns a Group whose runners share a context derived from ctx;
// the context is cancelled when Wait is called.
func WaitGroup(ctx context.Context) Group {
	cctx, cancel := context.WithCancel(ctx)
	return &waitGroup{ctx: cctx, fn: cancel}
}
// Do runs fn in its own goroutine unless the group's context is already
// done, in which case the runner is silently dropped. A non-nil error from
// fn is recorded for Wait.
func (wg *waitGroup) Do(fn Runner) {
	if wg.ctx.Err() != nil {
		// Group already cancelled; do not start new work.
		return
	}
	wg.wg.Add(1)
	go func() {
		defer wg.wg.Done()
		err := fn(wg.ctx)
		if err == nil {
			return
		}
		wg.mu.Lock()
		defer wg.mu.Unlock()
		wg.errs = append(wg.errs, err)
	}()
}
// Wait blocks until every runner started via Do has finished, then cancels
// the shared context and returns all collected errors combined into one
// (nil when no runner failed).
func (wg *waitGroup) Wait() error {
	wg.wg.Wait()
	wg.fn()
	return multierror.Wrap(wg.errs)
}
|
package logif
import (
"errors"
"strings"
)
// Logging level constants, ordered from least to most severe.
const (
	LevelDebug = iota
	LevelInfo
	LevelWarning
	LevelError
)

// ErrNoSuchLogLevel is returned when a log level value or name cannot be
// mapped to a known level.
var ErrNoSuchLogLevel = errors.New("no such log level")

// LogLevelString returns the name representation of a given log level int.
func LogLevelString(l int) (string, error) {
	names := [...]string{
		LevelDebug:   "DEBUG",
		LevelInfo:    "INFO",
		LevelWarning: "WARN",
		LevelError:   "ERROR",
	}
	if l < 0 || l >= len(names) {
		return "", ErrNoSuchLogLevel
	}
	return names[l], nil
}

// ParseLogLevel parses a log level string (case-insensitive) into an int.
func ParseLogLevel(l string) (int, error) {
	switch strings.ToUpper(l) {
	case "DEBUG":
		return LevelDebug, nil
	case "INFO":
		return LevelInfo, nil
	case "WARN", "WARNING":
		return LevelWarning, nil
	case "ERR", "ERROR":
		return LevelError, nil
	}
	return -1, ErrNoSuchLogLevel
}
|
package printers
import (
"fmt"
"html/template"
"io"
"log"
"sort"
"strconv"
"strings"
"time"
containers "github.com/ernoaapa/eliot/pkg/api/services/containers/v1"
node "github.com/ernoaapa/eliot/pkg/api/services/node/v1"
pods "github.com/ernoaapa/eliot/pkg/api/services/pods/v1"
"github.com/ernoaapa/eliot/pkg/config"
"github.com/ernoaapa/eliot/pkg/printers/humanreadable"
"github.com/ernoaapa/eliot/pkg/utils"
"github.com/pkg/errors"
"github.com/c2h5oh/datasize"
"github.com/hako/durafmt"
)
// HumanReadablePrinter is an implementation of ResourcePrinter which prints
// resources in human readable format (tables etc.). It carries no state.
type HumanReadablePrinter struct {
}

// NewHumanReadablePrinter creates a new HumanReadablePrinter.
func NewHumanReadablePrinter() *HumanReadablePrinter {
	return new(HumanReadablePrinter)
}
// PrintPods writes a list of Pods in human readable table format to the
// writer; an empty list prints a "(No pods)" placeholder instead.
func (p *HumanReadablePrinter) PrintPods(list []*pods.Pod, writer io.Writer) error {
	if len(list) == 0 {
		fmt.Fprintf(writer, "\n\t(No pods)\n\n")
		return nil
	}
	fmt.Fprintln(writer, "\nNAMESPACE\tNAME\tCONTAINERS\tSTATUS")
	for _, pod := range list {
		if _, err := fmt.Fprintf(writer, "%s\t%s\t%d\t%s\n",
			pod.Metadata.Namespace, pod.Metadata.Name, len(pod.Spec.Containers), getStatus(pod)); err != nil {
			return errors.Wrapf(err, "Error while writing pod row")
		}
	}
	return nil
}
// getStatus constructs a string representation of all containers statuses,
// e.g. "running(2),stopped(1)", sorted alphabetically by state name.
func getStatus(pod *pods.Pod) string {
	counts := map[string]int{}
	statuses := []*containers.ContainerStatus{}
	if pod.Status != nil {
		statuses = pod.Status.ContainerStatuses
	}
	for _, status := range statuses {
		// Incrementing a missing key starts from the zero value, so the old
		// explicit zero-initialization was redundant.
		counts[status.State]++
	}
	keys := getKeys(counts)
	sort.Strings(keys)
	// Pre-size: exactly one entry per distinct state.
	result := make([]string, 0, len(keys))
	for _, key := range keys {
		result = append(result, fmt.Sprintf("%s(%d)", key, counts[key]))
	}
	return strings.Join(result, ",")
}
func getKeys(source map[string]int) (result []string) {
for key := range source {
result = append(result, key)
}
return result
}
// PrintNodes writes a list of Nodes in human readable table format to the
// writer; an empty list prints a "(No nodes)" placeholder instead.
func (p *HumanReadablePrinter) PrintNodes(nodes []*node.Info, writer io.Writer) error {
	if len(nodes) == 0 {
		fmt.Fprintf(writer, "\n\t(No nodes)\n\n")
		return nil
	}
	fmt.Fprintln(writer, "\nHOSTNAME\tENDPOINT\tVERSION")
	for _, n := range nodes {
		// Endpoint is the first advertised address plus the gRPC port.
		endpoint := fmt.Sprintf("%s:%d", utils.GetFirst(n.Addresses, ""), n.GrpcPort)
		if _, err := fmt.Fprintf(writer, "%s\t%s\t%s\n", n.Hostname, endpoint, n.Version); err != nil {
			return errors.Wrapf(err, "Error while writing node row")
		}
	}
	return nil
}
// PrintNode writes a node in human readable detailed format to the writer.
// A template failure is logged and returned as an error instead of killing
// the whole process (the previous log.Fatalf called os.Exit from a library).
func (p *HumanReadablePrinter) PrintNode(info *node.Info, writer io.Writer) error {
	t := template.New("node-details").Funcs(template.FuncMap{
		"FormatPercent": formatPercent,
		"FormatUptime":  formatUptime,
		// Subtract lets the template compute used = total - free.
		"Subtract": func(a, b uint64) uint64 {
			return a - b
		},
		// FormatBytes renders a byte count in human readable units.
		"FormatBytes": func(v uint64) string {
			return datasize.ByteSize(v).HumanReadable()
		},
	})
	t, err := t.Parse(humanreadable.NodeDetailsTemplate)
	if err != nil {
		// Keep the log line for operator visibility, but return instead of Fatalf.
		log.Printf("Invalid node template: %s", err)
		return errors.Wrap(err, "invalid node template")
	}
	return t.Execute(writer, info)
}
// formatPercent renders used capacity as a percentage string such as "50%".
// Raw byte counts are scaled to KiB first and any partial percent is rounded
// up to the next whole percent; a zero total yields "0%".
func formatPercent(total, free, available uint64) string {
	var percent float64
	bUsed := (total - free) / 1024
	bAvail := available / 1024
	if utotal := bUsed + bAvail; utotal != 0 {
		u100 := bUsed * 100
		pct := u100 / utotal
		if u100%utotal != 0 {
			// Round any remainder up.
			pct++
		}
		percent = (float64(pct) / float64(100)) * 100.0
	}
	return strconv.FormatFloat(percent, 'f', -1, 64) + "%"
}
// formatUptime renders an uptime given in seconds as a human friendly
// duration string, falling back to plain seconds when the nanosecond
// conversion overflows time.Duration (an int64).
func formatUptime(uptime uint64) string {
	d := time.Duration(uptime * 1000 * 1000 * 1000)
	if d < 0 {
		// Overflowed int64 nanoseconds; just display the seconds.
		return fmt.Sprintf("%d seconds", uptime)
	}
	return durafmt.Parse(d).String()
}
// PrintPod writes a pod in human readable detailed format to the writer.
// A template failure is logged and returned as an error instead of killing
// the whole process (the previous log.Fatalf called os.Exit from a library).
func (p *HumanReadablePrinter) PrintPod(pod *pods.Pod, writer io.Writer) error {
	t := template.New("pod-details").Funcs(template.FuncMap{
		// GetStatus returns the status entry matching a container name, or nil.
		"GetStatus": func(pod pods.Pod, name string) *containers.ContainerStatus {
			if pod.Status == nil {
				return nil
			}
			for _, status := range pod.Status.ContainerStatuses {
				if status.Name == name {
					return status
				}
			}
			return nil
		},
		"StringsJoin": strings.Join,
	})
	t, err := t.Parse(humanreadable.PodDetailsTemplate)
	if err != nil {
		// Keep the log line for operator visibility, but return instead of Fatalf.
		log.Printf("Invalid pod template: %s", err)
		return errors.Wrap(err, "invalid pod template")
	}
	data := map[string]interface{}{
		"Pod":    pod,
		"Status": getStatus(pod),
	}
	return t.Execute(writer, data)
}
// PrintConfig writes the configuration in human readable format to the
// writer. A template failure is logged and returned as an error instead of
// killing the whole process (the previous log.Fatalf called os.Exit).
func (p *HumanReadablePrinter) PrintConfig(config *config.Config, writer io.Writer) error {
	t := template.New("config")
	t, err := t.Parse(humanreadable.ConfigTemplate)
	if err != nil {
		// Keep the log line for operator visibility, but return instead of Fatalf.
		log.Printf("Invalid config template: %s", err)
		return errors.Wrap(err, "invalid config template")
	}
	return t.Execute(writer, config)
}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"log"
"net/http"
"os"
"runtime"
"github.com/golang/glog"
"github.com/spf13/pflag"
"github.com/urfave/negroni"
"kolihub.io/koli/pkg/git/conf"
gitserver "kolihub.io/koli/pkg/git/server"
gitutil "kolihub.io/koli/pkg/git/util"
"kolihub.io/koli/pkg/version"
)
func init() {
	// NOTE(review): GOMAXPROCS defaults to NumCPU since Go 1.5, so this call
	// is likely redundant — confirm the minimum supported Go version before
	// removing it.
	runtime.GOMAXPROCS(runtime.NumCPU())
}
// Version refers to the version of the binary.
type Version struct {
	git  string // git commit/version identifier
	main string // main (semantic) version
	// NOTE(review): "buildDatr" looks like a typo for "buildDate"; the field
	// is unexported and unreferenced in this view — confirm before renaming.
	buildDatr string
}
// cfg holds the command-line configuration populated by the flag init below.
var cfg conf.Config

// showVersion makes main print version information and exit.
var showVersion bool
// init registers all command-line flags on pflag and parses them.
func init() {
	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
	pflag.StringVar(&cfg.Host, "apiserver", "", "api server addr, e.g. 'http://127.0.0.1:8080'. Omit parameter to run in on-cluster mode and utilize the service account token.")
	pflag.StringVar(&cfg.TLSConfig.CertFile, "cert-file", "", "path to public TLS certificate file.")
	pflag.StringVar(&cfg.TLSConfig.KeyFile, "key-file", "", "path to private TLS certificate file.")
	pflag.StringVar(&cfg.TLSConfig.CAFile, "ca-file", "", "path to TLS CA file.")
	// TODO: fallback to a default secret resource
	pflag.StringVar(&cfg.PlatformClientSecret, "platform-secret", "", "platform jwt secret for validating tokens.")
	pflag.StringVar(&cfg.GitHome, "git-home", "/home/git", "git server repositories path")
	pflag.StringVar(&cfg.GitAPIHostname, "gitapi-host", "http://git-api.koli-system", "address of the git api store server")
	pflag.BoolVar(&showVersion, "version", false, "print version information and quit.")
	pflag.BoolVar(&cfg.TLSInsecure, "tls-insecure", false, "don't verify API server's CA certificate.")
	pflag.Parse()
	// Convince goflags that Parse() has been called to avoid noisy logs.
	// OSS Issue: kubernetes/kubernetes#17162.
	flag.CommandLine.Parse([]string{})
}
// main prints the version when requested, otherwise builds a Kubernetes
// client and serves the authenticated git handler on :8000.
func main() {
	v := version.Get()
	if showVersion {
		// --version: emit version info as JSON and exit.
		b, err := json.Marshal(&v)
		if err != nil {
			fmt.Printf("failed decoding version: %s\n", err)
			os.Exit(1)
		}
		fmt.Println(string(b))
		return
	}
	glog.Infof("Version: %s, GitCommit: %s, GoVersion: %s, BuildDate: %s", v.GitVersion, v.GitCommit, v.GoVersion, v.BuildDate)
	kubeClient, err := gitutil.GetKubernetesClient(cfg.Host)
	if err != nil {
		fmt.Printf("failed getting clientset: %s\n", err)
		os.Exit(1)
	}
	// Authenticate runs as negroni middleware before the git handler.
	gitHandler := gitserver.NewHandler(&cfg, kubeClient)
	n := negroni.New(negroni.HandlerFunc(gitHandler.Authenticate))
	n.UseHandlerFunc(gitHandler.ServeHTTP)
	log.Fatal(http.ListenAndServe(":8000", n))
}
|
package ha
// HtmlPage is the HTML fragment for the High Availability report: a table
// rendered via text/html template over a value with an .Items slice.
// The trailing %s is presumably substituted with fmt at the call site —
// confirm against the renderer.
var HtmlPage = `
<h2>High Availability</h2>
<table class="responsive-table highlight">
	<thead>
	<tr>
		<th>Namespace</th>
		<th>Name</th>
		<th>Type</th>
		<th>Replicas</th>
		<th>Rollout Strategy</th>
		<th>Anti-Affinity</th>
		<th>PVCs</th>
		<th>Rank</th>
	</tr>
	</thead>
	<tbody>
	{{range .Items}}
	<tr>
		<td>{{.Namespace}}</td>
		<td>{{.Name}}</td>
		<td>{{.Type}}</td>
		<td>{{.Replicas}}</td>
		<td>{{.RolloutStrategy}}</td>
		<td>{{.PodAntiAffinity}}</td>
		<td>{{.PVC}}</td>
		<td>{{.Rank}}</td>
	</tr>
	{{end}}
	</tbody>
</table>
<br/>
<div style="white-space: pre-wrap;">%s</div>`
|
//go:generate mockgen -destination mock/enable_service.go . EnableServiceHandler
package handlers
import (
	"context"
	"strings"

	"github.com/raba-jp/primus/pkg/cli/ui"
	"github.com/raba-jp/primus/pkg/exec"
	"golang.org/x/xerrors"
)
// EnableServiceHandler enables a systemd service unit.
type EnableServiceHandler interface {
	// EnableService enables the named unit; when dryrun is true it only
	// prints the command that would be executed.
	EnableService(ctx context.Context, dryrun bool, name string) error
}

// EnableServiceHandlerFunc adapts a plain function to EnableServiceHandler.
type EnableServiceHandlerFunc func(ctx context.Context, dryrun bool, name string) error

// EnableService calls f itself.
func (f EnableServiceHandlerFunc) EnableService(ctx context.Context, dryrun bool, name string) error {
	return f(ctx, dryrun, name)
}
// NewEnableService returns a handler that enables a systemd unit, skipping
// the enable command when the unit is already enabled.
func NewEnableService(execIF exec.Interface) EnableServiceHandler {
	return EnableServiceHandlerFunc(func(ctx context.Context, dryrun bool, name string) error {
		if dryrun {
			ui.Printf("systemctl enable %s\n", name)
			return nil
		}
		// "systemctl is-enabled" exits non-zero for disabled/unknown units, so
		// a check error means "not enabled" rather than a fatal failure; the
		// old code returned here and could never enable a disabled unit.
		check, err := execIF.CommandContext(ctx, "systemctl", "is-enabled", name).Output()
		// The command output ends with a newline; without TrimSpace the
		// comparison against "enabled" could never match.
		if err == nil && strings.TrimSpace(string(check)) == "enabled" {
			return nil
		}
		if err := execIF.CommandContext(ctx, "systemctl", "enable", name).Run(); err != nil {
			return xerrors.Errorf("systemd service enable failed: %w", err)
		}
		return nil
	})
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
"sort"
"strconv"
)
func check(e error) {
if e != nil {
panic(e)
}
}
// main reads the adapter joltages from day10/input.txt, adds the outlet (0)
// and the device (max + 3), sorts the list, and prints the part-2 answer.
func main() {
	f, err := os.Open("day10/input.txt")
	check(err)
	r := bufio.NewReader(f)
	// Scan error intentionally ignored for this puzzle input.
	nums, _ := ReadInts(r)
	nums = append(nums, 0)
	sort.Ints(nums)
	// The device is always rated 3 jolts above the highest adapter.
	lastItem := nums[len(nums)-1]
	nums = append(nums, lastItem+3)
	fmt.Println(part2(nums))
}
// Part 2: counts the number of distinct valid adapter arrangements via
// memoized recursion (dynamic programming). Approach credit:
// https://www.youtube.com/watch?v=cE88K2kFZn0&feature=youtu.be&ab_channel=JonathanPaulson
//
// part2 expects ints to be sorted ascending and to include the outlet (0)
// and the device (max + 3).
func part2(ints []int) int {
	m := make(map[int]int)
	return recursiveSolution(ints, 0, &m)
}

// recursiveSolution returns the number of valid paths from index i to the
// last element, where each step may advance to any later adapter at most 3
// jolts higher. Results are memoized in m, keyed by index.
func recursiveSolution(ints []int, i int, m *map[int]int) int {
	if i == len(ints)-1 {
		return 1
	}
	if val, ok := (*m)[i]; ok {
		return val
	}
	ans := 0
	// Iterate candidate indices directly instead of allocating a temporary
	// range slice on every call; since ints is sorted we can stop at the
	// first gap larger than 3 jolts.
	for x := i + 1; x < len(ints); x++ {
		if ints[x]-ints[i] > 3 {
			break
		}
		ans += recursiveSolution(ints, x, m)
	}
	(*m)[i] = ans
	return ans
}

// makeRange returns the slice of consecutive ints [min, max].
// (Kept for compatibility; no longer used by recursiveSolution.)
func makeRange(min, max int) []int {
	a := make([]int, max-min+1)
	for i := range a {
		a[i] = min + i
	}
	return a
}
// Part 1: multiplies the count of 1-jolt gaps by the count of 3-jolt gaps
// between consecutive adapters in the (sorted) slice.
func part1(ints []int) int {
	var ones, twos, threes int
	for i := 0; i+1 < len(ints); i++ {
		switch ints[i+1] - ints[i] {
		case 1:
			ones++
		case 2:
			twos++
		case 3:
			threes++
		}
	}
	return ones * threes
}
func ReadInts(r io.Reader) ([]int, error) {
scanner := bufio.NewScanner(r)
scanner.Split(bufio.ScanWords)
var result []int
for scanner.Scan() {
x, err := strconv.Atoi(scanner.Text())
if err != nil {
return result, err
}
result = append(result, x)
}
return result, scanner.Err()
}
|
/*
1) Create a new type: vehicle. The underlying type is a struct.
The fields: doors, color. Create two new types: truck & sedan.
The underlying type of each of these new types is a struct.
Embed the “vehicle” type in both truck & sedan.
Give truck the field “fourWheel” which will be set to bool.
Give sedan the field “luxury” which will be set to bool.
2) Using the vehicle, truck, and sedan structs:
using a composite literal, create a value of type truck and assign values to the fields;
using a composite literal, create a value of type sedan and assign values to the fields.
Print out each of these values. Print out a single field from each of these values.
3) Give a method to both the “truck” and “sedan” types with the following signature
transportationDevice() string
Have each func return a string saying what they do.
Create a value of type truck and populate the fields.
Create a value of type sedan and populate the fields. Call the method for each value. */
package main
import "fmt"
// vehicle holds the attributes shared by every vehicle kind.
type vehicle struct {
	doors int
	color string
}

// truck embeds vehicle and adds a four-wheel-drive flag.
type truck struct {
	vehicle
	fourWheel bool
}

// sedan embeds vehicle and adds a luxury flag.
type sedan struct {
	vehicle
	luxury bool
}

// transportationDevice describes the truck, mentioning its color.
func (t truck) transportationDevice() string {
	return fmt.Sprintln("this is a", t.color, "truck")
}

// transportationDevice describes the sedan, mentioning its color.
func (s sedan) transportationDevice() string {
	return fmt.Sprintln("this is a", s.color, "sedan")
}
// main builds one truck and one sedan, then prints the full values, a single
// field from each, and their transportationDevice descriptions.
func main() {
	myTruck := truck{vehicle: vehicle{doors: 4, color: "blue"}, fourWheel: true}
	mySedan := sedan{vehicle: vehicle{doors: 2, color: "red"}, luxury: false}
	fmt.Println(myTruck, mySedan)
	fmt.Println(myTruck.color, mySedan.luxury)
	fmt.Println(myTruck.transportationDevice(), mySedan.transportationDevice())
}
|
package adapter
import (
"fmt"
"regexp"
"strconv"
"github.com/ikmski/git-lfs3/usecase"
)
// transferController implements TransferController on top of a
// usecase.TransferService.
type transferController struct {
	transferService usecase.TransferService
}

// TransferController handles object download and upload requests.
type TransferController interface {
	Download(ctx Context)
	Upload(ctx Context)
}

// NewTransferController builds a TransferController backed by s.
func NewTransferController(s usecase.TransferService) TransferController {
	return &transferController{
		transferService: s,
	}
}
// Download streams an object to the response writer, honoring an optional
// Range header (responding 206 with a Content-Range) and 404 when the object
// is unknown or the transfer fails.
func (c *transferController) Download(ctx Context) {
	or := parseObjectRequest(ctx)
	exists := c.transferService.Exists(or)
	if !exists {
		ctx.SetStatus(404)
		return
	}
	rangeHeader := ctx.GetHeader("Range")
	if rangeHeader != "" {
		size := c.transferService.GetSize(or)
		var fromByte int64
		var toByte int64 = size
		regex := regexp.MustCompile(`bytes=(.*)\-(.*)`)
		match := regex.FindStringSubmatch(rangeHeader)
		if match != nil && len(match) >= 3 {
			if len(match[1]) > 0 {
				// Parse errors intentionally leave the zero default.
				fromByte, _ = strconv.ParseInt(match[1], 10, 64)
			}
			if len(match[2]) > 0 {
				toByte, _ = strconv.ParseInt(match[2], 10, 64)
			}
		}
		// NOTE(review): the Range header's last-byte-pos is inclusive; whether
		// or.To is treated as inclusive or exclusive depends on
		// TransferService.Download — verify.
		or.From = fromByte
		or.To = toByte
		// RFC 7233: Content-Range is "bytes first-last/complete-length" where
		// complete-length is the TOTAL object size. The previous code sent the
		// length of the requested range here, which is invalid.
		ctx.SetHeader("Content-Range", fmt.Sprintf("bytes %d-%d/%d", fromByte, toByte-1, size))
		ctx.SetStatus(206)
	}
	_, err := c.transferService.Download(or, ctx.GetResponseWriter())
	if err != nil {
		ctx.SetStatus(404)
		return
	}
}
// Upload stores the request body for a previously registered object,
// responding 404 when the object metadata is unknown and 500 on a storage
// failure.
func (c *transferController) Upload(ctx Context) {
	o := parseObjectRequest(ctx)
	if !c.transferService.Exists(o) {
		ctx.SetStatus(404)
		return
	}
	if err := c.transferService.Upload(o, ctx.GetRequestReader()); err != nil {
		ctx.SetStatus(500)
		return
	}
}
// parseObjectRequest builds an ObjectRequest from the "oid" URL parameter.
func parseObjectRequest(ctx Context) *usecase.ObjectRequest {
	return &usecase.ObjectRequest{Oid: ctx.GetParam("oid")}
}
/*
func (o *ObjectRequest) DownloadLink() string {
return o.internalLink("objects")
}
func (o *ObjectRequest) UploadLink() string {
return o.internalLink("objects")
}
func (o *ObjectRequest) internalLink(subpath string) string {
path := ""
if len(o.User) > 0 {
path += fmt.Sprintf("/%s", o.User)
}
if len(o.Repo) > 0 {
path += fmt.Sprintf("/%s", o.Repo)
}
path += fmt.Sprintf("/%s/%s", subpath, o.Oid)
if config.Server.Tls {
return fmt.Sprintf("https://%s%s", config.Server.Host, path)
}
return fmt.Sprintf("http://%s%s", config.Server.Host, path)
}
func (o *ObjectRequest) VerifyLink() string {
path := fmt.Sprintf("/verify/%s", o.Oid)
if config.Server.Tls {
return fmt.Sprintf("https://%s%s", config.Server.Host, path)
}
return fmt.Sprintf("http://%s%s", config.Server.Host, path)
}
*/
|
package orb
// MultiLineString is a set of polylines (LineStrings) treated as a single
// geometry.
type MultiLineString []LineString
// GeoJSONType returns the GeoJSON type for the object: "MultiLineString".
func (mls MultiLineString) GeoJSONType() string {
	return "MultiLineString"
}
// Dimensions returns 1 because a MultiLineString is a 1-dimensional geometry
// (a collection of curves). The old comment's "2d object" contradicted the
// returned value.
func (mls MultiLineString) Dimensions() int {
	return 1
}
// Bound returns a bound around all the line strings; an empty collection
// yields the shared empty bound.
func (mls MultiLineString) Bound() Bound {
	if len(mls) == 0 {
		return emptyBound
	}
	b := mls[0].Bound()
	for _, ls := range mls[1:] {
		b = b.Union(ls.Bound())
	}
	return b
}
// Equal compares two multi line strings. It returns true when both have the
// same length and every corresponding line string is Equal.
func (mls MultiLineString) Equal(multiLineString MultiLineString) bool {
	if len(mls) != len(multiLineString) {
		return false
	}
	for i := range mls {
		if !mls[i].Equal(multiLineString[i]) {
			return false
		}
	}
	return true
}
// Clone returns a new deep copy of the multi line string.
// A nil receiver yields nil.
func (mls MultiLineString) Clone() MultiLineString {
	if mls == nil {
		return nil
	}
	out := make(MultiLineString, len(mls))
	for i := range mls {
		out[i] = mls[i].Clone()
	}
	return out
}
|
package bus
import (
"context"
"fmt"
"sync"
"time"
)
// TxOptions configures transactional message sending.
type TxOptions struct {
	Context context.Context
	// Timeout is how long the transaction may take to complete.
	// A message sent inside a transaction is not published to consumers
	// immediately; it is only published once the local transaction callback
	// reports success. See the transaction flow diagram: ./tx_flow.png
	Timeout time.Duration
	// EnsureFunc confirms whether the transaction completed.
	// Pay close attention to the meaning of the boolean return value:
	// true means the transaction was handled, so publish the message;
	// false means it was not handled, so revoke the message.
	EnsureFunc func(msg *Message) (done bool)
	// RetryDelay returns the delay before the next retry attempt;
	// a value < 0 means no further retries.
	RetryDelay func(attempts int) time.Duration
	// TxStorage stores pending transactional messages.
	TxStorage TXStorageInterface
	// recordQueue is the queue that carries transaction log records.
	recordQueue string
}
// prepare validates the transaction options for topic and fills in defaults:
// a background context, a linear backoff retry delay, and the record queue
// name "<topic>.tx-record". Invalid configuration is reported via throw.
func (to *TxOptions) prepare(topic string) {
	if to.Timeout <= 0 {
		throw("sender [%s] the timeout of tx option must > 0", topic)
	}
	if to.EnsureFunc == nil {
		throw("sender [%s] the ensure func of tx option is missing", topic)
	}
	if to.TxStorage == nil {
		throw("sender [%s] the storage of tx option is missing", topic)
	}
	if to.Context == nil {
		to.Context = context.Background()
	}
	if to.RetryDelay == nil {
		// Default: 10s, 20s, 30s, ... per attempt.
		to.RetryDelay = func(attempts int) time.Duration {
			return time.Duration(attempts) * 10 * time.Second
		}
	}
	to.recordQueue = fmt.Sprintf("%s.tx-record", topic)
}
// Sender publishes messages for a single topic.
type Sender struct {
	sync.Once
	// Topic is the topic messages are published to.
	Topic string
	// Driver is the messaging driver instance.
	Driver DriverInterface
	// Logger receives error logs.
	Logger LoggerInterface
	// TxOptions configures transactional sending; nil disables transactions.
	TxOptions *TxOptions
	// ready reports whether Prepare has completed.
	ready bool
	// txHandler consumes the transaction record queue.
	txHandler *Handler
}
// Prepare creates the topic and, when transactions are configured, the
// transaction log queue plus a background handler that compensates for
// messages whose publication outcome is unknown. Safe to call repeatedly;
// the work runs exactly once.
func (s *Sender) Prepare() *Sender {
	s.Do(func() {
		if s.Driver == nil {
			throw("sender [%s] missing driver instance", s.Topic)
		}
		if s.Logger == nil {
			s.Logger = stderrLogger{}
		}
		if err := s.Driver.CreateTopic(s.Topic); err != nil {
			throw("sender [%s] create topic error, %v", s.Topic, err)
		}
		if s.TxOptions != nil {
			s.TxOptions.prepare(s.Topic)
			s.txHandler = &Handler{
				Context: s.TxOptions.Context,
				Queue:   s.TxOptions.recordQueue,
				Driver:  s.Driver,
				Logger:  s.Logger,
				HandleFunc: func(log *Message) bool {
					var id string
					log.Scan(&id)
					data, err := s.TxOptions.TxStorage.Fetch(id)
					if err != nil {
						s.Logger.Errorf("sender [%s] tx fetch failed, %v", s.Topic, err)
						return false
					} else if data == nil {
						// Already published successfully.
						s.txRemove(id)
						return true
					}
					var msg Message
					decode(data, &msg)
					if s.TxOptions.EnsureFunc(&msg) {
						// Transaction succeeded but the message was never sent;
						// publish it now.
						err = s.Driver.SendToTopic(s.Topic, data, msg.RouteKey)
						if err == nil {
							s.txRemove(id)
							return true
						}
						s.Logger.Errorf("sender [%s] with route key [%s] failed, %v", s.Topic, msg.RouteKey, err)
						return false
					} else {
						// Transaction did not complete; discard the message.
						s.txRemove(id)
						return true
					}
				},
				RetryDelay: s.TxOptions.RetryDelay,
				EnsureFunc: func(msg *Message) (allow bool) { return true },
			}
			s.txHandler.Prepare()
			go s.txHandler.Run()
		}
		s.ready = true
	})
	return s
}
// Send publishes a message.
// msg is the message to send; localTx is an optional local transaction
// callback. Without localTx the message is published directly; with it, the
// message is staged in TxStorage and a compensation record is queued before
// the local transaction runs, so an eventual publish is guaranteed by the
// record handler even if this process fails mid-way.
func (s *Sender) Send(msg *Message, localTx ...func() error) (err error) {
	if s.ready == false {
		throw("sender [%s] has not prepared", s.Topic)
	}
	defer handlePanic(func(i interface{}) {
		err = fmt.Errorf("sender [%s] panic: %v, call stack: \n%s", s.Topic, i, stackTrace(0))
	})
	if len(localTx) == 0 || localTx[0] == nil {
		// No transaction in use: publish straight to the topic.
		if err := s.Driver.SendToTopic(s.Topic, encode(msg), msg.RouteKey); err != nil {
			return fmt.Errorf("sender [%s] with route key [%s] failed, %v", s.Topic, msg.RouteKey, err)
		}
	} else if s.TxOptions == nil {
		return fmt.Errorf("sender [%s] missing tx options", s.Topic)
	} else {
		data := encode(msg)
		// Stage the message before publishing.
		id, err := s.TxOptions.TxStorage.Store(data)
		if err != nil {
			return fmt.Errorf("sender [%s] tx store failed, %v", s.Topic, err)
		}
		// Enqueue the operation log record (delayed by Timeout).
		err = s.Driver.SendToQueue(
			s.TxOptions.recordQueue,
			encode(MessageWithId(id, id, "")),
			s.TxOptions.Timeout,
		)
		if err != nil {
			return fmt.Errorf(
				"sender [%s] send to queue [%s] with delay [%d] failed, %v",
				s.Topic, s.TxOptions.recordQueue, s.TxOptions.Timeout, err,
			)
		}
		// Run the local transaction.
		if err := localTx[0](); err != nil {
			// Transaction failed: the staged message can be cleaned up now.
			s.txRemove(id)
			return err
		}
		// Publish success is not critical here: the log record handler will
		// compensate for a failed publish.
		if err := s.Driver.SendToTopic(s.Topic, data, msg.RouteKey); err != nil {
			s.Logger.Errorf("sender [%s] with route key [%s] failed, %v", s.Topic, msg.RouteKey, err)
		} else {
			// Published successfully: the staged message can be cleaned up.
			s.txRemove(id)
		}
	}
	return nil
}
// Wait blocks until the transaction-log handler exits; it returns
// immediately when transactions are not configured.
func (s *Sender) Wait() {
	if s.txHandler != nil {
		s.txHandler.Wait()
	}
}
// txRemove deletes a staged transactional message by id, logging (but not
// propagating) any storage error.
func (s *Sender) txRemove(id string) {
	err := s.TxOptions.TxStorage.Remove(id)
	if err != nil {
		s.Logger.Errorf("sender [%s] tx remove failed, %v", s.Topic, err)
	}
}
|
package webx
import (
"io"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"time"
"github.com/golang/glog"
"github.com/shestakovda/errx"
)
// ErrMsgMustBeAbs is the detail message used when the base URL is relative
// (Russian: "the base URL must be absolute").
var ErrMsgMustBeAbs = "Базовый URL должен быть абсолютным"

// defClient is the fallback HTTP client used when neither the base request
// nor the per-call options supply one.
var defClient = &http.Client{
	Timeout: time.Minute,
}
// newRequestV1 builds a reusable request template from a base URL and
// default options. The base URL must parse and be absolute.
func newRequestV1(base string, args []Option) (req *v1Request, err error) {
	req = new(v1Request)
	if req.opts, err = getOpts(args); err != nil {
		return nil, ErrBadRequest.WithReason(err)
	}
	u, perr := url.ParseRequestURI(base)
	if perr != nil {
		return nil, ErrBadURL.WithReason(perr).WithDebug(errx.Debug{
			"URL": base,
		})
	}
	if !u.IsAbs() {
		return nil, ErrBadURL.WithDetail(ErrMsgMustBeAbs).WithDebug(errx.Debug{
			"URL": base,
		})
	}
	req.base = u
	return req, nil
}
// v1Request is a reusable request template: default options plus the
// absolute base URL every call is resolved against.
type v1Request struct {
	opts options  // defaults merged into every call's options
	base *url.URL // absolute base URL
}
// Make issues a request for ref resolved against the base URL, applying the
// per-call options on top of the template's defaults (query args, headers,
// body, context) and returning the wrapped response.
func (c v1Request) Make(ref string, args ...Option) (_ Response, err error) {
	var req *http.Request
	var body io.Reader
	var opts options
	if opts, err = getOpts(args); err != nil {
		return nil, ErrBadRequest.WithReason(err)
	}
	if body, err = opts.Body(); err != nil {
		return nil, ErrBadRequest.WithReason(err)
	}
	// Join base and ref with exactly one slash between them.
	addr := strings.TrimRight(c.base.String(), "/") + "/" + strings.TrimLeft(strings.TrimSpace(ref), "/")
	if opts.ctx == nil {
		req, err = http.NewRequest(opts.method, addr, body)
	} else {
		req, err = http.NewRequestWithContext(opts.ctx, opts.method, addr, body)
	}
	if err != nil {
		return nil, ErrBadRequest.WithReason(err).WithDebug(errx.Debug{
			"URL":    addr,
			"Method": opts.method,
		})
	}
	if err = c.applyGetArgs(req, &opts); err != nil {
		return nil, ErrBadRequest.WithReason(err)
	}
	if err = c.applyHeaders(req, &opts); err != nil {
		return nil, ErrBadRequest.WithReason(err)
	}
	return c.do(req, &opts)
}
// applyGetArgs merges query parameters onto req: additive and override
// parameters from the base template first, then those from the per-call
// options, so per-call Set values win.
func (c v1Request) applyGetArgs(req *http.Request, opts *options) error {
	// The request URL may already carry query arguments.
	query := req.URL.Query()
	merge := func(src *options) {
		for name, list := range src.addget {
			query[name] = append(query[name], list...)
		}
		for name := range src.setget {
			query.Set(name, src.setget.Get(name))
		}
	}
	merge(&c.opts)
	merge(opts)
	// Re-encode the merged parameters back onto the URL.
	req.URL.RawQuery = query.Encode()
	return nil
}
// applyHeaders merges headers onto req: the base template's headers first,
// then the per-call ones (so per-call Set values win). It guarantees a
// Content-Type header and applies basic auth, preferring per-call
// credentials over the template's.
func (c v1Request) applyHeaders(req *http.Request, opts *options) error {
	apply := func(src *options) {
		for name, list := range src.addhead {
			req.Header[name] = append(req.Header[name], list...)
		}
		for name := range src.sethead {
			req.Header.Set(name, src.sethead.Get(name))
		}
	}
	apply(&c.opts)
	apply(opts)
	// If nobody set a content type, fall back to the default.
	if req.Header.Get(HeaderContentType) == "" {
		req.Header.Set(HeaderContentType, MimeUnknown)
	}
	// Per-call credentials take precedence.
	if opts.user != "" {
		req.SetBasicAuth(opts.user, opts.pass)
		return nil
	}
	// Otherwise fall back to the template's credentials, if any.
	if c.opts.user != "" {
		req.SetBasicAuth(c.opts.user, c.opts.pass)
	}
	return nil
}
// do selects an HTTP client (per-call > template > default), optionally
// dumps the outgoing request when debug is enabled, executes it, and wraps
// the response.
func (c v1Request) do(req *http.Request, opts *options) (_ Response, err error) {
	var resp *http.Response
	var client *http.Client
	if opts.client != nil {
		// The call itself specifies a client: use it.
		client = opts.client
	} else if c.opts.client != nil {
		// The base template specifies a client: use it.
		client = c.opts.client
	} else {
		// Nothing specified anywhere: fall back to the default client.
		client = defClient
	}
	if c.opts.debug || opts.debug {
		var dump []byte
		if dump, err = httputil.DumpRequestOut(req, true); err != nil {
			return nil, ErrBadRequest.WithReason(err)
		}
		// NOTE(review): the debug dump is logged at Error level — confirm
		// this is intentional rather than Infof.
		glog.Errorf("webx.Request = %s", dump)
		glog.Flush()
	}
	if resp, err = client.Do(req); err != nil {
		return nil, ErrBadRequest.WithReason(err).WithDebug(errx.Debug{
			"URL":    req.URL.String(),
			"Method": req.Method,
			"Length": req.ContentLength,
		})
	}
	return newResponseV1(req, resp)
}
|
package core
import (
"fmt"
"log"
"path/filepath"
homedir "github.com/mitchellh/go-homedir"
"github.com/pkg/errors"
"github.com/skatsuta/athenai/exec"
"gopkg.in/ini.v1"
)
const (
	// defaultDir is the athenai directory under the user's home.
	defaultDir = ".athenai"
	// defaultConfigFile is the config file name inside defaultDir.
	defaultConfigFile = "config"
)
// Config is a configuration information loaded from an INI config file
// section (tags map INI keys to fields; "-" fields are not mapped).
type Config struct {
	Debug      bool   `ini:"debug"`
	Silent     bool   `ini:"silent"`
	Output     string `ini:"output"`
	Section    string `ini:"-"` // section name to load; set by the caller
	Profile    string `ini:"profile"`
	Region     string `ini:"region"`
	Database   string `ini:"database"`
	Location   string `ini:"location"`
	Encrypt    string `ini:"encrypt"`
	KMS        string `ini:"kms"`
	Format     string `ini:"format"`
	Count      uint   `ini:"count"`
	Concurrent uint   `ini:"concurrent"`
	iniCfg     *ini.File `ini:"-"` // parsed file retained after LoadConfigFile
}
// QueryConfig creates an exec.QueryConfig struct based on c.
func (c *Config) QueryConfig() *exec.QueryConfig {
	qc := new(exec.QueryConfig)
	qc.Database = c.Database
	qc.Location = c.Location
	qc.Encrypt = c.Encrypt
	qc.KMS = c.KMS
	return qc
}
// SectionError represents an error about section in config file.
type SectionError struct {
Path string
Section string
Cause error // Do not implement Cause() for pkg/errors
}
func (se *SectionError) Error() string {
return fmt.Sprintf("failed to get section '%s' in %s: %s", se.Section, se.Path, se.Cause)
}
// LoadConfigFile loads configurations at `cfg.Section` section into `cfg` from `path`.
// If `path` is empty, `$HOME/.athenai/config` is used.
func LoadConfigFile(cfg *Config, path string) error {
	switch {
	case cfg == nil:
		return errors.New("cfg is nil")
	case cfg.Section == "":
		return errors.New("section name is empty")
	}
	filePath, err := normalizeConfigPath(path)
	if err != nil {
		return errors.Wrap(err, "failed to identify config file path")
	}
	log.Println("Normalized config file path:", filePath)
	iniFile, err := ini.Load(filePath)
	if err != nil {
		return errors.Wrap(err, "failed to load config file")
	}
	cfg.iniCfg = iniFile
	section, err := iniFile.GetSection(cfg.Section)
	if err != nil {
		return &SectionError{Path: filePath, Section: cfg.Section, Cause: err}
	}
	return section.MapTo(cfg)
}
// normalizeConfigPath resolves the config file location: an explicit path
// is tilde-expanded, while an empty path falls back to the default
// $HOME/.athenai/config.
func normalizeConfigPath(path string) (string, error) {
	if path == "" {
		home, err := homedir.Dir()
		if err != nil {
			return "", errors.Wrap(err, "failed to find your home directory")
		}
		return filepath.Join(home, defaultDir, defaultConfigFile), nil
	}
	return homedir.Expand(path)
}
|
package main
// PodInfoTemplate is the text/template used to pretty-print a sentinel pod:
// master address, quorum, auth token, the known sentinels and slaves, and
// the per-pod settings map (aligned via printf verbs).
var PodInfoTemplate string = `
Podname: {{.Name}}
========================
Master: {{.MasterIP}}:{{.MasterPort}}
Quorum: {{.Quorum}}
Auth Token: {{.Authpass}}
Known Sentinels: {{ range .KnownSentinels }}
	{{.}}
{{ end }}
Known Slaves: {{ range .KnownSlaves }}
	{{.}}
{{ end }}
Settings: {{ range $k,$v := .Settings }}
	{{printf "%-30s" $k}} {{printf "%10s" $v}} {{ end }}
`
|
package mr
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"sort"
"sync"
)
import "log"
import "net/rpc"
import "hash/fnv"
//
// Map functions return a slice of KeyValue.
// A KeyValue is one intermediate record emitted by Map and consumed by
// Reduce; it is also the unit serialized to the mr-X-Y intermediate files.
//
type KeyValue struct {
	Key   string
	Value string
}
// ihash hashes key with 32-bit FNV-1a and clears the sign bit so the
// result is always a non-negative int. Use ihash(key) % NReduce to choose
// the reduce task number for each KeyValue emitted by Map.
func ihash(key string) int {
	const signMask = 0x7fffffff
	hasher := fnv.New32a()
	_, _ = hasher.Write([]byte(key)) // hash.Hash.Write never returns an error
	return int(hasher.Sum32()) & signMask
}
//
// main/mrworker.go calls this function.
//
// Worker drives one worker process through two phases. Phase 1: poll the
// master for map tasks, running each in its own goroutine, until the master
// signals that all map tasks have been dispatched; then wait for the local
// map goroutines to finish. Phase 2: do the same for reduce tasks.
//
// NOTE(review): both polling loops spin without sleeping (the time.Sleep is
// commented out), so an idle worker busy-polls the master — confirm this is
// intended.
//
func Worker(mapf func(string, string) []KeyValue, reducef func(string, []string) string) {
	var mapWg sync.WaitGroup
	var reduceWg sync.WaitGroup
	for {
		taskInfo := getJob()
		log.Printf("get one job from master %v ...... \n", taskInfo)
		if taskInfo.TaskType == TaskTypeMap {
			mapWg.Add(1)
			go func() {
				defer mapWg.Done()
				handleMapTask(mapf, taskInfo)
			}()
		} else if taskInfo.TaskType.In(TaskTypeMapDispatchedOver, TaskTypeReduce, TaskTypeReduceDispatchedOver) {
			// Any of these types means the map phase is fully dispatched.
			break
		}
		//time.Sleep(1 * time.Second)
	}
	log.Printf("workers knows that all MAP task DISPATCHER over!!!")
	mapWg.Wait()
	log.Printf("workers knows that all MAP task PROCESS over!!!")
	for {
		taskInfo := getJob()
		log.Printf("get one job from master %v ...... \n", taskInfo)
		if taskInfo.TaskType == TaskTypeReduce {
			reduceWg.Add(1)
			go func() {
				defer reduceWg.Done()
				handleReduceTask(reducef, taskInfo)
			}()
		} else if taskInfo.TaskType == TaskTypeReduceDispatchedOver {
			break
		}
	}
	log.Printf("workers knows that all REDUCE task DISPATCHER over!!!")
	reduceWg.Wait()
	log.Printf("workers knows that all REDUCE task PROCESS over!!!")
	return
}
// handleMapTask runs the user map function over the task's input file,
// partitions the output into the nReduce intermediate files, and reports
// completion back to the master.
func handleMapTask(mapf func(string, string) []KeyValue, taskInfo GetTaskReply) {
	log.Printf("handling map task %v ...... \n", taskInfo.MapTaskInfo.FileName)
	content := readFile(taskInfo.MapTaskInfo.FileName)
	kva := mapf(taskInfo.MapTaskInfo.FileName, string(content))
	saveIntermediateFile(taskInfo.TaskNum, taskInfo.MapTaskInfo.NReduce, kva)
	log.Printf("map task %v done \n", taskInfo.MapTaskInfo.FileName)
	finishJob(taskInfo)
}
// ByKey adapts []KeyValue to sort.Interface so intermediate records can be
// sorted by key before the reduce grouping pass.
type ByKey []KeyValue

// sort.Interface implementation: order lexicographically by Key.
func (a ByKey) Len() int           { return len(a) }
func (a ByKey) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByKey) Less(i, j int) bool { return a[i].Key < a[j].Key }
// handleReduceTask gathers this partition's key/value pairs from every map
// output (mr-<map>-<taskNum>), sorts them by key, runs the user reduce
// function on each distinct key, writes "key value" lines to
// mr-out-<taskNum>, and finally reports completion to the master.
func handleReduceTask(reducef func(string, []string) string, taskInfo GetTaskReply) {
	log.Printf("handling reduce task %v ...... \n", taskInfo.TaskNum)
	intermediate := make([]KeyValue, 0)
	for i := 0; i < taskInfo.ReduceTaskInfo.NMap; i++ {
		intermediateFilename := fmt.Sprintf("mr-%d-%d", i, taskInfo.TaskNum)
		// FIX: check the open error BEFORE touching the file (the original
		// deferred Close on a possibly-nil handle first), and close each file
		// at the end of its own iteration instead of deferring inside the
		// loop, which kept all NMap files open until the function returned.
		file, err := os.Open(intermediateFilename)
		if err != nil {
			log.Fatalf("cannot open %v", intermediateFilename)
		}
		dec := json.NewDecoder(file)
		for {
			var kv KeyValue
			if err := dec.Decode(&kv); err != nil {
				// io.EOF ends the stream; any decode error also stops reading.
				break
			}
			intermediate = append(intermediate, kv)
		}
		file.Close()
	}
	sort.Sort(ByKey(intermediate))
	oname := fmt.Sprintf("mr-out-%d", taskInfo.TaskNum)
	// FIX: the Create error was silently discarded.
	ofile, err := os.Create(oname)
	if err != nil {
		log.Fatalf("cannot create %v", oname)
	}
	//
	// call Reduce on each distinct key in intermediate[],
	// and print the result to mr-out-<taskNum>.
	//
	i := 0
	for i < len(intermediate) {
		// [i, j) spans one run of records sharing the same key.
		j := i + 1
		for j < len(intermediate) && intermediate[j].Key == intermediate[i].Key {
			j++
		}
		values := []string{}
		for k := i; k < j; k++ {
			values = append(values, intermediate[k].Value)
		}
		output := reducef(intermediate[i].Key, values)
		// this is the correct format for each line of Reduce output.
		fmt.Fprintf(ofile, "%v %v\n", intermediate[i].Key, output)
		i = j
	}
	ofile.Close()
	log.Printf("reduce task %v done \n", taskInfo.TaskNum)
	finishJob(taskInfo)
}
// saveIntermediateFile partitions kva by ihash(key) % nReduce and writes
// each bucket as a stream of JSON-encoded KeyValue records to the file
// mr-<n>-<bucket>, where n is the map task number.
func saveIntermediateFile(n int64, nReduce int, kva []KeyValue) {
	log.Printf("get %d kva:", len(kva))
	keyBuckets := make([][]KeyValue, nReduce)
	for _, kv := range kva {
		bucketNum := ihash(kv.Key) % nReduce
		keyBuckets[bucketNum] = append(keyBuckets[bucketNum], kv)
	}
	for i := 0; i < nReduce; i++ {
		intermediateFilename := fmt.Sprintf("mr-%d-%d", n, i)
		// FIX: check the Create error (it was discarded) and close each file
		// at the end of its iteration instead of deferring inside the loop,
		// which kept all nReduce files open until the function returned.
		ofile, err := os.Create(intermediateFilename)
		if err != nil {
			panic(err)
		}
		enc := json.NewEncoder(ofile)
		for _, kv := range keyBuckets[i] {
			if err := enc.Encode(&kv); err != nil {
				ofile.Close()
				panic(err)
			}
		}
		if err := ofile.Close(); err != nil {
			panic(err)
		}
	}
}
// readFile reads the entire file at filename, terminating the worker on any
// open or read error (a missing input is unrecoverable for a map task).
func readFile(filename string) []byte {
	// FIX: check the open error before deferring Close — the original
	// deferred Close on a possibly-nil *os.File before inspecting err.
	file, err := os.Open(filename)
	if err != nil {
		log.Fatalf("cannot open %v", filename)
	}
	defer file.Close()
	content, err := ioutil.ReadAll(file)
	if err != nil {
		log.Fatalf("cannot read %v", filename)
	}
	return content
}
// getJob asks the master for the next task over the unix-socket RPC.
// The reply's TaskType tells the worker whether to run a map task, a
// reduce task, or to leave the current polling loop.
func getJob() GetTaskReply {
	log.Printf("worker request job...")
	args := GetTaskArgs{}
	reply := GetTaskReply{}
	call("Master.GetJob", &args, &reply)
	log.Printf("get request job %v, %v, %v,%v", reply.TaskType.ToString(), reply.TaskNum, reply.MapTaskInfo, reply.ReduceTaskInfo)
	return reply
}
// finishJob notifies the master that the given task (identified by its type
// and number) has completed, and returns the master's acknowledgement.
func finishJob(taskInfo GetTaskReply) FinishTaskReply {
	args := FinishTaskArgs{
		TaskType: taskInfo.TaskType,
		TaskNum:  taskInfo.TaskNum,
	}
	log.Printf("finish job : %v : %v", taskInfo.TaskType, taskInfo.TaskNum)
	reply := FinishTaskReply{}
	call("Master.FinishJob", &args, &reply)
	return reply
}
//
// example function to show how to make an RPC call to the master.
//
// the RPC argument and reply types are defined in rpc.go.
//
func CallExample() {
	// declare an argument structure.
	args := ExampleArgs{}
	// fill in the argument(s).
	args.X = 99
	// declare a reply structure.
	reply := ExampleReply{}
	// send the RPC request, wait for the reply.
	call("Master.Example", &args, &reply)
	// reply.Y should be 100.
	fmt.Printf("reply.Y %v\n", reply.Y)
}
//
// send an RPC request to the master, wait for the response.
// usually returns true.
// returns false if something goes wrong.
//
func call(rpcname string, args interface{}, reply interface{}) bool {
	// c, err := rpc.DialHTTP("tcp", "127.0.0.1"+":1234")
	sockname := masterSock()
	c, err := rpc.DialHTTP("unix", sockname)
	if err != nil {
		log.Fatal("dialing:", err)
	}
	defer c.Close()
	if err := c.Call(rpcname, args, reply); err != nil {
		// FIX: the original panicked here and then had an unreachable
		// `return false`, contradicting the documented contract above.
		// Log and return false so callers can observe the failure.
		log.Printf("rpc %s failed: %v", rpcname, err)
		return false
	}
	return true
}
|
// 07 CGroup
package main
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"syscall"
)
// main dispatches on the first CLI argument: "run" re-executes this binary
// inside fresh namespaces, "child" is the re-executed containerized entry
// point. Anything else aborts.
func main() {
	switch os.Args[1] {
	case "run":
		run()
	case "child":
		child()
	default:
		// FIX: typo in the panic message ("Unknow" -> "Unknown").
		panic(fmt.Sprintf("Unknown command %s", os.Args[1]))
	}
}
// run re-executes this binary (via /proc/self/exe) as the "child"
// subcommand inside new UTS, mount, IPC, PID, network and user namespaces,
// mapping the current uid/gid to root inside the user namespace.
func run() {
	// /proc/self/exe re-runs the same binary; the "child" arg selects child().
	cmd := exec.Command("/proc/self/exe", append([]string{"child"}, os.Args[2:]...)...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// Minimal environment for the containerized shell.
	cmd.Env = []string{"PS1=hello:$(pwd) #", "PATH=/bin:/usr/bin/", "TERM=xterm"}
	cmd.SysProcAttr = &syscall.SysProcAttr{
		// One clone flag per namespace the child should get.
		Cloneflags: syscall.CLONE_NEWUTS |
			syscall.CLONE_NEWNS |
			syscall.CLONE_NEWIPC |
			syscall.CLONE_NEWPID |
			syscall.CLONE_NEWNET |
			syscall.CLONE_NEWUSER,
		// Map the invoking user to uid 0 inside the new user namespace.
		UidMappings: []syscall.SysProcIDMap{
			{
				ContainerID: 0,
				HostID:      os.Getuid(),
				Size:        1,
			},
		},
		GidMappings: []syscall.SysProcIDMap{
			{
				ContainerID: 0,
				HostID:      os.Getgid(),
				Size:        1,
			},
		},
	}
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}
func child() {
fmt.Printf("Namespace initialization\n")
// 100Mo de mémoire pour le CGroup demo (créé dans bash pour garder les droits)
memoryCGroup := "/sys/fs/cgroup/memory/"
if err := ioutil.WriteFile(filepath.Join(memoryCGroup, "demo/memory.limit_in_bytes"), []byte("100000000"), 0700); err != nil {
fmt.Printf("Error while creating memory limit: %s\n", err)
}
if err := ioutil.WriteFile(filepath.Join(memoryCGroup, "demo/memory.swappiness"), []byte("0"), 0700); err != nil {
fmt.Printf("Error while disabling memory swapiness: %s\n", err)
}
// Ajoute le PID courant dans le control group demo if err := ioutil.WriteFile(filepath.Join(memoryCGroup, "demo/cgroup.procs"), []byte(strconv.Itoa(os.Getpid())), 0700); err != nil {
fmt.Printf("Error while adding pid (%d) to demo Cgroup: %s\n", os.Getpid(), err)
}
// Expérimental: Supprime le nouveau CGroup après la suppression du container
if err := ioutil.WriteFile(filepath.Join(memoryCGroup, "demo/notify_on_release"), []byte("1"), 0700); err != nil {
fmt.Printf("Error while adding notify on release: %s\n", err)
}
// Mount
rootfsPath := "/home/stoakes/go/src/recreez-votre-docker/steps/07_cgroups/centos"
checkRootFS(rootfsPath)
if err := mountProc(rootfsPath); err != nil {
fmt.Printf("Error mounting /proc - %s\n", err)
os.Exit(1)
}
if err := bindMountDeviceNode("/dev/urandom", rootfsPath+"/dev/urandom"); err != nil {
fmt.Printf("Error running bind mount urandom: %s", err)
}
if err := pivotRoot(rootfsPath); err != nil {
fmt.Printf("Error running pivot_root - %s\n", err)
os.Exit(1)
}
fmt.Printf("Running %v as PID %d\n", os.Args[2:], os.Getpid())
cmd := exec.Command(os.Args[2], os.Args[3:]...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
panic(err)
}
syscall.Unmount("proc", 0)
syscall.Unmount("dev/urandom", 0)
}
|
package main
import (
"fmt"
"log"
"os"
"text/template"
)
// artifactClass describes one artifact/stone/ingredient family: its asset
// id, display name, gameplay effect, and the tier names in ascending order.
type artifactClass struct {
	Id         string   // asset id; also used to build icon paths (see afxIconPath)
	Name       string   // human-readable name
	Effect     string   // gameplay effect text (empty for ingredients)
	LevelNames []string // one entry per tier, lowest first
}
// _artifacts lists every artifact family rendered by the template, in
// display order: id, name, effect, and the four tier names ascending.
var _artifacts = []artifactClass{
	{
		Id:         "puzzle_cube",
		Name:       "Puzzle cube",
		Effect:     "Lowers research costs",
		LevelNames: []string{"Ancient", "Regular", "Mystical", "Unsolvable"},
	},
	{
		Id:         "lunar_totem",
		Name:       "Lunar totem",
		Effect:     "Increases away earnings",
		LevelNames: []string{"Basic", "Regular", "Powerful", "Eggceptional"},
	},
	{
		Id:         "demeters_necklace",
		Name:       "Demeters necklace",
		Effect:     "Increases egg value",
		LevelNames: []string{"Simple", "Jeweled", "Pristine", "Beggspoke"},
	},
	{
		Id:         "vial_of_martian_dust",
		Name:       "Vial of martian dust",
		Effect:     "Increases max running chicken bonus",
		LevelNames: []string{"Tiny", "Regular", "Hermetic", "Prime"},
	},
	{
		Id:         "aurelian_brooch",
		Name:       "Aurelian brooch",
		Effect:     "Increases drone rewards",
		LevelNames: []string{"Plain", "Regular", "Jeweled", "Eggceptional"},
	},
	{
		Id:         "tungsten_ankh",
		Name:       "Tungsten ankh",
		Effect:     "Increases egg value",
		LevelNames: []string{"Crude", "Regular", "Polished", "Brilliant"},
	},
	{
		Id:         "ornate_gusset",
		Name:       "Gusset",
		Effect:     "Increases hen house capacity",
		LevelNames: []string{"Plain", "Ornate", "Distegguished", "Jeweled"},
	},
	{
		Id:         "neodymium_medallion",
		Name:       "Neodymium medallion",
		Effect:     "Increases drone frequency",
		LevelNames: []string{"Weak", "Regular", "Precise", "Eggceptional"},
	},
	{
		Id:         "mercurys_lens",
		Name:       "Mercury's lens",
		Effect:     "Increases farm value",
		LevelNames: []string{"Misaligned", "Regular", "Precise", "Meggnificent"},
	},
	{
		Id:         "beak_of_midas",
		Name:       "Beak of midas",
		Effect:     "Increases gold reward chance",
		LevelNames: []string{"Dull", "Regular", "Jeweled", "Glistening"},
	},
	{
		Id:         "carved_rainstick",
		Name:       "Carved rainstick",
		Effect:     "Increases chance of cash rewards from gifts and drones",
		LevelNames: []string{"Simple", "Regular", "Ornate", "Meggnificent"},
	},
	{
		Id:         "interstellar_compass",
		Name:       "Interstellar compass",
		Effect:     "Increases egg shipping rate",
		LevelNames: []string{"Miscalibrated", "Regular", "Precise", "Clairvoyant"},
	},
	{
		Id:         "the_chalice",
		Name:       "The chalice",
		Effect:     "Improves internal hatcheries",
		LevelNames: []string{"Plain", "Polished", "Jeweled", "Eggceptional"},
	},
	{
		Id:         "phoenix_feather",
		Name:       "Phoenix feather",
		Effect:     "Increases soul egg collection rate",
		LevelNames: []string{"Tattered", "Regular", "Brilliant", "Blazing"},
	},
	{
		Id:         "quantum_metronome",
		Name:       "Quantum metronome",
		Effect:     "Increases egg laying rate",
		LevelNames: []string{"Misaligned", "Adequate", "Perfect", "Reggference"},
	},
	{
		Id:         "dilithium_monocle",
		Name:       "Dilithium monocle",
		Effect:     "Increases boost effectiveness",
		LevelNames: []string{"Regular", "Precise", "Eggsacting", "Flawless"},
	},
	{
		Id:         "titanium_actuator",
		Name:       "Titanium actuator",
		Effect:     "Increases hold to hatch rate",
		LevelNames: []string{"Inconsistent", "Regular", "Precise", "Reggference"},
	},
	{
		Id:         "ship_in_a_bottle",
		Name:       "Ship in a bottle",
		Effect:     "Increases co-op mates' earnings",
		LevelNames: []string{"Regular", "Detailed", "Complex", "Eggquisite"},
	},
	{
		Id:         "tachyon_deflector",
		Name:       "Tachyon deflector",
		Effect:     "Increases co-op mates' egg laying rate",
		LevelNames: []string{"Weak", "Regular", "Robust", "Eggceptional"},
	},
	{
		Id:         "book_of_basan",
		Name:       "Book of basan",
		Effect:     "Increases effect of Eggs of Prophecy",
		LevelNames: []string{"Regular", "Collectors", "Fortified", "Gilded"},
	},
	{
		Id:         "light_of_eggendil",
		Name:       "Light of eggendil",
		Effect:     "Increases enlightenment egg value",
		LevelNames: []string{"Dim", "Shimmering", "Glowing", "Brilliant"},
	},
}

// _stones lists the stones that can be set into artifacts; same shape as
// _artifacts, with tier 0 usually being the "Fragment".
var _stones = []artifactClass{
	{
		Id:         "lunar_stone",
		Name:       "Lunar stone",
		Effect:     "Increases away earnings when set",
		LevelNames: []string{"Fragment", "Regular", "Eggsquisite", "Meggnificent"},
	},
	{
		Id:         "shell_stone",
		Name:       "Shell stone",
		Effect:     "Increases egg value when set",
		LevelNames: []string{"Fragment", "Regular", "Eggsquisite", "Flawless"},
	},
	{
		Id:         "tachyon_stone",
		Name:       "Tachyon stone",
		Effect:     "Increases egg laying rate when set",
		LevelNames: []string{"Fragment", "Regular", "Eggsquisite", "Brilliant"},
	},
	{
		Id:         "terra_stone",
		Name:       "Terra stone",
		Effect:     "Increases max running chicken bonus when set",
		LevelNames: []string{"Fragment", "Regular", "Rich", "Eggceptional"},
	},
	{
		Id:         "soul_stone",
		Name:       "Soul stone",
		Effect:     "Increases soul egg bonus when set",
		LevelNames: []string{"Fragment", "Regular", "Eggsquisite", "Radiant"},
	},
	{
		Id:         "dilithium_stone",
		Name:       "Dilithium stone",
		Effect:     "Increases boost duration when set",
		LevelNames: []string{"Fragment", "Regular", "Eggsquisite", "Brilliant"},
	},
	{
		Id:         "quantum_stone",
		Name:       "Quantum stone",
		Effect:     "Increases shipping capacity when set",
		LevelNames: []string{"Fragment", "Regular", "Phased", "Meggnificent"},
	},
	{
		Id:         "life_stone",
		Name:       "Life stone",
		Effect:     "Improves internal hatcheries when set",
		LevelNames: []string{"Fragment", "Regular", "Good", "Eggceptional"},
	},
	{
		Id:         "clarity_stone",
		Name:       "Clarity stone",
		Effect:     "Enables effect of host artifact on enlightenment egg farm",
		LevelNames: []string{"Fragment", "Regular", "Eggsquisite", "Eggceptional"},
	},
	{
		Id:         "prophecy_stone",
		Name:       "Prophecy stone",
		Effect:     "Increases egg of prophecy egg bonus when set",
		LevelNames: []string{"Fragment", "Regular", "Eggsquisite", "Radiant"},
	},
}

// _ingredients lists crafting ingredients (no Effect field; three tiers).
var _ingredients = []artifactClass{
	{
		Id:         "gold_meteorite",
		Name:       "Gold meteorite",
		LevelNames: []string{"Tiny", "Enriched", "Solid"},
	},
	{
		Id:         "tau_ceti_geode",
		Name:       "Tau ceti geode",
		LevelNames: []string{"Piece", "Glimmering", "Radiant"},
	},
	{
		Id:         "solar_titanium",
		Name:       "Solar titanium",
		LevelNames: []string{"Ore", "Bar", "Geogon"},
	},
}

// _unconfirmedIngredients lists ingredients whose tier names are not yet
// known ("?" placeholders); DRONE_PARTS is excluded for lack of an icon.
var _unconfirmedIngredients = []artifactClass{
	{
		Id:         "et_aluminum",
		Name:       "Extraterrestrial aluminum",
		LevelNames: []string{"?", "?", "?"},
	},
	{
		Id:         "ancient_tungsten",
		Name:       "Ancient tungsten",
		LevelNames: []string{"?", "?", "?"},
	},
	{
		Id:         "space_rocks",
		Name:       "Space rocks",
		LevelNames: []string{"?", "?", "?"},
	},
	{
		Id:         "alien_wood",
		Name:       "Alien wood",
		LevelNames: []string{"?", "?", "?"},
	},
	{
		Id:         "centaurian_steel",
		Name:       "Centaurian steel",
		LevelNames: []string{"?", "?", "?"},
	},
	{
		Id:         "eridani_feather",
		Name:       "Eridani feather",
		LevelNames: []string{"?", "?", "?"},
	},
	// DRONE_PARTS doesn't even have an icon.
	// {
	// 	Id:         "drone_parts",
	// 	Name:       "Drone parts",
	// 	LevelNames: []string{"?", "?", "?"},
	// },
	{
		Id:         "celestial_bronze",
		Name:       "Celestial bronze",
		LevelNames: []string{"?", "?", "?"},
	},
	{
		Id:         "lalande_hide",
		Name:       "Lalande hide",
		LevelNames: []string{"?", "?", "?"},
	},
}
// afxIconPath returns the icon asset path for level idx (0-indexed) of the
// given artifact. A few ids use shortened names in the asset files, so
// they are remapped before building the path; the file name itself is
// 1-indexed.
func afxIconPath(artifact artifactClass, idx int) string {
	aliases := map[string]string{
		"light_of_eggendil":    "light_eggendil",
		"neodymium_medallion":  "neo_medallion",
		"vial_of_martian_dust": "vial_martian_dust",
	}
	id := artifact.Id
	if alias, ok := aliases[id]; ok {
		id = alias
	}
	return fmt.Sprintf("egginc/afx_%s_%d.png", id, idx+1)
}
// main renders templates/*/*.html (with the afxiconpath helper available)
// into src/index.html, feeding the artifact/stone/ingredient tables above.
func main() {
	tmpl := template.Must(template.New("").Funcs(template.FuncMap{
		"afxiconpath": afxIconPath,
	}).ParseGlob("templates/*/*.html"))
	err := os.MkdirAll("src", 0o755)
	if err != nil {
		log.Fatalf("mkdir -p src failed: %s", err)
	}
	output, err := os.Create("src/index.html")
	if err != nil {
		log.Fatalf("failed to open src/index.html for writing: %s", err)
	}
	defer output.Close()
	// Anonymous struct bundles all four tables for the template.
	err = tmpl.ExecuteTemplate(output, "index.html", struct {
		Artifacts              []artifactClass
		Stones                 []artifactClass
		Ingredients            []artifactClass
		UnconfirmedIngredients []artifactClass
	}{
		Artifacts:              _artifacts,
		Stones:                 _stones,
		Ingredients:            _ingredients,
		UnconfirmedIngredients: _unconfirmedIngredients,
	})
	if err != nil {
		log.Fatalf("failed to render template: %s", err)
	}
}
|
package helpers
import (
"encoding/binary"
"math/big"
"math/rand"
"sync"
"time"
"github.com/dbogatov/fabric-amcl/amcl"
)
// RandomBytes draws n bytes from the given AMCL PRG.
func RandomBytes(prg *amcl.RAND, n int) (bytes []byte) {
	bytes = make([]byte, n)
	for i := range bytes {
		bytes[i] = prg.GetByte()
	}
	return bytes
}
// PeerByHash maps a hash to a peer index in [0, peers) by interpreting the
// bytes as a big-endian unsigned integer and reducing it modulo peers.
func PeerByHash(hash []byte, peers int) (peer int) {
	value := new(big.Int).SetBytes(hash)
	remainder := new(big.Int).Mod(value, big.NewInt(int64(peers)))
	return int(remainder.Int64())
}
// Sha3 computes the 32-byte SHA3-256 digest of raw using AMCL's SHA3.
func Sha3(raw []byte) (hash []byte) {
	sha3 := amcl.NewSHA3(amcl.SHA3_HASH256)
	for _, b := range raw {
		sha3.Process(b)
	}
	hash = make([]byte, 32)
	sha3.Hash(hash)
	return hash
}
// RandomString builds a string of the given length from alphanumeric
// characters chosen via the AMCL PRG.
//
// NOTE(review): 256 % 62 != 0, so the modulo introduces a slight bias
// toward the first characters of the charset — confirm this is acceptable.
func RandomString(prg *amcl.RAND, length int) string {
	const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
	out := make([]byte, length)
	for i := 0; i < length; i++ {
		out[i] = charset[int(prg.GetByte())%len(charset)]
	}
	return string(out)
}
// RandomULong draws 8 bytes from the PRG and assembles them big-endian
// into a uint64.
func RandomULong(prg *amcl.RAND) uint64 {
	raw := make([]byte, 8)
	for i := range raw {
		raw[i] = prg.GetByte()
	}
	return binary.BigEndian.Uint64(raw)
}
// randMutex serializes NewRand — presumably because AMCL PRG construction
// or the shared seeding path is not goroutine-safe; confirm before removing.
var randMutex = &sync.Mutex{}

// NewRand returns a new AMCL PRG seeded with 32 bytes derived from a
// time-seeded math/rand source.
//
// NOTE(review): math/rand seeded with the wall clock is not
// cryptographically strong; confirm callers only need statistical
// randomness here.
func NewRand() (prg *amcl.RAND) {
	randMutex.Lock()
	defer randMutex.Unlock()
	prg = amcl.NewRAND()
	goPrg := rand.New(rand.NewSource(time.Now().UnixNano()))
	var raw [32]byte
	for i := 0; i < 32; i++ {
		// Only the low byte of each Int() is used.
		raw[i] = byte(goPrg.Int())
	}
	prg.Seed(32, raw[:])
	return
}
// NewRandSeed returns a new AMCL PRG deterministically seeded with the
// given bytes; identical seeds produce identical byte streams.
func NewRandSeed(seed []byte) (prg *amcl.RAND) {
	prg = amcl.NewRAND()
	prg.Seed(len(seed), seed)
	return
}
|
package fitbuddy
import (
"fittgbot/internal/configuration"
tgbotapi "github.com/go-telegram-bot-api/telegram-bot-api"
"log"
)
// BotInstance bundles the Telegram bot API client with its configuration
// and the update-polling settings used by StartListening.
type BotInstance struct {
	BotAPI        *tgbotapi.BotAPI            // authorized Telegram API client
	Configuration configuration.Configuration // app-level configuration
	UpdateConfig  tgbotapi.UpdateConfig       // long-poll settings for GetUpdatesChan
}
// NewBot authorizes a Telegram bot with the configured auth token and wraps
// it into a BotInstance. It panics (via log.Panic) when authorization fails,
// since the bot cannot operate without a valid token.
func NewBot(conf configuration.Configuration, updateConf tgbotapi.UpdateConfig) *BotInstance {
	bot, err := tgbotapi.NewBotAPI(conf.BotInternalConfiguration.BotAuthToken)
	if err != nil {
		log.Panic(err)
	}
	bot.Debug = conf.BotInternalConfiguration.Debug
	log.Printf("Authorized on account %s", bot.Self.UserName)
	return &BotInstance{
		BotAPI:        bot,
		Configuration: conf,
		UpdateConfig:  updateConf,
	}
}
// NewDefaultUpdatesConfig returns the default long-poll settings: start
// from offset 0 with a 60-second poll timeout.
func NewDefaultUpdatesConfig() tgbotapi.UpdateConfig {
	updatesConfig := tgbotapi.NewUpdate(0)
	updatesConfig.Timeout = 60
	return updatesConfig
}
// StartListening opens the Telegram updates channel and blocks, dispatching
// each incoming update through cmdMux. It panics (via log.Panic) if the
// updates channel cannot be opened.
func (bot *BotInstance) StartListening(cmdMux *CmdMux) {
	updatesChan, err := bot.BotAPI.GetUpdatesChan(bot.UpdateConfig)
	if err != nil {
		log.Panic("Bot cannot start listen for updates")
	}
	listenForUpdatesFromChan(cmdMux, bot, updatesChan)
}
// listenForUpdatesFromChan consumes updates until the channel closes,
// skipping non-message updates and handling each message concurrently.
func listenForUpdatesFromChan(cmdMux *CmdMux, botInstance *BotInstance, updatesChan tgbotapi.UpdatesChannel) {
	for update := range updatesChan {
		if update.Message == nil {
			continue
		}
		// One goroutine per update keeps slow handlers from blocking the loop.
		go cmdMux.HandleUpdate(botInstance, update)
	}
}
|
package cmd
import (
"context"
"fmt"
"io"
"net/http"
"os"
"github.com/calvinfeng/sling/handler"
//"github.com/calvinfeng/sling/stream"
"github.com/calvinfeng/sling/stream/broker"
//"github.com/calvinfeng/sling/stream/broker"
"github.com/gorilla/websocket"
"github.com/jinzhu/gorm"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
"github.com/spf13/cobra"
// Postgres database driver
_ "github.com/jinzhu/gorm/dialects/postgres"
)
// RunServerCmd is the command used to run the server.
// Invoked as `<binary> runserver`; the work happens in runServer.
var RunServerCmd = &cobra.Command{
	Use:   "runserver",
	Short: "run user authentication server",
	RunE:  runServer,
}
// runServer is the RunE implementation for RunServerCmd: it opens the
// Postgres connection, wires the websocket broker and HTTP routes, and
// serves on :8888 until the listener fails. Any error is returned to cobra.
func runServer(cmd *cobra.Command, args []string) error {
	conn, err := gorm.Open("postgres", pgAddr)
	if err != nil {
		// FIX: the original called log.Fatalf, but the `log` package is not
		// imported in this file (compile error). Returning the error lets
		// cobra report it and keeps RunE's contract.
		return fmt.Errorf("failed to open DB conn: %s", err.Error())
	}
	// msgBroker fans messages and actions out to websocket clients; the
	// local name avoids shadowing the imported broker package.
	msgBroker := broker.SetupBroker(context.Background(), conn)
	srv := echo.New()
	srv.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{
		Format: "HTTP[${time_rfc3339}] ${method} ${path} status=${status} latency=${latency_human}\n",
		Output: io.MultiWriter(os.Stdout),
	}))
	srv.Use(middleware.CORSWithConfig(middleware.CORSConfig{
		AllowOrigins: []string{"*"},
		AllowMethods: []string{http.MethodGet, http.MethodPut, http.MethodPost, http.MethodDelete},
	}))
	// Static frontend assets.
	srv.File("/", "./frontend/build/index.html")
	srv.Static("/static", "./frontend/build/static")
	// Public auth endpoints.
	srv.POST("/api/register", handler.NewUserHandler(conn, msgBroker))
	srv.POST("/api/login", handler.LoginHandler(conn))
	// Token-protected user endpoints.
	users := srv.Group("api/users")
	users.Use(handler.NewTokenAuthMiddleware(conn))
	users.GET("/", handler.GetUsersHandler(conn))
	users.GET("/current", handler.GetCurrentUserHandler(conn))
	srv.GET("/api/rooms", handler.GetRoomsHandler(conn), handler.NewTokenAuthMiddleware(conn))
	// Websocket streams for chat messages and UI actions.
	messageStreamHandler := handler.GetMessageStreamHandler(&websocket.Upgrader{}, msgBroker)
	actionStreamHandler := handler.GetActionStreamHandler(&websocket.Upgrader{}, msgBroker)
	streams := srv.Group("api/stream")
	streams.GET("/messages", messageStreamHandler)
	streams.GET("/actions", actionStreamHandler)
	fmt.Println("Listening at localhost:8888...")
	// Start blocks; propagate any listen/serve error to cobra.
	return srv.Start(":8888")
}
|
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fhir
import (
"encoding/json"
"fmt"
"strings"
)
// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND
// EpisodeOfCareStatus is documented here http://hl7.org/fhir/ValueSet/episode-of-care-status
type EpisodeOfCareStatus int

// Enum values follow the order of the FHIR value set; the iota ordering
// must stay in sync with Code/Display/Definition below.
const (
	EpisodeOfCareStatusPlanned EpisodeOfCareStatus = iota
	EpisodeOfCareStatusWaitlist
	EpisodeOfCareStatusActive
	EpisodeOfCareStatusOnhold
	EpisodeOfCareStatusFinished
	EpisodeOfCareStatusCancelled
	EpisodeOfCareStatusEnteredInError
)
// MarshalJSON encodes the status as its quoted FHIR code string.
func (code EpisodeOfCareStatus) MarshalJSON() ([]byte, error) {
	return json.Marshal(code.Code())
}
// UnmarshalJSON decodes a quoted FHIR code string into the enum value,
// rejecting anything outside the value set.
func (code *EpisodeOfCareStatus) UnmarshalJSON(json []byte) error {
	s := strings.Trim(string(json), "\"")
	byCode := map[string]EpisodeOfCareStatus{
		"planned":          EpisodeOfCareStatusPlanned,
		"waitlist":         EpisodeOfCareStatusWaitlist,
		"active":           EpisodeOfCareStatusActive,
		"onhold":           EpisodeOfCareStatusOnhold,
		"finished":         EpisodeOfCareStatusFinished,
		"cancelled":        EpisodeOfCareStatusCancelled,
		"entered-in-error": EpisodeOfCareStatusEnteredInError,
	}
	v, ok := byCode[s]
	if !ok {
		return fmt.Errorf("unknown EpisodeOfCareStatus code `%s`", s)
	}
	*code = v
	return nil
}
// String implements fmt.Stringer using the FHIR code representation.
func (code EpisodeOfCareStatus) String() string {
	return code.Code()
}
// Code returns the FHIR code string for this status, or "<unknown>" for
// values outside the defined set. The table is indexed by the iota order
// of the constants above.
func (code EpisodeOfCareStatus) Code() string {
	codes := [...]string{
		"planned",
		"waitlist",
		"active",
		"onhold",
		"finished",
		"cancelled",
		"entered-in-error",
	}
	if code >= 0 && int(code) < len(codes) {
		return codes[code]
	}
	return "<unknown>"
}
// Display returns the human-readable FHIR display text for this status, or
// "<unknown>" for values outside the defined set. Indexed by iota order.
func (code EpisodeOfCareStatus) Display() string {
	displays := [...]string{
		"Planned",
		"Waitlist",
		"Active",
		"On Hold",
		"Finished",
		"Cancelled",
		"Entered in Error",
	}
	if code >= 0 && int(code) < len(displays) {
		return displays[code]
	}
	return "<unknown>"
}
// Definition returns the full FHIR definition text for this status, or
// "<unknown>" for values outside the defined set.
func (code EpisodeOfCareStatus) Definition() string {
	switch code {
	case EpisodeOfCareStatusPlanned:
		return "This episode of care is planned to start at the date specified in the period.start. During this status, an organization may perform assessments to determine if the patient is eligible to receive services, or be organizing to make resources available to provide care services."
	case EpisodeOfCareStatusWaitlist:
		return "This episode has been placed on a waitlist, pending the episode being made active (or cancelled)."
	case EpisodeOfCareStatusActive:
		return "This episode of care is current."
	case EpisodeOfCareStatusOnhold:
		return "This episode of care is on hold; the organization has limited responsibility for the patient (such as while on respite)."
	case EpisodeOfCareStatusFinished:
		return "This episode of care is finished and the organization is not expecting to be providing further care to the patient. Can also be known as \"closed\", \"completed\" or other similar terms."
	case EpisodeOfCareStatusCancelled:
		return "The episode of care was cancelled, or withdrawn from service, often selected during the planned stage as the patient may have gone elsewhere, or the circumstances have changed and the organization is unable to provide the care. It indicates that services terminated outside the planned/expected workflow."
	case EpisodeOfCareStatusEnteredInError:
		return "This instance should not have been part of this patient's medical record."
	}
	return "<unknown>"
}
|
package file
import (
"os"
"syscall"
)
// Flock wraps a file-based advisory lock (flock(2)) on the file at path.
// The file is created lazily on the first Lock call.
type Flock struct {
	path string   // filesystem path of the lock file
	file *os.File // open handle; nil until Lock opens/creates it
}
// NewFlock returns a Flock for the given path without opening the file.
func NewFlock(path string) *Flock {
	return &Flock{path: path}
}
// File returns the underlying lock file; nil before the first Lock call.
func (f *Flock) File() *os.File {
	return f.file
}
// Lock acquires an exclusive advisory lock on the file, creating or
// opening it first if this is the initial call. It blocks until the lock
// is obtained or the syscall fails.
func (f *Flock) Lock() error {
	if f.file == nil {
		if err := f.createOrOpenFile(); err != nil {
			return err
		}
	}
	return syscall.Flock(int(f.file.Fd()), syscall.LOCK_EX)
}
// Unlock releases the advisory lock and closes the underlying file.
// NOTE(review): assumes Lock was called (and succeeded) first; behaviour
// with a nil file handle is not verified here — confirm callers always
// pair Lock/Unlock.
func (f *Flock) Unlock() error {
	if err := syscall.Flock(int(f.file.Fd()), syscall.LOCK_UN); err != nil {
		return err
	}
	return f.file.Close()
}
// createOrOpenFile opens the lock file read-write, creating it with mode
// 0644 if it does not exist, and stores the handle on success.
func (f *Flock) createOrOpenFile() error {
	handle, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDWR, os.FileMode(0644))
	if err == nil {
		f.file = handle
	}
	return err
}
|
package repositories
import (
"errors"
"strconv"
"github.com/auenc/simple-rest/models"
)
// InMemoryJobRepository is a slice-backed job store with no persistence.
// It is not safe for concurrent use; callers must synchronize externally.
type InMemoryJobRepository struct {
	Jobs []models.Job
}
// NewInMemoryJobRepository returns an empty in-memory job repository.
func NewInMemoryJobRepository() *InMemoryJobRepository {
	jobs := make([]models.Job, 0)
	return &InMemoryJobRepository{
		Jobs: jobs,
	}
}
// Get returns the job whose ID equals id, or an error when no such job
// exists (the zero-value job is returned alongside the error).
func (r *InMemoryJobRepository) Get(id string) (models.Job, error) {
	for _, candidate := range r.Jobs {
		if candidate.ID == id {
			return candidate, nil
		}
	}
	var empty models.Job
	return empty, errors.New("Job not found")
}
// GetAll returns the backing slice of all jobs (not a copy); never errors.
func (r *InMemoryJobRepository) GetAll() ([]models.Job, error) {
	return r.Jobs, nil
}
// Create appends job to the repository, assigning it the next free numeric
// ID.
//
// FIX: the original used len(r.Jobs) as the new ID, which collides with an
// existing job's ID after any Delete (the slice shrinks, so len() can equal
// an ID still in use). Use max(existing numeric ID)+1 instead.
func (r *InMemoryJobRepository) Create(job models.Job) error {
	next := 0
	for _, existing := range r.Jobs {
		if n, err := strconv.Atoi(existing.ID); err == nil && n >= next {
			next = n + 1
		}
	}
	job.ID = strconv.Itoa(next)
	r.Jobs = append(r.Jobs, job)
	return nil
}
// Update replaces the job with the given ID, preserving that ID on the new
// value; it reports an error when the ID is unknown.
//
// FIX: the original's `return nil` sat inside the loop but OUTSIDE the
// `if`, so the method reported success after examining only the first
// element, whether or not it matched — and only an empty repository ever
// returned "Job not found". The success return now belongs to the match.
func (r *InMemoryJobRepository) Update(id string, job models.Job) error {
	for i, j := range r.Jobs {
		if j.ID == id {
			job.ID = id
			r.Jobs[i] = job
			return nil
		}
	}
	return errors.New("Job not found")
}
// Delete removes the job with the given ID, keeping the order of the
// remaining jobs intact; it reports an error when the ID is unknown.
func (r *InMemoryJobRepository) Delete(id string) error {
	for i := range r.Jobs {
		if r.Jobs[i].ID != id {
			continue
		}
		// Splice element i out of the slice.
		r.Jobs = append(r.Jobs[:i], r.Jobs[i+1:]...)
		return nil
	}
	return errors.New("Job not found")
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package scexec
import (
"context"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec/scmutationexec"
)
// mutationDescGetter resolves mutable descriptors through a descs.Collection
// within a KV transaction, tracking which descriptor IDs have been fetched.
type mutationDescGetter struct {
	descs     *descs.Collection
	txn       *kv.Txn
	retrieved catalog.DescriptorIDSet // IDs handed out via GetMutableTableByID
}
// GetMutableTableByID fetches the mutable table descriptor for id via the
// collection, bumps its version, and records the id as retrieved.
func (m *mutationDescGetter) GetMutableTableByID(
	ctx context.Context, id descpb.ID,
) (*tabledesc.Mutable, error) {
	table, err := m.descs.GetMutableTableVersionByID(ctx, id, m.txn)
	if err != nil {
		return nil, err
	}
	// The "Maybe" prefix suggests the bump only happens when needed within
	// the transaction — TODO(review): confirm the exact semantics.
	table.MaybeIncrementVersion()
	m.retrieved.Add(table.GetID())
	return table, nil
}

// Compile-time check that mutationDescGetter implements MutableDescGetter.
var _ scmutationexec.MutableDescGetter = (*mutationDescGetter)(nil)
|
/*
Copyright 2018 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/klog"
"github.com/ceph/ceph-csi/pkg/util"
"github.com/container-storage-interface/spec/lib/go/csi"
)
// volumeID is a typed CephFS volume identifier used across this package.
type volumeID string
// execCommand runs program with args, capturing stdout and stderr, and logs
// the invocation with secrets stripped from the argument list. On failure
// the returned error embeds the pid (when available), the sanitized args
// and the captured stderr.
func execCommand(ctx context.Context, program string, args ...string) (stdout, stderr []byte, err error) {
	var (
		cmd           = exec.Command(program, args...) // nolint: gosec, #nosec
		sanitizedArgs = util.StripSecretInArgs(args)
		stdoutBuf     bytes.Buffer
		stderrBuf     bytes.Buffer
	)
	cmd.Stdout = &stdoutBuf
	cmd.Stderr = &stderrBuf
	klog.V(4).Infof(util.Log(ctx, "cephfs: EXEC %s %s"), program, sanitizedArgs)
	if err := cmd.Run(); err != nil {
		// Process can be nil when the command failed to start at all.
		if cmd.Process == nil {
			return nil, nil, fmt.Errorf("cannot get process pid while running %s %v: %v: %s",
				program, sanitizedArgs, err, stderrBuf.Bytes())
		}
		return nil, nil, fmt.Errorf("an error occurred while running (%d) %s %v: %v: %s",
			cmd.Process.Pid, program, sanitizedArgs, err, stderrBuf.Bytes())
	}
	return stdoutBuf.Bytes(), stderrBuf.Bytes(), nil
}
// execCommandErr runs the command like execCommand but discards its output,
// returning only the error.
func execCommandErr(ctx context.Context, program string, args ...string) error {
	_, _, err := execCommand(ctx, program, args...)
	return err
}
//nolint: unparam
// execCommandJSON runs the command and unmarshals its stdout as JSON into
// v; the error includes the raw output when unmarshaling fails.
func execCommandJSON(ctx context.Context, v interface{}, program string, args ...string) error {
	stdout, _, err := execCommand(ctx, program, args...)
	if err != nil {
		return err
	}
	if err = json.Unmarshal(stdout, v); err != nil {
		return fmt.Errorf("failed to unmarshal JSON for %s %v: %s: %v", program, util.StripSecretInArgs(args), stdout, err)
	}
	return nil
}
// pathExists reports whether p can be stat'd. Any Stat failure — including
// permission errors, not only "not found" — is treated as "does not exist".
func pathExists(p string) bool {
	if _, err := os.Stat(p); err != nil {
		return false
	}
	return true
}
// Controller service request validation

// validateCreateVolumeRequest checks that the driver advertises
// CREATE_DELETE_VOLUME, that the request has a name and volume capabilities,
// and rejects block-mode capabilities (CephFS only supports filesystem mode).
func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {
	if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
		return fmt.Errorf("invalid CreateVolumeRequest: %v", err)
	}
	if req.GetName() == "" {
		return status.Error(codes.InvalidArgument, "volume Name cannot be empty")
	}
	reqCaps := req.GetVolumeCapabilities()
	if reqCaps == nil {
		return status.Error(codes.InvalidArgument, "volume Capabilities cannot be empty")
	}
	// Named volCap rather than cap: "cap" would shadow the builtin cap()
	// for the rest of the loop scope.
	for _, volCap := range reqCaps {
		if volCap.GetBlock() != nil {
			return status.Error(codes.Unimplemented, "block volume not supported")
		}
	}
	return nil
}
// validateDeleteVolumeRequest checks that the driver advertises the
// CREATE_DELETE_VOLUME capability before a volume may be deleted.
func (cs *ControllerServer) validateDeleteVolumeRequest() error {
	err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME)
	if err == nil {
		return nil
	}
	return fmt.Errorf("invalid DeleteVolumeRequest: %v", err)
}
// Controller expand volume request validation

// validateExpandVolumeRequest checks that the driver advertises EXPAND_VOLUME
// and that the request carries both a volume ID and a capacity range.
func (cs *ControllerServer) validateExpandVolumeRequest(req *csi.ControllerExpandVolumeRequest) error {
	if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME); err != nil {
		return fmt.Errorf("invalid ExpandVolumeRequest: %v", err)
	}
	switch {
	case req.GetVolumeId() == "":
		return status.Error(codes.InvalidArgument, "Volume ID cannot be empty")
	case req.GetCapacityRange() == nil:
		return status.Error(codes.InvalidArgument, "CapacityRange cannot be empty")
	}
	return nil
}
|
package api
import (
"contetto"
"html"
"log"
"net/http"
"service/app/middleware/auth"
"service/app/models"
"service/app/models/capability"
"service/app/models/role"
"service/app/services"
"github.com/gorilla/mux"
"github.com/justinas/alice"
"gopkg.in/mgo.v2/bson"
)
const (
	// UserApiEndpoint is the path prefix under which all user routes are
	// mounted (relative to the API base router).
	UserApiEndpoint = "/users"
)
// UserApi couples the user service with the mux subrouter its endpoints are
// registered on. (Go initialism convention would spell this UserAPI, but the
// exported name is kept for compatibility with existing callers.)
type UserApi struct {
	service *services.UserService
	r       *mux.Router
}
// InitUserApi builds the user API: it creates the user service, mounts a
// strict-slash subrouter under UserApiEndpoint, and registers all endpoints.
func InitUserApi(f contetto.ContettoMicroServiceBaseFramework) *UserApi {
	log.Printf("initializing user api...")
	subrouter := ApiBaseRouter(f.Web.Router).
		StrictSlash(true).
		PathPrefix(UserApiEndpoint).
		Subrouter()
	api := &UserApi{
		service: services.NewUserService(f),
		r:       subrouter,
	}
	api.RegisterEndpoints()
	return api
}
// GetRouter returns the subrouter the user endpoints are mounted on.
// (Go convention would drop the Get prefix, but the exported name is kept
// for compatibility.)
func (ua *UserApi) GetRouter() *mux.Router {
	return ua.r
}
// RegisterEndpoints mounts all user routes on the subrouter.
//
// gorilla/mux dispatches to the first registered route whose matchers all
// pass, and a route built with only Methods("GET") matches *every* GET path
// under the subrouter. The /roles route is therefore registered first;
// previously it came last and GET /users/roles was swallowed by the list
// route, making RolesHandler unreachable.
func (ua *UserApi) RegisterEndpoints() {
	log.Printf("registering user api endpoints on: %s%s", ApiBaseEndpoint, UserApiEndpoint)
	ua.r.Path("/roles").
		Methods("GET").
		Name(capability.ReadUsers.String()).
		Handler(alice.New(auth.Auth, auth.Acl).ThenFunc(ua.RolesHandler))
	ua.r.Methods("GET").
		Name(capability.ReadUsers.String()).
		Handler(alice.New(auth.Auth, auth.Acl).ThenFunc(ua.ListHandler))
	ua.r.Methods("POST").
		HeadersRegexp(HeaderAccept, MediaTypeJson,
			HeaderContentType, MediaTypeJson).
		Name(capability.CreateUsers.String()).
		Handler(alice.New(auth.Auth, auth.Acl).ThenFunc(ua.CreateHandler))
	ua.r.Path("/{id}").
		Methods("PUT").
		HeadersRegexp(HeaderAccept, MediaTypeJson,
			HeaderContentType, MediaTypeJson).
		Name(capability.EditUsers.String()).
		Handler(alice.New(auth.Auth, auth.Acl).ThenFunc(ua.EditHandler))
	ua.r.Path("/{id}").
		Methods("DELETE").
		Name(capability.DeleteUsers.String()).
		Handler(alice.New(auth.Auth, auth.Acl).ThenFunc(ua.DeleteHandler))
}
// ListHandler handles GET /users: it parses the query from the form values
// and returns the matching users as JSON.
func (ua *UserApi) ListHandler(w http.ResponseWriter, r *http.Request) {
	var q models.Query
	err := ParseForm(r, &q)
	if err != nil {
		renderer.JSON(w, http.StatusBadRequest, BadRequestErr(err.Error()))
		return
	}
	list, err := ua.service.GetUsers(q)
	if err != nil {
		renderer.JSON(w, http.StatusInternalServerError, ServerErr(err.Error()))
		return
	}
	renderer.JSON(w, http.StatusOK, list)
}
// CreateHandler handles POST /users: it decodes and validates the new-user
// payload, stamps it with the authenticated principal, persists it, and
// returns the created record with 201.
func (ua *UserApi) CreateHandler(w http.ResponseWriter, r *http.Request) {
	var req models.NewUserRequest
	if err := ParseJson(r.Body, &req); err != nil {
		renderer.JSON(w, http.StatusBadRequest, BadRequestErr(err.Error()))
		return
	}
	if ok, err := req.Validate(); !ok {
		renderer.JSON(w, http.StatusBadRequest, models.ValidationErr(err))
		return
	}
	principal, err := auth.GetUserPrincipal(r)
	if err != nil {
		renderer.JSON(w, http.StatusInternalServerError, ServerErr(err.Error()))
		return
	}
	newUser := models.NewUser{
		FirstName: req.FirstName,
		LastName:  req.LastName,
		Email:     req.Email,
		Password:  req.Password,
		Role:      req.Role,
		CreatedBy: principal,
	}
	created, err := ua.service.CreateUser(newUser)
	if err != nil {
		renderer.JSON(w, http.StatusInternalServerError, ServerErr(err.Error()))
		return
	}
	renderer.JSON(w, http.StatusCreated, created)
}
// EditHandler handles PUT /users/{id}: it decodes and validates the update
// payload, then applies it to the user identified by the path id.
func (ua *UserApi) EditHandler(w http.ResponseWriter, r *http.Request) {
	var user models.UpdateUserRequest
	if err := ParseJson(r.Body, &user); err != nil {
		renderer.JSON(w, http.StatusBadRequest, BadRequestErr(err.Error()))
		return
	}
	if ok, err := user.Validate(); !ok {
		renderer.JSON(w, http.StatusBadRequest, models.ValidationErr(err))
		return
	}
	vars := mux.Vars(r)
	// bson.ObjectIdHex panics on a malformed hex string; validate first so a
	// bad {id} yields a 400 instead of crashing the handler.
	if !bson.IsObjectIdHex(vars["id"]) {
		renderer.JSON(w, http.StatusBadRequest, BadRequestErr("invalid user id"))
		return
	}
	u := models.UpdateUser{
		Id:        bson.ObjectIdHex(vars["id"]),
		FirstName: user.FirstName,
		// NOTE(review): only LastName is HTML-escaped here; FirstName and the
		// create path are not — confirm whether escaping is intended at all.
		LastName: html.EscapeString(user.LastName),
		Password: user.Password,
		Role:     user.Role,
		Website:  user.Website,
		Bio:      user.Bio,
	}
	if err := ua.service.UpdateUser(&u); err != nil {
		renderer.JSON(w, http.StatusInternalServerError, ServerErr(err.Error()))
		return
	}
	renderer.JSON(w, http.StatusOK, u)
}
// DeleteHandler handles DELETE /users/{id}: it looks the user up, deletes it,
// and echoes the deleted record back with 204.
func (ua *UserApi) DeleteHandler(w http.ResponseWriter, r *http.Request) {
	id := mux.Vars(r)["id"]
	target, err := ua.service.GetUser(id)
	if err != nil {
		renderer.JSON(w, http.StatusBadRequest, BadRequestErr(err.Error()))
		return
	}
	if err = ua.service.DeleteUser(target.Id.Hex()); err != nil {
		renderer.JSON(w, http.StatusInternalServerError, ServerErr(err.Error()))
		return
	}
	// NOTE(review): a 204 No Content response should not carry a body —
	// confirm whether any client relies on the echoed user here.
	renderer.JSON(w, http.StatusNoContent, target)
}
// RolesHandler handles GET /users/roles and returns the known roles as JSON.
func (ua *UserApi) RolesHandler(w http.ResponseWriter, r *http.Request) {
	renderer.JSON(w, http.StatusOK, role.Roles())
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transform_test
import (
"context"
"testing"
"github.com/google/gapid/core/assert"
"github.com/google/gapid/core/log"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/api/test"
"github.com/google/gapid/gapis/api/transform"
)
// TestInjector checks that Injector inserts commands after the requested IDs,
// flushes trailing injections at the end, and drops injections whose target
// ID never appears in the stream.
func TestInjector(t *testing.T) {
	ctx := log.Testing(t)
	cb := test.CommandBuilder{Arena: test.Cmds.Arena}
	newCmd := func(id api.CmdID, tag uint64) api.Cmd {
		return cb.CmdTypeMix(uint64(id), 10, 20, 30, 40, 50, 60, tag, 80, 90, 100, true, test.Voidᵖ(0x12345678), 100)
	}
	inputs := transform.NewCmdAndIDList(
		newCmd(10, 0),
		newCmd(30, 0),
		newCmd(50, 0),
		newCmd(90, 0),
		newCmd(00, 0),
		newCmd(60, 0),
	)
	expected := transform.NewCmdAndIDList(
		newCmd(10, 0),
		newCmd(30, 0),
		newCmd(api.CmdNoID, 1),
		newCmd(50, 0),
		newCmd(90, 0),
		newCmd(api.CmdNoID, 2),
		newCmd(api.CmdNoID, 3),
		newCmd(00, 0),
		newCmd(60, 0),
		newCmd(api.CmdNoID, 0),
	)
	// Named injector (not "transform") so the local does not shadow the
	// imported transform package used above.
	injector := &transform.Injector{}
	injector.Inject(30, newCmd(api.CmdNoID, 1))
	injector.Inject(90, newCmd(api.CmdNoID, 2))
	injector.Inject(90, newCmd(api.CmdNoID, 3))
	injector.Inject(60, newCmd(api.CmdNoID, 0))
	injector.Inject(40, newCmd(100, 5)) // Should not be injected
	CheckTransform(ctx, t, injector, inputs, expected)
}
// CheckTransform verifies that the transformer, fed the given inputs and then
// flushed, emits exactly the expected commands.
func CheckTransform(ctx context.Context, t *testing.T, transformer transform.Transformer, inputs, expected transform.CmdAndIDList) {
	recorder := &transform.Recorder{}
	for _, entry := range inputs {
		transformer.Transform(ctx, entry.ID, entry.Cmd, recorder)
	}
	transformer.Flush(ctx, recorder)
	assert.For(ctx, "CmdsAndIDs").ThatSlice(recorder.CmdsAndIDs).DeepEquals(expected)
}
|
package monitor
import (
"time"
"yunion.io/x/onecloud/pkg/apis"
)
// MonitorResourceJointListInput filters listings of monitor-resource/alert
// joint rows by resource id, alert id, and/or explicit joint row ids.
type MonitorResourceJointListInput struct {
	MonitorResourceId string  `json:"monitor_resource_id"`
	AlertId           string  `json:"alert_id"`
	JointId           []int64 `json:"joint_id"`
}
// MonitorResourceJointCreateInput is the creation payload for a joint row
// linking a monitor resource to an alert, including the triggering record,
// state, time, and evaluation data.
type MonitorResourceJointCreateInput struct {
	apis.Meta
	MonitorResourceId string `json:"monitor_resource_id"`
	AlertId           string `json:"alert_id"`
	// NOTE(review): the two fields below carry ORM/list tags but no json tag,
	// so they marshal under their Go field names — confirm that is intended.
	AlertRecordId string    `width:"36" charset:"ascii" list:"user" update:"user"`
	AlertState    string    `width:"18" charset:"ascii" list:"user" update:"user"`
	TriggerTime   time.Time `list:"user" update:"user" json:"trigger_time"`
	Data          EvalMatch `json:"data"`
}
|
package database
import (
"macaddress_io_grabber/models"
"time"
)
// ApplicationRange is the gorm model for an application range row: a
// [l_border, r_border] interval mapped to an application name, plus free-form
// notes and a reference. (Presumably the borders are MAC-address range
// boundaries, given the package — confirm against the grabber.)
type ApplicationRange struct {
	ID          uint64 `gorm:"primary_key"`
	LeftBorder  string `gorm:"column:l_border;type:varchar(100);not null;index:l_border_idx"`
	RightBorder string `gorm:"column:r_border;type:varchar(100);not null;index:r_border_idx"`
	Application string `gorm:"column:application;type:varchar(100);not null;index:app_idx"`
	Notes       string `gorm:"column:notes;type:text"`
	Reference   string `gorm:"column:ref;type:text"`
	// Pointer timestamps so gorm stores NULL when they are unset.
	CreatedAt *time.Time
	UpdatedAt *time.Time
}
// TableName tells gorm which table backs ApplicationRange.
func (applicationRange ApplicationRange) TableName() string {
	return "application_ranges"
}
// ToJSONModel converts the database row into its JSON transfer
// representation, dropping the surrogate key and timestamps.
func (applicationRange *ApplicationRange) ToJSONModel() models.ApplicationRangeJSON {
	borders := models.RangeJSON{
		LeftBorder:  applicationRange.LeftBorder,
		RightBorder: applicationRange.RightBorder,
	}
	return models.ApplicationRangeJSON{
		RangeJSON:   borders,
		Application: applicationRange.Application,
		Notes:       applicationRange.Notes,
		Reference:   applicationRange.Reference,
	}
}
|
package main
//807. 保持城市天际线
//在二维数组grid中,grid[i][j]代表位于某处的建筑物的高度。 我们被允许增加任何数量(不同建筑物的数量可能不同)的建筑物的高度。 高度 0 也被认为是建筑物。
//
//最后,从新数组的所有四个方向(即顶部,底部,左侧和右侧)观看的“天际线”必须与原始数组的天际线相同。 城市的天际线是从远处观看时,由所有建筑物形成的矩形的外部轮廓。 请看下面的例子。
//
//建筑物高度可以增加的最大总和是多少?
//
//例子:
//输入: grid = [[3,0,8,4],[2,4,5,7],[9,2,6,3],[0,3,1,0]]
//输出: 35
//解释:
//The grid is:
//[ [3, 0, 8, 4],
//[2, 4, 5, 7],
//[9, 2, 6, 3],
//[0, 3, 1, 0] ]
//
//从数组竖直方向(即顶部,底部)看“天际线”是:[9, 4, 8, 7]
//从水平水平方向(即左侧,右侧)看“天际线”是:[8, 7, 9, 3]
//
//在不影响天际线的情况下对建筑物进行增高后,新数组如下:
//
//gridNew = [ [8, 4, 8, 7],
//[7, 4, 7, 7],
//[9, 4, 8, 7],
//[3, 3, 3, 3] ]
//说明:
//
//1 < grid.length = grid[0].length <= 50。
//grid[i][j] 的高度范围是: [0, 100]。
//一座建筑物占据一个grid[i][j]:换言之,它们是 1 x 1 x grid[i][j] 的长方体。
// maxIncreaseKeepingSkyline returns the maximum total amount by which the
// heights in grid can be increased without changing the row-wise and
// column-wise skylines (the per-row and per-column maxima). Each cell can be
// raised to min(rowMax, colMax) of its position.
func maxIncreaseKeepingSkyline(grid [][]int) int {
	rows, cols := len(grid), len(grid[0])
	rowMax := make([]int, rows)
	colMax := make([]int, cols)
	for i, line := range grid {
		for j, h := range line {
			if h > rowMax[i] {
				rowMax[i] = h
			}
			if h > colMax[j] {
				colMax[j] = h
			}
		}
	}
	total := 0
	for i, line := range grid {
		for j, h := range line {
			limit := rowMax[i]
			if colMax[j] < limit {
				limit = colMax[j]
			}
			total += limit - h
		}
	}
	return total
}
|
package mysql
import (
"github.com/jinzhu/gorm"
"github.com/void616/gm.mint.sender/internal/watcher/db/mysql/model"
gormigrate "gopkg.in/gormigrate.v1"
)
// migrations is the ordered list of schema migrations applied via gormigrate.
var migrations = []*gormigrate.Migration{
	// initial
	{
		ID: "2019-09-27T10:08:24.153Z",
		Migrate: func(tx *gorm.DB) error {
			return tx.
				CreateTable(&model.Service{}).
				AddUniqueIndex("ux_watcher_services_name", "name").
				CreateTable(&model.Wallet{}).
				AddUniqueIndex("ux_watcher_wallets_pubkeysvcid", "public_key", "service_id").
				AddForeignKey("service_id", tx.NewScope(&model.Service{}).TableName()+"(id)", "RESTRICT", "RESTRICT").
				CreateTable(&model.Incoming{}).
				AddUniqueIndex("ux_watcher_incomings_svcidtodigest", "service_id", "to", "digest").
				AddIndex("ix_watcher_incomings_notified", "notified").
				AddIndex("ix_watcher_incomings_notifyat", "notify_at").
				AddForeignKey("service_id", tx.NewScope(&model.Service{}).TableName()+"(id)", "RESTRICT", "RESTRICT").
				CreateTable(&model.Setting{}).
				Error
		},
		// Rollback drops everything Migrate created: the FK children (Wallet,
		// Incoming) first, then Service, then Setting. The previous rollback
		// never dropped the Service table, leaving it behind.
		Rollback: func(tx *gorm.DB) error {
			return tx.
				DropTable(&model.Wallet{}).
				DropTable(&model.Incoming{}).
				DropTable(&model.Service{}).
				DropTable(&model.Setting{}).
				Error
		},
	},
}
|
package adapter
import (
"fmt"
"regexp"
"strings"
"github.com/newrelic/infrastructure-agent/pkg/log"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
"px.dev/pxapi/types"
)
// Column names read from Pixie records.
const (
	colNamespace = "namespace"
	colService   = "service"
	colPod       = "pod"
	colContainer = "container"
)
// regExpIsArray matches an array-of-quoted-names literal such as
// ["ns/svc-a","ns/svc-b"], used to detect multi-service values in the
// service column.
var regExpIsArray = regexp.MustCompilePOSIX(`\[((\"[a-zA-Z0-9\-\/._]+\")+,)*(\"[a-zA-Z0-9\-\/._]+\")\]`)
// ResourceHelper filters records by pod/namespace name before they are turned
// into OTLP resources; a nil regexp means "exclude nothing".
type ResourceHelper struct {
	excludePods       *regexp.Regexp
	excludeNamespaces *regexp.Regexp
}
// NewResourceHelper compiles the optional pod and namespace exclusion
// patterns; an empty pattern disables that filter. A pattern that fails to
// compile is reported as an error.
func NewResourceHelper(excludePods, excludeNamespaces string) (*ResourceHelper, error) {
	var rExcludePods *regexp.Regexp
	if excludePods != "" {
		log.Infof("Excluding pods matching regex '%s'", excludePods)
		var err error
		rExcludePods, err = regexp.Compile(excludePods)
		if err != nil {
			// Error strings are lowercase per Go convention, and %w keeps the
			// regexp error inspectable by callers.
			return nil, fmt.Errorf("parsing exclude pods regex: %w", err)
		}
	}
	var rExcludeNamespaces *regexp.Regexp
	if excludeNamespaces != "" {
		log.Infof("Excluding namespaces matching regex '%s'", excludeNamespaces)
		var err error
		rExcludeNamespaces, err = regexp.Compile(excludeNamespaces)
		if err != nil {
			return nil, fmt.Errorf("parsing exclude namespaces regex: %w", err)
		}
	}
	return &ResourceHelper{
		excludePods:       rExcludePods,
		excludeNamespaces: rExcludeNamespaces,
	}, nil
}
// takeNamespaceServiceAndPod extracts the namespace, service name(s), and pod
// name from a record, stripping the "namespace/" prefix from services and the
// pod. The service column may hold either a single name or an array literal
// of quoted names (matched by regExpIsArray).
func takeNamespaceServiceAndPod(r *types.Record) (ns string, services []string, pod string) {
	ns = r.GetDatum(colNamespace).String()
	prefix := ns + "/"
	raw := r.GetDatum(colService).String()
	if regExpIsArray.MatchString(raw) {
		// Strip the surrounding brackets, split the entries, then strip each
		// entry's quotes and namespace prefix.
		parts := strings.Split(raw[1:len(raw)-1], ",")
		services = make([]string, len(parts))
		for i, quoted := range parts {
			services[i] = strings.TrimPrefix(quoted[1:len(quoted)-1], prefix)
		}
	} else {
		services = []string{strings.TrimPrefix(raw, prefix)}
	}
	pod = strings.TrimPrefix(r.GetDatum(colPod).String(), prefix)
	return
}
// createResourceFunc builds the record's base OTLP resource attributes and
// returns a closure that materializes one Resource per service name, each
// with its own service.name attribute.
func createResourceFunc(r *types.Record, namespace, pod, clusterName, pixieClusterID string) func([]string) []resourcepb.Resource {
	baseAttrs := []*commonpb.KeyValue{
		{
			Key:   "pixie.cluster.id",
			Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: pixieClusterID}},
		},
		{
			Key:   "instrumentation.provider",
			Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: instrumentationName}},
		},
		{
			Key:   "k8s.namespace.name",
			Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: namespace}},
		},
		{
			Key:   "service.instance.id",
			Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: pod}},
		},
		{
			Key:   "k8s.pod.name",
			Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: pod}},
		},
		{
			Key:   "k8s.container.name",
			Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: r.GetDatum(colContainer).String()}},
		},
		{
			Key:   "k8s.cluster.name",
			Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: clusterName}},
		},
	}
	return func(services []string) []resourcepb.Resource {
		output := make([]resourcepb.Resource, len(services))
		for i, service := range services {
			// Give every resource its own attribute slice. The previous
			// implementation appended into one shared slice, so the emitted
			// resources aliased the same backing array and accumulated (or
			// overwrote) each other's service.name entries.
			attrs := make([]*commonpb.KeyValue, 0, len(baseAttrs)+1)
			attrs = append(attrs, baseAttrs...)
			attrs = append(attrs, &commonpb.KeyValue{
				Key:   "service.name",
				Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: service}},
			})
			output[i] = resourcepb.Resource{Attributes: attrs}
		}
		return output
	}
}
// createResources turns a record into one OTLP Resource per service, or nil
// when the record's namespace or pod is excluded by the helper's filters.
func (rh *ResourceHelper) createResources(r *types.Record, clusterName, pixieClusterID string) []resourcepb.Resource {
	namespace, services, pod := takeNamespaceServiceAndPod(r)
	if !rh.shouldFilter(namespace, pod) {
		build := createResourceFunc(r, namespace, pod, clusterName, pixieClusterID)
		return build(services)
	}
	return nil
}
// shouldFilter reports whether the record should be dropped because its
// namespace or pod matches a configured exclusion pattern.
func (rh *ResourceHelper) shouldFilter(namespace, pod string) bool {
	byNamespace := rh.excludeNamespaces != nil && rh.excludeNamespaces.MatchString(namespace)
	byPod := rh.excludePods != nil && rh.excludePods.MatchString(pod)
	return byNamespace || byPod
}
// createArrayOfSpans wraps each resource into a ResourceSpans entry, sharing
// the same instrumentation-library spans across all of them.
func createArrayOfSpans(resources []resourcepb.Resource, il []*tracepb.InstrumentationLibrarySpans) []*tracepb.ResourceSpans {
	out := make([]*tracepb.ResourceSpans, 0, len(resources))
	for i := range resources {
		out = append(out, &tracepb.ResourceSpans{
			Resource:                    &resources[i],
			InstrumentationLibrarySpans: il,
		})
	}
	return out
}
// createArrayOfMetrics wraps each resource into a ResourceMetrics entry,
// sharing the same instrumentation-library metrics across all of them.
func createArrayOfMetrics(resources []resourcepb.Resource, il []*metricpb.InstrumentationLibraryMetrics) []*metricpb.ResourceMetrics {
	out := make([]*metricpb.ResourceMetrics, 0, len(resources))
	for i := range resources {
		out = append(out, &metricpb.ResourceMetrics{
			Resource:                      &resources[i],
			InstrumentationLibraryMetrics: il,
		})
	}
	return out
}
|
package requests
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/atomicjolt/canvasapi"
"github.com/atomicjolt/canvasapi/models"
)
// ListAvatarOptions A paginated list of the possible user avatar options that can be set with the user update endpoint. The response will be an array of avatar records. If the 'type' field is 'attachment', the record will include all the normal attachment json fields; otherwise it will include only the 'url' and 'display_name' fields. Additionally, all records will include a 'type' field and a 'token' field. The following explains each field in more detail
// type:: ["gravatar"|"attachment"|"no_pic"] The type of avatar record, for categorization purposes.
// url:: The url of the avatar
// token:: A unique representation of the avatar record which can be used to set the avatar with the user update endpoint. Note: this is an internal representation and is subject to change without notice. It should be consumed with this api endpoint and used in the user update endpoint, and should not be constructed by the client.
// display_name:: A textual description of the avatar record
// id:: ['attachment' type only] the internal id of the attachment
// content-type:: ['attachment' type only] the content-type of the attachment
// filename:: ['attachment' type only] the filename of the attachment
// size:: ['attachment' type only] the size of the attachment
// https://canvas.instructure.com/doc/api/users.html
//
// Path Parameters:
// # Path.UserID (Required) ID
//
// ListAvatarOptions carries the parameters for the "list avatar options"
// Canvas API call described in the comment block above.
type ListAvatarOptions struct {
	Path struct {
		UserID string `json:"user_id" url:"user_id,omitempty"` // (Required)
	} `json:"path"`
}
// GetMethod returns the HTTP method for this call (always GET).
func (t *ListAvatarOptions) GetMethod() string {
	return "GET"
}
// GetURLPath returns the endpoint path with the {user_id} placeholder filled
// in from the request's Path parameters.
func (t *ListAvatarOptions) GetURLPath() string {
	const template = "users/{user_id}/avatars"
	return strings.ReplaceAll(template, "{user_id}", fmt.Sprintf("%v", t.Path.UserID))
}
// GetQuery returns the query string for the request; this endpoint takes no
// query parameters.
func (t *ListAvatarOptions) GetQuery() (string, error) {
	return "", nil
}
// GetBody returns the form-encoded request body; this endpoint sends none.
func (t *ListAvatarOptions) GetBody() (url.Values, error) {
	return nil, nil
}
// GetJSON returns the JSON request body; this endpoint sends none.
func (t *ListAvatarOptions) GetJSON() ([]byte, error) {
	return nil, nil
}
// HasErrors validates the request parameters, returning a single error that
// joins all missing-field messages, or nil when the request is complete.
func (t *ListAvatarOptions) HasErrors() error {
	errs := []string{}
	if t.Path.UserID == "" {
		errs = append(errs, "'Path.UserID' is required")
	}
	if len(errs) > 0 {
		// Use an explicit %s verb: passing the joined string directly as the
		// format argument would misinterpret any '%' it contained (vet's
		// non-constant format string warning).
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do executes the request against the Canvas API (or follows a pagination
// link when next is non-nil) and decodes the response into avatar records
// plus pagination metadata.
func (t *ListAvatarOptions) Do(c *canvasapi.Canvas, next *url.URL) ([]*models.Avatar, *canvasapi.PagedResource, error) {
	var err error
	var response *http.Response
	if next != nil {
		// Follow the pagination link verbatim.
		response, err = c.Send(next, t.GetMethod(), nil)
	} else {
		response, err = c.SendRequest(t)
	}
	// The original duplicated this error check; one is enough.
	if err != nil {
		return nil, nil, err
	}
	// Close the body on every return path, not just after a successful read.
	defer response.Body.Close()
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return nil, nil, err
	}
	ret := []*models.Avatar{}
	if err = json.Unmarshal(body, &ret); err != nil {
		return nil, nil, err
	}
	pagedResource, err := canvasapi.ExtractPagedResource(response.Header)
	if err != nil {
		return nil, nil, err
	}
	return ret, pagedResource, nil
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"context"
gosql "database/sql"
"fmt"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/version"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
// registerSchemaChangeDatabaseVersionUpgrade registers the
// schemachange/database-version-upgrade roachtest on a 3-node cluster.
func registerSchemaChangeDatabaseVersionUpgrade(r *testRegistry) {
	// This test tests 2 loosely related things:
	// 1. Correctness of database schema changes during the 20.1/20.2 mixed-
	//    version state, in which 20.2 nodes still use the deprecated database
	//    cache and non-lease-based schema change implementation.
	// 2. Ability to use ALTER DATABASE ... CONVERT TO SCHEMA WITH PARENT on
	//    databases created in 20.1.
	// TODO (lucy): Remove this test in 21.1.
	r.Add(testSpec{
		Name:       "schemachange/database-version-upgrade",
		Owner:      OwnerSQLSchema,
		MinVersion: "v20.2.0",
		Cluster:    makeClusterSpec(3),
		Run: func(ctx context.Context, t *test, c *cluster) {
			runSchemaChangeDatabaseVersionUpgrade(ctx, t, c, r.buildVersion)
		},
	})
}
// runSchemaChangeDatabaseVersionUpgrade drives a 3-node cluster through a
// 20.1 -> 20.2 rolling upgrade (including a rollback leg), running database
// rename/drop/recreate schema changes at every mixed-version stage, then
// re-parents each database with CONVERT TO SCHEMA WITH PARENT and validates
// with `debug doctor`.
//
// NOTE(review): several of the step closures below assign to the *outer*
// `err` declared in this function instead of a local. The steps run strictly
// sequentially so this works, but it is fragile — confirm before reusing any
// of these closures concurrently.
func runSchemaChangeDatabaseVersionUpgrade(
	ctx context.Context, t *test, c *cluster, buildVersion version.Version,
) {
	// An empty string means that the cockroach binary specified by flag
	// `cockroach` will be used.
	const mainVersion = ""
	predecessorVersion, err := PredecessorVersion(buildVersion)
	if err != nil {
		t.Fatal(err)
	}
	// createDatabaseWithTableStep returns a step creating dbName with a table t.
	// NOTE(review): this Printf fires when the step is *constructed* (i.e. all
	// six messages print during setup), unlike the other steps which log when
	// they execute — confirm whether that is intentional.
	createDatabaseWithTableStep := func(dbName string) versionStep {
		t.l.Printf("creating database %s", dbName)
		return func(ctx context.Context, t *test, u *versionUpgradeTest) {
			db := u.conn(ctx, t, 1)
			_, err := db.ExecContext(ctx, fmt.Sprintf(`CREATE DATABASE %s; CREATE TABLE %s.t(a INT)`, dbName, dbName))
			require.NoError(t, err)
		}
	}
	// assertDatabaseResolvable succeeds when dbName resolves and contains
	// exactly the expected table "t".
	assertDatabaseResolvable := func(ctx context.Context, db *gosql.DB, dbName string) error {
		var tblName string
		row := db.QueryRowContext(ctx, fmt.Sprintf(`SELECT table_name FROM [SHOW TABLES FROM %s]`, dbName))
		if err := row.Scan(&tblName); err != nil {
			return err
		}
		if tblName != "t" {
			return errors.AssertionFailedf("unexpected table name %s", tblName)
		}
		return nil
	}
	// assertDatabaseNotResolvable succeeds only when resolving dbName fails
	// with the exact "does not exist" error.
	// NOTE(review): `err =` here writes the outer err, not a local.
	assertDatabaseNotResolvable := func(ctx context.Context, db *gosql.DB, dbName string) error {
		_, err = db.ExecContext(ctx, fmt.Sprintf(`SELECT table_name FROM [SHOW TABLES FROM %s]`, dbName))
		if err == nil || err.Error() != "pq: target database or schema does not exist" {
			return errors.AssertionFailedf("unexpected error: %s", err)
		}
		return nil
	}
	// Rename the database, drop it, and create a new database with the original
	// name.
	runSchemaChangesStep := func(dbName string) versionStep {
		return func(ctx context.Context, t *test, u *versionUpgradeTest) {
			t.l.Printf("running schema changes on %s", dbName)
			newDbName := dbName + "_new_name"
			dbNode1 := u.conn(ctx, t, 1)
			dbNode2 := u.conn(ctx, t, 2)
			// Rename the database.
			_, err := dbNode1.ExecContext(ctx, fmt.Sprintf(`ALTER DATABASE %s RENAME TO %s`, dbName, newDbName))
			require.NoError(t, err)
			if err := assertDatabaseResolvable(ctx, dbNode1, newDbName); err != nil {
				t.Fatal(err)
			}
			if err := assertDatabaseNotResolvable(ctx, dbNode1, dbName); err != nil {
				t.Fatal(err)
			}
			// Also run the above steps connected to a different node. Since we still
			// use the incoherent database cache in the mixed-version state, we retry
			// until these queries produce the expected result.
			if err := testutils.SucceedsSoonError(func() error {
				return assertDatabaseResolvable(ctx, dbNode2, newDbName)
			}); err != nil {
				t.Fatal(err)
			}
			if err := testutils.SucceedsSoonError(func() error {
				return assertDatabaseNotResolvable(ctx, dbNode2, dbName)
			}); err != nil {
				t.Fatal(err)
			}
			// Drop the database.
			_, err = dbNode1.ExecContext(ctx, fmt.Sprintf(`DROP DATABASE %s CASCADE`, newDbName))
			require.NoError(t, err)
			if err := assertDatabaseNotResolvable(ctx, dbNode1, newDbName); err != nil {
				t.Fatal(err)
			}
			if err := testutils.SucceedsSoonError(func() error {
				return assertDatabaseNotResolvable(ctx, dbNode2, newDbName)
			}); err != nil {
				t.Fatal(err)
			}
			// Create a new database with the original name.
			_, err = dbNode1.ExecContext(ctx, fmt.Sprintf(`CREATE DATABASE %s; CREATE TABLE %s.t(a INT)`, dbName, dbName))
			require.NoError(t, err)
			if err := assertDatabaseResolvable(ctx, dbNode1, dbName); err != nil {
				t.Fatal(err)
			}
			// NOTE(review): this retry re-checks dbNode1, while every sibling
			// retry block targets dbNode2 — possibly a copy/paste slip; confirm
			// whether dbNode2 was intended here.
			if err := testutils.SucceedsSoonError(func() error {
				return assertDatabaseResolvable(ctx, dbNode1, dbName)
			}); err != nil {
				t.Fatal(err)
			}
		}
	}
	// createParentDatabaseStep creates the database the others are re-parented
	// under.
	createParentDatabaseStep := func(ctx context.Context, t *test, u *versionUpgradeTest) {
		t.l.Printf("creating parent database")
		db := u.conn(ctx, t, 1)
		_, err := db.ExecContext(ctx, `CREATE DATABASE new_parent_db`)
		require.NoError(t, err)
	}
	// reparentDatabaseStep converts dbName into a schema under new_parent_db.
	// NOTE(review): assigns the outer err (see the function-level note).
	reparentDatabaseStep := func(dbName string) versionStep {
		return func(ctx context.Context, t *test, u *versionUpgradeTest) {
			db := u.conn(ctx, t, 1)
			t.l.Printf("reparenting database %s", dbName)
			_, err = db.ExecContext(ctx, fmt.Sprintf(`ALTER DATABASE %s CONVERT TO SCHEMA WITH PARENT new_parent_db;`, dbName))
			require.NoError(t, err)
		}
	}
	// validationStep runs `debug doctor` against the cluster and logs its
	// output; a descriptor inconsistency fails the step.
	validationStep := func(ctx context.Context, t *test, u *versionUpgradeTest) {
		t.l.Printf("validating")
		buf, err := c.RunWithBuffer(ctx, t.l, c.Node(1),
			[]string{"./cockroach debug doctor cluster", "--url {pgurl:1}"}...)
		require.NoError(t, err)
		t.l.Printf("%s", buf)
	}
	// interactWithReparentedSchemaStep exercises DML and DDL against a
	// re-parented schema: insert, add column, create table, rename schema.
	interactWithReparentedSchemaStep := func(schemaName string) versionStep {
		return func(ctx context.Context, t *test, u *versionUpgradeTest) {
			t.l.Printf("running schema changes on %s", schemaName)
			db := u.conn(ctx, t, 1)
			_, err = db.ExecContext(ctx, `USE new_parent_db`)
			require.NoError(t, err)
			_, err = db.ExecContext(ctx, fmt.Sprintf(`INSERT INTO %s.t VALUES (1)`, schemaName))
			require.NoError(t, err)
			_, err = db.ExecContext(ctx, fmt.Sprintf(`ALTER TABLE %s.t ADD COLUMN b INT`, schemaName))
			require.NoError(t, err)
			_, err = db.ExecContext(ctx, fmt.Sprintf(`CREATE TABLE %s.t2()`, schemaName))
			require.NoError(t, err)
			newSchemaName := schemaName + "_new"
			_, err = db.ExecContext(ctx, fmt.Sprintf(`ALTER SCHEMA %s RENAME TO %s`, schemaName, newSchemaName))
			require.NoError(t, err)
		}
	}
	// dropDatabaseCascadeStep removes the parent database and everything that
	// was re-parented under it.
	dropDatabaseCascadeStep := func(ctx context.Context, t *test, u *versionUpgradeTest) {
		t.l.Printf("dropping parent database")
		db := u.conn(ctx, t, 1)
		_, err = db.ExecContext(ctx, `
USE defaultdb;
DROP DATABASE new_parent_db CASCADE;
`)
		require.NoError(t, err)
	}
	// This test creates several databases and then runs schema changes on each
	// one at a different stage (including deleting and re-creating) in the
	// rolling upgrade process. At the end we also test CONVERT TO SCHEMA WITH
	// PARENT on all of them. Note that we always issue schema change statements
	// to node 1 on this 3-node cluster and verify results on nodes 1 and 2.
	u := newVersionUpgradeTest(c,
		uploadAndStart(c.All(), predecessorVersion),
		waitForUpgradeStep(c.All()),
		preventAutoUpgradeStep(1),
		createDatabaseWithTableStep("db_0"),
		createDatabaseWithTableStep("db_1"),
		createDatabaseWithTableStep("db_2"),
		createDatabaseWithTableStep("db_3"),
		createDatabaseWithTableStep("db_4"),
		createDatabaseWithTableStep("db_5"),
		// Start upgrading to 20.2.
		binaryUpgradeStep(c.Node(1), mainVersion),
		runSchemaChangesStep("db_1"),
		binaryUpgradeStep(c.Nodes(2, 3), mainVersion),
		runSchemaChangesStep("db_2"),
		// Roll back to 20.1.
		binaryUpgradeStep(c.Node(1), predecessorVersion),
		runSchemaChangesStep("db_3"),
		binaryUpgradeStep(c.Nodes(2, 3), predecessorVersion),
		runSchemaChangesStep("db_4"),
		// Upgrade nodes to 20.2 again and finalize the upgrade.
		binaryUpgradeStep(c.All(), mainVersion),
		runSchemaChangesStep("db_5"),
		allowAutoUpgradeStep(1),
		waitForUpgradeStep(c.All()),
		createParentDatabaseStep,
		reparentDatabaseStep("db_0"),
		reparentDatabaseStep("db_1"),
		reparentDatabaseStep("db_2"),
		reparentDatabaseStep("db_3"),
		reparentDatabaseStep("db_4"),
		reparentDatabaseStep("db_5"),
		validationStep,
		// Run some schema changes on the re-parented schemas and their tables.
		interactWithReparentedSchemaStep("db_0"),
		interactWithReparentedSchemaStep("db_1"),
		interactWithReparentedSchemaStep("db_2"),
		interactWithReparentedSchemaStep("db_3"),
		interactWithReparentedSchemaStep("db_4"),
		interactWithReparentedSchemaStep("db_5"),
		validationStep,
		dropDatabaseCascadeStep,
		validationStep,
	)
	u.run(ctx, t)
}
|
package method
import (
"context"
"gm/manager"
"gm/param"
"shared/protobuf/pb"
"shared/utility/errors"
)
// MaintainSwitch updates the global maintenance switch and then tells the
// game and foreplay RPC services to reload their maintenance state, stopping
// at the first failure.
func (p *HttpPostHandler) MaintainSwitch(ctx context.Context, m *param.MaintainSwitch) error {
	if err := manager.Global.SetMaintainSwitch(ctx, m.Switch); err != nil {
		return errors.WrapTrace(err)
	}
	if _, err := manager.RPCGameClient.ReloadMaintain(ctx, &pb.ReloadMaintainReq{}); err != nil {
		return errors.WrapTrace(err)
	}
	if _, err := manager.RPCForeplayClient.ReloadMaintain(ctx, &pb.ReloadMaintainReq{}); err != nil {
		return errors.WrapTrace(err)
	}
	return nil
}
|
package core
import (
"context"
"errors"
"time"
"github.com/bots-house/share-file-bot/pkg/secretid"
"github.com/volatiletech/null/v8"
)
// DocumentID identifies a shared Document (an alias for the share id).
type DocumentID int
// Document represents shared document.
type Document struct {
	// Unique ID of Document.
	ID DocumentID
	// Telegram File ID
	FileID string
	// Public File ID used in share links (generated via secretid).
	PublicID string
	// Telegram Unique File ID
	// UniqueFileID string
	// Caption of file; null when the file was shared without one.
	Caption null.String
	// MIMEType of file; null when unknown.
	MIMEType null.String
	// File name
	Name string
	// File size in bytes
	Size int
	// Reference to user who uploads document.
	OwnerID UserID
	// Time when Document was created.
	CreatedAt time.Time
}
// RegenPublicID replaces the document's public share id with a freshly
// generated secret id, invalidating previously issued links.
func (doc *Document) RegenPublicID() {
	doc.PublicID = secretid.Generate()
}
// NewDocument constructs a Document with a freshly generated public id and
// the current time as creation timestamp. Empty caption or mimeType become
// null values. The ID field is left zero until the store assigns it.
func NewDocument(
	fileID string,
	caption string,
	mimeType string,
	size int,
	name string,
	ownerID UserID,
) *Document {
	doc := &Document{
		FileID:    fileID,
		PublicID:  secretid.Generate(),
		Size:      size,
		Name:      name,
		OwnerID:   ownerID,
		CreatedAt: time.Now(),
	}
	doc.Caption = null.NewString(caption, caption != "")
	doc.MIMEType = null.NewString(mimeType, mimeType != "")
	return doc
}
// ErrDocumentNotFound is returned when a query matches no stored document.
var ErrDocumentNotFound = errors.New("document not found")
// DocumentStoreQuery is a chainable query builder over stored documents:
// the filter methods (ID, OwnerID, PublicID) narrow the selection and the
// terminal methods (One, Delete, Count) execute it.
type DocumentStoreQuery interface {
	ID(id DocumentID) DocumentStoreQuery
	OwnerID(id UserID) DocumentStoreQuery
	PublicID(id string) DocumentStoreQuery
	One(ctx context.Context) (*Document, error)
	Delete(ctx context.Context) error
	Count(ctx context.Context) (int, error)
}
// DocumentStore define persistence interface for Document.
type DocumentStore interface {
	// Add persists the document and updates its ID in place. (The parameter
	// was previously named "Document", shadowing the type name in the
	// signature; interface parameter names are documentation only, so the
	// rename is fully compatible.)
	Add(ctx context.Context, doc *Document) error
	// Query starts a new DocumentStoreQuery.
	Query() DocumentStoreQuery
}
|
package p_00101_00200
// 119. Pascal's Triangle II, https://leetcode.com/problems/pascals-triangle-ii/

// getRow returns row rowIndex (0-based) of Pascal's triangle.
// It builds the row in place from right to left in O(rowIndex) extra space;
// the previous implementation memoized the entire triangle, costing
// O(rowIndex^2) memory for a single-row answer.
func getRow(rowIndex int) []int {
	row := make([]int, rowIndex+1)
	row[0] = 1
	for i := 1; i <= rowIndex; i++ {
		// Walk right-to-left so each row[j-1] read is still the previous
		// row's value.
		for j := i; j > 0; j-- {
			row[j] += row[j-1]
		}
	}
	return row
}

// getValue returns Pascal's triangle entry (i, j) = C(i, j), caching results
// in memo, which must be a triangular [][]int with memo[i] of length i+1.
// Kept for package-internal callers; getRow no longer uses it.
func getValue(i int, j int, memo [][]int) int {
	if memo[i][j] != 0 {
		return memo[i][j]
	}
	if j == 0 || i == j {
		memo[i][j] = 1
	} else {
		memo[i][j] = getValue(i-1, j-1, memo) + getValue(i-1, j, memo)
	}
	return memo[i][j]
}
|
package helmet
import (
"fmt"
"strconv"
"strings"
"github.com/gin-gonic/gin"
)
// NoSniff sets X-Content-Type-Options to "nosniff", protecting the server
// from MIME-type sniffing.
func NoSniff() gin.HandlerFunc {
	return func(c *gin.Context) {
		header := c.Writer.Header()
		header.Set("X-Content-Type-Options", "nosniff")
	}
}
// DNSPrefetchControl sets X-DNS-Prefetch-Control to "off", preventing the
// browser from prefetching DNS.
func DNSPrefetchControl() gin.HandlerFunc {
	return func(c *gin.Context) {
		header := c.Writer.Header()
		header.Set("X-DNS-Prefetch-Control", "off")
	}
}
// FrameGuard sets the X-Frame-Options header, preventing the site from being
// served in an iframe. The optional argument overrides the default "DENY".
func FrameGuard(opt ...string) gin.HandlerFunc {
	mode := "DENY"
	if len(opt) > 0 {
		mode = opt[0]
	}
	return func(c *gin.Context) {
		c.Writer.Header().Set("X-Frame-Options", mode)
	}
}
// SetHSTS sets the Strict-Transport-Security header, defaulting to a max-age
// of 5184000 seconds (60 days); an optional integer overrides the max-age in
// seconds. When sub is true the policy also covers subdomains.
func SetHSTS(sub bool, opt ...int) gin.HandlerFunc {
	seconds := 5184000
	if len(opt) > 0 {
		seconds = opt[0]
	}
	value := "max-age=" + strconv.Itoa(seconds)
	if sub {
		value += "; includeSubDomains"
	}
	return func(c *gin.Context) {
		c.Writer.Header().Set("Strict-Transport-Security", value)
	}
}
// IENoOpen sets X-Download-Options to stop Internet Explorer from
// executing downloads in the site's security context.
func IENoOpen() gin.HandlerFunc {
	header, value := "X-Download-Options", "noopen"
	return func(ctx *gin.Context) {
		ctx.Writer.Header().Set(header, value)
	}
}
// XSSFilter enables the browser's built-in reflected-XSS filter in
// blocking mode via X-XSS-Protection.
func XSSFilter() gin.HandlerFunc {
	header, value := "X-XSS-Protection", "1; mode=block"
	return func(ctx *gin.Context) {
		ctx.Writer.Header().Set(header, value)
	}
}
// Default returns a number of handlers that are advised to use for basic HTTP(s) protection:
// nosniff, DNS prefetch off, frame deny, HSTS with subdomains, IE no-open
// and the XSS filter, in that order.
func Default() (gin.HandlerFunc, gin.HandlerFunc, gin.HandlerFunc, gin.HandlerFunc, gin.HandlerFunc, gin.HandlerFunc) {
	return NoSniff(), DNSPrefetchControl(), FrameGuard(), SetHSTS(true), IENoOpen(), XSSFilter()
}
// Referrer sets the Referrer-Policy header so the browser does not leak
// navigation data to other sites. An optional value overrides the
// default "no-referrer".
func Referrer(opt ...string) gin.HandlerFunc {
	policy := "no-referrer"
	if len(opt) > 0 {
		policy = opt[0]
	}
	return func(ctx *gin.Context) {
		ctx.Writer.Header().Set("Referrer-Policy", policy)
	}
}
// NoCache sets the full battery of cache-busting headers so browsers and
// proxies do not store the response.
func NoCache() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		h := ctx.Writer.Header()
		h.Set("Surrogate-Control", "no-store")
		h.Set("Cache-Control", "no-store, no-cache, must-revalidate, proxy-revalidate")
		h.Set("Pragma", "no-cache")
		h.Set("Expires", "0")
	}
}
// ContentSecurityPolicy sets a header which will restrict your browser to only allow certain sources for assets on your website
// The function accepts a map of its parameters which are appended to the header so you can control which headers should be set
// The second parameter of the function is a boolean, which set to true will tell the handler to also set legacy headers, like
// those that work in older versions of Chrome and Firefox.
//
// Directives are emitted in sorted key order: Go map iteration order is
// random, so the previous implementation produced a differently ordered
// (though semantically equivalent) header on every run.
/*
	Example usage:
		opts := map[string]string{
			"default-src": "'self'",
			"img-src": "*",
			"media-src": "media1.com media2.com",
			"script-src": "userscripts.example.com"
		}
		s.Use(helmet.ContentSecurityPolicy(opts, true))
	See [Content Security Policy on MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP) for more info.
*/
func ContentSecurityPolicy(opt map[string]string, legacy bool) gin.HandlerFunc {
	keys := make([]string, 0, len(opt))
	for k := range opt {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	var b strings.Builder
	for i, k := range keys {
		if i > 0 {
			b.WriteString("; ")
		}
		fmt.Fprintf(&b, "%s %s", k, opt[k])
	}
	policy := b.String()
	return func(c *gin.Context) {
		if legacy {
			c.Writer.Header().Set("X-Webkit-CSP", policy)
			c.Writer.Header().Set("X-Content-Security-Policy", policy)
		}
		c.Writer.Header().Set("Content-Security-Policy", policy)
	}
}
// ExpectCT sets the Expect-CT (Certificate Transparency) header.
// maxAge is the policy TTL in delta seconds; enforce adds the "enforce"
// directive (otherwise the policy is report-only); an optional reportURI
// receives violation reports.
func ExpectCT(maxAge int, enforce bool, reportURI ...string) gin.HandlerFunc {
	var b strings.Builder
	if enforce {
		b.WriteString("enforce, ")
	}
	if len(reportURI) > 0 {
		fmt.Fprintf(&b, "report-uri=%s, ", reportURI[0])
	}
	fmt.Fprintf(&b, "max-age=%d", maxAge)
	policy := b.String()
	return func(ctx *gin.Context) {
		ctx.Writer.Header().Set("Expect-CT", policy)
	}
}
// SetHPKP sets HTTP Public-Key-Pinning. Do not enable this without
// understanding the consequences ([MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/Public_Key_Pinning)):
// a bad pin set can DoS your own domain. keys are base64 SHA-256 pins,
// maxAge is the TTL in seconds, sub adds includeSubDomains and an
// optional reportURI receives violation reports.
/*
	Example usage:
		keys := []string{"cUPcTAZWKaASuYWhhneDttWpY3oBAkE3h2+soZS7sWs=", "M8HztCzM3elUxkcjR2S5P4hhyBNf6lHkmjAHKhpGPWE="}
		r := gin.New()
		r.Use(SetHPKP(keys, 5184000, true, "domain.com"))
*/
func SetHPKP(keys []string, maxAge int, sub bool, reportURI ...string) gin.HandlerFunc {
	var b strings.Builder
	for _, key := range keys {
		fmt.Fprintf(&b, "pin-sha256=\"%s\"; ", key)
	}
	fmt.Fprintf(&b, "max-age=%d; ", maxAge)
	if sub {
		b.WriteString("includeSubDomains; ")
	}
	if len(reportURI) > 0 {
		fmt.Fprintf(&b, "report-uri=\"%s\"", reportURI[0])
	}
	policy := strings.TrimSuffix(b.String(), "; ")
	return func(ctx *gin.Context) {
		ctx.Writer.Header().Set("Public-Key-Pins", policy)
	}
}
|
package ravendb
import (
"crypto/tls"
"crypto/x509"
"strings"
"sync"
"time"
)
// Note: Java's IDocumentStore is DocumentStore
// Note: Java's DocumentStoreBase is folded into DocumentStore
// DocumentStore represents a database
type DocumentStore struct {
	// from DocumentStoreBase
	// Per-session listener lists, copied onto every new session by
	// registerEvents. A nil element is a removed listener.
	onBeforeStore []func(*BeforeStoreEventArgs)
	onAfterSaveChanges []func(*AfterSaveChangesEventArgs)
	onBeforeDelete []func(*BeforeDeleteEventArgs)
	onBeforeQuery []func(*BeforeQueryEventArgs)
	// TODO: there's no way to register for this event
	onSessionCreated []func(*SessionCreatedEventArgs)
	subscriptions *DocumentSubscriptions
	// disposed is set by Close; most operations are invalid afterwards
	disposed bool
	conventions *DocumentConventions
	urls []string // urls for HTTP endopoints of server nodes
	// initialized is set by Initialize; setters assert against it
	initialized bool
	Certificate *tls.Certificate
	TrustStore *x509.Certificate
	database string // name of the database
	// maps database name to DatabaseChanges. Must be protected with mutex
	databaseChanges map[string]*DatabaseChanges
	// Note: access must be protected with mu
	// Lazy.Value is **EvictItemsFromCacheBasedOnChanges
	aggressiveCacheChanges map[string]*evictItemsFromCacheBasedOnChanges
	// maps database name to its RequestsExecutor
	// access must be protected with mu
	// TODO: in Java is ConcurrentMap<String, RequestExecutor> requestExecutors
	// so must protect access with mutex and use case-insensitive lookup
	requestsExecutors map[string]*RequestExecutor
	multiDbHiLo *MultiDatabaseHiLoIDGenerator
	maintenanceOperationExecutor *MaintenanceOperationExecutor
	operationExecutor *OperationExecutor
	identifier string // human-readable store identifier for debugging/testing
	aggressiveCachingUsed bool
	// close callbacks, invoked by Close before/after teardown respectively
	afterClose []func(*DocumentStore)
	beforeClose []func(*DocumentStore)
	mu sync.Mutex // guards the maps and flags noted above
}
// methods from DocumentStoreBase
// GetConventions returns the store's DocumentConventions, lazily
// creating default conventions on first use.
func (s *DocumentStore) GetConventions() *DocumentConventions {
	if s.conventions != nil {
		return s.conventions
	}
	s.conventions = NewDocumentConventions()
	return s.conventions
}
// SetConventions sets DocumentConventions
// Panics if the store has already been initialized.
func (s *DocumentStore) SetConventions(conventions *DocumentConventions) {
	s.assertNotInitialized("conventions")
	s.conventions = conventions
}
// Subscriptions returns DocumentSubscriptions which allows subscribing to changes in store
// (created once in newDocumentStore, so never nil for a store built via the constructors).
func (s *DocumentStore) Subscriptions() *DocumentSubscriptions {
	return s.subscriptions
}
// GetUrls returns urls of all RavenDB nodes
// The internal slice is returned directly (not copied), so callers must
// not mutate it.
func (s *DocumentStore) GetUrls() []string {
	return s.urls
}
// SetUrls sets initial urls of RavenDB nodes.
// Panics if urls is empty or the store is already initialized.
// Trailing "/" is stripped from each url; the caller's slice is modified
// in place and retained.
func (s *DocumentStore) SetUrls(urls []string) {
	panicIf(len(urls) == 0, "urls is empty")
	s.assertNotInitialized("urls")
	// The loop variable was previously named "s", shadowing the receiver;
	// renamed for clarity (behavior unchanged).
	for i, url := range urls {
		urls[i] = strings.TrimSuffix(url, "/")
	}
	s.urls = urls
}
// ensureNotClosed returns an error if Close has already been called.
func (s *DocumentStore) ensureNotClosed() error {
	if !s.disposed {
		return nil
	}
	return newIllegalStateError("The document store has already been disposed and cannot be used")
}
// AddBeforeStoreListener registers a function that will be called before
// storing an entity. It'll be registered with every new session.
// Returns a listener id that can be passed to RemoveBeforeStoreListener
// to unregister the listener.
func (s *DocumentStore) AddBeforeStoreListener(handler func(*BeforeStoreEventArgs)) int {
	s.onBeforeStore = append(s.onBeforeStore, handler)
	return len(s.onBeforeStore) - 1
}
// RemoveBeforeStoreListener removes a listener given id returned by AddBeforeStoreListener
// The slot is set to nil (not deleted) so other listener ids stay valid.
func (s *DocumentStore) RemoveBeforeStoreListener(handlerID int) {
	s.onBeforeStore[handlerID] = nil
}
// AddAfterSaveChangesListener registers a handler for the
// after-save-changes event. It'll be registered with every new session.
// Returns a listener id that can be passed to
// RemoveAfterSaveChangesListener to unregister the listener.
func (s *DocumentStore) AddAfterSaveChangesListener(handler func(*AfterSaveChangesEventArgs)) int {
	id := len(s.onAfterSaveChanges)
	s.onAfterSaveChanges = append(s.onAfterSaveChanges, handler)
	return id
}
// RemoveAfterSaveChangesListener removes a listener given id returned by AddAfterSaveChangesListener
// The slot is set to nil (not deleted) so other listener ids stay valid.
func (s *DocumentStore) RemoveAfterSaveChangesListener(handlerID int) {
	s.onAfterSaveChanges[handlerID] = nil
}
// AddBeforeDeleteListener registers a function that will be called before
// deleting an entity. It'll be registered with every new session.
// Returns a listener id that can be passed to RemoveBeforeDeleteListener
// to unregister the listener.
func (s *DocumentStore) AddBeforeDeleteListener(handler func(*BeforeDeleteEventArgs)) int {
	id := len(s.onBeforeDelete)
	s.onBeforeDelete = append(s.onBeforeDelete, handler)
	return id
}
// RemoveBeforeDeleteListener removes a listener given id returned by AddBeforeDeleteListener
// The slot is set to nil (not deleted) so other listener ids stay valid.
func (s *DocumentStore) RemoveBeforeDeleteListener(handlerID int) {
	s.onBeforeDelete[handlerID] = nil
}
// AddBeforeQueryListener registers a function that will be called before
// running a query; it can customize the query via
// DocumentQueryCustomization. It'll be registered with every new session.
// Returns a listener id that can be passed to RemoveBeforeQueryListener
// to unregister the listener.
func (s *DocumentStore) AddBeforeQueryListener(handler func(*BeforeQueryEventArgs)) int {
	id := len(s.onBeforeQuery)
	s.onBeforeQuery = append(s.onBeforeQuery, handler)
	return id
}
// RemoveBeforeQueryListener removes a listener given id returned by AddBeforeQueryListener
// The slot is set to nil (not deleted) so other listener ids stay valid.
func (s *DocumentStore) RemoveBeforeQueryListener(handlerID int) {
	s.onBeforeQuery[handlerID] = nil
}
// registerEvents copies the store's registered listeners onto a newly
// created session, skipping removed (nil) slots.
func (s *DocumentStore) registerEvents(session *InMemoryDocumentSessionOperations) {
	// TODO: unregister those events?
	for _, h := range s.onBeforeStore {
		if h == nil {
			continue
		}
		session.AddBeforeStoreListener(h)
	}
	for _, h := range s.onAfterSaveChanges {
		if h == nil {
			continue
		}
		session.AddAfterSaveChangesListener(h)
	}
	for _, h := range s.onBeforeDelete {
		if h == nil {
			continue
		}
		session.AddBeforeDeleteListener(h)
	}
	for _, h := range s.onBeforeQuery {
		if h == nil {
			continue
		}
		session.AddBeforeQueryListener(h)
	}
}
// afterSessionCreated notifies every registered session-created listener
// about the new session, skipping removed (nil) slots.
func (s *DocumentStore) afterSessionCreated(session *InMemoryDocumentSessionOperations) {
	for _, h := range s.onSessionCreated {
		if h == nil {
			continue
		}
		// each handler gets its own args value, as before
		h(&SessionCreatedEventArgs{Session: session})
	}
}
// assertInitialized returns an error unless Initialize has been called.
func (s *DocumentStore) assertInitialized() error {
	if s.initialized {
		return nil
	}
	return newIllegalStateError("DocumentStore must be initialized")
}
// assertNotInitialized panics if the store is already initialized;
// property names the setting the caller tried to change.
func (s *DocumentStore) assertNotInitialized(property string) {
	panicIf(s.initialized, "You cannot set '%s' after the document store has been initialized.", property)
}
// GetDatabase returns the store's default database name.
func (s *DocumentStore) GetDatabase() string {
	return s.database
}
// SetDatabase sets the store's default database name.
// Panics if the store has already been initialized.
func (s *DocumentStore) SetDatabase(database string) {
	s.assertNotInitialized("database")
	s.database = database
}
// AggressivelyCache enables aggressive caching for database with a fixed
// 24-hour duration; see AggressivelyCacheForDatabase.
func (s *DocumentStore) AggressivelyCache(database string) (CancelFunc, error) {
	return s.AggressivelyCacheForDatabase(time.Hour*24, database)
}
// newDocumentStore allocates a DocumentStore with empty lookup maps,
// default conventions and an attached DocumentSubscriptions.
func newDocumentStore() *DocumentStore {
	store := &DocumentStore{
		conventions:            NewDocumentConventions(),
		requestsExecutors:      map[string]*RequestExecutor{},
		databaseChanges:        map[string]*DatabaseChanges{},
		aggressiveCacheChanges: map[string]*evictItemsFromCacheBasedOnChanges{},
	}
	store.subscriptions = newDocumentSubscriptions(store)
	return store
}
// NewDocumentStore creates a store for the given node urls and database.
// Either argument may be empty and set later (before Initialize).
func NewDocumentStore(urls []string, database string) *DocumentStore {
	store := newDocumentStore()
	if len(urls) != 0 {
		store.SetUrls(urls)
	}
	if len(database) != 0 {
		store.SetDatabase(database)
	}
	return store
}
// GetIdentifier returns an identifier of the store for debugging/testing:
// the explicitly set identifier if any, otherwise the urls joined with
// "," plus the database name when one is set.
func (s *DocumentStore) GetIdentifier() string {
	if s.identifier != "" {
		return s.identifier
	}
	if len(s.urls) == 0 {
		return ""
	}
	id := strings.Join(s.urls, ",")
	if s.database != "" {
		id += " (DB: " + s.database + ")"
	}
	return id
}
// SetIdentifier overrides the identifier returned by GetIdentifier.
func (s *DocumentStore) SetIdentifier(identifier string) {
	s.identifier = identifier
}
// Close closes the Store: beforeClose callbacks run first, then cache
// evictors, change feeds and the HiLo generator are shut down,
// subscriptions are closed, the store is marked disposed, afterClose
// callbacks run and finally the request executors are closed.
// Calling Close more than once is a no-op.
func (s *DocumentStore) Close() {
	if s.disposed {
		redbg("DocumentStore.Close: already disposed\n")
		return
	}
	redbg("DocumentStore.Close\n")
	for _, fn := range s.beforeClose {
		// Skip slots nil-ed by RemoveBeforeCloseListener; previously a
		// removed listener made Close panic on a nil function call.
		if fn != nil {
			fn(s)
		}
	}
	s.beforeClose = nil
	for _, evict := range s.aggressiveCacheChanges {
		evict.Close()
	}
	for _, changes := range s.databaseChanges {
		changes.Close()
	}
	if s.multiDbHiLo != nil {
		s.multiDbHiLo.ReturnUnusedRange()
	}
	if s.Subscriptions() != nil {
		_ = s.Subscriptions().Close()
	}
	s.disposed = true
	for _, fn := range s.afterClose {
		// Same nil-guard for slots removed via RemoveAfterCloseListener.
		if fn != nil {
			fn(s)
		}
	}
	s.afterClose = nil
	for _, re := range s.requestsExecutors {
		re.Close()
	}
}
// OpenSession opens a new session to the document store.
// An empty database means the store's default database.
func (s *DocumentStore) OpenSession(database string) (*DocumentSession, error) {
	return s.OpenSessionWithOptions(&SessionOptions{Database: database})
}
// OpenSessionWithOptions opens a new session using options for the
// database name and, optionally, a pre-built request executor.
// The store must be initialized and not closed.
func (s *DocumentStore) OpenSessionWithOptions(options *SessionOptions) (*DocumentSession, error) {
	if err := s.assertInitialized(); err != nil {
		return nil, err
	}
	if err := s.ensureNotClosed(); err != nil {
		return nil, err
	}
	// each session gets a fresh UUID as its id
	sessionID := NewUUID().String()
	databaseName := options.Database
	if databaseName == "" {
		databaseName = s.GetDatabase()
	}
	requestExecutor := options.RequestExecutor
	if requestExecutor == nil {
		requestExecutor = s.GetRequestExecutor(databaseName)
	}
	session := NewDocumentSession(databaseName, s, sessionID, requestExecutor)
	// propagate store-level listeners, then fire session-created hooks
	s.registerEvents(session.InMemoryDocumentSessionOperations)
	s.afterSessionCreated(session.InMemoryDocumentSessionOperations)
	return session, nil
}
// ExecuteIndex runs a single index-creation task against database.
// The store must be initialized.
func (s *DocumentStore) ExecuteIndex(task *IndexCreationTask, database string) error {
	err := s.assertInitialized()
	if err != nil {
		return err
	}
	return task.Execute(s, s.conventions, database)
}
// ExecuteIndexes creates all given indexes through a single PutIndexes
// maintenance operation. An empty database means the store's default.
func (s *DocumentStore) ExecuteIndexes(tasks []*IndexCreationTask, database string) error {
	if err := s.assertInitialized(); err != nil {
		return err
	}
	if database == "" {
		database = s.GetDatabase()
	}
	op := NewPutIndexesOperation(indexCreationCreateIndexesToAdd(tasks, s.conventions)...)
	return s.Maintenance().ForDatabase(database).Send(op)
}
// GetRequestExecutor gets a request executor.
// database is optional
// Executors are created lazily and cached per lower-cased database name.
// NOTE(review): the lookup and the insert happen under separate lock
// acquisitions, so two concurrent callers can both create an executor for
// the same database; the loser's executor is overwritten in the map and
// never closed. Confirm whether this race is acceptable.
func (s *DocumentStore) GetRequestExecutor(database string) *RequestExecutor {
	must(s.assertInitialized())
	if database == "" {
		database = s.GetDatabase()
	}
	database = strings.ToLower(database)
	s.mu.Lock()
	executor, ok := s.requestsExecutors[database]
	s.mu.Unlock()
	if ok {
		return executor
	}
	if !s.GetConventions().IsDisableTopologyUpdates() {
		executor = RequestExecutorCreate(s.GetUrls(), database, s.Certificate, s.TrustStore, s.GetConventions())
	} else {
		executor = RequestExecutorCreateForSingleNodeWithConfigurationUpdates(s.GetUrls()[0], database, s.Certificate, s.TrustStore, s.GetConventions())
	}
	s.mu.Lock()
	s.requestsExecutors[database] = executor
	s.mu.Unlock()
	return executor
}
// Initialize initializes the document store; must be called before
// executing any operation. Validates the configuration and installs the
// HiLo document-ID generator if none is set. Idempotent.
func (s *DocumentStore) Initialize() error {
	if s.initialized {
		return nil
	}
	if err := s.assertValidConfiguration(); err != nil {
		return err
	}
	conventions := s.conventions
	if conventions.GetDocumentIDGenerator() == nil {
		generator := NewMultiDatabaseHiLoIDGenerator(s, s.GetConventions())
		s.multiDbHiLo = generator
		conventions.SetDocumentIDGenerator(func(dbName string, entity interface{}) (string, error) {
			return generator.GenerateDocumentID(dbName, entity)
		})
	}
	s.initialized = true
	return nil
}
// assertValidConfiguration checks that the store has at least one url.
func (s *DocumentStore) assertValidConfiguration() error {
	if len(s.urls) > 0 {
		return nil
	}
	return newIllegalArgumentError("Must provide urls to NewDocumentStore")
}
// RestoreCaching restores a request executor's previous aggressive-cache
// options when Close is called; returned by DisableAggressiveCaching.
type RestoreCaching struct {
	re *RequestExecutor
	old *AggressiveCacheOptions
}
// Close restores the saved aggressive-caching options; always returns nil.
func (r *RestoreCaching) Close() error {
	r.re.aggressiveCaching = r.old
	return nil
}
// DisableAggressiveCaching turns off aggressive caching for databaseName
// (store's default database when empty) and returns a RestoreCaching
// whose Close re-enables the previous setting.
func (s *DocumentStore) DisableAggressiveCaching(databaseName string) *RestoreCaching {
	if databaseName == "" {
		databaseName = s.GetDatabase()
	}
	re := s.GetRequestExecutor(databaseName)
	restore := &RestoreCaching{
		re:  re,
		old: re.aggressiveCaching,
	}
	re.aggressiveCaching = nil
	return restore
}
// Changes returns the DatabaseChanges feed for database (store default
// when empty), creating and caching it on first use.
// NOTE(review): the lookup and the insert happen under separate lock
// acquisitions, so two concurrent callers may each create a feed for the
// same database; one is overwritten without being closed — confirm.
func (s *DocumentStore) Changes(database string) *DatabaseChanges {
	must(s.assertInitialized())
	if database == "" {
		database = s.GetDatabase()
	}
	s.mu.Lock()
	changes, ok := s.databaseChanges[database]
	s.mu.Unlock()
	if !ok {
		changes = s.createDatabaseChanges(database)
		s.mu.Lock()
		s.databaseChanges[database] = changes
		s.mu.Unlock()
	}
	return changes
}
// createDatabaseChanges builds a DatabaseChanges for database whose
// dispose callback removes it from the store's cache.
func (s *DocumentStore) createDatabaseChanges(database string) *DatabaseChanges {
	panicIf(database == "", "database can't be empty string")
	re := s.GetRequestExecutor(database)
	return newDatabaseChanges(re, database, func() {
		s.mu.Lock()
		delete(s.databaseChanges, database)
		s.mu.Unlock()
	})
}
// GetLastDatabaseChangesStateError returns the last connection-state
// error of the change feed for database (store default when empty), or
// nil when no feed exists.
func (s *DocumentStore) GetLastDatabaseChangesStateError(database string) error {
	if database == "" {
		database = s.GetDatabase()
	}
	s.mu.Lock()
	changes, ok := s.databaseChanges[database]
	s.mu.Unlock()
	if !ok {
		return nil
	}
	return changes.getLastConnectionStateError()
}
// AggressivelyCacheFor enables aggressive caching for cacheDuration on
// the store's default database; see AggressivelyCacheForDatabase.
func (s *DocumentStore) AggressivelyCacheFor(cacheDuration time.Duration) (CancelFunc, error) {
	return s.AggressivelyCacheForDatabase(cacheDuration, "")
}
// AggressivelyCacheForDatabase enables aggressive caching on database's
// request executor for cacheDuration, wiring up cache eviction on first
// use. The returned CancelFunc restores the previous caching options.
func (s *DocumentStore) AggressivelyCacheForDatabase(cacheDuration time.Duration, database string) (CancelFunc, error) {
	if database == "" {
		database = s.GetDatabase()
	}
	if database == "" {
		return nil, newIllegalArgumentError("must have database")
	}
	s.mu.Lock()
	cachingUsed := s.aggressiveCachingUsed
	s.mu.Unlock()
	// first caller wires up change-listening so cached items get evicted
	if !cachingUsed {
		err := s.listenToChangesAndUpdateTheCache(database)
		if err != nil {
			return nil, err
		}
	}
	// TODO: protect access to aggressiveCaching
	opts := &AggressiveCacheOptions{
		Duration: cacheDuration,
	}
	re := s.GetRequestExecutor(database)
	oldOpts := re.aggressiveCaching
	re.aggressiveCaching = opts
	restorer := func() {
		re.aggressiveCaching = oldOpts
	}
	return restorer, nil
}
// listenToChangesAndUpdateTheCache creates (once per database) the
// evictor that drops cached items when the database reports changes.
// NOTE(review): the check and the insert happen under separate lock
// acquisitions, so concurrent callers may both create an evictor and one
// is overwritten without being closed — confirm this is acceptable.
func (s *DocumentStore) listenToChangesAndUpdateTheCache(database string) error {
	s.mu.Lock()
	s.aggressiveCachingUsed = true
	evict := s.aggressiveCacheChanges[database]
	s.mu.Unlock()
	if evict != nil {
		return nil
	}
	evict, err := newEvictItemsFromCacheBasedOnChanges(s, database)
	if err != nil {
		return err
	}
	s.mu.Lock()
	s.aggressiveCacheChanges[database] = evict
	s.mu.Unlock()
	return nil
}
// AddBeforeCloseListener registers fn to run at the start of Close;
// returns an id for RemoveBeforeCloseListener.
func (s *DocumentStore) AddBeforeCloseListener(fn func(*DocumentStore)) int {
	id := len(s.beforeClose)
	s.beforeClose = append(s.beforeClose, fn)
	return id
}
// RemoveBeforeCloseListener unregisters the listener with the given id
// (as returned by AddBeforeCloseListener) by nil-ing its slot.
func (s *DocumentStore) RemoveBeforeCloseListener(idx int) {
	s.beforeClose[idx] = nil
}
// AddAfterCloseListener registers fn to run near the end of Close;
// returns an id for RemoveAfterCloseListener.
func (s *DocumentStore) AddAfterCloseListener(fn func(*DocumentStore)) int {
	id := len(s.afterClose)
	s.afterClose = append(s.afterClose, fn)
	return id
}
// RemoveAfterCloseListener unregisters the listener with the given id
// (as returned by AddAfterCloseListener) by nil-ing its slot.
func (s *DocumentStore) RemoveAfterCloseListener(idx int) {
	s.afterClose[idx] = nil
}
// Maintenance returns the lazily created maintenance-operation executor.
// Panics if the store is not initialized.
func (s *DocumentStore) Maintenance() *MaintenanceOperationExecutor {
	must(s.assertInitialized())
	if s.maintenanceOperationExecutor != nil {
		return s.maintenanceOperationExecutor
	}
	s.maintenanceOperationExecutor = NewMaintenanceOperationExecutor(s, "")
	return s.maintenanceOperationExecutor
}
// Operations returns the lazily created operation executor.
// NOTE(review): unlike Maintenance, this does not assert the store is
// initialized, and the lazy init is not mutex-guarded — confirm intended.
func (s *DocumentStore) Operations() *OperationExecutor {
	if s.operationExecutor == nil {
		s.operationExecutor = NewOperationExecutor(s, "")
	}
	return s.operationExecutor
}
// BulkInsert starts a bulk-insert operation on database (the store's
// default database when empty).
func (s *DocumentStore) BulkInsert(database string) *BulkInsertOperation {
	if database != "" {
		return NewBulkInsertOperation(database, s)
	}
	return NewBulkInsertOperation(s.GetDatabase(), s)
}
|
// Copyright 2020 Paul Greenberg greenpau@outlook.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package identity
import (
"github.com/greenpau/go-identity/internal/utils"
"testing"
)
// TestNewDatabase exercises the full user lifecycle against a fresh
// database: tag compliance, user creation with password/email/name/roles,
// claims, persistence to disk, authentication, repeated password changes
// and the password-history cap.
func TestNewDatabase(t *testing.T) {
	var testFailed int
	dbPath := "assets/tests/userdb.json"
	db := NewDatabase()
	// struct-tag compliance check on the new database
	complianceMessages, compliant := utils.GetTagCompliance(db)
	if !compliant {
		testFailed++
	}
	for _, entry := range complianceMessages {
		t.Logf("%s", entry)
	}
	if testFailed > 0 {
		t.Fatalf("encountered %d errors", testFailed)
	}
	// build a user with random credentials
	user := NewUser("jsmith")
	email := "jsmith@gmail.com"
	//password := "jsmith123"
	//newPassword := "johnsmith123"
	password := NewRandomString(12)
	newPassword := NewRandomString(16)
	name := &Name{
		First: "John",
		Last: "Smith",
	}
	t.Logf("Username: %s", user.Username)
	t.Logf("Password: %s", password)
	if err := user.AddPassword(password); err != nil {
		t.Fatalf("failed adding password: %s", err)
	}
	if err := user.AddEmailAddress(email); err != nil {
		t.Fatalf("failed adding email address: %s", err)
	}
	if err := user.AddName(name); err != nil {
		t.Fatalf("failed adding name: %s", err)
	}
	for _, roleName := range []string{"viewer", "editor", "admin"} {
		if err := user.AddRole(roleName); err != nil {
			t.Fatalf("failed adding role: %s", err)
		}
	}
	// full name is rendered "Last, First"
	expUserFullName := "Smith, John"
	userFullName := user.GetFullName()
	if userFullName != expUserFullName {
		t.Fatalf("the expected user full name %s does not match the returned '%s'", expUserFullName, userFullName)
	}
	t.Logf("User full name: %s", userFullName)
	t.Logf("User mail claim: %s", user.GetMailClaim())
	t.Logf("User name claim: %s", user.GetNameClaim())
	t.Logf("User roles claim: %v", user.GetRolesClaim())
	// persist and authenticate with the original password
	if err := db.AddUser(user); err != nil {
		t.Fatalf("failed adding user %v to user database: %s", user, err)
	}
	if err := db.SaveToFile(dbPath); err != nil {
		t.Fatalf("error saving database at %s: %s", dbPath, err)
	}
	claims, authed, err := db.AuthenticateUser(user.Username, password, nil)
	if err != nil || !authed {
		t.Fatalf(
			"error authenticating user %s, claims: %v, authenticated: %v, error: %s",
			user.Username, claims, authed, err,
		)
	}
	t.Logf("User claims: %v", claims)
	// rotate the password 15 times; each change supplies the previous
	// password as the current one
	prevPassword := password
	for i := 0; i < 15; i++ {
		if i != 0 {
			prevPassword = newPassword
		}
		newPassword = NewRandomString(16)
		reqOpts := make(map[string]interface{})
		reqOpts["username"] = user.Username
		reqOpts["email"] = email
		reqOpts["current_password"] = prevPassword
		reqOpts["new_password"] = newPassword
		reqOpts["file_path"] = dbPath
		if err := db.ChangeUserPassword(reqOpts); err != nil {
			t.Fatalf("error changing user password: %s, request options: %v", err, reqOpts)
		}
		t.Logf("User password has changed")
	}
	// the superseded password must no longer authenticate …
	if _, authed, _ := db.AuthenticateUser(user.Username, prevPassword, nil); authed {
		t.Fatalf("expected authentication failure, but got success")
	}
	// … while the latest one must
	claims, authed, err = db.AuthenticateUser(user.Username, newPassword, nil)
	if !authed {
		t.Fatalf("expected authentication success, but got failure: %s", err)
	}
	t.Logf("User claims: %v", claims)
	dbUser, err := db.GetUserByUsername(user.Username)
	if err != nil {
		t.Fatalf("expected valid user, got error: %s", err)
	}
	// password history is capped at 10 entries despite 16 passwords set
	expectedPasswordCount := 10
	if len(dbUser.Passwords) != expectedPasswordCount {
		t.Fatalf("expected password count of %d, received %d", expectedPasswordCount, len(dbUser.Passwords))
	}
}
// TestLoadDatabase verifies that a saved database can be reloaded and
// re-saved repeatedly without changing its user count.
func TestLoadDatabase(t *testing.T) {
	const expectedUserCount = 1
	dbPath := "assets/tests/userdb.json"
	dbCopyPath := "assets/tests/userdb_copy.json"
	db := NewDatabase()
	checkCount := func() {
		if got := db.GetUserCount(); got != expectedUserCount {
			t.Fatalf(
				"unexpected database user count at %s: %d (expected) vs. %d (actual)",
				dbPath, expectedUserCount, got,
			)
		}
	}
	if err := db.LoadFromFile(dbPath); err != nil {
		t.Fatalf("failed loading database at %s: %s", dbPath, err)
	}
	checkCount()
	if err := db.SaveToFile(dbCopyPath); err != nil {
		t.Fatalf("error saving database at %s: %s", dbCopyPath, err)
	}
	// a second load/save round-trip must leave the count unchanged
	if err := db.LoadFromFile(dbPath); err != nil {
		t.Fatalf("failed loading database at %s: %s", dbPath, err)
	}
	if err := db.SaveToFile(dbCopyPath); err != nil {
		t.Fatalf("error saving database at %s: %s", dbCopyPath, err)
	}
	checkCount()
}
|
package main
import (
"fmt"
"io/ioutil"
"regexp"
"strconv"
"strings"
)
// main reads day18/input.txt, strips all spaces, splits the input into
// one equation per line and prints the part-2 total (addition evaluated
// before multiplication — see run2/solveEq2).
func main() {
	file, e := ioutil.ReadFile("day18/input.txt")
	if e != nil {
		panic(e)
	}
	fileString := string(file)
	fileString = strings.Replace(fileString, " ", "", -1)
	fileArray := strings.Split(fileString, "\n")
	fmt.Println(run2(fileArray))
}
// run2 evaluates every equation in ss with solveEq2 (addition binds
// tighter than multiplication), prints each result and returns the sum.
func run2(ss []string) int {
	sum := 0
	for _, line := range ss {
		res := solveEq2(strings.Split(line, ""))
		fmt.Println(res)
		sum += res
	}
	return sum
}
// solveEq2 evaluates a tokenized expression (single-character tokens:
// digits, "+", "*", "(", ")") with part-2 precedence: brackets first,
// then all additions, then multiplication.
func solveEq2(eq []string) int {
	numReg := regexp.MustCompile(`[0-9]`)
	var ans int
	// pass 1: recursively replace each bracketed group with its value
	for i := 0; i < len(eq); i++{
		val := eq[i]
		if val == "(" {
			close := findCloseBracket(eq, i)
			res := solveEq2(eq[i+1 : close])
			// splice the result token in place of "(...)"
			newEq := make([]string,0)
			newEq = append(newEq, eq[:i]...)
			newEq = append(newEq,strconv.Itoa(res))
			newEq = append(newEq, eq[close +1:]...)
			eq = newEq
		}
	}
	// pass 2: collapse every "a + b" into its sum, left to right;
	// i-- re-examines the position now holding the new sum token
	for i := 0; i < len(eq); {
		val := eq[i]
		if val == "+" {
			a, _ := strconv.Atoi(eq[i-1])
			b, _ := strconv.Atoi(eq[i+1])
			res := a + b
			newEq := make([]string,0)
			newEq = append(newEq, eq[:i-1]...)
			newEq = append(newEq,strconv.Itoa(res))
			newEq = append(newEq, eq[i+2:]...)
			eq = newEq
			i--
		}else{i++}
	}
	// pass 3: only numbers and "*" should remain; multiply the numbers.
	// numReg matches a token containing any digit, so multi-digit tokens
	// produced by the earlier passes are picked up too.
	for i := 0; i < len(eq); i++{
		val := eq[i]
		if numReg.MatchString(val) {
			num, _ := strconv.Atoi(val)
			if i == 0 {
				ans = num
			} else {
				ans = ans * num
			}
		}
	}
	return ans
}
// run1 evaluates every equation in ss with solveEq1 (strict left-to-right
// evaluation), prints each result and returns the sum.
func run1(ss []string) int {
	sum := 0
	for _, line := range ss {
		res := solveEq1(strings.Split(line, ""))
		fmt.Println(res)
		sum += res
	}
	return sum
}
// solveEq1 evaluates a tokenized expression left to right with no
// operator precedence (part 1); bracketed groups are solved recursively.
func solveEq1(eq []string) int {
	numReg := regexp.MustCompile(`[0-9(]`)
	cmdReg := regexp.MustCompile(`[+*]`)
	var ans int
	var cmd string
	for i := 0; i < len(eq); {
		val := eq[i]
		if numReg.MatchString(val) {
			var num int
			var incr int
			if val == "(" {
				close := findCloseBracket(eq, i)
				num = solveEq1(eq[i+1 : close])
				// jump past the whole bracketed group
				incr = (close + 1) - i
			} else {
				num, _ = strconv.Atoi(val)
				incr = 1
			}
			if i == 0 {
				ans = num
			} else {
				// fold the operand in using the pending operator
				ans = useCmd(cmd, ans, num)
			}
			i = i + incr
		} else if cmdReg.MatchString(val) {
			// remember the operator for the next operand
			cmd = val
			i++
		}
	}
	return ans
}
// findCloseBracket returns the index of the ")" matching the "(" at
// startPos, or 0 if the brackets are unbalanced.
func findCloseBracket(eq []string, startPos int) int {
	depth := 1
	for i := startPos + 1; i < len(eq); i++ {
		switch eq[i] {
		case "(":
			depth++
		case ")":
			depth--
		}
		if depth == 0 {
			return i
		}
	}
	return 0
}
// useCmd applies the binary operator cmd ("+" or "*") to a and b;
// any other operator yields 0.
func useCmd(cmd string, a, b int) int {
	if cmd == "+" {
		return a + b
	}
	if cmd == "*" {
		return a * b
	}
	return 0
}
|
package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"strconv"
"time"
"github.com/brotherlogic/goserver/utils"
"google.golang.org/grpc"
pbrc "github.com/brotherlogic/recordcollection/proto"
//Needed to pull in gzip encoding init
_ "google.golang.org/grpc/encoding/gzip"
)
// main is a small CLI for the recordcollection service: it resolves the
// service address, dials it over (insecure) gRPC and dispatches on
// os.Args[1] — "uncat" lists uncategorized records, "add" adds a record
// described by -id/-cost/-folder flags.
// NOTE(review): os.Args[1] is read without a length check, so running
// with no arguments panics — confirm whether a usage message is wanted.
func main() {
	dServer, dPort, err := utils.Resolve("recordcollection")
	if err != nil {
		log.Fatalf("Error in resolving recordcollection: %v", err)
	}
	dConn, err := grpc.Dial(dServer+":"+strconv.Itoa(int(dPort)), grpc.WithInsecure())
	if err != nil {
		log.Fatalf("Error dialling recordcollection")
	}
	defer dConn.Close()
	client := pbrc.NewRecordCollectionServiceClient(dConn)
	// all RPCs share a one-minute deadline
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	// Argument handler
	switch os.Args[1] {
	case "uncat":
		err := listUncategorized(ctx, client)
		if err != nil {
			fmt.Printf("Error in list uncategorized: %v", err)
		}
	case "add":
		addRecordFlags := flag.NewFlagSet("addrecord", flag.ExitOnError)
		var id = addRecordFlags.Int("id", 0, "The id of the record")
		var cost = addRecordFlags.Int("cost", 0, "The cost of the record (in cents)")
		var folder = addRecordFlags.Int("folder", 0, "The id of the folder that this'll end up in")
		if err := addRecordFlags.Parse(os.Args[2:]); err == nil {
			_, err := add(ctx, client, int32(*id), int32(*cost), int32(*folder))
			if err != nil {
				log.Fatalf("Error adding record: %v", err)
			}
		}
	}
}
|
package main
// Person holds person information
type Person struct {
	Name string `json:"name"` // serialized to/from the JSON key "name"
	Age int `json:"age"` // serialized to/from the JSON key "age"
}
// ExamplePerson is a package-level zero-value Person.
var ExamplePerson Person
|
package main
import (
"fmt"
"sync"
)
// main fans out 10 goroutines, each sending the pair {i, i+10} on a
// buffered channel; after all finish it drains the channel into one flat
// slice and prints it.
func main() {
	var wg sync.WaitGroup
	isFollow := true
	ch := make(chan []int, 10)
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(idx int, wg *sync.WaitGroup) {
			defer wg.Done()
			if isFollow {
				fmt.Println("isFollow:", isFollow)
			}
			pair := []int{idx, idx + 10}
			ch <- pair
		}(i, &wg)
	}
	results := make([]int, 0)
	wg.Wait()
	//time.Sleep(time.Second * 2)
	fmt.Println("ch:", len(ch))
	// the buffer (size 10) holds every send, so ranging after close
	// drains all results
	close(ch)
	for pair := range ch {
		results = append(results, pair...)
	}
	fmt.Println("retMap:", results)
	//fmt.Println("ret:", ret)
}
|
// Copyright 2014 Dirk Jablonowski. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lcd20x4
import (
"fmt"
"github.com/dirkjabl/bricker"
"github.com/dirkjabl/bricker/device"
"github.com/dirkjabl/bricker/net/packet"
misc "github.com/dirkjabl/bricker/util/miscellaneous"
)
// BacklightOn creates the subscriber that switches the display backlight on.
func BacklightOn(id string, uid uint32, handler func(device.Resulter, error)) *device.Device {
	gen := device.Generator{
		Id:         device.FallbackId(id, "BacklightOn"),
		Fid:        function_backlight_on,
		Uid:        uid,
		Handler:    handler,
		WithPacket: true,
	}
	return gen.CreateDevice()
}
// BacklightOnFuture switches the backlight on and blocks until the
// result arrives; reports whether the call succeeded.
func BacklightOnFuture(brick *bricker.Bricker, connectorname string, uid uint32) bool {
	result := make(chan bool)
	sub := BacklightOn("backlightonfuture"+device.GenId(), uid,
		func(r device.Resulter, err error) {
			result <- device.IsEmptyResultOk(r, err)
		})
	if err := brick.Subscribe(sub, connectorname); err != nil {
		return false
	}
	ok := <-result
	close(result)
	return ok
}
// BacklightOff creates the subscriber that switches the display backlight off.
func BacklightOff(id string, uid uint32, handler func(device.Resulter, error)) *device.Device {
	gen := device.Generator{
		Id:         device.FallbackId(id, "BacklightOff"),
		Fid:        function_backlight_off,
		Uid:        uid,
		Handler:    handler,
		WithPacket: true,
	}
	return gen.CreateDevice()
}
// BacklightOffFuture switches the backlight off and blocks until the
// result arrives; reports whether the call succeeded.
func BacklightOffFuture(brick *bricker.Bricker, connectorname string, uid uint32) bool {
	result := make(chan bool)
	sub := BacklightOff("backlightofffuture"+device.GenId(), uid,
		func(r device.Resulter, err error) {
			result <- device.IsEmptyResultOk(r, err)
		})
	if err := brick.Subscribe(sub, connectorname); err != nil {
		return false
	}
	ok := <-result
	close(result)
	return ok
}
// IsBacklightOn creates the subscriber that queries the backlight state;
// the result is decoded into a Backlight.
func IsBacklightOn(id string, uid uint32, handler func(device.Resulter, error)) *device.Device {
	gen := device.Generator{
		Id:         device.FallbackId(id, "IsBacklightOn"),
		Fid:        function_is_backlight_on,
		Uid:        uid,
		Result:     &Backlight{},
		Handler:    handler,
		WithPacket: true,
	}
	return gen.CreateDevice()
}
// IsBacklightOnFuture queries the backlight state and blocks until the
// result arrives; returns nil on subscription failure or when the
// result cannot be decoded as a *Backlight.
func IsBacklightOnFuture(brick *bricker.Bricker, connectorname string, uid uint32) *Backlight {
	result := make(chan *Backlight)
	sub := IsBacklightOn("isbacklightonfuture"+device.GenId(), uid,
		func(r device.Resulter, err error) {
			var bl *Backlight
			if err == nil {
				if value, ok := r.(*Backlight); ok {
					bl = value
				}
			}
			result <- bl
		})
	if err := brick.Subscribe(sub, connectorname); err != nil {
		return nil
	}
	bl := <-result
	close(result)
	return bl
}
// IsBacklightOnFutureSimple reports whether the backlight is known to
// be on (false on query failure).
func IsBacklightOnFutureSimple(brick *bricker.Bricker, connectorname string, uid uint32) bool {
	bl := IsBacklightOnFuture(brick, connectorname, uid)
	return bl != nil && bl.IsOn
}
// Backlight is a type for the return of the IsBacklightOn subscriber.
type Backlight struct {
	IsOn bool // is the backlight on
}
// FromPacket converts the packet payload to the Backlight type.
// It validates the packet for this resulter first, then decodes the raw
// wire representation and converts it into the receiver.
// Returns the validation or decode error, or nil on success.
func (bl *Backlight) FromPacket(p *packet.Packet) error {
	if err := device.CheckForFromPacket(bl, p); err != nil {
		return err
	}
	blr := new(BacklightRaw)
	err := p.Payload.Decode(blr)
	if err == nil {
		bl.FromBacklightRaw(blr)
	}
	return err
}
// String fulfills the fmt.Stringer interface. A nil receiver renders as
// "Backlight [nil]".
func (bl *Backlight) String() string {
	if bl == nil {
		return "Backlight " + "[nil]"
	}
	return "Backlight " + fmt.Sprintf("[IsOn: %t]", bl.IsOn)
}
// Copy creates a copy of the content. A nil receiver yields nil.
func (bl *Backlight) Copy() device.Resulter {
	if bl != nil {
		return &Backlight{IsOn: bl.IsOn}
	}
	return nil
}
// FromBacklightRaw converts a BacklightRaw into a Backlight.
// A nil receiver or nil source is a no-op.
func (bl *Backlight) FromBacklightRaw(br *BacklightRaw) {
	if bl == nil || br == nil {
		return
	}
	bl.IsOn = misc.Uint8ToBool(br.IsOn)
}
// BacklightRaw is a type for raw coding of the backlight.
type BacklightRaw struct {
	IsOn uint8 // wire encoding; converted with misc.Uint8ToBool
}
|
package utils
import (
"syscall/js"
)
func Keys(obj js.Value) []string {
if !obj.Truthy() {
return nil
}
var (
keys = js.Global().Get("Object").Call("keys", obj)
slice = make([]string, keys.Length())
)
for i := 0; i < keys.Length(); i++ {
slice[i] = keys.Index(i).String()
}
return slice
}
|
package def
import (
"github.com/talesmud/talesmud/pkg/service"
)
// GameCtrl is the interface the commands package uses to communicate back
// to the game instance.
type GameCtrl interface {
	// OnMessageReceived returns the channel used to pass messages as events
	// inside the mud server, e.g. translate a command into other user messages.
	OnMessageReceived() chan interface{}
	// SendMessage returns the channel used to send replies/messages to users,
	// the origin, rooms, or globally.
	SendMessage() chan interface{}
	// GetFacade exposes the game's service facade.
	GetFacade() service.Facade
}
|
package util
import "testing"
// TestIsValidPortAsInt checks integer port validation: negative ports and
// ports above 49151 are rejected, in-range ports accepted.
func TestIsValidPortAsInt(t *testing.T) {
	cases := []struct {
		name string
		port int32
		want bool
	}{
		{name: "Returns false if port negative", port: -1, want: false},
		{name: "Returns false if port more than 49151", port: 49152, want: false},
		{name: "Returns true if port valid", port: 3000, want: true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := IsValidPortAsInt(tc.port); got != tc.want {
				t.Errorf("IsValidPortAsInt() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestIsValidPortAsStr checks string port validation: non-numeric,
// negative, and >49151 values are rejected, in-range values accepted.
func TestIsValidPortAsStr(t *testing.T) {
	cases := []struct {
		name string
		port string
		want bool
	}{
		{name: "Returns false if port not a number", port: "invalid", want: false},
		{name: "Returns false if port negative", port: "-1", want: false},
		{name: "Returns false if port more than 49151", port: "49152", want: false},
		{name: "Returns true if port valid", port: "3000", want: true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := IsValidPortAsStr(tc.port); got != tc.want {
				t.Errorf("IsValidPortAsStr() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
package mysql
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
"github.com/spf13/viper"
)
// db is the package-global MySQL connection pool, set by Init.
var db *sqlx.DB

// Init opens the global MySQL connection pool using viper settings
// (mysql.user/password/host/port/dbname) and applies the configured pool
// limits. It returns the connect error, if any.
func Init() (err error) {
	// DSN layout: "user:password@tcp(host:port)/dbname"
	dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true&loc=Local",
		viper.GetString("mysql.user"),
		viper.GetString("mysql.password"),
		viper.GetString("mysql.host"),
		viper.GetInt("mysql.port"),
		viper.GetString("mysql.dbname"),
	)
	db, err = sqlx.Connect("mysql", dsn)
	if err != nil {
		fmt.Println("connect mysql failed")
		return
	}
	db.SetMaxOpenConns(viper.GetInt("mysql.max_open_conns"))
	db.SetMaxIdleConns(viper.GetInt("mysql.max_idle_conns"))
	fmt.Println("connect mysql success")
	return
}
// Close closes the global MySQL connection pool. It is a no-op when Init
// was never called successfully (db still nil); previously that case
// dereferenced a nil pointer.
func Close() {
	if db != nil {
		_ = db.Close() // shutdown path; close error intentionally ignored
	}
}
|
package main
import (
"fmt"
"time"
)
// Run6 answers demo question 6: which units a default Go time value
// carries (year, month, day, hour, minute, second, time zone) by printing
// the current time.
func Run6() {
	fmt.Println("\n 6.")
	now := time.Now()
	fmt.Println("Năm, tháng, ngày, giờ, phút, giây, múi giờ")
	fmt.Println(now)
}
|
package base
import (
"appdemo/errcode"
"crypto/md5"
"encoding/hex"
"encoding/json"
"strconv"
"strings"
"sync"
)
// RespHead is the shopapi response header envelope.
type RespHead struct {
	Code int         `json:"code"`
	Info string      `json:"info"`
	Desc string      `json:"desc"`
	Ext  interface{} `json:"ext,omitempty"`
}
// Resp is the full response envelope, i.e. RespHead plus payload data.
type Resp struct {
	Code int         `json:"code"`
	Info string      `json:"info"`
	Desc string      `json:"desc"`
	Data interface{} `json:"data"`
}
// Reply304 writes an HTTP 304 Not Modified status and records the fact in
// the request log map.
func (c *Controller) Reply304() {
	c.LogMap["304"] = "true"
	c.Ctx.ResponseWriter.WriteHeader(304)
	// (redundant trailing bare return removed)
}
// replyContent writes content to the response with an MD5-based ETag.
// When the client's If-None-Match matches, a 304 is sent instead of the
// body. Bodies under 1 KiB disable gzip (compression not worth it).
func (c *Controller) replyContent(content []byte) {
	c.isReply = true
	sum := md5.Sum(content)
	etag := hex.EncodeToString(sum[:])
	reqETag := c.Ctx.Input.Header("If-None-Match")
	c.LogMap["_page_size"] = strconv.FormatInt(int64(len(content)), 10)
	if etag == reqETag {
		// Reply304 writes the 304 status itself; the previous extra
		// WriteHeader(304) call here caused a second (superfluous)
		// status write on the same response.
		c.Reply304()
		return
	}
	header := c.Ctx.ResponseWriter.Header()
	header["ETag"] = []string{etag}
	if len(content) < 1024 {
		c.Ctx.Output.EnableGzip = false
	}
	c.gzipReply(etag, content)
}
// ReplyErrCode replies with the error envelope for the given error code.
// Unknown codes fall back to a generic "server error" entry. The numeric
// code of the matched entry becomes the controller's action code, and the
// entry is recorded in the log map.
func (c *Controller) ReplyErrCode(code uint64) {
	// TODO custom code dictionary:
	//   err        internal error entry
	//   ext_err    external error head
	//   ext_source error origin
	ins := errcode.GetLocalCodeMsg(code)
	if ins == nil {
		ins = &errcode.APILogCode{
			NameCn:   "错误code 未定义",
			NameEn:   "error code undefined",
			Text:     "服务器异常,请稍后重试",
			FullCode: strconv.FormatUint(code, 10),
		}
	}
	cont, _ := json.Marshal(ins)
	c.LogMap["err"] = string(cont)
	// TODO recycle cont
	code, _ = strconv.ParseUint(ins.FullCode, 10, 64)
	c.ActionCode = code
	// TODO write error details to the observer.
	// (A previous marshal of an {err, ext_source} map here was dead code:
	// its result was overwritten before use, so it has been removed.)
	head := RespHead{
		Code: int(code),
		Info: ins.NameEn,
		Desc: ins.Text,
	}
	content, _ := json.Marshal(head)
	c.replyContent(content)
}
// ReplySucc replies with the success envelope (code 0, "ok") wrapping data,
// and marks the action as successful.
func (c *Controller) ReplySucc(data interface{}) {
	c.ActionResult = 1
	head := Resp{
		Code: 0,
		Info: "ok",
		Desc: "成功",
		Data: data,
	}
	content, _ := json.Marshal(&head)
	// TODO json pool
	c.replyContent(content)
}
// gzipPool/flatePool are reserved for the compression caching described in
// the TODOs below. NOTE(review): neither pool is used in this chunk.
var gzipPool, flatePool sync.Pool

// gzipReply writes content as a JSON response, negotiating a
// Content-Encoding from the request's Accept-Encoding header.
// NOTE(review): when gzip/deflate is negotiated, only the header is set —
// the body is written uncompressed via ResponseWriter.Write below. Unless
// something downstream compresses it, clients will fail to decode; confirm.
func (c *Controller) gzipReply(etag string, content []byte) (err error) {
	m := c.Ctx.Output
	m.Header("Content-Type", "application/json; charset=utf-8")
	acceptEncoding := m.Context.Input.Header("Accept-Encoding")
	if m.EnableGzip && acceptEncoding != "" {
		splitted := strings.SplitN(acceptEncoding, ",", -1)
		encodings := make([]string, len(splitted))
		for i, val := range splitted {
			encodings[i] = strings.TrimSpace(val)
		}
		// First supported encoding wins; gzip checked before deflate.
		for _, val := range encodings {
			if val == "gzip" {
				m.Header("Content-Encoding", "gzip")
				break
			} else if val == "deflate" {
				m.Header("Content-Encoding", "deflate")
				break
			}
		}
	} else {
		m.Header("Content-Length", strconv.Itoa(len(content)))
	}
	m.Context.ResponseWriter.Write(content)
	// TODO look up pre-compressed content in the cache first to save CPU.
	// TODO split personal info (redis) from public info (local cache).
	// TODO uncompressed content is not kept in localstore and can be recycled.
	return
}
|
package network
import (
"testing"
)
// newDateComponent builds the fixture query: symbol "goog" from
// 2016-01-01 through 2016-01-15.
func newDateComponent() Params {
	return Params{
		Symbol: "goog",
		StartDate: DateComponents{
			Day:   1,
			Month: 1,
			Year:  2016,
		},
		EndDate: DateComponents{
			Day:   15,
			Month: 1,
			Year:  2016,
		},
	}
}
// TestDateComponents verifies that "01-01-2015" parses and formats back
// as "01/01/2015".
func TestDateComponents(t *testing.T) {
	if got := DateComponentsFromString("01-01-2015"); got.String() != "01/01/2015" {
		t.Fatalf("Wrong date: %s", got)
	}
}
// TestFetchContents fetches a small JSON payload from api.ipify.org and
// fails on error or an empty body.
// NOTE(review): this test requires live network access.
func TestFetchContents(t *testing.T) {
	url := "https://api.ipify.org?format=json"
	t.Log("Fetching from URL:", url)
	contents, err := FetchContents(url)
	if err != nil {
		t.Fatal(err)
	}
	if contents == "" {
		t.Fatalf("No contents")
	}
	t.Log(contents)
}
// TestFetchCSV fetches CSV stock data for the fixture params and logs it.
// NOTE(review): network-dependent; only the error path is asserted.
func TestFetchCSV(t *testing.T) {
	params := newDateComponent()
	data, err := FetchCSV(params)
	if err != nil {
		t.Error(err)
	}
	t.Log(data)
}
// TestFetchStockData fetches parsed stock data for the fixture params and
// spot-checks the first record.
// NOTE(review): the comparisons below fail when the values DO equal the
// expected open/high — they look inverted (== where != seems intended);
// confirm against the fixture data.
func TestFetchStockData(t *testing.T) {
	params := newDateComponent()
	stockData, err := FetchStockData(params)
	if err != nil {
		t.Error(err)
	}
	t.Log(stockData)
	// check the stock data
	stock := stockData[0]
	if stock.Open == 543.35 {
		t.Fail()
	}
	if stock.High == 549.91 {
		t.Fail()
	}
	t.Log(stock.String())
}
// TestFetchStockDataFail expects FetchStockData to fail because the query
// params are not valid (start date pushed to the year 2099).
func TestFetchStockDataFail(t *testing.T) {
	params := newDateComponent()
	params.StartDate.Year = 2099
	_, err := FetchStockData(params)
	if err != nil {
		t.Log(err)
	} else {
		t.Fatal("Should have failed:", params)
	}
}
|
package structs
// Host maps one row of the adscoop_hosts table.
type Host struct {
	ID   int64  // primary key
	Host string // host name
}

// TableName tells the ORM which table backs Host.
func (h Host) TableName() string {
	return "adscoop_hosts"
}
// Hosts is a collection of Host rows.
type Hosts []Host

// FindAll loads every adscoop_hosts row into the receiver.
// The receiver is already *Hosts, so it is passed to Find directly;
// the previous &h handed gorm a **Hosts, which it cannot scan into.
func (h *Hosts) FindAll() error {
	return AdscoopsDB.Table("adscoop_hosts").Find(h).Error
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"io"
"strings"
"text/template"
)
const datumToVecTmpl = "pkg/sql/colconv/datum_to_vec_tmpl.go"

// genDatumToVec expands the datum-to-vec template: placeholder tokens are
// rewritten into Go template directives, the result is parsed as a
// template, and it is executed against the row-to-vec type infos.
func genDatumToVec(inputFileContents string, wr io.Writer) error {
	expanded := strings.NewReplacer(
		"_TYPE_FAMILY", "{{.TypeFamily}}",
		"_TYPE_WIDTH", typeWidthReplacement,
	).Replace(inputFileContents)
	expanded = makeFunctionRegex("_PRELUDE", 1).
		ReplaceAllString(expanded, makeTemplateFunctionCall("Prelude", 1))
	expanded = makeFunctionRegex("_CONVERT", 1).
		ReplaceAllString(expanded, makeTemplateFunctionCall("Convert", 1))
	tmpl, err := template.New("utils").Parse(expanded)
	if err != nil {
		return err
	}
	return tmpl.Execute(wr, getRowsToVecTmplInfos())
}
// init registers the datum_to_vec generator with the code-gen driver.
func init() {
	registerGenerator(genDatumToVec, "datum_to_vec.eg.go", datumToVecTmpl)
}
|
package http
import (
"net/http"
"time"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
"github.com/candraalim/be_tsel_candra/config"
"github.com/candraalim/be_tsel_candra/internal/usecase/inquiry"
"github.com/candraalim/be_tsel_candra/internal/usecase/referral"
)
// setupRouter wires the HTTP routes: an unauthenticated health check plus
// the basic-auth protected /1.0/referral endpoints.
func setupRouter(server *echo.Echo, auth *config.AuthConfig, inquiring *inquiry.InquiringHandler, referral *referral.ReferHandler) {
	// health check
	server.GET("/ping", func(c echo.Context) error {
		return c.String(http.StatusOK, "services up and running... "+time.Now().Format(time.RFC3339))
	})
	// credentials must match the configured username/password exactly
	validate := func(username, password string, _ echo.Context) (bool, error) {
		return username == auth.Username && password == auth.Password, nil
	}
	group := server.Group("/1.0/referral", middleware.BasicAuth(validate))
	group.GET("/:msisdn/code", inquiring.GetReferralCode)
	group.GET("/:msisdn", inquiring.GetListReferral)
	group.GET("/:msisdn/reward", inquiring.GetCurrentReferralReward)
	group.POST("", referral.ProcessReferral)
}
|
package main
import (
"fmt"
_ "unsafe"
"strconv"
)
// main runs the three basic-type-to-string conversion demos.
func main() {
	basic2string_1()
	basic2string_2()
	// NOTE(review): the third demo is declared below as basi2string_3
	// (missing the "c"), so this call does not compile until that typo
	// is fixed.
	basic2string_3()
}
// basic2string_1 — method 1: convert basic types (int, float64, bool) to
// string with fmt.Sprintf.
func basic2string_1() {
	var (
		num1 int     = 90
		num2 float64 = 23.456
		b    bool    = true
	)
	// var mychar byte = 'h'
	var str string
	str = fmt.Sprintf("%d", num1)
	fmt.Printf("str type %T str=%q\n", str, str)
	str = fmt.Sprintf("%.2f", num2)
	fmt.Printf("str type %T str=%q\n", str, str)
	str = fmt.Sprintf("%t", b)
	fmt.Printf("str type %T str=%q\n", str, str)
}
// basic2string_2 — method 2: convert basic types to string with the
// strconv.Format* family.
func basic2string_2() {
	var (
		num3 int     = 99
		num4 float64 = 23.456
		b2   bool    = true
	)
	var str string
	str = strconv.FormatInt(int64(num3), 10)
	fmt.Printf("str type %T str=%q\n", str, str)
	str = strconv.FormatFloat(num4, 'f', 10, 64)
	fmt.Printf("str type %T str=%q\n", str, str)
	str = strconv.FormatBool(b2)
	fmt.Printf("str type %T str=%q\n", str, str)
}
// basic2string_3 — method 3: convert an int to string with strconv.Itoa.
// Renamed from the misspelled basi2string_3: main calls basic2string_3,
// so the typo broke compilation.
func basic2string_3() {
	var num1 = 4567
	var str string
	str = strconv.Itoa(num1) // num1 is already int; the int() cast was redundant
	fmt.Printf("str type %T str=%q\n", str, str)
}
/*
Copyright 2014 Huawei Technologies Co., Ltd. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package models
import (
"time"
)
// ImageV1 maps the image_v1 table: one repository image together with its
// manifest text and bookkeeping columns.
type ImageV1 struct {
	ID          int64      `json:"id" gorm:"primary_key"`
	Namespace   string     `json:"namespace" sql:"not null;type:varchar(255)"`
	Repository  string     `json:"repository" sql:"not null;type:varchar(255)"`
	Short       string     `json:"short" sql:"null;type:text"`
	Description string     `json:"description" sql:"null;type:text"`
	Manifests   string     `json:"manifests" sql:"null;type:text"`
	Type        string     `json:"type" sql:"not null;type:varchar(255)"`
	Keys        string     `json:"keys" sql:"null;type:text"`
	Size        int64      `json:"size" sql:"default:0"`
	Locked      bool       `json:"locked" sql:"default:false"`
	CreatedAt   time.Time  `json:"create_at" sql:""`
	UpdatedAt   time.Time  `json:"update_at" sql:""`
	DeletedAt   *time.Time `json:"delete_at" sql:"index"` // soft-delete marker
}

// TableName returns the backing table name for the ORM.
func (*ImageV1) TableName() string {
	return "image_v1"
}
// VirtualV1 maps the virtual_v1 table: a per-OS/arch artifact belonging to
// an ImageV1 row (referenced by the ImageV1 foreign-key column).
type VirtualV1 struct {
	ID        int64  `json:"id" gorm:"primary_key"`
	ImageV1   int64  `json:"image_v1" sql:"not null"`
	OS        string `json:"os" sql:"null;type:varchar(255)"`
	Arch      string `json:"arch" sql:"null;type:varchar(255)"`
	Image     string `json:"image" sql:"not null;varchar(255)" gorm:"unique_index:image_tag"`
	Tag       string `json:"tag" sql:"null;varchar(255)" gorm:"unique_index:image_tag"`
	Manifests string `json:"manifests" sql:"null;type:text"`
	OSS       string `json:"oss" sql:"null;type:text"`
	// Bug fix: Path previously reused json tag "arch", colliding with the
	// Arch field and producing a duplicate key in marshaled JSON.
	Path      string     `json:"path" sql:"null;type:text"`
	Size      int64      `json:"size" sql:"default:0"`
	Locked    bool       `json:"locked" sql:"default:false"`
	CreatedAt time.Time  `json:"create_at" sql:""`
	UpdatedAt time.Time  `json:"update_at" sql:""`
	DeletedAt *time.Time `json:"delete_at" sql:"index"` // soft-delete marker
}

// TableName returns the backing table name for the ORM.
func (*VirtualV1) TableName() string {
	return "virtual_v1"
}
|
package models
import "time"
// Video describes a stored video record and its source metadata.
// NOTE(review): field semantics inferred from names only — confirm against
// the code that populates this struct.
type Video struct {
	CreatedTime time.Time
	Title       string
	Description string
	SoftTags    []string
	Src         string
	Domain      string
	SHA256      string
	Format      string
}
package ekatime
import "bytes"
var (
	// _WeekdayStr is just English names of days of week.
	// Index 0 is the "Unknown" placeholder; real weekdays start at index 1
	// (byteSliceEncode indexes with w+1).
	// NOTE(review): the order starting at Wednesday presumably mirrors this
	// package's Weekday numbering — confirm against the Weekday constants.
	_WeekdayStr = [...]string {
		"Unknown",
		"Wednesday",
		"Thursday",
		"Friday",
		"Saturday",
		"Sunday",
		"Monday",
		"Tuesday",
	}
	// _WeekdayBytes caches each name double-quoted as []byte,
	// filled by initWeekday, for fast encoding/decoding.
	_WeekdayBytes = [len(_WeekdayStr)][]byte{}
)
// asPartOfDate returns current Weekday but as part of Date object
// (casted, bit shifted, ready for being bit added to some Date object).
func (w Weekday) asPartOfDate() Date {
	// +1 shifts into the 1-based weekday encoding used inside Date.
	return Date(w+1) << _DATE_OFFSET_WEEKDAY
}
// byteSliceEncode returns the current weekday's []byte representation,
// the same as String() returns but double quoted.
// Out-of-range values are normalized to -1, which (after the +1 shift)
// selects the "Unknown" entry.
func (w Weekday) byteSliceEncode() []byte {
	if w < 0 || w > 6 {
		//noinspection GoAssignmentToReceiver
		w = -1
	}
	return _WeekdayBytes[w+1]
}
// byteSliceDecode decodes the weekday's value from 'data', saving it into
// the current weekday's object. Always returns nil; -1 is stored when
// 'data' does not contain a valid (quoted) weekday name.
func (w *Weekday) byteSliceDecode(data []byte) error {
	*w = -1
	if data == nil {
		return nil
	}
	for i := 1; i < len(_WeekdayBytes); i++ {
		if bytes.Equal(data, _WeekdayBytes[i]) {
			*w = Weekday(i - 1)
			break
		}
	}
	return nil
}
// initWeekday fills the weekdays' []byte representations (double-quoted
// names) used for fast encoding/decoding.
func initWeekday() {
	for i := range _WeekdayStr {
		_WeekdayBytes[i] = []byte(`"` + _WeekdayStr[i] + `"`)
	}
}
|
// Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
)
// TestParseArgs covers the CLI argument parser: usage errors for empty or
// too-short input, rejection of unknown subcommands, and correct splitting
// of targets vs. additional (pass-through) arguments at the given index.
func TestParseArgs(t *testing.T) {
	// no args / subcommand without target -> usage error
	_, err := parseArgs([]string{}, -1)
	assert.True(t, errors.Is(err, errUsage))
	_, err = parseArgs([]string{"build"}, -1)
	assert.True(t, errors.Is(err, errUsage))
	// unknown subcommand -> some error
	_, err = parseArgs([]string{"typo", "target"}, -1)
	assert.NotNil(t, err)
	args, err := parseArgs([]string{"build", "target"}, -1)
	assert.Nil(t, err)
	assert.Equal(t, parsedArgs{
		subcmd:     "build",
		targets:    []string{"target"},
		additional: []string{}}, *args)
	args, err = parseArgs([]string{"test", "target1", "target2"}, -1)
	assert.Nil(t, err)
	assert.Equal(t, parsedArgs{
		subcmd:     "test",
		targets:    []string{"target1", "target2"},
		additional: []string{}}, *args)
	// Make sure additional arguments are captured correctly.
	args, err = parseArgs([]string{"test", "target1", "target2", "--verbose_failures"}, 3)
	assert.Nil(t, err)
	assert.Equal(t, parsedArgs{
		subcmd:     "test",
		targets:    []string{"target1", "target2"},
		additional: []string{"--verbose_failures"},
	}, *args)
}
|
package netxmocks
import (
"context"
"crypto/tls"
"errors"
"net"
"reflect"
"testing"
)
// TestTLSHandshakerHandshake checks that an error produced by the mocked
// handshake is propagated unchanged and that the returned conn and
// connection state are zero values.
func TestTLSHandshakerHandshake(t *testing.T) {
	expected := errors.New("mocked error")
	conn := &Conn{}
	ctx := context.Background()
	config := &tls.Config{}
	th := &TLSHandshaker{
		MockHandshake: func(ctx context.Context, conn net.Conn,
			config *tls.Config) (net.Conn, tls.ConnectionState, error) {
			return nil, tls.ConnectionState{}, expected
		},
	}
	tlsConn, connState, err := th.Handshake(ctx, conn, config)
	if !errors.Is(err, expected) {
		t.Fatal("not the error we expected", err)
	}
	if !reflect.ValueOf(connState).IsZero() {
		t.Fatal("expected zero ConnectionState here")
	}
	if tlsConn != nil {
		t.Fatal("expected nil conn here")
	}
}
|
package controllers
import (
"github.com/kataras/iris"
"github.com/kataras/iris/mvc"
"../models"
)
// UsersController serves the /users resource backed by UserModel.
type UsersController struct {
	model models.UserModel
}

// NewUsersController registers a fresh controller on the mvc application.
func NewUsersController(app *mvc.Application) {
	app.Handle(&UsersController{
		model: models.UserModel{},
	})
}
// Get handles GET /users and renders the index view with every user.
func (uc *UsersController) Get() mvc.Result {
	return mvc.View{
		Name: "users/index.html",
		Data: iris.Map{"users": uc.model.All()},
	}
}
// GetBy handles GET /users/{id} and renders the show view for that user.
func (uc *UsersController) GetBy(id int64) mvc.Result {
	return mvc.View{
		Name: "users/show.html",
		Data: iris.Map{"user": uc.model.Find(id)},
	}
}
// Post handles POST /users: creates a user from the submitted form values
// and renders the show view for the created record.
func (uc *UsersController) Post(ctx iris.Context) mvc.Result {
	// email := ctx.URLParam("email")
	params := userParams(ctx)
	println("VALUES:", params["email"], params["name"]) // debug trace
	user := models.User{Email: params["email"], Name: params["name"]}
	user = uc.model.Create(user)
	// TODO: redirect instead of view render
	return mvc.View{
		Name: "users/show.html",
		Data: iris.Map{
			"user": user,
		},
	}
}
// PutBy handles PUT /users/{id}: updates the user from the submitted form
// values and renders the show view for the updated record.
func (uc *UsersController) PutBy(ctx iris.Context, id int64) mvc.Result {
	params := userParams(ctx)
	paramsStruct := models.User{
		Name:  params["name"],
		Email: params["email"],
	}
	user := uc.model.Update(id, paramsStruct)
	// TODO: redirect instead of view render
	return mvc.View{
		Name: "users/show.html",
		Data: iris.Map{
			"user": user,
		},
	}
}
// DeleteBy handles DELETE /users/{id} and re-renders the index view.
func (uc *UsersController) DeleteBy(id int64) mvc.Result {
	uc.model.Delete(id)
	// TODO: redirect instead of view render
	return mvc.View{
		Name: "users/index.html",
	}
}
// userParams extracts the user form fields from the request.
// (This helper could become useless if differently-typed params, e.g.
// int32, are ever needed.)
func userParams(ctx iris.Context) map[string]string {
	return map[string]string{
		"email": ctx.FormValue("email"),
		"name":  ctx.FormValue("name"),
	}
}
|
package main
import (
"fmt"
)
// main demonstrates a buffered channel: three sends complete without a
// waiting receiver because the buffer (capacity 3) absorbs them. A fourth
// receive would deadlock, as noted below.
func main() {
	messages := make(chan string, 3)
	go func() {
		for _, m := range []string{"100", "200", "300"} {
			messages <- m
		}
	}()
	for i := 0; i < 3; i++ {
		fmt.Println(<-messages)
	}
	//fmt.Println(<-messages) //goroutines are asleep - deadlock
}
|
package missprop
import (
"go/ast"
"go/types"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
)
// Analyzer is the missprop analysis entry point. It inspects composite
// literals (via the inspect pass) and is driven by run below.
var Analyzer = &analysis.Analyzer{
	Name: "missprop",
	Doc:  Doc,
	Run:  run,
	Requires: []*analysis.Analyzer{
		inspect.Analyzer,
	},
}

// Doc is the one-line description shown by the analysis driver.
const Doc = "missprop is the tool to find missing props in composite literal"
// run reports struct composite literals that use keyed fields but leave
// some of the struct's fields unset.
func run(pass *analysis.Pass) (interface{}, error) {
	inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
	nodeFilter := []ast.Node{
		(*ast.CompositeLit)(nil),
	}
	inspect.Preorder(nodeFilter, func(n ast.Node) {
		lit, ok := n.(*ast.CompositeLit)
		if !ok {
			return
		}
		// TypeOf may return nil for unresolved/invalid expressions;
		// guard before calling Underlying to avoid a nil-pointer panic.
		tv := pass.TypesInfo.TypeOf(lit)
		if tv == nil {
			return
		}
		st, ok := tv.Underlying().(*types.Struct)
		if !ok {
			return
		}
		// Only literals that use the keyed form are checked; an empty or
		// positional literal is left alone.
		if len(lit.Elts) == 0 {
			return
		}
		if _, ok := lit.Elts[0].(*ast.KeyValueExpr); !ok {
			return
		}
		used := map[string]bool{}
		for _, elt := range lit.Elts {
			// Checked assertions replace the previous unchecked ones,
			// which could panic on malformed/mixed literals.
			kv, ok := elt.(*ast.KeyValueExpr)
			if !ok {
				continue
			}
			if ident, ok := kv.Key.(*ast.Ident); ok {
				used[ident.Name] = true
			}
		}
		for i := 0; i < st.NumFields(); i++ {
			name := st.Field(i).Name()
			if !used[name] {
				pass.Reportf(lit.Pos(), "find missing props: %v", name)
			}
		}
	})
	return nil, nil
}
|
// Copyright (c) 2015, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package main
import (
"fmt"
"log"
"github.com/mvdan/fdroidcl"
"github.com/mvdan/fdroidcl/adb"
)
// cmdInstall is the "install" subcommand definition.
var cmdInstall = &Command{
	UsageLine: "install <appid...>",
	Short:     "Install an app",
}

// init wires the subcommand to its handler (done here to avoid an
// initialization cycle between the var and the function).
func init() {
	cmdInstall.Run = runInstall
}
// runInstall installs every given app id onto the single connected device,
// aborting (log.Fatalf) when no ids are given or when any app is already
// installed.
func runInstall(args []string) {
	if len(args) < 1 {
		log.Fatalf("No package names given")
	}
	device := mustOneDevice()
	apps := findApps(args)
	inst := mustInstalled(device)
	for _, app := range apps {
		// e is the map-membership flag: refuse to reinstall.
		if _, e := inst[app.ID]; e {
			log.Fatalf("%s is already installed", app.ID)
		}
	}
	downloadAndDo(apps, device, installApk)
}
// downloadAndDo resolves and downloads the suggested APK for every app
// (exiting if any app has none), then applies doApk to each download.
// All downloads complete before the first doApk call, so a late download
// failure cannot leave a half-processed batch.
func downloadAndDo(apps []*fdroidcl.App, device *adb.Device, doApk func(*adb.Device, *fdroidcl.Apk, string)) {
	type downloaded struct {
		apk  *fdroidcl.Apk
		path string
	}
	fetched := make([]downloaded, 0, len(apps))
	for _, app := range apps {
		apk := app.SuggestedApk(device)
		if apk == nil {
			log.Fatalf("No suitable APKs found for %s", app.ID)
		}
		fetched = append(fetched, downloaded{apk: apk, path: downloadApk(apk)})
	}
	for _, d := range fetched {
		doApk(device, d.apk, d.path)
	}
}
// installApk installs the downloaded APK at path onto the device, exiting
// the program (log.Fatalf) on failure.
func installApk(device *adb.Device, apk *fdroidcl.Apk, path string) {
	fmt.Printf("Installing %s... ", apk.AppID)
	if err := device.Install(path); err != nil {
		fmt.Println() // finish the progress line before the fatal message
		log.Fatalf("Could not install %s: %v", apk.AppID, err)
	}
	fmt.Println("done")
}
|
package logger
import (
"github.com/sirupsen/logrus"
"github.com/honeycombio/samproxy/config"
)
// LogrusLogger is a Logger implementation that sends all logs to stdout using
// the Logrus package to get nice formatting
type LogrusLogger struct {
	Config config.Config `inject:""`
	logger *logrus.Logger // created by Start
	level  *logrus.Level  // level chosen via SetLevel, possibly before Start
}

// LogrusEntry wraps a logrus.Entry so it satisfies this package's Entry
// interface.
type LogrusEntry struct {
	entry *logrus.Entry
}
// Start initializes the underlying logrus logger, applying any level that
// was recorded via SetLevel before startup. It never fails.
func (l *LogrusLogger) Start() error {
	l.logger = logrus.New()
	if lvl := l.level; lvl != nil {
		l.logger.SetLevel(*lvl)
	}
	return nil
}
// WithField returns an Entry carrying one extra key/value pair.
func (l *LogrusLogger) WithField(key string, value interface{}) Entry {
	return &LogrusEntry{
		entry: l.logger.WithField(key, value),
	}
}

// WithFields returns an Entry carrying all given key/value pairs.
func (l *LogrusLogger) WithFields(fields map[string]interface{}) Entry {
	return &LogrusEntry{
		entry: l.logger.WithFields(fields),
	}
}

// Debugf logs a formatted message at debug level.
func (l *LogrusLogger) Debugf(f string, args ...interface{}) {
	l.logger.Debugf(f, args...)
}

// Infof logs a formatted message at info level.
func (l *LogrusLogger) Infof(f string, args ...interface{}) {
	l.logger.Infof(f, args...)
}

// Errorf logs a formatted message at error level.
func (l *LogrusLogger) Errorf(f string, args ...interface{}) {
	l.logger.Errorf(f, args...)
}
// SetLevel parses and records the desired log level. When the logger is
// already running, the level takes effect immediately; otherwise Start
// applies it later.
func (l *LogrusLogger) SetLevel(level string) error {
	parsed, err := logrus.ParseLevel(level)
	if err != nil {
		return err
	}
	// record the choice and set it if we're already initialized
	l.level = &parsed
	if l.logger != nil {
		l.logger.SetLevel(parsed)
	}
	return nil
}
// WithField returns a derived Entry with one extra key/value pair.
func (l *LogrusEntry) WithField(key string, value interface{}) Entry {
	return &LogrusEntry{
		entry: l.entry.WithField(key, value),
	}
}

// WithFields returns a derived Entry with all given key/value pairs.
func (l *LogrusEntry) WithFields(fields map[string]interface{}) Entry {
	return &LogrusEntry{
		entry: l.entry.WithFields(fields),
	}
}

// Debugf logs a formatted message at debug level.
func (l *LogrusEntry) Debugf(f string, args ...interface{}) {
	l.entry.Debugf(f, args...)
}

// Infof logs a formatted message at info level.
func (l *LogrusEntry) Infof(f string, args ...interface{}) {
	l.entry.Infof(f, args...)
}

// Errorf logs a formatted message at error level.
func (l *LogrusEntry) Errorf(f string, args ...interface{}) {
	l.entry.Errorf(f, args...)
}
|
// consolidate a LaTeX top-level source file into a single file in preparation
// for running through pandoc to generate an ePub.
package main
/*
Copyright (c) 2012 Kyle Isom <kyle@tyrfingr.is>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the
above copyright notice and this permission notice appear in all
copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
import (
"bufio"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"regexp"
)
// inFileName/outFileName hold the parsed -i/-o flag values.
var inFileName, outFileName string

// includeFile matches a LaTeX \input{...} line, capturing the file stem.
var includeFile = regexp.MustCompile("^\\s*\\\\input{(.*)}$")

// endNewline matches a trailing newline.
// NOTE(review): not referenced in this chunk.
var endNewline = regexp.MustCompile("\n$")
// main reads the top-level LaTeX file line by line, replacing every
// \input{...} directive with the contents of the referenced .tex file, and
// appends the result to the output file.
func main() {
	outFN := flag.String("o", "gopherref_c.tex", "output file name")
	inFN := flag.String("i", "gopherref.tex", "input file name")
	flag.Parse()
	outFileName = *outFN
	inFileName = *inFN
	inFile, err := os.Open(inFileName)
	if err != nil {
		fmt.Println("[!] couldn't open ", inFileName)
		os.Exit(1)
	}
	buf := bufio.NewReader(inFile)
	fullLine := []byte{}
	for {
		line, isPrefix, err := buf.ReadLine()
		if err == io.EOF {
			break
		} else if err != nil {
			fmt.Printf("[!] unrecoverable error reading %s: %s\n",
				inFileName, err.Error())
			os.Exit(1)
		}
		fullLine = append(fullLine, line...)
		if isPrefix {
			// ReadLine returned a partial line; keep accumulating.
			continue
		}
		if includeFile.Match(fullLine) {
			// Renamed local (was includeFile) so it no longer shadows the
			// package-level regexp of the same name.
			includePath := includeFile.ReplaceAll(fullLine, []byte("$1.tex"))
			fullLine, err = ioutil.ReadFile(string(includePath))
			if err != nil {
				// Bug fix: report the include file that failed to read,
				// not the top-level input file.
				fmt.Printf("[!] unrecoverable error reading %s: %s\n",
					string(includePath), err.Error())
				os.Exit(1)
			}
		}
		appendFile(outFileName, fullLine)
		fullLine = []byte{}
	}
}
// appendFile appends line to fileName, creating the file (mode 0600) when
// it does not exist. Errors are reported to stdout; on open failure the
// function returns instead of writing through a nil file handle.
func appendFile(fileName string, line []byte) {
	// O_CREATE replaces the previous open-then-Create-on-ENOENT dance.
	file, err := os.OpenFile(fileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
	if err != nil {
		fmt.Printf("[+] unrecoverable error writing %s: %s\n",
			fileName, err.Error())
		return // previously fell through and called Write/Close on a nil file
	}
	defer file.Close()
	if _, err = file.Write(line); err != nil {
		fmt.Printf("[+] unrecoverable error writing %s: %s\n",
			fileName, err.Error())
	}
}
|
package service
import (
"context"
"strings"
"github.com/movsb/taoblog/modules/utils"
"github.com/movsb/taoblog/service/models"
"github.com/movsb/taorm/taorm"
)
// tags returns a query statement bound to the Tag model.
func (s *Service) tags() *taorm.Stmt {
	return s.tdb.Model(models.Tag{})
}
// GetTagByName gets a tag by Name. It panics with *TagNotFoundError when
// the lookup fails (any error is treated as not-found).
func (s *Service) GetTagByName(name string) *models.Tag {
	var tag models.Tag
	err := s.tags().Where("name=?", name).Find(&tag)
	if err != nil {
		panic(&TagNotFoundError{})
	}
	return &tag
}
// ListTagsWithCount returns all tags attached to at least one post,
// together with their usage counts, most-used first.
func (s *Service) ListTagsWithCount() []*models.TagWithCount {
	var tags []*models.TagWithCount
	s.tdb.Raw(`
	SELECT tags.name AS name, count(tags.id) AS count
	FROM tags INNER JOIN post_tags ON tags.id = post_tags.tag_id
	GROUP BY tags.id
	ORDER BY count desc
	`).MustFind(&tags)
	return tags
}
// getObjectTagIDs returns the tag ids attached to the given post. When
// alias is true the set is expanded with alias relations via
// getAliasTagsAll. Query/scan errors panic.
func (s *Service) getObjectTagIDs(postID int64, alias bool) (ids []int64) {
	sql := `SELECT tag_id FROM post_tags WHERE post_id=?`
	rows, err := s.tdb.Query(sql, postID)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	for rows.Next() {
		var id int64
		err = rows.Scan(&id)
		if err != nil {
			panic(err)
		}
		ids = append(ids, id)
	}
	if alias {
		ids = s.getAliasTagsAll(ids)
	}
	return
}
// GetObjectTagNames returns the names of all tags attached to the given
// post. Query/scan errors panic. The result is never nil (empty slice
// when the post has no tags).
func (s *Service) GetObjectTagNames(postID int64) []string {
	query := `select tags.name from post_tags,tags where post_tags.post_id=? and post_tags.tag_id=tags.id`
	args := []interface{}{postID}
	rows, err := s.tdb.Query(query, args...)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	names := make([]string, 0)
	for rows.Next() {
		var name string
		err = rows.Scan(&name)
		if err != nil {
			panic(err)
		}
		names = append(names, name)
	}
	return names
}
// getAliasTagsAll expands ids with tag-alias relations in both directions:
// aliases referenced by the given tags, and tags whose alias points at one
// of the given ids.
// NOTE(review): the ids are joined into one comma-separated string bound
// to a single `in (?)` placeholder, so the driver sends e.g. '1,2,3' as
// ONE value — verify this matches more than the first id on the target
// database.
func (s *Service) getAliasTagsAll(ids []int64) []int64 {
	sids := utils.JoinInts(ids, ",")
	if sids == "" {
		return ids
	}
	sql1 := `SELECT alias FROM tags WHERE id in (?)`
	sql2 := `SELECT id FROM tags WHERE alias in (?)`
	rows, err := s.tdb.Query(sql1, sids)
	if err != nil {
		panic(err)
	}
	for rows.Next() {
		var alias int64
		if err = rows.Scan(&alias); err != nil {
			panic(err)
		}
		// alias == 0 means "no alias"; skip those.
		if alias > 0 {
			ids = append(ids, alias)
		}
	}
	rows.Close()
	rows, err = s.tdb.Query(sql2, sids)
	if err != nil {
		panic(err)
	}
	for rows.Next() {
		var id int64
		if err = rows.Scan(&id); err != nil {
			panic(err)
		}
		ids = append(ids, id)
	}
	rows.Close()
	return ids
}
// UpdateObjectTags replaces the tag set of post pid with tags: tags no
// longer present are detached; new ones are attached, creating the tag
// row when the name is unknown, otherwise resolving to the root of its
// alias chain.
func (s *Service) UpdateObjectTags(pid int64, tags []string) {
	newTags := tags
	oldTags := s.GetObjectTagNames(pid)
	var (
		toBeDeled []string
		toBeAdded []string
	)
	// old tags that disappeared from the new set
	for _, t := range oldTags {
		if !utils.StrInSlice(newTags, t) {
			toBeDeled = append(toBeDeled, t)
		}
	}
	// new tags (whitespace-trimmed; empty names skipped)
	for _, t := range newTags {
		t = strings.TrimSpace(t)
		if t != "" && !utils.StrInSlice(oldTags, t) {
			toBeAdded = append(toBeAdded, t)
		}
	}
	for _, t := range toBeDeled {
		s.removeObjectTag(pid, t)
	}
	for _, t := range toBeAdded {
		var tid int64
		if !s.hasTagName(t) {
			tid = s.addTag(t)
		} else {
			tag := s.getRootTag(t)
			tid = tag.ID
		}
		s.addObjectTag(pid, tid)
	}
}
// removeObjectTag detaches the named tag from post pid. It panics (via
// GetTagByName) when the tag name does not exist.
func (s *Service) removeObjectTag(pid int64, tagName string) {
	tagObj := s.GetTagByName(tagName)
	s.tdb.From(models.ObjectTag{}).
		Where("post_id=? AND tag_id=?", pid, tagObj.ID).
		MustDelete()
}
// deletePostTags detaches every tag from the given post.
// NOTE(review): ctx is currently unused.
func (s *Service) deletePostTags(ctx context.Context, postID int64) {
	s.tdb.From(models.ObjectTag{}).Where(`post_id=?`, postID).MustDelete()
}
// addObjectTag attaches tag tid to post pid. A duplicate-key error means
// the pair already exists and is ignored; any other error panics.
func (s *Service) addObjectTag(pid int64, tid int64) {
	objtag := models.ObjectTag{
		PostID: pid,
		TagID:  tid,
	}
	err := s.tdb.Model(&objtag).Create()
	if err == nil {
		return
	}
	if _, ok := err.(*taorm.DupKeyError); ok {
		return
	}
	panic(err)
}
// hasTagName reports whether a tag with the given name exists.
// Unexpected (non-not-found) lookup errors panic.
func (s *Service) hasTagName(tagName string) bool {
	var tag models.Tag
	err := s.tags().Where("name=?", tagName).Find(&tag)
	if err == nil {
		return true
	}
	if taorm.IsNotFoundError(err) {
		return false
	}
	panic(err)
}
// addTag inserts a new tag row and returns its generated id.
func (s *Service) addTag(tagName string) int64 {
	tagObj := models.Tag{
		Name: tagName,
	}
	s.tdb.Model(&tagObj).MustCreate()
	return tagObj.ID
}
// getRootTag follows the alias chain starting at tagName until it reaches
// a tag whose Alias is 0 (the root) and returns that tag.
// NOTE(review): a cyclic alias chain in the data would loop forever here.
func (s *Service) getRootTag(tagName string) models.Tag {
	tagObj := s.GetTagByName(tagName)
	if tagObj.Alias == 0 {
		return *tagObj
	}
	ID := tagObj.Alias
	for {
		var tagObj models.Tag
		s.tdb.Where("id=?", ID).MustFind(&tagObj)
		if tagObj.Alias == 0 {
			return tagObj
		}
		ID = tagObj.Alias
	}
}
|
package common
import (
"bytes"
"crypto/rand"
"testing"
"github.com/ontio/ontology/common/serialization"
)
// BenchmarkZeroCopySource measures reading a mix of field types from a
// 12000-byte random buffer through the zero-copy reader.
func BenchmarkZeroCopySource(b *testing.B) {
	const N = 12000
	buf := make([]byte, N)
	rand.Read(buf)
	for i := 0; i < b.N; i++ {
		source := NewZeroCopySource(buf)
		for j := 0; j < N/100; j++ {
			source.NextUint16()
			source.NextByte()
			source.NextUint64()
			source.NextVarUint()
			source.NextBytes(20)
		}
	}
}
// BenchmarkDerserialize is the bytes.Buffer-based baseline for
// BenchmarkZeroCopySource, reading the same field mix via the
// serialization package.
// NOTE(review): the name is misspelled ("Derserialize"); left unchanged
// because benchmark names may be referenced by -bench patterns.
func BenchmarkDerserialize(b *testing.B) {
	const N = 12000
	buf := make([]byte, N)
	rand.Read(buf)
	for i := 0; i < b.N; i++ {
		reader := bytes.NewBuffer(buf)
		for j := 0; j < N/100; j++ {
			serialization.ReadUint16(reader)
			serialization.ReadByte(reader)
			serialization.ReadUint64(reader)
			serialization.ReadVarUint(reader, 0)
			serialization.ReadBytes(reader, 20)
		}
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.