text stringlengths 11 4.05M |
|---|
package handler
import (
"context"
"errors"
"path/filepath"
"testing"
proto "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/user/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
// UserGetSecureQuestionListTestSuite tests fetching a user's secure-question list.
type UserGetSecureQuestionListTestSuite struct {
	suite.Suite
	JinmuIDService *JinmuIDService // service under test
	Account        *Account        // testing account loaded from the env file
}
// SetupSuite prepares the test environment: it builds the JinmuID service
// under test and loads the testing account from the local env file.
func (suite *UserGetSecureQuestionListTestSuite) SetupSuite() {
	envFilepath := filepath.Join("testdata", "local.svc-jinmuid.env")
	suite.JinmuIDService = newJinmuIDServiceForTest()
	suite.Account = newTestingAccountFromEnvFile(envFilepath)
}
// TestUserGetSecureQuestionList exercises the happy path: a signed-in user
// fetches their secure-question list and no error is expected.
func (suite *UserGetSecureQuestionListTestSuite) TestUserGetSecureQuestionList() {
	t := suite.T()
	ctx := context.Background()
	// Sign in first; the returned context carries the authenticated session.
	ctx, userID, err := mockSigninByPhonePassword(ctx, suite.JinmuIDService, suite.Account.phone, suite.Account.phonePassword, suite.Account.seed, suite.Account.nationCode)
	assert.NoError(t, err)
	req := &proto.UserGetSecureQuestionListRequest{
		UserId: userID,
	}
	resp := new(proto.UserGetSecureQuestionListResponse)
	err = suite.JinmuIDService.UserGetSecureQuestionList(ctx, req, resp)
	assert.NoError(t, err)
}
// TestUserGetSecureQuestionListIsError verifies that an error is returned
// when the user has not set any secure questions.
func (suite *UserGetSecureQuestionListTestSuite) TestUserGetSecureQuestionListIsError() {
	t := suite.T()
	ctx := context.Background()
	ctx, userID, err := mockSigninByPhonePassword(ctx, suite.JinmuIDService, suite.Account.phone, suite.Account.phonePassword, suite.Account.seed, suite.Account.nationCode)
	assert.NoError(t, err)
	req := &proto.UserGetSecureQuestionListRequest{
		UserId: userID,
	}
	resp := new(proto.UserGetSecureQuestionListResponse)
	err = suite.JinmuIDService.UserGetSecureQuestionList(ctx, req, resp)
	// BUG FIX: assert.Error(t, errors.New(...), err) treated the freshly
	// constructed error as the value under test, so the assertion always
	// passed regardless of err. Assert on the actual error instead
	// (expected message: "[errcode:49000] secure questions are not set").
	assert.Error(t, err)
}
// TestUserGetSecureQuestionListUserIDError verifies the error returned when
// the request is made for a user that does not exist (no sign-in performed).
func (suite *UserGetSecureQuestionListTestSuite) TestUserGetSecureQuestionListUserIDError() {
	t := suite.T()
	ctx := context.Background()
	req := &proto.UserGetSecureQuestionListRequest{
		UserId: suite.Account.userID,
	}
	resp := new(proto.UserGetSecureQuestionListResponse)
	err := suite.JinmuIDService.UserGetSecureQuestionList(ctx, req, resp)
	// BUG FIX: assert.Error(t, errors.New(...), err) passed the constructed
	// error as the value under test, so this assertion could never fail.
	// Check the actual error and compare its message against the expected one.
	wantErr := errors.New("[errcode:1600] Invalid user 0")
	if assert.Error(t, err) {
		assert.Equal(t, wantErr.Error(), err.Error())
	}
}
// TearDownSuite releases test resources by closing the service's database
// connection.
func (suite *UserGetSecureQuestionListTestSuite) TearDownSuite() {
	ctx := context.Background()
	suite.JinmuIDService.datastore.SafeCloseDB(ctx)
}
// TestUserGetSecureQuestionListTestSuite is the go-test entry point that runs
// the whole suite.
func TestUserGetSecureQuestionListTestSuite(t *testing.T) {
	suite.Run(t, new(UserGetSecureQuestionListTestSuite))
}
|
/*
The S3 backend, for dealing with a Vagrant catalog stored in an Amazon S3 bucket
*/
package caryatid
import (
"bytes"
"fmt"
"log"
"os"
"regexp"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// CaryatidS3Backend implements a catalog backend that stores the Vagrant
// catalog and box files in an Amazon S3 bucket.
type CaryatidS3Backend struct {
	AwsSession      *session.Session      // shared AWS session
	S3Service       *s3.S3                // low-level S3 API client
	S3Downloader    *s3manager.Downloader // managed downloads
	S3Uploader      *s3manager.Uploader   // managed uploads
	Manager         *BackendManager       // back-reference to the owning manager
	CatalogLocation *caryatidS3Location   // parsed bucket/key of the catalog
}
// caryatidS3Location is a parsed s3:// URI: the bucket name and the key
// ("resource") within that bucket.
type caryatidS3Location struct {
	Bucket   string
	Resource string
}

// s3URIRegex matches s3://<bucket>/<key>. Compiled once at package init
// instead of on every call.
var s3URIRegex = regexp.MustCompile(`^s3://([a-zA-Z0-9\-_]+)/(.*)`)

// uri2s3location parses an s3://bucket/key URI into a caryatidS3Location.
// It returns an error for URIs that do not match the s3:// scheme.
func uri2s3location(uri string) (loc *caryatidS3Location, err error) {
	result := s3URIRegex.FindAllStringSubmatch(uri, -1)
	if result == nil {
		err = fmt.Errorf("Invalid S3 URI '%v'", uri)
		return
	} else if len(result) != 1 || len(result[0]) != 3 {
		err = fmt.Errorf("Apparently the regexp is wrong and I don't know what I'm doing, sorry")
		return
	}
	// BUG FIX: removed leftover debug logging (log.Printf of the raw regexp
	// result) that spammed the log on every URI parse.
	loc = new(caryatidS3Location)
	loc.Bucket = result[0][1]
	loc.Resource = result[0][2]
	return
}
// verifyCredential checks that the configured AWS credentials can access the
// catalog bucket by issuing a ListObjects call and returning any error.
func (backend *CaryatidS3Backend) verifyCredential() (err error) {
	_, err = backend.S3Service.ListObjects(&s3.ListObjectsInput{
		Bucket: aws.String(backend.CatalogLocation.Bucket),
	})
	return
}
// SetManager wires the backend to its BackendManager and initializes the AWS
// session, S3 client, downloader and uploader, then parses the manager's
// catalog URI into the backend's CatalogLocation.
func (backend *CaryatidS3Backend) SetManager(manager *BackendManager) (err error) {
	backend.Manager = manager
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}))
	backend.AwsSession = sess
	backend.S3Service = s3.New(sess)
	backend.S3Downloader = s3manager.NewDownloader(sess)
	backend.S3Uploader = s3manager.NewUploader(sess)
	backend.CatalogLocation, err = uri2s3location(backend.Manager.CatalogUri)
	return
}
// GetManager returns the BackendManager previously set via SetManager, or an
// error if no manager has been set.
func (backend *CaryatidS3Backend) GetManager() (manager *BackendManager, err error) {
	if backend.Manager == nil {
		return nil, fmt.Errorf("The Manager property was not set")
	}
	return backend.Manager, nil
}
// SetCredential validates that a backend credential string was supplied and
// that the ambient AWS credentials can access the catalog bucket.
//
// NOTE(review): backendCredential itself is never used beyond the empty
// check — authentication relies on the shared AWS session configuration;
// confirm this is intentional.
func (backend *CaryatidS3Backend) SetCredential(backendCredential string) (err error) {
	if backendCredential == "" {
		err = fmt.Errorf("Backend credential is empty")
		return
	}
	err = backend.verifyCredential()
	return
}
// GetCatalogBytes downloads the catalog JSON from S3 and returns its raw
// bytes. A missing catalog object is not an error: an empty JSON object
// ("{}") is returned so a fresh catalog can be started.
func (backend *CaryatidS3Backend) GetCatalogBytes() (catalogBytes []byte, err error) {
	catalogExists := true
	dlBuffer := &aws.WriteAtBuffer{}
	_, dlerr := backend.S3Downloader.Download(
		dlBuffer,
		&s3.GetObjectInput{
			Bucket: aws.String(backend.CatalogLocation.Bucket),
			Key:    aws.String(backend.CatalogLocation.Resource),
		},
	)
	if dlerr != nil {
		if aerr, ok := dlerr.(awserr.Error); ok {
			switch aerr.Code() {
			case s3.ErrCodeNoSuchKey:
				// No catalog yet; start with an empty one.
				log.Printf("No file at '%v'; starting with empty catalog\n", backend.Manager.CatalogUri)
				catalogExists = false
			case s3.ErrCodeNoSuchBucket:
				err = fmt.Errorf("Bucket '%v' does not exist\n", backend.CatalogLocation.Bucket)
			default:
				err = dlerr
			}
		} else {
			// BUG FIX: non-AWS errors (e.g. transport failures) were
			// previously dropped because only awserr.Error values were
			// examined, silently returning a possibly-empty catalog.
			err = dlerr
		}
	}
	if err != nil {
		log.Printf("CaryatidS3Backend.GetCatalogBytes(): Could not download from S3: %v", err)
		return
	}
	if catalogExists {
		catalogBytes = dlBuffer.Bytes()
	} else {
		catalogBytes = []byte("{}")
	}
	return
}
// SetCatalogBytes uploads the serialized catalog JSON to the configured
// bucket/key, overwriting any existing catalog object.
func (backend *CaryatidS3Backend) SetCatalogBytes(serializedCatalog []byte) (err error) {
	_, err = backend.S3Uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(backend.CatalogLocation.Bucket),
		Key:    aws.String(backend.CatalogLocation.Resource),
		Body:   bytes.NewReader(serializedCatalog),
	})
	if err != nil {
		log.Println("CaryatidS3Backend.SetCatalogBytes(): Error trying to upload catalog: ", err)
	}
	return
}
// CopyBoxFile uploads a local box file to the catalog's S3 bucket, storing it
// under "<catalog dir>/<boxName>/<boxName>_<boxVersion>_<boxProvider>.box".
func (backend *CaryatidS3Backend) CopyBoxFile(path string, boxName string, boxVersion string, boxProvider string) (err error) {
	boxFile := fmt.Sprintf("%v/%v_%v_%v.box", boxName, boxName, boxVersion, boxProvider)
	key := boxFile
	// Prefix with the catalog key's parent "directory" when it has one.
	// TODO: Do we need the if statement? Can we just use the second version and be OK if LastIndex() returns -1 ?
	if lastSlashIdx := strings.LastIndex(backend.CatalogLocation.Resource, "/"); lastSlashIdx >= 0 {
		key = backend.CatalogLocation.Resource[0:lastSlashIdx] + "/" + boxFile
	}

	fileHandler, err := os.Open(path)
	if err != nil {
		return
	}
	defer fileHandler.Close()

	_, err = backend.S3Uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(backend.CatalogLocation.Bucket),
		Key:    aws.String(key),
		Body:   fileHandler,
	})
	return
}
// DeleteFile deletes the object referenced by an s3:// URI and waits until S3
// reports the object as gone.
func (backend *CaryatidS3Backend) DeleteFile(uri string) (err error) {
	fileLoc, err := uri2s3location(uri)
	if err != nil {
		return
	}
	bucket, key := aws.String(fileLoc.Bucket), aws.String(fileLoc.Resource)
	if _, err = backend.S3Service.DeleteObject(&s3.DeleteObjectInput{
		Bucket: bucket,
		Key:    key,
	}); err != nil {
		return
	}
	// Deletion is eventually consistent; block until it is observable.
	err = backend.S3Service.WaitUntilObjectNotExists(&s3.HeadObjectInput{
		Bucket: bucket,
		Key:    key,
	})
	return
}
// Scheme returns the URI scheme ("s3") handled by this backend.
func (backend *CaryatidS3Backend) Scheme() string {
	const scheme = "s3"
	return scheme
}
|
package raw_client
import (
"context"
)
// PostDevAppDeleteRequest is the JSON body for the dev app-delete API call.
type PostDevAppDeleteRequest struct {
	App string `json:"app"` // identifier of the app to delete
	/*RequestToken string `json:"__REQUEST_TOKEN__"`*/
}

// PostDevAppDeleteResponse is the (empty) response body of the app-delete API.
type PostDevAppDeleteResponse struct {
}
// PostDevAppDelete issues a POST to /k/api/dev/app/delete.json with the given
// request body and decodes the result into a PostDevAppDeleteResponse.
func PostDevAppDelete(ctx context.Context, apiClient *ApiClient, req PostDevAppDeleteRequest) (*PostDevAppDeleteResponse, error) {
	var resp PostDevAppDeleteResponse
	apiRequest := ApiRequest{
		Method: "POST",
		Scheme: "https",
		Path:   "/k/api/dev/app/delete.json",
		Json:   req,
	}
	if err := apiClient.Call(ctx, apiRequest, &resp); err != nil {
		return nil, err
	}
	return &resp, nil
}
|
package theme
import (
"flag"
"fmt"
)
// Scaffold reads the -name command-line flag (default "test") and prints the
// name of the theme being created.
func Scaffold() {
	// Register the flag, then parse the command line.
	name := flag.String("name", "test", "The name of your theme.")
	flag.Parse()
	// Println inserts a single space between operands, producing the same
	// output as the original string concatenation.
	fmt.Println("The theme you are making is called:", *name)
}
package main
import "fmt"
// main prints the ASCII uppercase letters with their code points and UTF-8
// bytes, then demonstrates the difference between a rune and a string.
func main() {
	for i := 65; i <= 90; i++ {
		// FIX: convert through rune — a direct string(int) conversion is
		// flagged by go vet (and disallowed without rune since Go 1.15 vet).
		fmt.Println(i, " - ", string(rune(i)), " - ", []byte(string(rune(i))))
	}
	fmt.Println("\n rune example: ")
	x := 'A' // a rune literal; its type is int32
	fmt.Println(x)
	fmt.Printf("%T \n", x)
	fmt.Println("\n string example: ")
	y := "A"
	fmt.Println(y)
	fmt.Printf("%T \n", y)
}
/*
NOTE:
Some operating systems (Windows) might not print characters where i < 256
If you have this issue, you can use this code:
fmt.Println(i, " - ", string(i), " - ", []int32(string(i)))
UTF-8 is the text coding scheme used by Go.
UTF-8 works with 1 - 4 bytes.
A byte is 8 bits.
[]byte deals with bytes, that is, only 1 byte (8 bits) at a time.
[]int32 allows us to store the value of 4 bytes, that is, 4 bytes * 8 bits per byte = 32 bits.
*/
// 65 - A - [65]
// 66 - B - [66]
// 67 - C - [67]
// 68 - D - [68]
// 69 - E - [69]
// 70 - F - [70]
// 71 - G - [71]
// 72 - H - [72]
// 73 - I - [73]
// 74 - J - [74]
// 75 - K - [75]
// 76 - L - [76]
// 77 - M - [77]
// 78 - N - [78]
// 79 - O - [79]
// 80 - P - [80]
// 81 - Q - [81]
// 82 - R - [82]
// 83 - S - [83]
// 84 - T - [84]
// 85 - U - [85]
// 86 - V - [86]
// 87 - W - [87]
// 88 - X - [88]
// 89 - Y - [89]
// 90 - Z - [90]
// rune example:
// 65
// int32
// string example:
// A
// string
|
/*
Copyright 2016 Vastech SA (PTY) LTD
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package report
import (
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"strconv"
"sync"
"text/template"
"time"
"tptsreporter/grafana"
"gorm.io/gorm"
)
// Report groups functions related to generating the report.
// After reading and closing the pdf returned by Generate(), call Clean() to delete the pdf file as well the temporary build files.
type Report interface {
	Generate(*StateReportGenerationDB) error
	Title() string
	Panels() []grafana.Panel
	Clean()
}

// report is the concrete Report implementation.
type report struct {
	gClient     grafana.Client    // Grafana API client for dashboards and panel PNGs
	time        grafana.TimeRange // time range the report covers
	dash        grafana.Dashboard // dashboard definition fetched from Grafana
	texTemplate string            // LaTeX template content; "" selects the default template
	uidDash     string            // UID of the dashboard in Grafana
	randTmpDir  string            // per-report temporary build directory
	titleDash   string            // dashboard title, exposed via Title()
}
// ConfigDB stores database connection configuration, loaded from YAML.
type ConfigDB struct {
	Username string `yaml:"user"`
	Password string `yaml:"password"`
	NameDB   string `yaml:"dbname"`
	Host     string `yaml:"host"`
	Port     string `yaml:"port"`
	ModeSSL  string `yaml:"sslmode"`
}
// AppData is a custom type to store the data related to the application.
type AppData struct {
	Title      string
	State      string
	UI         string
	Error      string
	Timeout    int
	NameFile   string
	NameReport string
	UUID       string
	URL        string
	Host       string
	Route      string
}
// Organization model.
type Organization struct {
	ID   int
	Name string
}

// StateReportGenerationDB is the persisted record tracking one report
// generation run: which dashboard and time range, rendering progress,
// errors, and file-transfer status.
type StateReportGenerationDB struct {
	gorm.Model
	Value                 string // also used as the per-report temp-dir name (see new())
	TitleDash             string
	UIDDash               string
	FromTS                int64
	ToTS                  int64
	CountPanels           int
	NameFilePDF           string
	State                 int // rendering progress, 0-100 (see renderPNGsParallel)
	Error                 string
	Message               string
	ReadyToSendFile       bool
	CompletedFileTransfer bool
	TimestampStart        int64
	UUID                  string
	IDOrg                 int
	NameOrg               string
	TimestampStateChange  int64
	TimedOut              bool // set externally; checked to abort rendering
}

// CacheAgeDB records cache-age timestamps; TS is unique per row.
type CacheAgeDB struct {
	gorm.Model
	TS int64 `gorm:"unique"`
}
// Names of files and folders used during report generation.
const (
	NameDirImages     = "images"     // panel PNGs go here inside the temp dir
	NameReportTexFile = "report.tex" // generated LaTeX source
	NameReportPDF     = "report.pdf" // final pdflatex output
	NameDirTemp       = "tptsgr-tmp"
	NameFileLog       = "logTPTSReporter"
	NameDirStatic     = `tptsgr-static`
	NameDirCache      = `tptsgr-cache`
	NameDirTmplHTML   = `html`
	// PeriodClearCache is how often the cache is cleared (one week).
	PeriodClearCache = time.Duration(24*7) * time.Hour
)

// Directories holds the current working paths of all directories.
type Directories struct {
	CWD, Static, Cache, Temp string
}

// Dir stores the current working paths of all directories.
var Dir Directories

// DB is the shared database handle used to persist report state.
var DB *gorm.DB

// CfgDB is the database configuration object.
var CfgDB ConfigDB
// New creates a new Report.
// texTemplate is the content of a LaTex template file. If empty, a default tex template is used.
func New(g grafana.Client, tR grafana.TimeRange, sdb *StateReportGenerationDB) Report {
	return new(g, tR, sdb)
}

// new builds the concrete report: it fetches the dashboard definition for
// sdb.UIDDash and derives the per-report temp dir from sdb.Value.
// NOTE(review): this package-level func shadows the builtin new — consider
// renaming. On a fetch error the report is still returned with a zero-value
// dashboard; the error is only logged.
func new(g grafana.Client, tR grafana.TimeRange, sdb *StateReportGenerationDB) *report {
	dash, err := g.GetDashboard(sdb.UIDDash)
	if err != nil {
		err = fmt.Errorf("error fetching dashboard %v: %v", sdb.UIDDash, err)
		log.Println(err)
	}
	return &report{g, tR, dash, "", sdb.UIDDash, filepath.Join(Dir.Temp, sdb.Value), dash.Title}
}
// Generate renders all dashboard panels to PNG images and then builds the
// report for the dashboard. On success it marks the state record as ready to
// send. It returns an error when the run timed out or report generation
// failed; rendering errors alone are logged but do not abort the run.
func (rep *report) Generate(sdb *StateReportGenerationDB) (err error) {
	org := Organization{
		ID:   sdb.IDOrg,
		Name: sdb.NameOrg,
	}
	if err = rep.renderPNGsParallel(rep.dash, sdb); err != nil {
		err = fmt.Errorf("error rendering PNGs in paralel for dash %+v: %v", rep.dash.Title, err)
		log.Println(err)
	}
	// Abort before building the report if the run was flagged as timed out.
	if sdb.TimedOut {
		return fmt.Errorf("request timed out")
	}
	log.Println(rep.time)
	err = generateAbReport(rep.dash, rep.randTmpDir, sdb.TitleDash+"_"+sdb.Value, rep.time, org)
	if err != nil {
		log.Println(err)
		return
	}
	sdb.ReadyToSendFile = true
	return
}
// Title returns the dashboard title parsed from the dashboard definition.
func (rep *report) Title() string {
	return rep.titleDash
}

// Panels returns the panels of the fetched dashboard.
func (rep *report) Panels() []grafana.Panel {
	return rep.dash.Panels
}

// Clean deletes the temporary directory used during report generation.
func (rep *report) Clean() {
	err := os.RemoveAll(rep.randTmpDir)
	if err != nil {
		log.Println("Error cleaning up tmp dir:", err)
	}
}
// imgDirPath returns the directory that panel PNGs are written into.
func (rep *report) imgDirPath() string {
	return filepath.Join(rep.randTmpDir, NameDirImages)
}

// pdfPath returns the location of the generated report PDF.
func (rep *report) pdfPath() string {
	return filepath.Join(rep.randTmpDir, NameReportPDF)
}

// texPath returns the location of the generated LaTeX source file.
func (rep *report) texPath() string {
	return filepath.Join(rep.randTmpDir, NameReportTexFile)
}
// renderPNGsParallel downloads a PNG for every titled panel of the dashboard
// using a small worker pool, updating progress in sdb (persisted via the
// package-level DB) as it goes. It returns an error when one or more panels
// failed to render; individual failures are also appended to sdb.Error.
func (rep *report) renderPNGsParallel(dash grafana.Dashboard, sdb *StateReportGenerationDB) error {
	// Buffer all panels on a channel so workers can drain it.
	panels := make(chan grafana.Panel, len(dash.Panels))
	for _, p := range dash.Panels {
		panels <- p
	}
	close(panels)
	// Fetch images from the Grafana server; concurrency is limited with a
	// worker pool to avoid overwhelming Grafana on dashboards with many panels.
	var wg sync.WaitGroup
	workers := 1
	wg.Add(workers)
	errs := make(chan error, len(dash.Panels)) // workers report errors on a channel
	for i := 0; i < workers; i++ {
		go func(panels <-chan grafana.Panel, errs chan<- error) {
			defer wg.Done()
			countRenderedPanelImages := float64(0)
			countTotalPanelImages := float64(len(panels)) // channel length == buffered panel count
			for p := range panels {
				if p.Title != "" {
					// Stop downloading once the run is flagged as timed out.
					if sdb.TimedOut {
						log.Println("Time out!, stopping further downloading...")
						return
					}
					err := rep.renderPNG(p)
					if err != nil {
						log.Printf("Error creating image for panel: %v, %v", p.Title, err)
						errs <- err
						sdb.Error += "\n" + err.Error()
						sdb.Message += "\n" + err.Error()
					}
					countRenderedPanelImages++
					// IDIOM FIX: renamed snake_case val_progress to progress.
					progress := int(countRenderedPanelImages / countTotalPanelImages * 100)
					sdb.State = progress
					log.Println("Progress = ", progress, "%")
					sdb.Message = strconv.Itoa(int(countRenderedPanelImages)) + " of " + strconv.Itoa(int(countTotalPanelImages))
					sdb.TimestampStateChange = time.Now().UnixNano()
					DB.Save(sdb)
				}
			}
		}(panels, errs)
	}
	wg.Wait()
	close(errs)
	// Only non-nil errors are ever sent, so counting received values suffices.
	countErr := 0
	for range errs {
		countErr++
	}
	if countErr > 0 {
		return fmt.Errorf("%d panels have issues rendering image", countErr)
	}
	return nil
}
// renderPNG fetches the PNG for a single panel from Grafana and writes it to
// images/image<panelID>.png inside the report's temp directory.
func (rep *report) renderPNG(p grafana.Panel) error {
	body, err := rep.gClient.GetPanelPng(p, rep.uidDash, rep.time)
	if err != nil {
		log.Println(err)
		return fmt.Errorf("error getting panel %+v: %v", p.Title, err)
	}
	defer body.Close()
	// NOTE(review): 0777 is world-writable; acceptable for a throwaway temp
	// dir, but confirm it is intended.
	err = os.MkdirAll(rep.imgDirPath(), 0777)
	if err != nil {
		log.Println(err)
		return fmt.Errorf("error creating img directory:%v", err)
	}
	imgFileName := fmt.Sprintf("image%d.png", p.Id)
	file, err := os.Create(filepath.Join(rep.imgDirPath(), imgFileName))
	if err != nil {
		log.Println(err)
		return fmt.Errorf("error creating image file:%v", err)
	}
	defer file.Close()
	// Stream the HTTP body straight into the file.
	_, err = io.Copy(file, body)
	if err != nil {
		log.Println(err)
		return fmt.Errorf("error copying body to file:%v", err)
	}
	return nil
}
// generateTeXFile renders rep.texTemplate (delimited with [[ ]] so LaTeX
// braces do not clash with template syntax) into report.tex in the temp dir,
// exposing the dashboard, time range and client to the template.
func (rep *report) generateTeXFile(dash grafana.Dashboard) error {
	// templData is the combined data object handed to the template.
	type templData struct {
		grafana.Dashboard
		grafana.TimeRange
		grafana.Client
	}
	err := os.MkdirAll(rep.randTmpDir, 0777)
	if err != nil {
		return fmt.Errorf("error creating temporary directory at %v: %v", rep.randTmpDir, err)
	}
	file, err := os.Create(rep.texPath())
	if err != nil {
		return fmt.Errorf("error creating tex file at %v : %v", rep.texPath(), err)
	}
	defer file.Close()
	tmpl, err := template.New("report").Delims("[[", "]]").Parse(rep.texTemplate)
	if err != nil {
		return fmt.Errorf("error parsing template '%s': %v", rep.texTemplate, err)
	}
	data := templData{dash, rep.time, rep.gClient}
	err = tmpl.Execute(file, data)
	if err != nil {
		return fmt.Errorf("error executing tex template:%v", err)
	}
	return nil
}
// runLaTeX compiles report.tex in the temp dir with pdflatex — first a
// -draftmode preprocessing pass, then the real pass — and returns the opened
// resulting PDF file. The caller is responsible for closing it.
func (rep *report) runLaTeX() (pdf *os.File, err error) {
	cmdPre := exec.Command("pdflatex", "-halt-on-error", "-draftmode", NameReportTexFile)
	cmdPre.Dir = rep.randTmpDir
	outBytesPre, errPre := cmdPre.CombinedOutput()
	log.Println("Calling LaTeX - preprocessing")
	if errPre != nil {
		err = fmt.Errorf("error calling LaTeX preprocessing: %q. Latex preprocessing failed with output: %s ", errPre, string(outBytesPre))
		return
	}
	cmd := exec.Command("pdflatex", "-halt-on-error", NameReportTexFile)
	cmd.Dir = rep.randTmpDir
	outBytes, err := cmd.CombinedOutput()
	log.Println("Calling LaTeX and building PDF")
	if err != nil {
		err = fmt.Errorf("error calling LaTeX: %q. Latex failed with output: %s ", err, string(outBytes))
		return
	}
	pdf, err = os.Open(rep.pdfPath())
	return
}
|
// Package flatten provides a Flatten() func for flattenning out nested slices of ints.
package flatten
import "fmt"
// Flatten will take a slice of arbitrarily nested (slices of) interfaces
// and/or ints, and return a flat slice of ints.
func Flatten(input interface{}) []int {
	flat := []int{}
	switch v := input.(type) {
	case int:
		// A bare integer is appended directly.
		flat = append(flat, v)
	case []int:
		// An already-flat int slice is appended wholesale.
		flat = append(flat, v...)
	case []interface{}:
		// Nested values are flattened recursively, element by element.
		for _, elem := range v {
			flat = append(flat, Flatten(elem)...)
		}
	default:
		// Any other type is reported and contributes nothing.
		fmt.Printf("Unknown type '%[1]T': %#[1]v\n", v)
	}
	return flat
}
|
package main
import (
"crypto/sha1"
"fmt"
)
// main demonstrates incremental SHA-1 hashing: a hash.Hash accumulates every
// byte written to it, so the second Sum covers str1 followed by str2 — it is
// NOT the digest of str2 alone.
func main() {
	str1 := "this is a test str, first"
	str2 := "this is a test str, second"
	mSha1 := sha1.New()
	mSha1.Write([]byte(str1))
	// Sum(nil) returns the digest of everything written so far.
	ret1 := mSha1.Sum(nil)
	fmt.Printf("raw ret:%s, hex ret:%x \n", ret1, ret1)
	mSha1.Write([]byte(str2))
	// Digest of str1 + str2, because the writes accumulate in the hash state.
	ret2 := mSha1.Sum(nil)
	fmt.Printf("raw ret:%s, hex ret:%x \n", ret2, ret2)
}
package main
import (
"html/template"
"log"
"os"
)
// main parses tpl.gohtml from disk and renders it to stdout by name.
func main() {
	// Parse the template file; ParseFiles names the template after the file.
	tpl, err := template.ParseFiles("tpl.gohtml")
	if err != nil {
		log.Fatal(err)
	}
	// err = tpl.Execute(os.Stdout, nil) // we execute the first of the templates in the terminal
	err = tpl.ExecuteTemplate(os.Stdout, "tpl.gohtml", nil) // we execute the template in the terminal
	if err != nil {
		log.Fatal(err)
	}
}
/*
We can give templates any extensions we want like
[.php; .jj; .js; .mais; ...]
The custom is to give the .gohtml extension
*/
|
package access
import (
"github.com/ololko/simple-HTTP-server/pkg/events/models"
)
// DataAccessor abstracts event storage: implementations read and write
// events, delivering results and errors over the supplied channels.
type DataAccessor interface {
	ReadEvent(models.RequestT, chan<- models.AnswerT, chan<- error)
	WriteEvent(models.EventT, chan<- error)
}
// vi:nu:et:sts=4 ts=4 sw=4
package main
import (
"fmt"
"html/template"
"log"
"net/http"
"strconv"
"sync"
"github.com/gomodule/redigo/redis"
)
// hitmeTpl is the HTML page served by /hitme; {{.}} is the current hit count.
var hitmeTpl = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Document</title>
</head>
<body>
<p>OUCH - You have hit me {{.}} times!</p>
</body>
</html>`
//***************************************************************
// Redis Interface
//***************************************************************

// redisConnect dials the "redis" host on port 6379 and returns the
// connection. On failure it terminates the process via log.Fatal.
func redisConnect() redis.Conn {
	c, err := redis.Dial("tcp", "redis:6379")
	if err != nil {
		log.Fatal(err)
	}
	return c
}
// updateCount stores cnt in redis under the key "count". On connection or
// command failure it terminates the process via log.Fatal.
func updateCount(cnt int) {
	c := redisConnect()
	defer c.Close()
	// set the value on redis for the key "count"
	reply, err := c.Do("SET", "count", cnt)
	if err != nil {
		log.Fatal(err)
	}
	// BUG FIX: this logged "GET " although the operation performed is a SET.
	log.Println("SET ", reply)
}
// getCount reads the integer stored under the redis key "count", returning 0
// when the key is unset. On connection or command failure it terminates the
// process via log.Fatal.
func getCount() int {
	c := redisConnect()
	defer c.Close()
	// get the value from redis for the key "count"
	reply, err := c.Do("GET", "count")
	if err != nil {
		log.Fatal(err)
	}
	if reply != nil {
		s := string(reply.([]byte))
		log.Println("GET ", s)
		// NOTE(review): the Atoi error is ignored, so a non-numeric stored
		// value silently yields 0 — confirm that is acceptable.
		i, _ := strconv.Atoi(s)
		return i
	}
	return 0
}
//***************************************************************
// HTTP Request Handlers
//***************************************************************
// Request Handlers run as independent goroutines so any shared
// data must be protected.

// httpHandler carries the shared hit counter; mu guards count because
// handlers run concurrently.
type httpHandler struct {
	mu    sync.Mutex
	count int
}
// base handles "/" and echoes the requested path.
func (h *httpHandler) base(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "You have reached - %s\n", r.URL.Path)
}

// hi handles "/hi" with a greeting that echoes the requested path.
func (h *httpHandler) hi(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "Hello! - %s\n", r.URL.Path)
}
// hitme increments the hit counter stored in redis and renders an HTML page
// showing the new count. The mutex serializes the read-increment-write cycle
// across concurrent requests.
func (h *httpHandler) hitme(w http.ResponseWriter, r *http.Request) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.count = getCount()
	h.count++
	updateCount(h.count)
	tpl, err := template.New("HitMe").Parse(hitmeTpl)
	if err != nil {
		// BUG FIX: log.Fatalln here killed the entire server on a template
		// error; report a 500 to this client and keep serving instead.
		log.Println("Error while parsing template:", err)
		http.Error(w, "internal server error", http.StatusInternalServerError)
		return
	}
	if err = tpl.ExecuteTemplate(w, "HitMe", h.count); err != nil {
		// The response may be partially written already, so only log here.
		log.Println("Error while executing template:", err)
	}
}
//***************************************************************
// m a i n
//***************************************************************

// main registers the three request handlers on the default mux and serves
// HTTP on :8080; ListenAndServe only returns on error, which is fatal.
func main() {
	h := new(httpHandler)
	http.HandleFunc("/", h.base)
	http.HandleFunc("/hi", h.hi)
	http.HandleFunc("/hitme", h.hitme)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
|
package entity
import (
"errors"
)
// CustomError enumerates the error conditions this package can report.
type CustomError int

// Error codes; each value doubles as an index into the error table below.
const (
	ErrInvalidNumber CustomError = iota
	ErrDataMalformed
	ErrInvalidID
)

// customErrors maps each CustomError code to its error value. PERF FIX:
// hoisted to a package-level table so Error() no longer allocates three new
// errors on every call.
var customErrors = [...]error{
	errors.New("ErrInvalidNumber"),
	errors.New("ErrDataMalformed"),
	errors.New("ErrInvalidID"),
}

// Error returns the error value for the code.
// NOTE(review): despite its name, this method returns error rather than
// string, so CustomError does not satisfy the built-in error interface; an
// out-of-range code panics with an index-out-of-range error, as before.
func (s CustomError) Error() error {
	return customErrors[s]
}
|
package main
// build vars
var (
	Version string // release version; presumably injected at build time (e.g. -ldflags -X) — confirm
	Build   string // build identifier; same assumption as Version
	mlCli   = &mlCLI{}     // CLI implementation
	config  = &CliConfig{} // global CLI configuration
)

// main seeds the global config with the build metadata and hands control to
// the CLI entry point.
func main() {
	config.init(Version, Build)
	cli()
}
|
package main
import (
"flag"
"fmt"
"log"
"net/http"
"net/url"
"os"
"strings"
"sync"
"time"
"github.com/PuerkitoBio/fetchbot"
"github.com/PuerkitoBio/goquery"
"github.com/goccy/go-json"
)
var (
	// Protect access to dup.
	dupMu sync.RWMutex
	// Duplicates table: the set of URLs already enqueued, guarded by dupMu.
	dup = make(map[string]struct{})
	// Command-line flags.
	flagSeed = flag.String("seed", "https://docs.oracle.com/en/database/oracle/oracle-database/21/lnpls/index.html https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/index.html", "seed URLs (space separated)")
	// https://docs.oracle.com/en/database/oracle/oracle-database/21/lnpls/
)
// main runs the crawler and exits non-zero on any top-level error.
func main() {
	if err := Main(); err != nil {
		log.Fatalf("ERROR: %+v", err)
	}
}
// Main sets up a fetchbot crawler over the seed URLs, extracts the
// "Description of the illustration ..." articles from matching HTML pages,
// and writes them as JSON lines to stdout, following same-host links.
func Main() error {
	flag.Parse()
	// description is one JSON output record: page path and its <pre> text.
	type description struct {
		Path, Description string
	}
	// Create the muxer.
	mux := fetchbot.NewMux()
	var q *fetchbot.Queue
	// Handle all errors the same.
	mux.HandleErrors(fetchbot.HandlerFunc(func(ctx *fetchbot.Context, res *http.Response, err error) {
		log.Printf("[ERR] %s %s - %s\n", ctx.Cmd.Method(), ctx.Cmd.URL(), err)
	}))
	// Parse the provided seeds.
	seeds := strings.Fields(*flagSeed)
	us := make([]*url.URL, 0, len(seeds))
	// BUG FIX: hosts was created with make([]string, cap(us)) — a non-zero
	// LENGTH — so the real hosts were appended after len(seeds) empty-string
	// entries. Create it with zero length and the right capacity instead.
	hosts := make([]string, 0, len(seeds))
	for _, seed := range seeds {
		u, err := url.Parse(seed)
		if err != nil {
			return fmt.Errorf("%q: %w", seed, err)
		}
		us = append(us, u)
		hosts = append(hosts, u.Host)
	}
	enc := json.NewEncoder(os.Stdout)
	for _, u := range us {
		// Handle GET requests for html responses, to parse the body and
		// enqueue all links found in it.
		mux.Response().Method("GET").Host(u.Host).ContentType("text/html").Handler(fetchbot.HandlerFunc(
			func(ctx *fetchbot.Context, res *http.Response, err error) {
				if err != nil {
					log.Println(ctx.Cmd.URL(), err)
					return
				}
				log.Println("Handling", ctx.Cmd.URL())
				// Process the body to find the links.
				doc, err := goquery.NewDocumentFromReader(res.Body)
				if err != nil {
					log.Printf("[ERR] %s %s - %s\n", ctx.Cmd.Method(), ctx.Cmd.URL(), err)
					return
				}
				// Target pages look like:
				//   <body><article><header>
				//     <h1>Description of the illustration ...</h1>
				//   </header><div><pre>...
				doc.Find("body>article").Each(func(i int, s *goquery.Selection) {
					if strings.HasPrefix(s.Find("header>h1").Text(), "Description ") {
						desc := description{
							Path:        ctx.Cmd.URL().Path,
							Description: s.Find("div>pre").Text(),
						}
						if desc.Description == "" {
							return
						}
						if err := enc.Encode(desc); err != nil {
							// Output is broken; stop the whole crawl.
							log.Println("ERROR:", err)
							_ = q.Cancel()
						}
					}
				})
				// Enqueue all links found on the page.
				log.Println("enqueue", ctx.Cmd.URL())
				enqueueLinks(ctx, hosts, doc)
			}))
	}
	// Create the Fetcher, handle the logging first, then dispatch to the Muxer.
	f := fetchbot.New(mux)
	f.CrawlDelay = 100 * time.Millisecond
	f.AutoClose = true
	log.Printf("Start")
	// Start processing.
	q = f.Start()
	// Enqueue the seeds, pre-marking each as seen in the dup map.
	for _, u := range us {
		seed := u.String()
		dup[seed] = struct{}{}
		_, err := q.SendStringGet(seed)
		if err != nil {
			log.Printf("[ERR] GET %s - %s\n", seed, err)
		}
	}
	q.Block()
	return q.Close()
}
// enqueueLinks extracts every href from the document, resolves it against the
// current page URL, and enqueues a GET for each http(s) link whose host is in
// matchHosts (an empty matchHosts allows every host). Fragments are stripped,
// and the dup set — guarded by dupMu — prevents enqueueing a URL twice.
func enqueueLinks(ctx *fetchbot.Context, matchHosts []string, doc *goquery.Document) {
	doc.Find("a[href]").Each(func(i int, s *goquery.Selection) {
		val, _ := s.Attr("href")
		// Resolve the (possibly relative) address against the page URL.
		u, err := ctx.Cmd.URL().Parse(val)
		if err != nil {
			log.Printf("error: resolve URL %s - %s\n", val, err)
			return
		}
		if !(u.Scheme == "http" || u.Scheme == "https") {
			return
		}
		// Host filter: vacuously true when matchHosts is empty.
		ok := len(matchHosts) == 0
		for _, host := range matchHosts {
			if ok = host == u.Host; ok {
				break
			}
		}
		if !ok {
			return
		}
		// Drop the fragment so URLs differing only by anchor dedupe together.
		u.Fragment, u.RawFragment = "", ""
		k := u.String()
		dupMu.RLock()
		_, ok = dup[k]
		dupMu.RUnlock()
		if !ok {
			// Re-check under the write lock: another goroutine may have
			// inserted k between the RUnlock and the Lock.
			dupMu.Lock()
			if _, ok = dup[k]; !ok {
				dup[k] = struct{}{}
				// Best effort: enqueue errors are deliberately ignored.
				_, _ = ctx.Q.SendStringGet(u.String())
			}
			dupMu.Unlock()
		}
	})
}
|
package _const
// Validation rule names shared across the package.
const (
	AlphaNum = "alphaNum" // letters and digits
	Alpha    = "alpha"    // letters only
	Number   = "number"   // digits only
)
|
/*
You are given a number n. Determine whether n has exactly 3 divisors or not.
Examples
isExactlyThree(4) ➞ true
// 4 has only 3 divisors: 1, 2 and 4
isExactlyThree(12) ➞ false
// 12 has 6 divisors: 1, 2, 3, 4, 6, 12
isExactlyThree(25) ➞ true
// 25 has only 3 divisors: 1, 5, 25
Notes
1 ≤ n ≤ 10^12
*/
package main
// main exercises three() against known cases, including large prime squares
// such as 999966000289 (= 999983^2). Each failing case panics via assert.
func main() {
	assert(three(0) == false)
	assert(three(4) == true)
	assert(three(12) == false)
	assert(three(25) == true)
	assert(three(121) == true)
	assert(three(48) == false)
	assert(three(1) == false)
	assert(three(81) == false)
	assert(three(1521) == false)
	assert(three(225) == false)
	assert(three(27550356289) == true)
	assert(three(25235235235) == false)
	assert(three(10) == false)
	assert(three(64) == false)
	assert(three(9) == true)
	assert(three(144) == false)
	assert(three(3) == false)
	assert(three(2) == false)
	assert(three(42351351) == false)
	assert(three(999966000289) == true)
	assert(three(20152357681) == true)
	assert(three(531625249) == true)
	assert(three(264306808866) == false)
	assert(three(975179493674) == false)
	assert(three(49) == true)
	assert(three(165983) == false)
}
// assert panics when its argument is false; a minimal test harness.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// three reports whether n has exactly 3 divisors. Only prime squares p^2
// qualify (divisors 1, p, p^2).
func three(n uint64) bool {
	return divisors(n) == 3
}

// divisors counts the divisors of n.
// https://www2.math.upenn.edu/~deturck/m170/wk2/numdivisors.html
// To calculate how many divisors a number has, take all the exponents in the
// prime factorization, add 1 to each, and then multiply these
// "exponents + 1"s together.
func divisors(n uint64) uint64 {
	p := factor(n)
	v := 1
	// factor emits primes in increasing order, so equal primes are adjacent;
	// each run's length is that prime's exponent.
	for i := 0; i < len(p); {
		j := i + 1
		for ; j < len(p); j++ {
			if p[i] != p[j] {
				break
			}
		}
		v *= (j - i + 1)
		i = j
	}
	return uint64(v)
}

// factor returns the prime factorization of n (with multiplicity) by trial
// division, for n up to ~10^12 per the problem constraints.
// BUG FIX: trial division now stops at sqrt(n) instead of n — any remainder
// greater than 1 after the loop is itself prime — turning the worst case
// (n prime) from O(n) into O(sqrt(n)). Also, factor(1) previously returned
// [1], which made divisors(1) report 2; 1 has no prime factors, so an empty
// result is returned and divisors(1) is now correctly 1.
func factor(n uint64) (f []uint64) {
	for i := uint64(2); i*i <= n; i++ {
		for n%i == 0 {
			n /= i
			f = append(f, i)
		}
	}
	if n > 1 {
		f = append(f, n)
	}
	return
}
|
package add
import (
"github.com/devspace-cloud/devspace/cmd/flags"
"github.com/devspace-cloud/devspace/pkg/util/factory"
"github.com/spf13/cobra"
)
// NewAddCmd creates the "devspace add" cobra command and wires up its
// subcommands (sync, provider, port, image, deployment).
func NewAddCmd(f factory.Factory, globalFlags *flags.GlobalFlags) *cobra.Command {
	addCmd := &cobra.Command{
		Use:   "add",
		Short: "Convenience command: adds something to devspace.yaml",
		Long: `
#######################################################
#################### devspace add #####################
#######################################################
Adds config sections to devspace.yaml
`,
		Args: cobra.NoArgs,
	}
	// Register the individual "add <x>" subcommands.
	addCmd.AddCommand(newSyncCmd(f, globalFlags))
	addCmd.AddCommand(newProviderCmd(f))
	addCmd.AddCommand(newPortCmd(f, globalFlags))
	addCmd.AddCommand(newImageCmd(f, globalFlags))
	addCmd.AddCommand(newDeploymentCmd(f, globalFlags))
	return addCmd
}
|
/*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package option
import (
"github.com/Tencent/bk-bcs/bcs-common/common/conf"
)
// ControllerOption holds the runtime options for the controller.
type ControllerOption struct {
	// Address address for server
	Address string
	// PodIPs contains ipv4 and ipv6 address get from status.podIPs
	PodIPs []string
	// Port port for server
	Port int
	// MetricPort port for metric server
	MetricPort int
	// LogConfig for blog (embedded logging configuration)
	conf.LogConfig
	// KubernetesQPS the qps of k8s client request
	KubernetesQPS int
	// KubernetesBurst the burst of k8s client request
	KubernetesBurst int
}
|
package aws
import (
"context"
"sync"
awssdk "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/pkg/errors"
typesaws "github.com/openshift/installer/pkg/types/aws"
)
// Metadata holds additional metadata for InstallConfig resources that
// does not need to be user-supplied (e.g. because it can be retrieved
// from external APIs).
//
// The unexported fields are lazily populated caches; mutex must be held
// while reading or writing them.
type Metadata struct {
	session           *session.Session // cached AWS session, created on first use
	availabilityZones []string         // cached AZ names for Region
	edgeZones         []string         // cached Local Zone names for Region
	privateSubnets    Subnets
	publicSubnets     Subnets
	edgeSubnets       Subnets
	vpc               string
	instanceTypes     map[string]InstanceType

	Region   string                     `json:"region,omitempty"`
	Subnets  []string                   `json:"subnets,omitempty"`
	Services []typesaws.ServiceEndpoint `json:"services,omitempty"`

	// mutex guards the lazily-populated fields above.
	mutex sync.Mutex
}
// NewMetadata initializes a new Metadata object with the user-supplied
// region, subnets, and service endpoints; cached fields start empty.
func NewMetadata(region string, subnets []string, services []typesaws.ServiceEndpoint) *Metadata {
	return &Metadata{Region: region, Subnets: subnets, Services: services}
}
// Session holds an AWS session which can be used for AWS API calls
// during asset generation. The session is created lazily on first use and
// cached; the mutex serializes creation with the other cached lookups.
func (m *Metadata) Session(ctx context.Context) (*session.Session, error) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	return m.unlockedSession(ctx)
}
// unlockedSession returns the cached AWS session, creating it on first
// call. Callers must hold m.mutex.
func (m *Metadata) unlockedSession(ctx context.Context) (*session.Session, error) {
	if m.session != nil {
		return m.session, nil
	}
	ssn, err := GetSessionWithOptions(WithRegion(m.Region), WithServiceEndpoints(m.Region, m.Services))
	if err != nil {
		return nil, errors.Wrap(err, "creating AWS session")
	}
	m.session = ssn
	return m.session, nil
}
// AvailabilityZones retrieves a list of availability zones for the configured region.
// The result is cached after the first successful lookup.
func (m *Metadata) AvailabilityZones(ctx context.Context) ([]string, error) {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	if len(m.availabilityZones) > 0 {
		return m.availabilityZones, nil
	}
	ssn, err := m.unlockedSession(ctx)
	if err != nil {
		return nil, err
	}
	zones, err := availabilityZones(ctx, ssn, m.Region)
	if err != nil {
		return nil, errors.Wrap(err, "error retrieving Availability Zones")
	}
	m.availabilityZones = zones
	return m.availabilityZones, nil
}
// EdgeZones retrieves a list of Local zones for the configured region.
// The result is cached after the first successful lookup.
func (m *Metadata) EdgeZones(ctx context.Context) ([]string, error) {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	if len(m.edgeZones) > 0 {
		return m.edgeZones, nil
	}
	ssn, err := m.unlockedSession(ctx)
	if err != nil {
		return nil, err
	}
	zones, err := localZones(ctx, ssn, m.Region)
	if err != nil {
		return nil, errors.Wrap(err, "getting Local Zones")
	}
	m.edgeZones = zones
	return m.edgeZones, nil
}
// EdgeSubnets retrieves subnet metadata indexed by subnet ID, for
// subnets that the cloud-provider logic considers to be edge
// (i.e. Local Zone).
func (m *Metadata) EdgeSubnets(ctx context.Context) (Subnets, error) {
	if err := m.populateSubnets(ctx); err != nil {
		return nil, errors.Wrap(err, "error retrieving Edge Subnets")
	}
	return m.edgeSubnets, nil
}
// SetZoneAttributes retrieves AWS Zone attributes and update required fields in zones.
// Zones missing from the map are created; attributes already set on an
// existing entry are left untouched.
func (m *Metadata) SetZoneAttributes(ctx context.Context, zoneNames []string, zones Zones) error {
	sess, err := m.Session(ctx)
	if err != nil {
		return errors.Wrap(err, "unable to get aws session to populate zone details")
	}
	azs, err := describeFilteredZones(ctx, sess, m.Region, zoneNames)
	if err != nil {
		return errors.Wrap(err, "unable to filter zones")
	}
	for _, az := range azs {
		name := awssdk.StringValue(az.ZoneName)
		zone, ok := zones[name]
		if !ok {
			zone = &Zone{Name: name}
			zones[name] = zone
		}
		if zone.GroupName == "" {
			zone.GroupName = awssdk.StringValue(az.GroupName)
		}
		if zone.Type == "" {
			zone.Type = awssdk.StringValue(az.ZoneType)
		}
		if az.ParentZoneName != nil {
			zone.ParentZoneName = awssdk.StringValue(az.ParentZoneName)
		}
	}
	return nil
}
// AllZones return all the zones and it's attributes available on the region.
func (m *Metadata) AllZones(ctx context.Context) (Zones, error) {
	sess, err := m.Session(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "unable to get aws session to populate zone details")
	}
	azs, err := describeAvailabilityZones(ctx, sess, m.Region, []string{})
	if err != nil {
		return nil, errors.Wrap(err, "unable to gather availability zones")
	}
	result := make(Zones, len(azs))
	for _, az := range azs {
		name := awssdk.StringValue(az.ZoneName)
		zone := &Zone{
			Name:      name,
			GroupName: awssdk.StringValue(az.GroupName),
			Type:      awssdk.StringValue(az.ZoneType),
		}
		if az.ParentZoneName != nil {
			zone.ParentZoneName = awssdk.StringValue(az.ParentZoneName)
		}
		result[name] = zone
	}
	return result, nil
}
// PrivateSubnets retrieves subnet metadata indexed by subnet ID, for
// subnets that the cloud-provider logic considers to be private
// (i.e. not public).
func (m *Metadata) PrivateSubnets(ctx context.Context) (Subnets, error) {
	if err := m.populateSubnets(ctx); err != nil {
		return nil, errors.Wrap(err, "error retrieving Private Subnets")
	}
	return m.privateSubnets, nil
}
// PublicSubnets retrieves subnet metadata indexed by subnet ID, for
// subnets that the cloud-provider logic considers to be public
// (e.g. with suitable routing for hosting public load balancers).
func (m *Metadata) PublicSubnets(ctx context.Context) (Subnets, error) {
	if err := m.populateSubnets(ctx); err != nil {
		return nil, errors.Wrap(err, "error retrieving Public Subnets")
	}
	return m.publicSubnets, nil
}
// VPC retrieves the VPC ID containing PublicSubnets and PrivateSubnets.
func (m *Metadata) VPC(ctx context.Context) (string, error) {
	if err := m.populateSubnets(ctx); err != nil {
		return "", errors.Wrap(err, "error retrieving VPC")
	}
	return m.vpc, nil
}
// populateSubnets fills the vpc/private/public/edge subnet caches from the
// configured subnet IDs. It is a no-op once any cache field is populated.
//
// Fix: the previous version assigned the (possibly partial) result of
// subnets() into the cache fields even when subnets() returned an error,
// so a later call could see the "already populated" guard satisfied and
// never retry. Now the cache is only written on success.
func (m *Metadata) populateSubnets(ctx context.Context) error {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if len(m.Subnets) == 0 {
		return errors.New("no subnets configured")
	}
	if m.vpc != "" || len(m.privateSubnets) > 0 || len(m.publicSubnets) > 0 || len(m.edgeSubnets) > 0 {
		// Call to populate subnets has already happened
		return nil
	}
	session, err := m.unlockedSession(ctx)
	if err != nil {
		return err
	}
	sb, err := subnets(ctx, session, m.Region, m.Subnets)
	if err != nil {
		return err
	}
	m.vpc = sb.VPC
	m.privateSubnets = sb.Private
	m.publicSubnets = sb.Public
	m.edgeSubnets = sb.Edge
	return nil
}
// InstanceTypes retrieves instance type metadata indexed by InstanceType for the configured region.
// The result is cached after the first successful lookup.
func (m *Metadata) InstanceTypes(ctx context.Context) (map[string]InstanceType, error) {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	if len(m.instanceTypes) > 0 {
		return m.instanceTypes, nil
	}
	ssn, err := m.unlockedSession(ctx)
	if err != nil {
		return nil, err
	}
	types, err := instanceTypes(ctx, ssn, m.Region)
	if err != nil {
		return nil, errors.Wrap(err, "error listing instance types")
	}
	m.instanceTypes = types
	return m.instanceTypes, nil
}
|
package httputil
import "strings"
// NormalizeBase returns base with exactly one leading and one trailing
// slash added when missing; the empty string passes through untouched.
func NormalizeBase(base string) string {
	if len(base) == 0 {
		return ""
	}
	normalized := base
	if !strings.HasPrefix(normalized, "/") {
		normalized = "/" + normalized
	}
	if strings.HasSuffix(normalized, "/") {
		return normalized
	}
	return normalized + "/"
}
// TrimTrailingSlash removes a single trailing slash from base, if any.
func TrimTrailingSlash(base string) string {
	if strings.HasSuffix(base, "/") {
		return base[:len(base)-1]
	}
	return base
}
|
package stack
import "testing"
// TestBrowser walks a Browser through push/back/forward navigation,
// logging the state after each step.
func TestBrowser(t *testing.T) {
	b := NewBrowser()
	b.Push("www.qq.com")
	t.Log(b)
	b.Push("www.baidu.com")
	b.Push("www.sina.com")
	t.Log(b)
	b.Back()
	t.Log(b)
	b.Forward()
	t.Log(b)
}
|
package main
import (
"server/controllers"
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
)
// main wires up the HTTP routes with permissive CORS and starts the gin
// server on the default address.
func main() {
	r := gin.Default()
	r.Use(cors.Default())

	// Kitchen administration.
	r.POST("/admin/register", controllers.KitchenRegister)
	r.POST("/admin/signin", controllers.KitchenSignin)

	// Customer accounts.
	r.POST("/signup", controllers.HandleSignup)
	r.POST("/signin", controllers.HandleSignin)

	// Food and orders.
	r.POST("/add/food", controllers.AddFood)
	r.GET("/get/food", controllers.GetFood)
	r.POST("/post/order", controllers.PostOrder)
	r.GET("/get/order/", controllers.GetOrder)
	r.POST("/user/address", controllers.PostAddress)

	r.Run()
}
|
package main
import (
"log"
"time"
"github.com/disq/werify/cmd/werifyd/pool"
t "github.com/disq/werify/cmd/werifyd/types"
)
// healthCheckInterval is the delay between periodic health-check rounds.
const healthCheckInterval = 60 * time.Second

// healthchecker loops until the server context is cancelled, running a
// health-check round either when one is forced via s.forceHealthcheck or
// after healthCheckInterval elapses with no activity.
func (s *Server) healthchecker() {
	for {
		select {
		case <-s.context.Done():
			return
		case <-s.forceHealthcheck:
			log.Println("Starting forced health checks...")
			s.runHealthcheck()
		case <-time.After(healthCheckInterval):
			s.runHealthcheck()
		}
	}
}
// runHealthcheck health-checks every known host using a pool of
// s.numWorkers workers. Hosts are fed to the pool over ch while the host
// list is read-locked; closing ch signals the workers, and p.Wait blocks
// until they have drained it.
func (s *Server) runHealthcheck() {
	ch := make(chan t.PoolData)
	p := pool.NewPool(s.context, ch)
	p.Start(s.numWorkers, func(pd t.PoolData) {
		s.healthcheck(pd.GetHost())
		//log.Printf("HC for %s: %v\n", pd.GetHost().Endpoint, err)
	})
	s.hostMu.RLock()
	defer s.hostMu.RUnlock()
	for _, h := range s.hosts {
		// Run each RPC call for each Host in a worker concurrently
		ch <- h
	}
	close(ch)
	p.Wait()
}
|
package request
import (
"context"
"fmt"
"testing"
)
// M maps names to their Upstream configuration.
// NOTE(review): not referenced anywhere in this file as far as visible —
// confirm callers before removing.
type M map[string]Upstream
// TestRequest exercises Request.Get against the enterprise-brief endpoint
// and prints the response content.
//
// Fix: a failing request previously called panic(err); tests should
// report failures through the testing API instead.
func TestRequest(t *testing.T) {
	r := NewRequest(context.Background())
	p := map[string]interface{}{
		"extend_flag": 1,
	}
	rs, err := r.Get("$api_server/internal/enterprise/getMultiCompanyBrief", p, []string{"eeqeq"})
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(rs.Content())
}
|
// Copyright 2022 Gabriel Boorse
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package centipede
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
)
// TestZebra models the classic Zebra (Einstein) puzzle as a CSP: each of
// the 25 attributes (5 colors, 5 nationalities, 5 drinks, 5 cigarette
// brands, 5 pets) is an int variable holding a house position 0..4, and
// the puzzle clues become constraints. The test then checks the solver
// finds the unique known solution.
func TestZebra(t *testing.T) {
	colors := []string{"Yellow", "Blue", "Red", "Ivory", "Green"}
	nationality := []string{"Norwegian", "Ukrainian", "Englishman", "Spaniard", "Japanese"}
	drink := []string{"Water", "Tea", "Milk", "Orange juice", "Coffee"}
	smoke := []string{"Kools", "Chesterfield", "Old Gold", "Lucky Strike", "Parliament"}
	pet := []string{"Fox", "Horse", "Snails", "Dog", "Zebra"}
	categories := [][]string{colors, nationality, drink, smoke, pet}
	// initialize variables: one int variable per attribute, domain 0..4
	vars := make(Variables[int], 0)
	fiveDomain := IntRange(0, 5)
	constraints := make(Constraints[int], 0)
	// add uniqueness constraints for each category (no two attributes of
	// the same category share a house)
	for _, category := range categories {
		categoryVars := make(VariableNames, 0)
		for _, vName := range category {
			varName := VariableName(vName)
			vari := NewVariable(varName, fiveDomain)
			vars = append(vars, vari)
			categoryVars = append(categoryVars, varName)
		}
		constraints = append(constraints, AllUnique[int](categoryVars...)...)
	}
	// intRelConstraint checks if two int variables satisfy a binary relation;
	// unassigned (Empty) variables trivially satisfy it so propagation can proceed
	intRelConstraint := func(var1 VariableName, var2 VariableName, rel func(int, int) bool) Constraint[int] {
		return Constraint[int]{Vars: VariableNames{var1, var2}, ConstraintFunction: func(variables *Variables[int]) bool {
			if variables.Find(var1).Empty || variables.Find(var2).Empty {
				return true
			}
			v1 := variables.Find(var1).Value
			v2 := variables.Find(var2).Value
			return rel(v1, v2)
		}}
	}
	// nextToConstraint checks if two int vars differ by at most one
	nextToConstraint := func(var1 VariableName, var2 VariableName) Constraint[int] {
		return intRelConstraint(var1, var2, func(v1, v2 int) bool { return v2 == v1+1 || v2 == v1-1 })
	}
	// offsetConstraint checks if int var1 plus offset equals var2
	offsetConstraint := func(var1 VariableName, var2 VariableName, offset int) Constraint[int] {
		return intRelConstraint(var1, var2, func(v1, v2 int) bool { return v2 == v1+offset })
	}
	// Known givens: milk is drunk in the middle house, the Norwegian lives
	// in the first house.
	vars.SetValue("Milk", 2)
	vars.SetValue("Norwegian", 0)
	constraints = append(constraints,
		Equals[int]("Englishman", "Red"),
		Equals[int]("Spaniard", "Dog"),
		Equals[int]("Coffee", "Green"),
		Equals[int]("Ukrainian", "Tea"),
		offsetConstraint("Ivory", "Green", 1),
		Equals[int]("Old Gold", "Snails"),
		Equals[int]("Kools", "Yellow"),
		nextToConstraint("Chesterfield", "Fox"),
		nextToConstraint("Kools", "Horse"),
		nextToConstraint("Norwegian", "Blue"),
		Equals[int]("Lucky Strike", "Orange juice"),
		Equals[int]("Japanese", "Parliament"))
	// create solver
	solver := NewBackTrackingCSPSolver(vars, constraints)
	// simplify variable domains following initial assignment
	// NOTE(review): any result of MakeArcConsistent is discarded — confirm
	// it has no error return that should be checked.
	solver.State.MakeArcConsistent(context.TODO())
	success, err := solver.Solve(context.TODO()) // run the solution
	assert.Nil(t, err)
	assert.True(t, success)
	// Collect the solved house index for every attribute and compare
	// against the puzzle's unique known solution.
	values := map[string]int{}
	for _, variable := range solver.State.Vars {
		values[string(variable.Name)] = variable.Value
	}
	assert.Equal(t, values["Yellow"], 0)
	assert.Equal(t, values["Blue"], 1)
	assert.Equal(t, values["Red"], 2)
	assert.Equal(t, values["Ivory"], 3)
	assert.Equal(t, values["Green"], 4)
	assert.Equal(t, values["Norwegian"], 0)
	assert.Equal(t, values["Ukrainian"], 1)
	assert.Equal(t, values["Englishman"], 2)
	assert.Equal(t, values["Spaniard"], 3)
	assert.Equal(t, values["Japanese"], 4)
	assert.Equal(t, values["Water"], 0)
	assert.Equal(t, values["Tea"], 1)
	assert.Equal(t, values["Milk"], 2)
	assert.Equal(t, values["Orange juice"], 3)
	assert.Equal(t, values["Coffee"], 4)
	assert.Equal(t, values["Kools"], 0)
	assert.Equal(t, values["Chesterfield"], 1)
	assert.Equal(t, values["Old Gold"], 2)
	assert.Equal(t, values["Lucky Strike"], 3)
	assert.Equal(t, values["Parliament"], 4)
	assert.Equal(t, values["Fox"], 0)
	assert.Equal(t, values["Horse"], 1)
	assert.Equal(t, values["Snails"], 2)
	assert.Equal(t, values["Dog"], 3)
	assert.Equal(t, values["Zebra"], 4)
}
|
/*
@Time : 2019-03-28 10:45
@Author : zhangjun
@File : contributors
@Description:
@Run:
*/
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
)
// GET /repos/:owner/:repo/contributors
// contributorsURL is the GitHub REST endpoint returning the contributor
// list as JSON.
var contributorsURL = "https://api.github.com/repos/childeYin/Cultivate/contributors"

// reposContributorsUrl is the human-facing contributors graph page,
// printed for reference only.
var reposContributorsUrl = "https://github.com/childeYin/Cultivate/graphs/contributors"
// User mirrors the subset of the GitHub contributor JSON payload used by
// this program. Untagged fields rely on Go's case-insensitive JSON
// field matching.
type User struct {
	Id            int
	Login         string
	HtmlUrl       string `json:"html_url"`
	AvatarUrl     string `json:"avatar_url"`
	Contributions int
}
// main fetches the contributor list from the GitHub API and prints each
// contributor's login and contribution count.
func main() {
	contributors, err := getContributorsList()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("repos Contributors url is 【%s】\n", reposContributorsUrl)
	for _, c := range contributors {
		fmt.Printf("【%s】constributors number is 【%d】\n", c.Login, c.Contributions)
	}
}
// getContributorsList fetches the contributor list from the GitHub API
// and decodes the JSON response body.
//
// Fixes: the response body is now closed via a single defer instead of
// three separate Close calls on every exit path; the unused named return
// parameter and dead commented-out debugging code are removed.
func getContributorsList() ([]User, error) {
	resp, err := http.Get(contributorsURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("failed %s", resp.Status)
	}
	var contributors []User
	if err := json.NewDecoder(resp.Body).Decode(&contributors); err != nil {
		return nil, err
	}
	return contributors, nil
}
|
package websocket
import (
"bufio"
"crypto/sha1"
"encoding/base64"
"fmt"
"github.com/pkg/errors"
"net"
"net/http"
"strings"
)
// Strict, when true, makes the handshake reject requests that fail the
// optional checks. Currently, only ignores if no |Host| was supplied in
// the header.
const Strict = false
// GUID used by every WebSocket server (as specified on the RFC).
const WS_SERVER_ID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"

// calculateServerResponseKey derives the |Sec-WebSocket-Accept| value for
// a client key, per RFC 6455 section 1.3: base64(SHA-1(key + GUID)).
func calculateServerResponseKey(key string) string {
	digest := sha1.Sum([]byte(key + WS_SERVER_ID))
	return base64.StdEncoding.EncodeToString(digest[:])
}
// Handshake handles checking and upgrading a HTTP request into a WebSocket
// connection (as defined by RFC 6455 - The WebSocket Protocol).
//
// On success res is a 101 Switching Protocols response carrying the
// |Sec-WebSocket-Accept| key; on failure retErr is non-nil and res holds
// an error status code.
//
// Fixes: errors.New(fmt.Sprintf(...)) replaced with errors.Errorf (same
// messages, same pkg/errors stack traces, less noise); else-after-return
// flattened; `make (http.Header)` gofmt'd.
func handshake(conn net.Conn, validUri string) (res http.Response, retErr error) {
	var b64ConnKey string
	// Properly set the status code text on exit
	defer func() {
		res.Status = http.StatusText(res.StatusCode)
	}()
	// Set initial response, just in case...
	res.StatusCode = http.StatusForbidden
	res.Proto = "HTTP/1.0"
	res.ProtoMajor = 1
	res.ProtoMinor = 0
	req, err := http.ReadRequest(bufio.NewReader(conn))
	if err != nil {
		retErr = errors.Wrap(err, "Failed to receive client handshake")
		return
	}
	// Echo the client's protocol version in the response.
	res.Proto = req.Proto
	res.ProtoMajor = req.ProtoMajor
	res.ProtoMinor = req.ProtoMinor
	// 4.2.1 Reading the Client's Opening Handshake
	// 1. Check that it's a HTTP/1.1 or higher GET
	// (ignore "Request-URI" but check its presence)
	if !strings.HasPrefix(req.Proto, "HTTP/") {
		retErr = errors.Errorf("Non HTTP request: '%s'", req.Proto)
		return
	} else if req.ProtoMajor == 1 && req.ProtoMinor < 1 ||
		req.ProtoMajor < 1 {
		retErr = errors.New("Non HTTP/1.1 or higher")
		return
	} else if req.Method != "GET" {
		retErr = errors.New("Non GET request")
		return
	} else if req.RequestURI == "" {
		retErr = errors.New("Missing RequestURI in request")
		return
	} else if req.RequestURI != validUri {
		retErr = errors.New("Invalid RequestURI")
		return
	}
	// 2. Check for a |Host| header with server's authority
	if host := req.Header.Get("Host"); len(host) == 0 {
		retErr = errors.New("Missing |Host| header field")
		if Strict {
			return
		}
		fmt.Printf(" Ignoring: %s\n", retErr.Error())
		retErr = nil
	}
	// 3. Check for an |Upgrade| header == "websocket"
	if want, got := "websocket", req.Header.Get("Upgrade"); want != got {
		retErr = errors.Errorf(
			"Invalid |Upgrade| header field: wanted '%s', got '%s'\n",
			want, got)
		return
	}
	// 4. Check for a |Connection| header == "Upgrade"
	if want, got := "Upgrade", req.Header.Get("Connection"); !strings.Contains(got, want) {
		retErr = errors.Errorf(
			"Invalid |Connection| header field: wanted '%s', got '%s'\n",
			want, got)
		return
	}
	// 5. Check for a |Sec-WebSocket-Key| with the base-64 key
	if b64ConnKey = req.Header.Get("Sec-WebSocket-Key"); len(b64ConnKey) == 0 {
		retErr = errors.New("Missing |Sec-WebSocket-Key| header field")
		return
	}
	// 6..10 are optional (and, therefore, ignored)
	// 4.2.2 Sending the Server's Opening Handshake
	res.Header = make(http.Header)
	// 1. Connection isn't HTTPS, so ignore it
	// 2. Stuff that the server can do... ignore (as it isn't a MUST)
	// 3. "The server MAY...": NOPE
	// 4. Maybe later
	// 5. Set fields required to accept the connection
	// 5.1 Set status as Switching Protocols
	res.StatusCode = http.StatusSwitchingProtocols
	// 5.2 Set |Upgrade| = "websocket"
	res.Header.Set("Upgrade", "websocket")
	// 5.3 Set |Connection| = "Upgrade"
	res.Header.Set("Connection", "Upgrade")
	// 5.4 Set |Sec-WebSocket-Accept| with the base_64(SHA_1(key+UIGD))
	serverKey := calculateServerResponseKey(b64ConnKey)
	res.Header.Set("Sec-WebSocket-Accept", serverKey)
	// 5.5 Later
	// 5.6 Later
	return
}
// Updates a HTTP Response to have an response code of Service Unavailable.
func updateResponseUnavailable(res *http.Response) {
res.StatusCode = http.StatusServiceUnavailable
res.Status = http.StatusText(res.StatusCode)
// Remove any previously set header
res.Header = nil
}
|
package set
import (
"fmt"
"sync"
)
// SafeInt64Set is a set of int64 values safe for concurrent use; the
// embedded RWMutex guards the backing map M.
type SafeInt64Set struct {
	sync.RWMutex
	M map[int64]struct{}
}
// NewSafeInt64Set returns an empty, ready-to-use set.
func NewSafeInt64Set() *SafeInt64Set {
	s := &SafeInt64Set{
		M: map[int64]struct{}{},
	}
	return s
}
// String renders the set contents as a slice, e.g. "[1 2 3]". Map
// iteration order is random, so the element order is not stable.
//
// Fix: receiver renamed from the non-idiomatic `this` to `s`.
func (s *SafeInt64Set) String() string {
	return fmt.Sprint(s.Slice())
}
// Add inserts item into the set and returns the set for chaining.
//
// Fixes: receiver renamed from `this` to `s`; the Contains pre-check is
// removed — it acquired a second lock per call and bought nothing, since
// inserting an existing key into a map is already a no-op.
func (s *SafeInt64Set) Add(item int64) *SafeInt64Set {
	s.Lock()
	s.M[item] = struct{}{}
	s.Unlock()
	return s
}
// Contains reports whether item is in the set.
//
// Fix: receiver renamed from the non-idiomatic `this` to `s`; unlock
// moved to a defer so the lock is released even if the map read panics.
func (s *SafeInt64Set) Contains(item int64) bool {
	s.RLock()
	defer s.RUnlock()
	_, exists := s.M[item]
	return exists
}
// Adds inserts all items into the set and returns the set for chaining.
//
// Fixes: receiver renamed from `this` to `s`; the two-phase
// read-lock-prefilter + write-lock-insert dance is collapsed into a
// single write-locked loop — map inserts of existing keys are no-ops, and
// the prefilter's result could go stale between the two locks anyway.
func (s *SafeInt64Set) Adds(items []int64) *SafeInt64Set {
	if len(items) == 0 {
		return s
	}
	s.Lock()
	for _, item := range items {
		s.M[item] = struct{}{}
	}
	s.Unlock()
	return s
}
// Size returns the number of items currently in the set.
//
// Fix: receiver renamed from the non-idiomatic `this` to `s`.
func (s *SafeInt64Set) Size() int {
	s.RLock()
	defer s.RUnlock()
	return len(s.M)
}
// Clear removes all items by swapping in a fresh map.
//
// Fix: receiver renamed from the non-idiomatic `this` to `s`.
func (s *SafeInt64Set) Clear() {
	s.Lock()
	s.M = make(map[int64]struct{})
	s.Unlock()
}
// Slice returns a snapshot of the set's items in unspecified order.
//
// Fix: receiver renamed from the non-idiomatic `this` to `s`.
func (s *SafeInt64Set) Slice() []int64 {
	s.RLock()
	defer s.RUnlock()
	out := make([]int64, 0, len(s.M))
	for item := range s.M {
		out = append(out, item)
	}
	return out
}
|
package main
import (
"fmt"
"strconv"
"github.com/Cloud-Foundations/Dominator/imageserver/client"
"github.com/Cloud-Foundations/Dominator/lib/log"
)
// deleteUnreferencedObjectsSubcommand parses args ([0]=percentage,
// [1]=bytes) and asks the image server to delete unreferenced objects.
//
// Fix: args length is now validated before indexing; previously a short
// argument list panicked instead of returning an error.
func deleteUnreferencedObjectsSubcommand(args []string,
	logger log.DebugLogger) error {
	if len(args) < 2 {
		return fmt.Errorf("expected 2 arguments (percentage, bytes), got: %d", len(args))
	}
	imageSClient, _ := getClients()
	percentage, err := strconv.ParseUint(args[0], 10, 8)
	if err != nil {
		return fmt.Errorf("error parsing percentage: %s", err)
	}
	bytes, err := strconv.ParseUint(args[1], 10, 64)
	if err != nil {
		return fmt.Errorf("error parsing bytes: %s", err)
	}
	if err := client.DeleteUnreferencedObjects(imageSClient, uint8(percentage),
		bytes); err != nil {
		return fmt.Errorf("error deleting unreferenced objects: %s", err)
	}
	return nil
}
|
package models
import (
"database/sql"
"encoding/json"
"fmt"
"github.com/astaxie/beego/orm"
"github.com/devplayg/ipas-mcs/objs"
"strings"
)
// GetAllSystemConfig loads every row from sys_config.
func GetAllSystemConfig() ([]objs.SysConfig, error) {
	o := orm.NewOrm()
	var configs []objs.SysConfig
	_, err := o.Raw("select section, keyword, value_s, value_n from sys_config").QueryRows(&configs)
	return configs, err
}
// GetSystemConfig loads sys_config rows for a section, optionally
// filtered by keyword (when keyword is non-empty).
//
// Fix: bind parameters are expanded explicitly with args... instead of
// passing the whole []interface{} as a single variadic element and
// relying on the ORM to flatten it.
func GetSystemConfig(section, keyword string) ([]objs.SysConfig, error) {
	query := "select section, keyword, value_s, value_n from sys_config where section = ?"
	args := []interface{}{section}
	if len(keyword) > 0 {
		query += " and keyword = ?"
		args = append(args, keyword)
	}
	var rows []objs.SysConfig
	o := orm.NewOrm()
	_, err := o.Raw(query, args...).QueryRows(&rows)
	return rows, err
}
// UpdateRow updates the columns named in data on the row of tableName
// whose pkColumn equals pkValue.
//
// Fix: bind parameters are expanded explicitly with args... instead of
// passing the whole []interface{} as a single variadic element.
// NOTE(review): tableName, pkColumn and the keys of data are interpolated
// into the SQL string — they must never come from untrusted input.
func UpdateRow(tableName string, pkColumn string, pkValue interface{}, data map[string]interface{}) (sql.Result, error) {
	setPhrases := make([]string, 0, len(data))
	args := make([]interface{}, 0, len(data)+1)
	for k, v := range data {
		setPhrases = append(setPhrases, fmt.Sprintf("%s = ?", k))
		args = append(args, v)
	}
	args = append(args, pkValue)
	query := fmt.Sprintf("update %s set %s where %s = ?", tableName, strings.Join(setPhrases, ","), pkColumn)
	o := orm.NewOrm()
	return o.Raw(query, args...).Exec()
}
// RemoveRow deletes the row of tableName whose pkColumn equals pkValue.
func RemoveRow(tableName string, pkColumn string, pkValue interface{}) (sql.Result, error) {
	o := orm.NewOrm()
	stmt := fmt.Sprintf("delete from %s where %s = ?", tableName, pkColumn)
	return o.Raw(stmt, pkValue).Exec()
}
// Audit writes an audit log row — and, when log.Detail is set, a detail
// row — inside a single transaction.
//
// Fixes: the previous version committed via `defer o.Commit()` taken
// before the detail insert, so a failed detail insert still committed the
// parent audit row without its detail; it also ignored the error from
// o.Begin(). Now the transaction is rolled back on any insert failure and
// committed explicitly at the end.
func Audit(log *objs.AuditMsg) error {
	o := orm.NewOrm()
	if err := o.Begin(); err != nil {
		return err
	}
	var message string
	if log.Message != nil {
		m, _ := json.Marshal(log.Message)
		message = string(m)
	}
	// Basic audit row.
	query := "insert into adt_audit(member_id, category, ip, message) values(?, ?, inet_aton(?), ?)"
	rs, err := o.Raw(query, log.MemberId, log.Category, log.IP, message).Exec()
	if err != nil {
		o.Rollback()
		return err
	}
	// Detailed audit row, keyed by the parent row's auto-increment id.
	if log.Detail != nil {
		lastInsertId, _ := rs.LastInsertId()
		d, _ := json.Marshal(log.Detail)
		query := "insert into adt_audit_detail(audit_id, detail) values(?, ?)"
		if _, err2 := o.Raw(query, lastInsertId, string(d)).Exec(); err2 != nil {
			o.Rollback()
			return err2
		}
	}
	return o.Commit()
}
func GetServer(s objs.Server) (*objs.Server, error ){
query := "select * from ast_server where true"
var where string
args := make([]interface{}, 0)
if s.ID > 0 {
where += " and server_id = ?"
args = append(args, s.ID)
}
if s.Category1 > 0 {
where += " and category1 = ?"
args = append(args, s.Category1)
}
if s.Category2 > 0 {
where += " and category2 = ?"
args = append(args, s.Category2)
}
var server objs.Server
o := orm.NewOrm()
err := o.Raw(query + where, args).QueryRow(&server)
return &server, err
} |
package camt
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document04400103 is the XML document wrapper for the
// camt.044.001.03 message (FundConfirmedCashForecastReportCancellationV03).
type Document04400103 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:camt.044.001.03 Document"`
	Message *FundConfirmedCashForecastReportCancellationV03 `xml:"FndConfdCshFcstRptCxl"`
}
// AddMessage allocates the message payload and returns it.
func (d *Document04400103) AddMessage() *FundConfirmedCashForecastReportCancellationV03 {
	msg := &FundConfirmedCashForecastReportCancellationV03{}
	d.Message = msg
	return msg
}
// Scope
// A report provider, such as a transfer agent, sends the FundConfirmedCashForecastReportCancellation message to the report user, such as an investment manager or pricing agent, to cancel a previously sent FundConfirmedCashForecastReport message.
// Usage
// The FundConfirmedCashForecastReportCancellation message is used to cancel an entire FundConfirmedCashForecastReport message that was previously sent by the report provider. This message must contain the message identification of the message being cancelled.
// This message may also contain details of the message to be cancelled, but this is not recommended.
type FundConfirmedCashForecastReportCancellationV03 struct {
	// Identifies the message.
	MessageIdentification *iso20022.MessageIdentification1 `xml:"MsgId"`
	// Collective reference identifying a set of messages.
	PoolReference *iso20022.AdditionalReference3 `xml:"PoolRef,omitempty"`
	// Reference to a linked message that was previously sent.
	PreviousReference *iso20022.AdditionalReference3 `xml:"PrvsRef,omitempty"`
	// Reference to a linked message that was previously received.
	RelatedReference []*iso20022.AdditionalReference3 `xml:"RltdRef,omitempty"`
	// Pagination of the message.
	MessagePagination *iso20022.Pagination `xml:"MsgPgntn"`
	// The FundDetailedConfirmedCashForecastReport to be cancelled.
	CashForecastReportToBeCancelled *iso20022.FundConfirmedCashForecastReport3 `xml:"CshFcstRptToBeCanc,omitempty"`
}
// AddMessageIdentification allocates the MessageIdentification field and returns it.
func (f *FundConfirmedCashForecastReportCancellationV03) AddMessageIdentification() *iso20022.MessageIdentification1 {
	v := &iso20022.MessageIdentification1{}
	f.MessageIdentification = v
	return v
}
// AddPoolReference allocates the PoolReference field and returns it.
func (f *FundConfirmedCashForecastReportCancellationV03) AddPoolReference() *iso20022.AdditionalReference3 {
	v := &iso20022.AdditionalReference3{}
	f.PoolReference = v
	return v
}
// AddPreviousReference allocates the PreviousReference field and returns it.
func (f *FundConfirmedCashForecastReportCancellationV03) AddPreviousReference() *iso20022.AdditionalReference3 {
	v := &iso20022.AdditionalReference3{}
	f.PreviousReference = v
	return v
}
// AddRelatedReference appends a new entry to RelatedReference and returns it.
func (f *FundConfirmedCashForecastReportCancellationV03) AddRelatedReference() *iso20022.AdditionalReference3 {
	v := &iso20022.AdditionalReference3{}
	f.RelatedReference = append(f.RelatedReference, v)
	return v
}
// AddMessagePagination allocates the MessagePagination field and returns it.
func (f *FundConfirmedCashForecastReportCancellationV03) AddMessagePagination() *iso20022.Pagination {
	v := &iso20022.Pagination{}
	f.MessagePagination = v
	return v
}
// AddCashForecastReportToBeCancelled allocates the CashForecastReportToBeCancelled field and returns it.
func (f *FundConfirmedCashForecastReportCancellationV03) AddCashForecastReportToBeCancelled() *iso20022.FundConfirmedCashForecastReport3 {
	v := &iso20022.FundConfirmedCashForecastReport3{}
	f.CashForecastReportToBeCancelled = v
	return v
}
|
package rest
import (
"fmt"
"github.com/jinmukeji/jiujiantang-services/pkg/rest"
proto "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/user/v1"
"github.com/kataras/iris/v12"
)
const (
	// SendViaPhone sends the verification code via phone number.
	SendViaPhone = "phone"
	// SendViaEmail sends the verification code via email.
	SendViaEmail = "email"
)
// GetLatestVerificationCode is one recipient entry in a request for the
// latest verification codes.
type GetLatestVerificationCode struct {
	SendVia    string `json:"send_via"`    // delivery channel: "phone" or "email"
	Email      string `json:"email"`       // email address
	Phone      string `json:"phone"`       // phone number
	NationCode string `json:"nation_code"` // international dialing code
}
// LatestVerificationCodeBody is the request body for fetching the latest
// verification codes.
type LatestVerificationCodeBody struct {
	SendInformation []GetLatestVerificationCode `json:"send_information"` // recipients to look up
}
// LatestVerificationCode is one latest-verification-code result entry.
type LatestVerificationCode struct {
	Email            string `json:"email"`             // email address
	Phone            string `json:"phone"`             // phone number
	NationCode       string `json:"nation_code"`       // international dialing code
	VerificationCode string `json:"verification_code"` // the latest code issued
}
// GetLatestVerificationCodes handles the REST request for the latest
// verification codes: it validates the request body, maps each recipient
// to the proto representation, calls the RPC service, and writes the
// mapped results back as JSON.
//
// Fixes: the local slice `LatestVerificationCodes` was capitalized like
// an exported identifier, and error variables carried verbose names
// (errmapRestSendViaToProto); both renamed per Go convention. Comments
// translated to English.
func (h *webHandler) GetLatestVerificationCodes(ctx iris.Context) {
	var body LatestVerificationCodeBody
	if err := ctx.ReadJSON(&body); err != nil {
		writeError(ctx, wrapError(ErrParsingRequestFailed, "", err), false)
		return
	}
	req := new(proto.GetLatestVerificationCodesRequest)
	sendTo := make([]*proto.SingleGetLatestVerificationCode, len(body.SendInformation))
	for idx, item := range body.SendInformation {
		sendVia, err := mapRestSendViaToProto(item.SendVia)
		if err != nil {
			writeError(ctx, wrapError(ErrInvalidValue, "", err), false)
			return
		}
		sendTo[idx] = &proto.SingleGetLatestVerificationCode{
			SendVia:    sendVia,
			Email:      item.Email,
			Phone:      item.Phone,
			NationCode: item.NationCode,
		}
	}
	req.SendTo = sendTo
	resp, err := h.rpcSvc.GetLatestVerificationCodes(
		newRPCContext(ctx), req,
	)
	if err != nil {
		writeRpcInternalError(ctx, err, false)
		return
	}
	codes := make([]LatestVerificationCode, len(resp.LatestVerificationCodes))
	for idx, item := range resp.LatestVerificationCodes {
		codes[idx] = LatestVerificationCode{
			Email:            item.Email,
			Phone:            item.Phone,
			NationCode:       item.NationCode,
			VerificationCode: item.VerificationCode,
		}
	}
	rest.WriteOkJSON(ctx, codes)
}
// mapRestSendViaToProto converts the REST string send_via value into its
// proto enum counterpart.
func mapRestSendViaToProto(sendType string) (proto.SendVia, error) {
	switch sendType {
	case SendViaPhone:
		return proto.SendVia_SEND_VIA_PHONE_SEND_VIA, nil
	case SendViaEmail:
		// NOTE(review): email maps to the USERNAME enum value — confirm
		// this is intended in the proto definition.
		return proto.SendVia_SEND_VIA_USERNAME_SEND_VIA, nil
	default:
		return proto.SendVia_SEND_VIA_INVALID, fmt.Errorf("invalid string send via %s", sendType)
	}
}
|
// Widget used to display the details of a single item.
//
// @author TSS
package gui
import (
"fmt"
"time"
"github.com/jroimartin/gocui"
"github.com/mashmb/1pass/1pass-core/core/domain"
)
const (
	// detailsHelp is the key-binding cheat sheet shown while the details
	// widget has focus. Raw string literal: its line breaks are rendered
	// as-is.
	detailsHelp string = `Scroll up/down: k/j
Scroll left/right: h/l
Cover item: TAB`
)
// detailsWidget renders the full details of a single item.
type detailsWidget struct {
	name   string // gocui view name
	title  string // view title
	parent string // name of the view to return focus to
	// lockHandler is invoked to lock the vault (bound in Keybindings).
	lockHandler func(ui *gocui.Gui, view *gocui.View) error
	helpWidget  *helpWidget  // shared help bar, updated when focus changes
	item        *domain.Item // currently displayed item; nil when none selected
}
// newDetailsWidget builds a details widget attached to the given parent view.
func newDetailsWidget(parent string, helpWidget *helpWidget, lockHandler func(ui *gocui.Gui, view *gocui.View) error) *detailsWidget {
	w := &detailsWidget{
		name:        "detailsWidget",
		title:       "Details",
		parent:      parent,
		lockHandler: lockHandler,
		helpWidget:  helpWidget,
	}
	return w
}
// resetOrigin scrolls view back to origin (0, 0).
//
// Fix: the previous implementation reset each axis separately from the
// origin values read up front — when both ox > 0 and oy > 0, the second
// SetOrigin(ox, 0) call restored the stale x offset instead of clearing
// it. Both axes are now reset in a single SetOrigin(0, 0) call.
func (dw *detailsWidget) resetOrigin(view *gocui.View) error {
	if view == nil {
		return nil
	}
	ox, oy := view.Origin()
	if ox > 0 || oy > 0 {
		if err := view.SetOrigin(0, 0); err != nil {
			return err
		}
	}
	return nil
}
// scrollDown moves the view origin one row down.
func (dw *detailsWidget) scrollDown(ui *gocui.Gui, view *gocui.View) error {
	if view == nil {
		return nil
	}
	ox, oy := view.Origin()
	return view.SetOrigin(ox, oy+1)
}
// scrollLeft moves the view origin one column left, stopping at column 0.
func (dw *detailsWidget) scrollLeft(ui *gocui.Gui, view *gocui.View) error {
	if view == nil {
		return nil
	}
	ox, oy := view.Origin()
	if ox > 0 {
		return view.SetOrigin(ox-1, oy)
	}
	return nil
}
// scrollRight moves the view origin one column right.
func (dw *detailsWidget) scrollRight(ui *gocui.Gui, view *gocui.View) error {
	if view == nil {
		return nil
	}
	ox, oy := view.Origin()
	return view.SetOrigin(ox+1, oy)
}
// scrollUp moves the view origin one row up, stopping at row 0.
func (dw *detailsWidget) scrollUp(ui *gocui.Gui, view *gocui.View) error {
	if view == nil {
		return nil
	}
	ox, oy := view.Origin()
	if oy > 0 {
		return view.SetOrigin(ox, oy-1)
	}
	return nil
}
// toggleDetails re-renders the widget in masked "overview" mode, hands
// focus back to the parent view, and restores the items help text.
func (dw *detailsWidget) toggleDetails(ui *gocui.Gui, view *gocui.View) error {
	if err := dw.update(true, ui); err != nil {
		return err
	}
	if _, err := ui.SetCurrentView(dw.parent); err != nil {
		return err
	}
	dw.helpWidget.help = itemsHelp
	if err := dw.helpWidget.update(ui); err != nil {
		return err
	}
	return nil
}
// update redraws the details view for dw.item. When overview is true,
// field values and notes are masked with "**********"; otherwise they are
// shown in clear text. A nil item leaves the view empty.
//
// Improvements: fmt.Fprint(view, fmt.Sprintf(...)) replaced with fmt.Fprintf,
// and the two nearly-identical overview/full rendering branches are folded
// into renderSections/renderNotes with a mask flag (output is unchanged).
func (dw *detailsWidget) update(overview bool, ui *gocui.Gui) error {
	view, err := ui.View(dw.name)
	if err != nil {
		return err
	}
	view.Clear()
	if err := dw.resetOrigin(view); err != nil {
		return err
	}
	if dw.item == nil {
		return nil
	}
	// Header: category, title, timestamps, trash flag and optional URL.
	fmt.Fprintf(view, "%v\n", dw.item.Category.GetName())
	fmt.Fprint(view, "------------------------------\n")
	fmt.Fprintf(view, "%v\n\n", dw.item.Title)
	updated := time.Unix(dw.item.Updated, 0).Format("2006-01-02 15:04:05")
	created := time.Unix(dw.item.Created, 0).Format("2006-01-02 15:04:05")
	fmt.Fprintf(view, "Updated: %v\nCreated: %v\nTrashed: %v\n", updated, created, dw.item.Trashed)
	if dw.item.Url != "" {
		fmt.Fprintf(view, "URL: %v\n", dw.item.Url)
	}
	dw.renderSections(view, overview)
	dw.renderNotes(view, overview)
	return nil
}

// renderSections writes every section and its fields; when mask is true the
// field values are replaced with "**********".
func (dw *detailsWidget) renderSections(view *gocui.View, mask bool) {
	if dw.item.Sections == nil {
		return
	}
	for _, section := range dw.item.Sections {
		fmt.Fprint(view, "\n")
		if section.Title != "" {
			fmt.Fprintf(view, "%v\n", section.Title)
		}
		fmt.Fprint(view, "------------------------------\n")
		if section.Fields != nil {
			for _, field := range section.Fields {
				value := field.Value
				if mask {
					value = "**********"
				}
				fmt.Fprintf(view, "%v: %v\n", field.Name, value)
			}
		}
	}
	fmt.Fprint(view, "\n")
}

// renderNotes writes the notes block (masked when mask is true); the extra
// leading newline is only emitted when no sections were rendered above.
func (dw *detailsWidget) renderNotes(view *gocui.View, mask bool) {
	if dw.item.Notes == "" {
		return
	}
	if dw.item.Sections == nil {
		fmt.Fprint(view, "\n")
	}
	fmt.Fprint(view, "Notes\n")
	fmt.Fprint(view, "------------------------------\n")
	if mask {
		fmt.Fprint(view, "**********\n")
	} else {
		fmt.Fprintf(view, "%v\n", dw.item.Notes)
	}
}
// Keybindings registers every key handler for the details view: Ctrl-L locks,
// j/k and arrows scroll vertically, h/l and arrows scroll horizontally, and
// TAB masks the item and returns focus to the parent view.
func (dw *detailsWidget) Keybindings(ui *gocui.Gui) error {
	bindings := []struct {
		key     interface{}
		handler func(*gocui.Gui, *gocui.View) error
	}{
		{gocui.KeyCtrlL, dw.lockHandler},
		{'j', dw.scrollDown},
		{gocui.KeyArrowDown, dw.scrollDown},
		{'k', dw.scrollUp},
		{gocui.KeyArrowUp, dw.scrollUp},
		{'l', dw.scrollRight},
		{gocui.KeyArrowRight, dw.scrollRight},
		{'h', dw.scrollLeft},
		{gocui.KeyArrowLeft, dw.scrollLeft},
		{gocui.KeyTab, dw.toggleDetails},
	}
	for _, b := range bindings {
		if err := ui.SetKeybinding(dw.name, b.key, gocui.ModNone, b.handler); err != nil {
			return err
		}
	}
	return nil
}
// Layout places the details view on the right half of the screen, between
// the header (row 4) and the footer. gocui.ErrUnknownView is returned the
// first time the view is created and triggers one-time initialization.
func (dw *detailsWidget) Layout(ui *gocui.Gui) error {
	maxX, maxY := ui.Size()
	if view, err := ui.SetView(dw.name, int(0.5*float32(maxX-2)+1), 4, maxX-2, maxY-5); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		// First creation: enable highlighting and set the frame title.
		ui.Highlight = true
		ui.SelFgColor = gocui.ColorBlue
		view.Title = dw.title
	}
	return nil
}
|
package leetcode
/*Given an integer array arr. You have to sort the integers in the array in ascending order by the number of 1's in their binary representation and in case of two or more integers have the same number of 1's you have to sort them in ascending order.
Return the sorted array.
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/sort-integers-by-the-number-of-1-bits
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/
import "sort"
type Ints []int
func sortByBits(arr []int) []int {
var Arr Ints
Arr = arr
sort.Sort(Arr)
return arr
}
// Len reports the number of elements (sort.Interface).
func (this Ints) Len() int {
	return len(this)
}
// Less orders by ascending popcount first, then by ascending value on ties
// (sort.Interface).
func (this Ints) Less(i, j int) bool {
	bi, bj := countBits(this[i]), countBits(this[j])
	if bi != bj {
		return bi < bj
	}
	return this[i] < this[j]
}
// Swap exchanges the elements at i and j (sort.Interface).
func (this Ints) Swap(i, j int) {
	this[i], this[j] = this[j], this[i]
}
// countBits returns the number of 1-bits in the two's-complement
// representation of n.
//
// Bug fixed: the previous version used `n >> 1`, which is an arithmetic
// shift in Go and never reaches 0 for negative inputs (infinite loop).
// Kernighan's trick (n &= n-1 clears the lowest set bit) terminates for
// every input and runs once per set bit.
func countBits(n int) int {
	count := 0
	for n != 0 {
		n &= n - 1
		count++
	}
	return count
}
|
package main
import "fmt"
// Person is the base struct; its fields are promoted into the embedders.
type Person struct {
	First string
	Last  string
	Age   int
}

// Gamer embeds Person and shadows its First field at depth 0.
type Gamer struct {
	Person
	First string // shadows Person.First
	Plays int
	FavoriteGame string
}

// BoardGamer embeds both Person and Gamer (so Person appears twice, at
// different depths) and shadows First once more at its own level.
type BoardGamer struct {
	Person
	Gamer
	First string // shadows both Gamer.First and Person.First
}
// main demonstrates Go field promotion and shadowing with nested embedding:
// b.First hits BoardGamer's own field, while the shadowed fields must be
// reached through their explicit embedded paths. b.Last and b.Age resolve to
// the directly-embedded Person (shallower than Gamer.Person).
func main() {
	var b BoardGamer
	b.First = "BoardGamer Joe"
	b.Gamer.First = "Gamer Joe"
	b.Person.First = "Person Joe"
	b.Last = "smith"
	b.Age = 42
	b.Plays = 100
	b.FavoriteGame = "LotR"
	fmt.Println(b)  // value: prints the full nested struct
	fmt.Println(&b) // pointer: prints &{...}
}
|
package main
import (
"github.com/streadway/amqp"
"log"
"os"
"sync"
)
// ConsumerHandler processes a single AMQP delivery.
type ConsumerHandler func(amqp.Delivery)

// Consumer owns one AMQP connection and fans deliveries out to handlers.
type Consumer struct {
	uri      string           // broker URI (RABBITMQ_URL or local default)
	conn     *amqp.Connection // guarded by connLock
	connLock *sync.Mutex      // protects conn/done
	handLock *sync.Mutex      // protects closes
	wg       *sync.WaitGroup  // tracks running handle goroutines
	done     chan bool        // closed by GracefulShutdown to stop handlers
	closes   []chan error     // subscribers registered via NotifyError
}
// ConsumerSubscribeOptions configures exchange/queue declaration and binding
// for NewSubscriber. E-prefixed flags apply to the exchange, Q-prefixed ones
// to the queue; Ack selects manual acknowledgement.
type ConsumerSubscribeOptions struct {
	Queue       string
	Exchange    string
	RoutingKey  string
	Type        string // exchange type (e.g. direct, topic, fanout)
	EDurable    bool
	EAutoDelete bool
	QDurable    bool
	QAutoDelete bool
	QExclusive  bool
	Ack         bool // true => manual ack after the handler runs
}
// NewConsumer builds an unconnected Consumer. The broker URI is taken from
// the RABBITMQ_URL environment variable, falling back to a local guest
// connection; call Open before subscribing.
func NewConsumer() *Consumer {
	uri := os.Getenv("RABBITMQ_URL")
	if uri == "" {
		uri = "amqp://guest:guest@localhost"
	}
	return &Consumer{
		uri:      uri,
		connLock: new(sync.Mutex),
		handLock: new(sync.Mutex),
		wg:       new(sync.WaitGroup),
	}
}
// Open dials the broker at self.uri. It is idempotent: an existing
// connection is kept. On success it resets the done channel and starts a
// goroutine that forwards the connection's close notification to
// handleError.
func (self *Consumer) Open() error {
	self.connLock.Lock()
	defer self.connLock.Unlock()
	var err error
	if self.conn != nil {
		// Already connected; nothing to do.
		return nil
	}
	if self.conn, err = amqp.Dial(self.uri); err != nil {
		self.conn = nil
		return err
	}
	go func() {
		// Blocks until the broker closes the connection, then fans out.
		err := <-self.conn.NotifyClose(make(chan *amqp.Error))
		self.handleError(err)
	}()
	self.done = make(chan bool)
	log.Printf("[info ] [amqp] successfuly connected") // typo preserved: runtime log string
	return nil
}
// Close tears down the AMQP connection if one is open; it is a no-op when
// already closed. Close does not stop handler goroutines — use
// GracefulShutdown for an orderly stop.
//
// Fix: Lock is now taken before registering the deferred Unlock (the
// original deferred the Unlock first, an inverted, non-idiomatic ordering
// inconsistent with Open/GracefulShutdown in this file).
func (self *Consumer) Close() error {
	self.connLock.Lock()
	defer self.connLock.Unlock()
	if self.conn == nil {
		return nil
	}
	if err := self.conn.Close(); err != nil {
		return err
	}
	self.conn = nil
	self.done = nil
	log.Printf("[info ] [amqp] connection closed")
	return nil
}
// GracefulShutdown signals every handle goroutine to stop (by closing done),
// waits for them to finish in-flight work, then closes the connection.
// NOTE(review): calling this before Open, or twice, panics on close of a
// nil/closed channel — confirm callers invoke it exactly once per Open.
func (self *Consumer) GracefulShutdown() error {
	log.Printf("[warn ] [amqp] process graceful shutdown")
	close(self.done)
	self.wg.Wait()
	if err := self.Close(); err != nil {
		return err
	}
	log.Printf("[warn ] [amqp] shutdown complete")
	return nil
}
// Channel opens a new AMQP channel on the current connection.
//
// Fix: the original dereferenced self.conn unconditionally and without the
// lock — calling Channel before Open (or after Close) panicked with a nil
// pointer. We now read conn under connLock and return amqp.ErrClosed when
// there is no live connection.
func (self *Consumer) Channel() (*amqp.Channel, error) {
	self.connLock.Lock()
	conn := self.conn
	self.connLock.Unlock()
	if conn == nil {
		return nil, amqp.ErrClosed
	}
	return conn.Channel()
}
// NewSubscriber declares the exchange, queue and binding described by o,
// starts consuming, and spawns a handle goroutine that feeds deliveries to
// fn. With o.Ack true, messages are acked manually after fn returns;
// otherwise auto-ack is enabled. The channel is closed on any setup error.
func (self *Consumer) NewSubscriber(fn ConsumerHandler, o ConsumerSubscribeOptions) error {
	ch, err := self.Channel()
	if err != nil {
		return err
	}
	err = ch.ExchangeDeclare(
		o.Exchange, // name of the exchange
		o.Type, // type
		o.EDurable, // durable
		o.EAutoDelete, // delete when complete
		false, // internal
		false, // noWait
		nil, // arguments
	)
	if err != nil {
		ch.Close()
		return err
	}
	q, err := ch.QueueDeclare(
		o.Queue, // name of the queue
		o.QDurable, // durable
		o.QAutoDelete, // delete when usused
		o.QExclusive, // exclusive
		false, // noWait
		nil, // arguments
	)
	if err != nil {
		ch.Close()
		return err
	}
	log.Printf(
		"[info ] [amqp] binding %s to %s using [%s]",
		q.Name, o.Exchange, o.RoutingKey,
	)
	err = ch.QueueBind(
		q.Name, // name of the queue
		o.RoutingKey, // bindingKey
		o.Exchange, // sourceExchange
		false, // noWait
		nil, // arguments
	)
	if err != nil {
		ch.Close()
		return err
	}
	log.Printf("[info ] [amqp] consume %s", q.Name)
	messages, err := ch.Consume(
		q.Name, // name
		"", // consumerTag,
		!o.Ack, // auto ack
		false, // exclusive
		false, // noLocal
		false, // noWait
		nil, // arguments
	)
	if err != nil {
		ch.Close()
		return err
	}
	// The handle goroutine owns ch from here on and closes it when done.
	self.wg.Add(1)
	go self.handle(ch, fn, o.Ack, messages)
	return nil
}
// handleError logs a connection-level AMQP error and forwards it to every
// channel registered via NotifyError. A nil error is ignored.
//
// Fix: the original iterated self.closes without holding handLock while
// NotifyError appends to the same slice under that lock — a data race
// (detectable with -race). The slice is now read under handLock.
func (self *Consumer) handleError(err *amqp.Error) {
	if err == nil {
		return
	}
	log.Printf("[error] [amqp] %s", err)
	self.handLock.Lock()
	defer self.handLock.Unlock()
	for _, c := range self.closes {
		// NOTE(review): this send blocks until the subscriber reads — an
		// unresponsive subscriber stalls the notifier goroutine.
		c <- err
	}
}
// NotifyError registers c to receive connection-level errors (mirroring the
// amqp NotifyClose convention) and returns the same channel for chaining.
func (self *Consumer) NotifyError(c chan error) chan error {
	self.handLock.Lock()
	defer self.handLock.Unlock()
	self.closes = append(self.closes, c)
	return c
}
// handle drains deliveries from messages, invoking fn for each, until the
// delivery channel closes (connection error) or done is closed (graceful
// shutdown). When ack is true, each message is acked after fn runs. The
// AMQP channel is closed and the WaitGroup released on exit.
func (self *Consumer) handle(ch *amqp.Channel, fn ConsumerHandler, ack bool, messages <-chan amqp.Delivery) {
	defer ch.Close()
	defer self.wg.Done()
	for {
		select {
		case message, ok := <-messages:
			// connection level error
			if !ok {
				return
			}
			fn(message)
			if ack {
				message.Ack(false)
			}
		case <-self.done:
			// gracefull shutdown
			return
		}
	}
}
/*
func (c *Consumer) Publish(msg amqp.Publishing) error {
err := c.pubCh.Publish(c.Exchange, c.RoutingKey, false, false, msg)
if err != nil {
return err
}
return nil
}
*/
|
package resolver
import (
"github.com/taktakty/netlabi/models"
genModels "github.com/taktakty/netlabi/models/generated"
"context"
)
// GetRack loads the rack identified by input.ID from the database.
//
// Fix: on a query error the original returned the half-populated &rack
// together with the error; per Go convention the value is meaningless when
// err != nil, so we now return nil.
func (r *queryResolver) GetRack(ctx context.Context, input genModels.GetIDInput) (*models.Rack, error) {
	var rack models.Rack
	rack.ID = input.ID
	if err := db.First(&rack).Error; err != nil {
		return nil, err
	}
	return &rack, nil
}
// GetRacks returns all racks matching the optional filters in input: name
// substring, status, exact site id, and site-name substring (resolved to
// site ids via a sub-query).
//
// Fix: error paths previously returned the partially-built slice alongside
// the error; they now return nil per Go convention. The id slice is also
// pre-sized.
func (r *queryResolver) GetRacks(ctx context.Context, input genModels.SearchRackInput) ([]*models.Rack, error) {
	var racks []*models.Rack
	tx := db
	if input.Name != nil && *input.Name != "" {
		tx = tx.Where("name LIKE ?", "%"+*input.Name+"%")
	}
	if input.Status != nil && *input.Status != 0 {
		tx = tx.Where("status = ?", *input.Status)
	}
	if input.SiteID != nil && *input.SiteID != "" {
		tx = tx.Where("site_id = ?", *input.SiteID)
	}
	if input.SiteName != nil && *input.SiteName != "" {
		// NOTE(review): the site lookup intentionally uses the bare db
		// handle, not tx, so rack filters do not leak into it — confirm.
		var sites []*models.Site
		if err := db.Select("id").Where("name Like ?", "%"+*input.SiteName+"%").Find(&sites).Error; err != nil {
			return nil, err
		}
		ids := make([]models.ID, 0, len(sites))
		for _, site := range sites {
			ids = append(ids, site.ID)
		}
		tx = tx.Where("site_id IN (?)", ids)
	}
	if err := tx.Find(&racks).Error; err != nil {
		return nil, err
	}
	return racks, nil
}
// rackResolver resolves the relation fields of the Rack GraphQL type.
type rackResolver struct{ *Resolver }

// Site resolves a rack's owning site through the batching SiteLoader.
func (r *rackResolver) Site(ctx context.Context, obj *models.Rack) (*models.Site, error) {
	return models.CtxLoaders(ctx).SiteLoader.Load(obj.SiteID)
}

// Devices resolves a rack's devices through the batching slice loader.
func (r *rackResolver) Devices(ctx context.Context, obj *models.Rack) ([]*models.Device, error) {
	return models.CtxLoaders(ctx).DeviceByRackSliceLoader.Load(obj.ID)
}
|
package sessionmanager
import (
"context"
"sync"
cid "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
blocks "gx/ipfs/QmWoXtvgC8inqFkAATB7cp2Dax7XBi9VDvSg9RCCZufmRk/go-block-format"
exchange "gx/ipfs/QmP2g3VxmC7g7fyRJDj1VJ72KHZbJ9UW24YjSWEj1XTb4H/go-ipfs-exchange-interface"
peer "gx/ipfs/QmPJxxDsX2UbchSHobbYuvz7qnyJTFKvaKMzE2rZWJ4x5B/go-libp2p-peer"
bssession "gx/ipfs/QmYJ48z7NEzo3u2yCvUvNtBQ7wJWd5dX2nxxc7FeA6nHq1/go-bitswap/session"
)
// Session is a session that is managed by the session manager
type Session interface {
	exchange.Fetcher
	// InterestedIn reports whether this session wants the given CID.
	InterestedIn(cid.Cid) bool
	// ReceiveBlockFrom hands a received block (and its sender) to the session.
	ReceiveBlockFrom(peer.ID, blocks.Block)
	// UpdateReceiveCounters lets the session record duplicate-block stats.
	UpdateReceiveCounters(blocks.Block)
}
// sesTrk bundles a session with the per-session helpers created alongside
// it, so all three can be tracked (and removed) as one unit.
type sesTrk struct {
	session Session
	pm      bssession.PeerManager
	srs     bssession.RequestSplitter
}
// SessionFactory generates a new session for the SessionManager to track.
type SessionFactory func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter) Session

// RequestSplitterFactory generates a new request splitter for a session.
type RequestSplitterFactory func(ctx context.Context) bssession.RequestSplitter

// PeerManagerFactory generates a new peer manager for a session.
type PeerManagerFactory func(ctx context.Context, id uint64) bssession.PeerManager
// SessionManager is responsible for creating, managing, and dispatching to
// sessions.
type SessionManager struct {
	ctx                    context.Context // manager lifetime; cancels all sessions
	sessionFactory         SessionFactory
	peerManagerFactory     PeerManagerFactory
	requestSplitterFactory RequestSplitterFactory
	// Sessions
	sessLk   sync.Mutex // guards sessions
	sessions []sesTrk
	// Session Index
	sessIDLk sync.Mutex // guards sessID
	sessID   uint64
}
// New creates a new SessionManager. ctx bounds the lifetime of every session
// the manager creates.
func New(ctx context.Context, sessionFactory SessionFactory, peerManagerFactory PeerManagerFactory, requestSplitterFactory RequestSplitterFactory) *SessionManager {
	return &SessionManager{
		ctx:                    ctx,
		sessionFactory:         sessionFactory,
		peerManagerFactory:     peerManagerFactory,
		requestSplitterFactory: requestSplitterFactory,
	}
}
// NewSession initializes a session bounded by both ctx and the manager's own
// context, registers it for dispatch, and returns it as a Fetcher. A
// watchdog goroutine unregisters the session (and cancels its context) when
// either context is done.
func (sm *SessionManager) NewSession(ctx context.Context) exchange.Fetcher {
	id := sm.GetNextSessionID()
	sessionctx, cancel := context.WithCancel(ctx)
	pm := sm.peerManagerFactory(sessionctx, id)
	srs := sm.requestSplitterFactory(sessionctx)
	tracked := sesTrk{sm.sessionFactory(sessionctx, id, pm, srs), pm, srs}
	sm.sessLk.Lock()
	sm.sessions = append(sm.sessions, tracked)
	sm.sessLk.Unlock()
	go func() {
		defer cancel()
		// Whichever context finishes first triggers the same cleanup.
		select {
		case <-sm.ctx.Done():
		case <-ctx.Done():
		}
		sm.removeSession(tracked)
	}()
	return tracked.session
}
// removeSession deletes the tracked session from the slice using the
// swap-with-last trick (order is not preserved). No-op if absent.
func (sm *SessionManager) removeSession(session sesTrk) {
	sm.sessLk.Lock()
	defer sm.sessLk.Unlock()
	for i := 0; i < len(sm.sessions); i++ {
		if sm.sessions[i] == session {
			sm.sessions[i] = sm.sessions[len(sm.sessions)-1]
			sm.sessions = sm.sessions[:len(sm.sessions)-1]
			return
		}
	}
}
// GetNextSessionID returns the next sequentional identifier for a session.
// IDs start at 1 and increase monotonically; the counter has its own lock so
// it never contends with session dispatch.
func (sm *SessionManager) GetNextSessionID() uint64 {
	sm.sessIDLk.Lock()
	defer sm.sessIDLk.Unlock()
	sm.sessID++
	return sm.sessID
}
// ReceiveBlockFrom receives a block from a peer and dispatches to interested
// sessions. Note: sessLk is held for the duration of dispatch, so session
// callbacks must not call back into the manager's locked methods.
func (sm *SessionManager) ReceiveBlockFrom(from peer.ID, blk blocks.Block) {
	sm.sessLk.Lock()
	defer sm.sessLk.Unlock()
	k := blk.Cid()
	for _, s := range sm.sessions {
		if s.session.InterestedIn(k) {
			s.session.ReceiveBlockFrom(from, blk)
		}
	}
}
// UpdateReceiveCounters records the fact that a block was received, allowing
// sessions to track duplicates. Every session is notified, interested or not.
func (sm *SessionManager) UpdateReceiveCounters(blk blocks.Block) {
	sm.sessLk.Lock()
	defer sm.sessLk.Unlock()
	for _, s := range sm.sessions {
		s.session.UpdateReceiveCounters(blk)
	}
}
|
package main
import "fmt"
// Bitcoin denomination constants; all are untyped numeric constants.
const (
	// SatoshiPerBitcent is the number of satoshi in one bitcoin cent.
	SatoshiPerBitcent = 1e6
	// SatoshiPerBitcoin is the number of satoshi in one bitcoin (1 BTC).
	SatoshiPerBitcoin = 1e8
	// MaxSatoshi is the maximum transaction amount allowed in satoshi.
	MaxSatoshi = 21e6 * SatoshiPerBitcoin
)
// main demonstrates that the untyped constant 1e6 compares equal to the
// integer literal 1000000 (prints "true").
func main() {
	fmt.Println(SatoshiPerBitcent == 1000000)
}
|
// main
package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"sync"
"syscall"
"time"
nsq "github.com/bitly/go-nsq"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
)
// updateDuration is the interval between database flushes of the tallies.
const updateDuration = 5 * time.Second

var (
	fatalErr   error          // first fatal error; checked by main's deferred exit
	counts     map[string]int // in-memory vote tallies, option -> count
	countsLock sync.Mutex     // guards counts
)
// fatal records e so the deferred handler in main exits with status 1,
// after printing the error and the flag defaults.
func fatal(e error) {
	fmt.Println(e)
	flag.PrintDefaults()
	fatalErr = e
}
// main wires the vote counter together: it consumes vote messages from NSQ,
// tallies them in memory, and periodically flushes the tallies to MongoDB
// until interrupted by a signal.
func main() {
	// Exit non-zero if fatal() recorded an error anywhere below.
	defer func() {
		if fatalErr != nil {
			os.Exit(1)
		}
	}()
	//connect db
	log.Println("Connecting to database...")
	db, err := mgo.Dial("localhost")
	if err != nil {
		fatal(err)
		return
	}
	defer func() {
		log.Println("Closing database connection...")
		db.Close()
	}()
	pollData := db.DB("ballots").C("polls")
	//connect nsq
	log.Println("Connecting to nsq...")
	q, err := nsq.NewConsumer("votes", "counter", nsq.NewConfig())
	if err != nil {
		fatal(err)
		return
	}
	// Each NSQ message body is a single vote option; bump its counter.
	q.AddHandler(nsq.HandlerFunc(func(m *nsq.Message) error {
		countsLock.Lock()
		defer countsLock.Unlock()
		if counts == nil {
			counts = make(map[string]int)
		}
		vote := string(m.Body)
		counts[vote]++
		log.Println("getting vote from nsq: ", counts)
		return nil
	}))
	if err := q.ConnectToNSQLookupd("localhost:4161"); err != nil {
		fatal(err)
		return
	}
	//update db
	log.Println("Waiting for votes on nsq...")
	// var updater *time.Timer
	// updater = time.AfterFunc(updateDuration, func() {
	// 	countsLock.Lock()
	// 	defer countsLock.Unlock()
	// 	if len(counts) == 0 {
	// 		log.Println("No new votes, skipping database update.")
	// 	} else {
	// 		log.Println("Updating database....")
	// 		log.Println(counts)
	// 		ok := true
	// 		for option, count := range counts {
	// 			sel := bson.M{"options": bson.M{"$in": []string{option}}}
	// 			up := bson.M{"$inc": bson.M{"results." + option: count}}
	// 			if _, err := pollData.UpdateAll(sel, up); err != nil {
	// 				log.Println("failed to update: ", err)
	// 				ok = false
	// 			}
	// 		}
	// 		if ok {
	// 			log.Println("Finished updating database...")
	// 			counts = nil //reset counts
	// 		}
	// 	}
	// 	updater.Reset(updateDuration)
	// })
	// Ticker-based updater goroutine (replaces the timer version above):
	// every updateDuration it flushes counts into the polls collection with
	// $inc updates, then clears counts on success.
	var updaterStopCh = make(chan struct{}, 1)
	go func() {
		var ticker = time.NewTicker(updateDuration)
		for {
			select {
			case <-ticker.C:
			case <-updaterStopCh:
				log.Println("Stopping updater...")
				ticker.Stop()
				return
			}
			func() {
				countsLock.Lock()
				defer countsLock.Unlock()
				if len(counts) == 0 {
					log.Println("No new votes, skipping database update.")
				} else {
					log.Println("Updating database....")
					log.Println(counts)
					ok := true
					for option, count := range counts {
						sel := bson.M{"options": bson.M{"$in": []string{option}}}
						up := bson.M{"$inc": bson.M{"results." + option: count}}
						if _, err := pollData.UpdateAll(sel, up); err != nil {
							log.Println("failed to update: ", err)
							ok = false
						}
					}
					if ok {
						log.Println("Finished updating database...")
						counts = nil //reset counts
						log.Println("Clearing counter: ", counts)
					}
				}
			}()
		}
	}()
	// Block until a termination signal arrives, then stop the updater and
	// NSQ consumer; q.StopChan closing ends the program.
	termChan := make(chan os.Signal, 1)
	signal.Notify(termChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
	for {
		select {
		case <-termChan:
			//updater.Stop()
			close(updaterStopCh) //this get same result as updaterStopCh <- struct{}{}
			q.Stop() //this actually, q send signal to q.stopChan, so in case below if <-q.stoChan then program exit
		case <-q.StopChan:
			//finished
			return
		}
	}
}
|
package main
import (
"bufio"
"fmt"
"os"
"regexp"
"strconv"
"strings"
)
// BagProperty is one outgoing edge of the bag-containment graph: the inner
// bag's name and how many of it the outer bag holds.
type BagProperty struct {
	name string
	num  int
}

// graph maps an outer bag name to the bags it directly contains.
var graph = make(map[string][]BagProperty, 0)

// visited tracks nodes seen during a dfs walk; doPartOne resets it per start.
var visited = make(map[string]bool, 0)
// buildGraph parses input.txt ("X bags contain N a b bag(s), ...") into the
// package-level graph map and returns it. Each line's first three words form
// the outer bag name (trailing "s" stripped); the regex extracts each
// contained count + bag name.
//
// Fix: the os.Open error was silently discarded, which made a missing input
// file surface later as a nil-pointer panic inside bufio.Scanner; it now
// fails fast with a clear panic (acceptable in this puzzle script).
func buildGraph() map[string][]BagProperty {
	f, err := os.Open("input.txt")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	re := regexp.MustCompile(`([0-9]+) ([a-z]+ [a-z]+ bag)`)
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		split := strings.Split(scanner.Text(), " ")
		node := strings.Join(split[0:3], " ")
		node = node[:len(node)-1] // drop plural "s": "x y bags" -> "x y bag"
		visited[node] = false
		graph[node] = []BagProperty{}
		parts := re.FindAllStringSubmatch(scanner.Text(), -1)
		for i := 0; i < len(parts); i++ {
			numBags, _ := strconv.Atoi(parts[i][1]) // regex guarantees digits
			bagName := parts[i][2]
			graph[node] = append(graph[node], BagProperty{bagName, numBags})
		}
	}
	return graph
}
// dfs reports whether "shiny gold bag" is reachable from node in the
// containment graph, marking nodes in the package-level visited map so each
// node is explored at most once per search.
func dfs(node string) bool {
	if node == "shiny gold bag" {
		return true
	}
	visited[node] = true
	for _, edge := range graph[node] {
		if visited[edge.name] {
			continue
		}
		if dfs(edge.name) {
			return true
		}
	}
	return false
}
// doPartOne counts how many bag colors can eventually contain a shiny gold
// bag. It runs a dfs from every node, resetting visited between starts.
// The final "- 1" removes the shiny gold bag itself, whose dfs trivially
// returns true.
func doPartOne() int {
	buildGraph()
	ans := 0
	for k, _ := range graph {
		found := dfs(k)
		if found {
			ans++
		}
		// Reset visit marks for the next starting node.
		for key, _ := range visited {
			visited[key] = false
		}
	}
	return ans - 1
}
// countBags returns the total number of bags contained (recursively) inside
// node: for each edge, the direct count plus count*contents of the inner bag.
// NOTE(review): the visited write below is never read by this function —
// there is no cycle guard, so a cyclic input would recurse forever; the
// puzzle input is acyclic.
func countBags(node string) int {
	visited[node] = true
	edges := graph[node]
	numBags := 0
	for i := 0; i < len(edges); i++ {
		edgeNode := edges[i].name
		numBags += edges[i].num + edges[i].num*countBags(edgeNode)
	}
	return numBags
}
// doPartTwo rebuilds the graph and returns how many bags a shiny gold bag
// must contain in total.
func doPartTwo() int {
	buildGraph()
	return countBags("shiny gold bag")
}
// main prints both puzzle answers, resetting the shared package-level graph
// state between parts so part two's buildGraph starts clean.
func main() {
	fmt.Println(doPartOne())
	// Reset graph state
	graph = make(map[string][]BagProperty, 0)
	visited = make(map[string]bool, 0)
	fmt.Println(doPartTwo())
}
|
// Copyright 2017 Mirantis
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"github.com/golang/glog"
"github.com/Mirantis/k8s-netchecker-server/pkg/extensions"
"k8s.io/apimachinery/pkg/api/errors"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/pkg/apis/extensions/v1beta1"
"k8s.io/client-go/rest"
)
// AgentLabelKey is the pod label key used to select netchecker agents.
const AgentLabelKey = "app"

// AgentLabelValues are the label values identifying agent pods.
var AgentLabelValues = []string{"netchecker-agent", "netchecker-agent-hostnet"}

// Proxy abstracts listing of agent pods (implemented by KubeProxy).
type Proxy interface {
	Pods() (*v1.PodList, error)
}

// KubeProxy talks to the Kubernetes API via the embedded client set.
type KubeProxy struct {
	Client kubernetes.Interface
}
// SetupClientSet initializes kp.Client from the in-cluster service-account
// configuration; it must be called before any API access.
func (kp *KubeProxy) SetupClientSet() error {
	config, err := rest.InClusterConfig()
	if err != nil {
		return err
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		return err
	}
	kp.Client = client
	return nil
}
// initThirdParty ensures the "agent.network-checker.ext" ThirdPartyResource
// exists, creating it when absent.
//
// Bug fixed: the original ended with `return err`, where err was still the
// NotFound error from the initial Get — so a SUCCESSFUL creation of the TPR
// reported failure to the caller. The flow is restructured with early
// returns so each path returns its own result.
func (kp *KubeProxy) initThirdParty() error {
	tpr, err := kp.Client.ExtensionsV1beta1().ThirdPartyResources().Get("agent.network-checker.ext", meta_v1.GetOptions{})
	if err == nil {
		glog.V(5).Infof("SKIPPING: already exists %#v\n", tpr)
		return nil
	}
	if !errors.IsNotFound(err) {
		return err
	}
	// Resource is missing: create it.
	newTPR := &v1beta1.ThirdPartyResource{
		ObjectMeta: meta_v1.ObjectMeta{
			Name: "agent.network-checker.ext",
		},
		Versions: []v1beta1.APIVersion{
			{Name: "v1"},
		},
		Description: "Agent ThirdPartyResource",
	}
	result, err := kp.Client.ExtensionsV1beta1().ThirdPartyResources().Create(newTPR)
	if err != nil {
		return err
	}
	glog.V(5).Infof("CREATED: %#v\nFROM: %#v\n", result, newTPR)
	return nil
}
// configureClient mutates config so a REST client can speak the custom
// "network-checker.ext/v1" API group, and registers the Agent types with the
// global scheme.
func configureClient(config *rest.Config) {
	groupversion := schema.GroupVersion{
		Group:   "network-checker.ext",
		Version: "v1",
	}
	config.GroupVersion = &groupversion
	config.APIPath = "/apis"
	config.ContentType = runtime.ContentTypeJSON
	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs}
	schemeBuilder := runtime.NewSchemeBuilder(
		func(scheme *runtime.Scheme) error {
			scheme.AddKnownTypes(
				groupversion,
				&extensions.Agent{},
				&extensions.AgentList{},
			)
			return nil
		})
	meta_v1.AddToGroupVersion(api.Scheme, groupversion)
	// NOTE(review): AddToScheme's error is discarded here; the registered
	// function above always returns nil, but confirm nothing else can fail.
	schemeBuilder.AddToScheme(api.Scheme)
}
// Pods lists, across all namespaces, the pods whose AgentLabelKey label has
// one of the AgentLabelValues.
func (kp *KubeProxy) Pods() (*v1.PodList, error) {
	requirement, err := labels.NewRequirement(AgentLabelKey, selection.In, AgentLabelValues)
	if err != nil {
		return nil, err
	}
	selector := requirement.String()
	glog.V(10).Infof("Selector for kubernetes pods: %v", selector)
	return kp.Client.Core().Pods("").List(meta_v1.ListOptions{LabelSelector: selector})
}
|
package chain
import (
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/btcutil/gcs"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/neutrino"
"github.com/lightninglabs/neutrino/banman"
"github.com/lightninglabs/neutrino/headerfs"
)
// NeutrinoChainService is an interface that encapsulates all the public
// methods of a *neutrino.ChainService
type NeutrinoChainService interface {
	Start() error
	GetBlock(chainhash.Hash, ...neutrino.QueryOption) (*btcutil.Block, error)
	GetBlockHeight(*chainhash.Hash) (int32, error)
	BestBlock() (*headerfs.BlockStamp, error)
	GetBlockHash(int64) (*chainhash.Hash, error)
	GetBlockHeader(*chainhash.Hash) (*wire.BlockHeader, error)
	IsCurrent() bool
	SendTransaction(*wire.MsgTx) error
	GetCFilter(chainhash.Hash, wire.FilterType,
		...neutrino.QueryOption) (*gcs.Filter, error)
	GetUtxo(...neutrino.RescanOption) (*neutrino.SpendReport, error)
	BanPeer(string, banman.Reason) error
	IsBanned(addr string) bool
	AddPeer(*neutrino.ServerPeer)
	AddBytesSent(uint64)
	AddBytesReceived(uint64)
	NetTotals() (uint64, uint64)
	UpdatePeerHeights(*chainhash.Hash, int32, *neutrino.ServerPeer)
	ChainParams() chaincfg.Params
	Stop() error
	PeerByAddr(string) *neutrino.ServerPeer
}

// Compile-time check that *neutrino.ChainService satisfies the interface;
// a neutrino API change will fail the build here rather than at a call site.
var _ NeutrinoChainService = (*neutrino.ChainService)(nil)
|
package configuration
import (
"labix.org/v2/mgo"
"log"
"os"
)
// ERROR_MESSAGE is the Printf format used when dialing MongoDB fails
// (French: "Error while talking to the database: %s").
const ERROR_MESSAGE = "Erreur lors du dialogue avec la base de donnée : %s"
// GetAnnouncementCollection dials MongoDB (MONGOLAB_URI) and returns the
// "announcement" collection with its owning session; the caller must close
// the session. Returns (nil, nil) if the dial fails.
//
// Bug fixed: on a dial error the original only logged and then dereferenced
// the nil session, panicking. It now returns early like
// GetAccountCollection, keeping the two helpers consistent.
func GetAnnouncementCollection() (*mgo.Collection, *mgo.Session) {
	db := GetConfiguration().GetDatabase()
	session, err := mgo.Dial(os.Getenv("MONGOLAB_URI"))
	if err != nil {
		log.Printf(ERROR_MESSAGE, err.Error())
		return nil, nil
	}
	database := session.DB(db.Name)
	collection := database.C("announcement")
	return collection, session
}
// GetAccountCollection dials MongoDB (MONGOLAB_URI) and returns the
// "account" collection together with its owning session; the caller must
// close the session. Returns (nil, nil) when the dial fails.
func GetAccountCollection() (*mgo.Collection, *mgo.Session) {
	cfg := GetConfiguration().GetDatabase()
	session, err := mgo.Dial(os.Getenv("MONGOLAB_URI"))
	if err != nil {
		log.Printf(ERROR_MESSAGE, err.Error())
		return nil, nil
	}
	return session.DB(cfg.Name).C("account"), session
}
// GetUserCollection dials MongoDB (MONGOLAB_URI) and returns the "users"
// collection, or nil if the dial fails.
//
// Bugs fixed: the log call passed err as an argument to a format string
// with no verb (go vet error, garbled output), and on failure execution
// continued into a nil-session dereference. Both are corrected using the
// shared ERROR_MESSAGE format.
// NOTE(review): unlike its siblings this function does not return the
// session, so the caller can never close it — confirm whether callers rely
// on the session staying open for the process lifetime.
func GetUserCollection() *mgo.Collection {
	db := GetConfiguration().GetDatabase()
	session, err := mgo.Dial(os.Getenv("MONGOLAB_URI"))
	if err != nil {
		log.Printf(ERROR_MESSAGE, err.Error())
		return nil
	}
	return session.DB(db.Name).C("users")
}
// GetSpecyCollection dials MongoDB (MONGOLAB_URI) and returns the "species"
// collection with its owning session; the caller must close the session.
// A dial failure panics via log.Panicf (so the nil session is never used).
func GetSpecyCollection() (*mgo.Collection, *mgo.Session) {
	db := GetConfiguration().GetDatabase()
	session, err := mgo.Dial(os.Getenv("MONGOLAB_URI"))
	if err != nil {
		log.Panicf(ERROR_MESSAGE, err.Error())
	}
	database := session.DB(db.Name)
	collection := database.C("species")
	return collection, session
}
|
package categories
import (
"fmt"
"sort"
"strings"
"github.com/Nv7-Github/Nv7Haven/eod/base"
"github.com/Nv7-Github/Nv7Haven/eod/types"
"github.com/Nv7-Github/Nv7Haven/eod/util"
)
// catSortInfo pairs a category name with a count.
// NOTE(review): not referenced anywhere in this file's visible code —
// confirm it is used elsewhere before removing.
type catSortInfo struct {
	Name string
	Cnt  int
}
// CatCmd shows one category's elements with a found/not-found mark per
// element for the target inventory (the invoker's, or user's when hasUser
// is set), sorted by sortKind, as a paged embed.
func (b *Categories) CatCmd(category string, sortKind string, hasUser bool, user string, m types.Msg, rsp types.Rsp) {
	b.lock.RLock()
	dat, exists := b.dat[m.GuildID]
	b.lock.RUnlock()
	if !exists {
		return
	}
	category = strings.TrimSpace(category)
	if base.IsFoolsMode && !base.IsFool(category) {
		rsp.ErrorMessage(base.MakeFoolResp(category))
		return
	}
	id := m.Author.ID
	if hasUser {
		id = user
	}
	inv, res := dat.GetInv(id, !hasUser)
	if !res.Exists {
		rsp.ErrorMessage(res.Message)
		return
	}
	cat, res := dat.GetCategory(category)
	if !res.Exists {
		rsp.ErrorMessage(res.Message)
		return
	}
	// Use the category's canonical name for display.
	category = cat.Name
	out := make([]struct {
		found int
		text  string
		name  string
	}, len(cat.Elements))
	found := 0
	i := 0
	fnd := 0
	var text string
	// Build one line per element, marking owned elements with a check.
	for name := range cat.Elements {
		exists := inv.Elements.Contains(name)
		if exists {
			text = name + " " + types.Check
			found++
			fnd = 1
		} else {
			text = name + " " + types.X
			fnd = 0
		}
		out[i] = struct {
			found int
			text  string
			name  string
		}{
			found: fnd,
			text:  text,
			name:  name,
		}
		i++
	}
	var o []string
	switch sortKind {
	case "catfound":
		sort.Slice(out, func(i, j int) bool {
			return out[i].found > out[j].found
		})
	case "catnotfound":
		sort.Slice(out, func(i, j int) bool {
			return out[i].found < out[j].found
		})
	case "catelemcount":
		// Element-count ordering only makes sense across categories.
		rsp.ErrorMessage("Invalid sort!")
		return
	default:
		util.SortElemObj(out, len(out), func(index int, sort bool) string {
			if sort {
				return out[index].name
			}
			return out[index].text
		}, func(index int, val string) {
			out[index].text = val
		}, sortKind, dat)
	}
	o = make([]string, len(out))
	for i, val := range out {
		o[i] = val.text
	}
	// NOTE(review): an empty category makes the percentage below 0/0 (NaN)
	// in the title — confirm empty categories cannot reach this point.
	b.base.NewPageSwitcher(types.PageSwitcher{
		Kind:       types.PageSwitchInv,
		Thumbnail:  cat.Image,
		Title:      fmt.Sprintf("%s (%d, %s%%)", category, len(out), util.FormatFloat(float32(found)/float32(len(out))*100, 2)),
		PageGetter: b.base.InvPageGetter,
		Items:      o,
		Color:      cat.Color,
	}, m, rsp)
}
// catData is one row of the AllCatCmd listing: display text plus the fields
// the various sort orders key on.
type catData struct {
	text  string  // rendered line shown to the user
	name  string  // category name (alphabetical sort key)
	found float32 // fraction of the category the user owns
	count int     // total elements in the category
}
// AllCatCmd lists every category with the target inventory's completion
// percentage (a check mark when complete), sorted by sortBy, as a paged
// embed.
func (b *Categories) AllCatCmd(sortBy string, hasUser bool, user string, m types.Msg, rsp types.Rsp) {
	b.lock.RLock()
	dat, exists := b.dat[m.GuildID]
	b.lock.RUnlock()
	if !exists {
		return
	}
	id := m.Author.ID
	if hasUser {
		id = user
	}
	inv, res := dat.GetInv(id, !hasUser)
	if !res.Exists {
		rsp.ErrorMessage(res.Message)
		return
	}
	dat.Lock.RLock()
	out := make([]catData, len(dat.Categories))
	i := 0
	for _, cat := range dat.Categories {
		// Count how many of this category's elements the inventory holds.
		count := 0
		for elem := range cat.Elements {
			exists := inv.Elements.Contains(elem)
			if exists {
				count++
			}
		}
		// NOTE(review): an empty category yields 0/0 (NaN) here — confirm
		// empty categories cannot exist.
		perc := float32(count) / float32(len(cat.Elements))
		text := "(" + util.FormatFloat(perc*100, 2) + "%)"
		if count == len(cat.Elements) {
			text = types.Check
		}
		out[i] = catData{
			text:  fmt.Sprintf("%s %s", cat.Name, text),
			name:  cat.Name,
			found: perc,
			count: len(cat.Elements),
		}
		i++
	}
	dat.Lock.RUnlock()
	switch sortBy {
	case "catfound":
		sort.Slice(out, func(i, j int) bool {
			return out[i].found > out[j].found
		})
	case "catnotfound":
		sort.Slice(out, func(i, j int) bool {
			return out[i].found < out[j].found
		})
	case "catelemcount":
		sort.Slice(out, func(i, j int) bool {
			return out[i].count > out[j].count
		})
	default:
		// Alphabetical by category name.
		sort.Slice(out, func(i, j int) bool {
			return util.CompareStrings(out[i].name, out[j].name)
		})
	}
	names := make([]string, len(out))
	for i, dat := range out {
		names[i] = dat.text
	}
	b.base.NewPageSwitcher(types.PageSwitcher{
		Kind:       types.PageSwitchInv,
		Title:      fmt.Sprintf("All Categories (%d)", len(out)),
		PageGetter: b.base.InvPageGetter,
		Items:      names,
	}, m, rsp)
}
|
package utils
import (
"net/url"
"path"
"strings"
)
// URLPathFullClean returns a URL path with the query parameters appended (full path) with the path portion parsed
// through path.Clean given a *url.URL.
func URLPathFullClean(u *url.URL) (output string) {
lengthPath := len(u.Path)
lengthQuery := len(u.RawQuery)
appendForwardSlash := lengthPath > 1 && u.Path[lengthPath-1] == '/'
switch {
case lengthPath == 1 && lengthQuery == 0:
return u.Path
case lengthPath == 1:
return path.Clean(u.Path) + "?" + u.RawQuery
case lengthQuery != 0 && appendForwardSlash:
return path.Clean(u.Path) + "/?" + u.RawQuery
case lengthQuery != 0:
return path.Clean(u.Path) + "?" + u.RawQuery
case appendForwardSlash:
return path.Clean(u.Path) + "/"
default:
return path.Clean(u.Path)
}
}
// IsURISafeRedirection returns true if the URI passes the IsURISecure and HasURIDomainSuffix, i.e. if the scheme is
// secure and the given URI has a hostname that is either exactly equal to the given domain or if it has a suffix of the
// domain prefixed with a period.
func IsURISafeRedirection(uri *url.URL, domain string) bool {
	return IsURISecure(uri) && HasURIDomainSuffix(uri, domain)
}
// IsURISecure returns true if the URI has a secure schemes (https or wss).
// The https/wss constants are declared elsewhere in this package.
func IsURISecure(uri *url.URL) bool {
	switch uri.Scheme {
	case https, wss:
		return true
	default:
		return false
	}
}
// HasURIDomainSuffix returns true if the URI hostname is equal to the domain suffix or if it has a suffix of the domain
// suffix prefixed with a period. Delegates to HasDomainSuffix on the hostname.
func HasURIDomainSuffix(uri *url.URL, domainSuffix string) bool {
	return HasDomainSuffix(uri.Hostname(), domainSuffix)
}
// HasDomainSuffix reports whether domain equals domainSuffix or ends with
// "." + domainSuffix (period is this package's "." constant). An empty
// suffix never matches.
func HasDomainSuffix(domain, domainSuffix string) bool {
	switch {
	case domainSuffix == "":
		return false
	case domain == domainSuffix:
		return true
	default:
		return strings.HasSuffix(domain, period+domainSuffix)
	}
}
|
package xmlsec
// EncryptedData represents the <EncryptedData> XML tag. See
// https://www.w3.org/TR/2002/REC-xmlenc-core-20021210/Overview.html#sec-Usage
type EncryptedData struct {
	XMLName          string     `xml:"http://www.w3.org/2001/04/xmlenc# EncryptedData"`
	Type             string     `xml:",attr"`
	EncryptionMethod Method     `xml:"EncryptionMethod"`
	KeyInfo          KeyInfo    `xml:"http://www.w3.org/2000/09/xmldsig# KeyInfo"`
	CipherData       CipherData `xml:"http://www.w3.org/2001/04/xmlenc# CipherData"`
}

// CipherData represents the <CipherData> tag.
type CipherData struct {
	CipherValue string `xml:"CipherValue"`
}

// KeyInfo represents the <KeyInfo> tag.
type KeyInfo struct {
	EncryptedKey EncryptedKey `xml:"http://www.w3.org/2001/04/xmlenc# EncryptedKey"`
}

// EncryptedKey represents the <EncryptedKey> XML element. See
// https://www.w3.org/TR/2002/REC-xmlenc-core-20021210/Overview.html#sec-EncryptedKey
type EncryptedKey struct {
	EncryptionMethod Method `xml:"EncryptionMethod"`
	// KeyInfo carries the recipient certificate used to wrap the key.
	KeyInfo struct {
		X509Data string
	} `xml:"http://www.w3.org/2000/09/xmldsig# KeyInfo"`
	CipherData CipherData `xml:"http://www.w3.org/2001/04/xmlenc# CipherData"`
}
// Default xmlenc algorithm URIs: AES-128-CBC for the payload and
// RSA-OAEP-MGF1P for wrapping the session key.
const (
	defaultDataEncryptionMethodAlgorithm = "http://www.w3.org/2001/04/xmlenc#aes128-cbc"
	defaultKeyEncryptionMethodAlgorithm  = "http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p"
)
// NewEncryptedDataTemplate returns an EncryptedData object that uses the
// given data and key encryption algorithms; cipher values are left empty for
// the encryption step to fill in.
func NewEncryptedDataTemplate(dataEncryptionMethodAlgorithm string, keyEncryptionMethodAlgorithm string) *EncryptedData {
	template := &EncryptedData{Type: "http://www.w3.org/2001/04/xmlenc#Element"}
	template.EncryptionMethod = Method{Algorithm: dataEncryptionMethodAlgorithm}
	template.KeyInfo.EncryptedKey.EncryptionMethod = Method{Algorithm: keyEncryptionMethodAlgorithm}
	return template
}
|
package fingerprint
import "os"
// Fingerprinter defines operations for calculating fingerprints from audio files
type Fingerprinter interface {
// CalcFingerprint returns a list of fingerprints from an input path
CalcFingerprint(fPath string) ([]*Fingerprint, error)
}
// Fingerprint is an audio file fingerprint. The JSON structure allows the struct to
// parse the chromaprint fpcalc command when executed with the -json flag
type Fingerprint struct {
// Duration is the audio length in seconds as reported by fpcalc.
Duration float32 `json:"duration"`
// Value is the fingerprint string itself.
Value string `json:"fingerprint"`
// InputFile is filled in by the caller, never from JSON.
InputFile os.FileInfo `json:"-"`
}
|
package UI
// RUNEVT identifies the "run" UI event.
// NOTE(review): Go convention is MixedCaps (RunEvt) rather than ALL_CAPS,
// but renaming would break existing callers, so the name is kept.
const (
	RUNEVT = 1
)
|
package suffix
import (
"sort"
)
// Suffix exposes substring statistics computed from a suffix array.
type Suffix interface {
// DistinctSubCount returns the number of distinct substrings.
DistinctSubCount() int
// DistinctSub returns every distinct substring.
DistinctSub() [][]byte
// SubCount returns the total (non-distinct) substring count.
SubCount() int
// LongestRepeatedSubs returns the longest substrings that occur
// more than once.
LongestRepeatedSubs() [][]byte
}
// array implements Suffix with a suffix array (sa) and the longest-common-
// prefix lengths of adjacent sorted suffixes (lcp), both over txt.
type array struct {
txt []byte
sa []int
lcp []int
}
// NewArray builds the suffix array and LCP table for txt and returns the
// resulting Suffix implementation.
func NewArray(txt []byte) Suffix {
	arr := array{txt: txt}
	arr.sa = arr.newArray()
	arr.lcp = arr.newLcp()
	return &arr
}
// LongestRepeatedSubs returns the longest repeated substring(s) of a.txt:
// it finds the maximum LCP value and collects the substring at every
// position achieving it.
func (a *array) LongestRepeatedSubs() [][]byte {
	lrs := make([][]byte, 0)
	// Guard: for empty input sa/lcp are empty and a.sa[key] below would
	// panic with index out of range.
	if len(a.lcp) == 0 {
		return lrs
	}
	var max, key int
	for k, v := range a.lcp {
		if v > max {
			max = v
			key = k
		}
	}
	lrs = append(lrs, a.txt[a.sa[key]:a.sa[key]+max])
	for i := key + 1; i < len(a.lcp); i++ {
		if a.lcp[i] == max {
			lrs = append(lrs, a.txt[a.sa[i]:a.sa[i]+max])
		}
	}
	return lrs
}
// DistinctSub returns all the distinct substrings of a.txt.
// Each suffix sa[i] contributes its prefixes longer than lcp[i]; prefixes
// up to lcp[i] were already produced by the previous sorted suffix.
func (a *array) DistinctSub() [][]byte {
	n := a.DistinctSubCount()
	dist := make([][]byte, n)
	var x, k int
	// NOTE: the loop variable n shadows the count above; here n is the
	// start index of the current suffix.
	for i, n := range a.sa {
		// first new prefix ends just past the shared lcp[i] characters
		x = a.lcp[i] + n
		// len(a.sa) == len(a.txt), so this walks to the end of the text
		for x < len(a.sa) {
			dist[k] = a.txt[n : x+1]
			k++
			x += 1
		}
	}
	return dist
}
// SubCount returns the total substring count of a.txt: n*(n+1)/2 for a
// text of length n (all start/end pairs, duplicates included).
func (a *array) SubCount() int {
	n := len(a.txt)
	return n * (n + 1) / 2
}
// DistinctSubCount returns the number of distinct substrings of a.txt:
// the total count minus the duplicates measured by the LCP array.
func (a *array) DistinctSubCount() int {
	dup := 0
	for _, overlap := range a.lcp {
		dup += overlap
	}
	return a.SubCount() - dup
}
// suffix is a working entry for prefix-doubling construction: the start
// Index of the suffix and its current pair of sort ranks.
type suffix struct {
Index int
Rank [2]int
}
// newArray builds the suffix array of a.txt by prefix doubling: suffixes
// are repeatedly sorted by (rank of first half, rank of second half),
// doubling the compared prefix length k each round.
// NOTE(review): initial ranks subtract 'a', which assumes byte values at
// or above 'a' sort meaningfully — confirm input alphabet before reuse.
func (a *array) newArray() []int {
	n := len(a.txt)
	suffixes := make([]suffix, n)
	// seed ranks from the first two characters (-1 marks "past the end")
	for i := range a.txt {
		suffixes[i].Index = i
		suffixes[i].Rank[0] = int(a.txt[i]) - 'a'
		if i+1 < n {
			suffixes[i].Rank[1] = int(a.txt[i+1]) - 'a'
		} else {
			suffixes[i].Rank[1] = -1
		}
	}
	// lexicographic order on the rank pair
	sortFunc := func(i, j int) bool {
		if suffixes[i].Rank[0] == suffixes[j].Rank[0] {
			return suffixes[i].Rank[1] < suffixes[j].Rank[1]
		}
		return suffixes[i].Rank[0] < suffixes[j].Rank[0]
	}
	sort.Slice(suffixes, sortFunc)
	// ind maps a suffix start position back to its slot in suffixes
	ind := make([]int, n)
	for k := 4; k < 2*n; k *= 2 {
		// reassign dense ranks in sorted order; equal pairs share a rank
		var rank int
		prev_rank := suffixes[0].Rank[0]
		suffixes[0].Rank[0] = rank
		ind[suffixes[0].Index] = 0
		for i := 1; i < n; i++ {
			if suffixes[i].Rank[0] == prev_rank &&
				suffixes[i].Rank[1] == suffixes[i-1].Rank[1] {
				prev_rank = suffixes[i].Rank[0]
				suffixes[i].Rank[0] = rank
			} else {
				prev_rank = suffixes[i].Rank[0]
				rank += 1
				suffixes[i].Rank[0] = rank
			}
			ind[suffixes[i].Index] = i
		}
		// second rank becomes the rank of the suffix k/2 positions later
		for i := 0; i < n; i++ {
			nextindex := suffixes[i].Index + k/2
			if nextindex < n {
				suffixes[i].Rank[1] = suffixes[ind[nextindex]].Rank[0]
			} else {
				suffixes[i].Rank[1] = -1
			}
		}
		sort.Slice(suffixes, sortFunc)
	}
	// emit start positions in final sorted order
	suffixArr := make([]int, n)
	for i := 0; i < n; i++ {
		suffixArr[i] = suffixes[i].Index
	}
	return suffixArr
}
// newLcp computes, for each adjacent pair of suffixes in sorted order, the
// length of their longest common prefix by direct character comparison.
// lcp[0] is left at 0 (no predecessor).
func (a *array) newLcp() []int {
	n := len(a.sa)
	lcp := make([]int, n)
	var common, s1, s2 int
	for i := 1; i < n; i++ {
		s1 = a.sa[i-1]
		s2 = a.sa[i]
		// walk both suffixes while they agree
		for s1 < len(a.txt) && s2 < len(a.txt) && a.txt[s1] == a.txt[s2] {
			common++
			s1++
			s2++
		}
		lcp[i] = common
		common = 0
	}
	return lcp
}
|
package config
import (
"encoding/json"
"fmt"
"os"
)
// configPath is the JSON configuration file loaded at package init.
const configPath = "config.json"

var (
// config holds the parsed configuration; populated once by init.
config *Config
)
// init loads and decodes configPath into the package-level config.
// It panics on any failure, making the configuration a hard startup
// requirement for any importer of this package.
func init() {
	file, err := os.Open(configPath)
	if err != nil {
		panic(err)
	}
	defer file.Close()
	cfg := &Config{}
	decoder := json.NewDecoder(file)
	if err := decoder.Decode(cfg); err != nil {
		panic(err)
	}
	config = cfg
}
// Config is the root of the application configuration file.
type Config struct {
DB *DB `json:"db"`
Server *Server `json:"server"`
Logger *Logger `json:"logger"`
}
// DB holds the database connection settings.
type DB struct {
Host string `json:"host"`
User string `json:"user"`
Pass string `json:"pass"`
Name string `json:"name"`
}
// GetDB returns the database section of the loaded configuration.
func GetDB() *DB {
	return config.DB
}
// Server holds the HTTP server settings.
type Server struct {
// Addr is the listen address, e.g. ":8080".
Addr string `json:"addr"`
}
// GetServer returns the server section of the loaded configuration.
func GetServer() *Server {
	return config.Server
}
// Logger holds the log rotation and formatting settings.
type Logger struct {
	FileName string `json:"filename"`
	Level    int    `json:"level"`
	MaxLines int    `json:"maxlines"`
	MaxSize  int    `json:"maxsize"`
	Daily    bool   `json:"daily"`
	MaxDays  int    `json:"maxdays"`
	Color    bool   `json:"color"`
}

// String returns the logger configuration encoded as a JSON object.
// Marshaling a Logger of basic fields cannot fail, so the error is
// deliberately ignored.
func (l *Logger) String() string {
	data, _ := json.Marshal(l)
	// string(data) replaces the needless fmt.Sprintf("%s", data) round
	// trip (staticcheck S1025).
	return string(data)
}
// GetLogger returns the logger section of the loaded configuration.
func GetLogger() *Logger {
	return config.Logger
}
|
package context
import (
"MainApplication/config"
"MainApplication/internal/User/UserModel"
"context"
crypto "crypto/rand"
"errors"
"github.com/microcosm-cc/bluemonday"
"math/big"
"net/http"
"time"
)
// Cookie names used for the session identifier and the CSRF token.
const (
CookieName = "session_id"
CsrfCookieName = "token"
)
// UserFromContextError is returned by GetUserFromCtx when the context does
// not carry a user. NOTE(review): Go convention would name this
// ErrUserFromContext with a lowercase, unpunctuated message; renaming
// would break callers, so it is kept.
var UserFromContextError = errors.New("Could not get user from context!")
// userKey is the private context key type; an unexported struct type
// cannot collide with context keys from other packages.
type userKey struct {
}
// GetUserFromCtx extracts the user stored in ctx by SaveUserToContext.
// It returns UserFromContextError and a zero user when none is present.
// The unconventional (error, value) return order is kept for existing
// callers.
func GetUserFromCtx(ctx context.Context) (error, UserModel.User) {
	user, ok := ctx.Value(userKey{}).(UserModel.User)
	if ok {
		return nil, user
	}
	return UserFromContextError, UserModel.User{}
}
// SaveUserToContext returns a child context carrying user under the
// package-private userKey, for later retrieval via GetUserFromCtx.
func SaveUserToContext(ctx context.Context, user UserModel.User) context.Context {
	return context.WithValue(ctx, userKey{}, user)
}
// GenerateCSRF returns a random CSRF token of config.CsrfSize characters
// drawn from config.SidRunes using crypto/rand.
func GenerateCSRF() string {
	// Accumulate bytes instead of concatenating strings in a loop, which
	// reallocates the whole token on every iteration (quadratic cost).
	token := make([]byte, 0, config.CsrfSize)
	for i := 0; i < config.CsrfSize; i++ {
		// crypto/rand.Int fails only if the system entropy source does;
		// the original ignored the error and that behavior is kept.
		pos, _ := crypto.Int(crypto.Reader, big.NewInt(int64(len(config.SidRunes))))
		token = append(token, string(config.SidRunes[pos.Int64()])...)
	}
	return string(token)
}
// GetStrFormValueSafety reads the named form field from r and returns it
// with markup sanitized by bluemonday's UGC policy, mitigating stored XSS.
// (Reformatted: the original `xss:=` / `ok:=` spacing was not gofmt-clean.)
func GetStrFormValueSafety(r *http.Request, field string) string {
	raw := r.FormValue(field)
	// NOTE(review): building a policy per call is wasteful; UGCPolicy()
	// could be hoisted to a package-level var — confirm no per-request
	// customization is intended before doing so.
	policy := bluemonday.UGCPolicy()
	return policy.Sanitize(raw)
}
// CreateCsrfCookie builds a site-wide ("/") CSRF cookie carrying a fresh
// random token that expires 15 minutes from now.
func CreateCsrfCookie() *http.Cookie {
	return &http.Cookie{
		Name:    CsrfCookieName,
		Value:   GenerateCSRF(),
		Path:    "/",
		Expires: time.Now().Add(15 * time.Minute),
	}
}
|
package coinbase
import (
"errors"
"net/http"
"time"
"github.com/fabioberger/coinbase-go/config"
)
// clientOAuthAuthentication implements the Authentication interface and
// takes care of authenticating OAuth RPC requests on behalf of a client
// (i.e GetBalance()). (Comment fixed: the type is unexported, not
// ClientOAuthAuthentication.)
type clientOAuthAuthentication struct {
Tokens *oauthTokens
BaseUrl string
Client http.Client
}
// clientOAuth instantiates clientOAuthAuthentication with the client's
// OAuth tokens and an HTTP client whose dialer enforces a connect timeout.
func clientOAuth(tokens *oauthTokens) *clientOAuthAuthentication {
	transport := &http.Transport{Dial: dialTimeout}
	return &clientOAuthAuthentication{
		Tokens:  tokens,
		BaseUrl: config.BaseUrl,
		Client:  http.Client{Transport: transport},
	}
}
// authenticate attaches an unexpired OAuth bearer token to the request
// header. endpoint and params are unused here but satisfy the
// authentication interface.
func (a clientOAuthAuthentication) authenticate(req *http.Request, endpoint string, params []byte) error {
	// Unix() is timezone-independent, so the previous .UTC() was redundant.
	if time.Now().Unix() > a.Tokens.ExpireTime {
		// lowercase error string per Go convention (staticcheck ST1005)
		return errors.New("the OAuth tokens are expired, use refreshTokens to refresh them")
	}
	req.Header.Set("Authorization", "Bearer "+a.Tokens.AccessToken)
	return nil
}
// getBaseUrl returns the API base URL captured at construction time.
func (a clientOAuthAuthentication) getBaseUrl() string {
	return a.BaseUrl
}
// getClient returns the HTTP client used for OAuth requests.
// NOTE(review): the value receiver copies the struct, so the returned
// pointer refers to a fresh per-call copy of Client (the Transport pointer
// inside it is still shared) — confirm this is intentional.
func (a clientOAuthAuthentication) getClient() *http.Client {
	return &a.Client
}
|
package main
import (
"bufio"
"encoding/binary"
"errors"
"flag"
"fmt"
"hash/crc32"
"io"
"log"
"net"
"os"
"os/signal"
"strings"
"sync"
"time"
"github.com/kawasin73/umutex"
)
// WAL record-log action codes. LInsert..LRead carry a record payload;
// LCommit and LAbort are payload-less transaction terminators.
const (
LInsert = 1 + iota
LDelete
LUpdate
LRead
LCommit
LAbort
)
// Sentinel errors returned by storage and transaction operations.
var (
ErrExist = errors.New("record already exists")
ErrNotExist = errors.New("record not exists")
ErrBufferShort = errors.New("buffer size is not enough to deserialize")
ErrChecksum = errors.New("checksum does not match")
ErrDeadLock = errors.New("deadlock detected")
)
// Record is a single key/value entry stored in the in-memory db and in
// the WAL / checkpoint files.
type Record struct {
Key string
Value []byte
}
// Serialize writes r into buf as:
//
//	[0]    key length (1 byte)
//	[1:5]  value length (big-endian uint32)
//	[5:]   key bytes followed by value bytes
//
// and returns the number of bytes written, or ErrBufferShort when buf is
// too small.
func (r *Record) Serialize(buf []byte) (int, error) {
	key := []byte(r.Key)
	// The key length is stored in a single byte; a longer key would
	// silently truncate the length field and corrupt the log on disk.
	if len(key) > 255 {
		return 0, fmt.Errorf("key is too long to serialize : %v bytes", len(key))
	}
	value := r.Value
	total := 5 + len(key) + len(value)
	// check buffer size
	if len(buf) < total {
		return 0, ErrBufferShort
	}
	// serialize
	// TODO: support NULL value
	buf[0] = uint8(len(key))
	binary.BigEndian.PutUint32(buf[1:], uint32(len(r.Value)))
	copy(buf[5:], key)
	copy(buf[5+len(key):], r.Value)
	return total, nil
}
// Deserialize parses one record (1-byte key length, 4-byte value length,
// then key and value bytes) from buf, returning the bytes consumed or
// ErrBufferShort when buf does not hold a complete record.
func (r *Record) Deserialize(buf []byte) (int, error) {
	if len(buf) < 5 {
		return 0, ErrBufferShort
	}
	// parse length
	keyLen := buf[0]
	valueLen := binary.BigEndian.Uint32(buf[1:])
	total := 5 + int(keyLen) + int(valueLen)
	if len(buf) < total {
		return 0, ErrBufferShort
	}
	// copy key and value from buffer so r does not alias buf
	r.Key = string(buf[5 : 5+keyLen])
	// TODO: support NULL value
	r.Value = make([]byte, valueLen)
	copy(r.Value, buf[5+keyLen:total])
	return total, nil
}
// RecordLog is one WAL entry: an action code plus, for data actions, the
// embedded record it applies to.
type RecordLog struct {
Action uint8
Record
}
// Serialize writes the log entry into buf: the 1-byte action, the record
// payload for data actions, then a CRC32 (IEEE) checksum over everything
// written so far. Returns total bytes written including the 4 checksum
// bytes, or ErrBufferShort.
func (r *RecordLog) Serialize(buf []byte) (int, error) {
	if len(buf) < 5 {
		return 0, ErrBufferShort
	}
	buf[0] = r.Action
	var total = 1
	if r.Action > LRead {
		// LCommit or LAbort: control entries carry no record payload
	} else {
		// serialize record content first (check buffer size)
		n, err := r.Record.Serialize(buf[1:])
		if err != nil {
			return 0, err
		}
		total += n
	}
	if len(buf) < total+4 {
		return 0, ErrBufferShort
	}
	// generate checksum over action + payload
	hash := crc32.NewIEEE()
	if _, err := hash.Write(buf[:total]); err != nil {
		return 0, err
	}
	binary.BigEndian.PutUint32(buf[total:], hash.Sum32())
	return total + 4, nil
}
// Deserialize parses one log entry from buf and validates its trailing
// CRC32 checksum, returning the bytes consumed (payload + 4).
func (r *RecordLog) Deserialize(buf []byte) (int, error) {
	if len(buf) < 5 {
		return 0, ErrBufferShort
	}
	r.Action = buf[0]
	var total = 1
	switch r.Action {
	case LCommit, LAbort:
		// Control entries carry no payload. LAbort was previously
		// rejected here even though Serialize emits it and LoadWAL
		// handles it, which made abort entries unreadable on recovery.
	case LInsert, LUpdate, LDelete:
		n, err := r.Record.Deserialize(buf[1:])
		if err != nil {
			return 0, err
		}
		total += n
	default:
		return 0, fmt.Errorf("action is not supported : %v", r.Action)
	}
	// ensure the 4 checksum bytes are present before reading them;
	// previously a payload that ended exactly at len(buf) panicked here.
	if len(buf) < total+4 {
		return 0, ErrBufferShort
	}
	// validate checksum
	hash := crc32.NewIEEE()
	if _, err := hash.Write(buf[:total]); err != nil {
		return 0, err
	}
	if binary.BigEndian.Uint32(buf[total:]) != hash.Sum32() {
		return 0, ErrChecksum
	}
	return total + 4, nil
}
// lock pairs an upgradable mutex with a reference count so the Locker can
// free table entries when no transaction holds or awaits them.
type lock struct {
mu umutex.UMutex
refs int
}
// Locker provides per-key record locks; mu guards the mutexes table
// itself, not the individual record locks.
type Locker struct {
mu sync.Mutex
mutexes map[string]*lock
}
// NewLocker returns a Locker with an empty per-key lock table.
func NewLocker() *Locker {
	l := new(Locker)
	l.mutexes = map[string]*lock{}
	return l
}
// refLock returns the lock for key, creating it on first use, and bumps
// its reference count under the table mutex.
func (l *Locker) refLock(key string) *lock {
	l.mu.Lock()
	rec, ok := l.mutexes[key]
	if !ok {
		// TODO: not create lock object each time, use Pool or preallocate for each record
		rec = new(lock)
		l.mutexes[key] = rec
	}
	rec.refs++
	l.mu.Unlock()
	return rec
}
// unrefLock drops one reference to key's lock and deletes the table entry
// when the count reaches zero.
// NOTE(review): an unbalanced call for a key never passed to refLock will
// nil-deref — callers must pair ref/unref exactly.
func (l *Locker) unrefLock(key string) *lock {
	l.mu.Lock()
	rec := l.mutexes[key]
	rec.refs--
	if rec.refs == 0 {
		delete(l.mutexes, key)
	}
	l.mu.Unlock()
	return rec
}
// getLock looks up key's lock without touching its reference count.
// Returns nil when the key has no live lock entry.
func (l *Locker) getLock(key string) *lock {
	l.mu.Lock()
	rec := l.mutexes[key]
	l.mu.Unlock()
	return rec
}
// Lock acquires the exclusive (write) lock for key, creating it if needed.
func (l *Locker) Lock(key string) {
	rec := l.refLock(key)
	rec.mu.Lock()
}
// Unlock releases key's exclusive lock and drops its reference.
func (l *Locker) Unlock(key string) {
	rec := l.unrefLock(key)
	rec.mu.Unlock()
}
// RLock acquires the shared (read) lock for key, creating it if needed.
func (l *Locker) RLock(key string) {
	rec := l.refLock(key)
	rec.mu.RLock()
}
// RUnlock releases key's shared lock and drops its reference.
func (l *Locker) RUnlock(key string) {
	rec := l.unrefLock(key)
	rec.mu.RUnlock()
}
// Upgrade attempts to promote key's read lock to a write lock; it reports
// false when the upgrade would deadlock (per umutex semantics).
func (l *Locker) Upgrade(key string) bool {
	rec := l.getLock(key)
	return rec.mu.Upgrade()
}
// Downgrade demotes key's write lock back to a read lock.
func (l *Locker) Downgrade(key string) {
	rec := l.getLock(key)
	rec.mu.Downgrade()
}
// Storage is the WAL-backed in-memory key/value store. muWAL serializes
// WAL appends, muDB guards the db map, and lock provides per-record
// transaction locks.
type Storage struct {
muWAL sync.Mutex
muDB sync.RWMutex
dbPath string
tmpPath string
wal *os.File
db map[string]Record
lock *Locker
}
// NewStorage wires a Storage around an already-open WAL file. dbPath is
// the checkpoint location and tmpPath the scratch file used while
// checkpointing.
func NewStorage(wal *os.File, dbPath, tmpPath string) *Storage {
	s := &Storage{
		dbPath:  dbPath,
		tmpPath: tmpPath,
		wal:     wal,
		db:      map[string]Record{},
		lock:    NewLocker(),
	}
	return s
}
// ApplyLogs replays a committed batch of record logs into the in-memory
// db under the write lock. Replay is idempotent so the same batch can be
// applied again during recovery.
func (s *Storage) ApplyLogs(logs []RecordLog) {
	s.muDB.Lock()
	defer s.muDB.Unlock()
	// TODO: optimize when duplicate keys in logs
	for _, rlog := range logs {
		switch rlog.Action {
		case LInsert:
			s.db[rlog.Key] = rlog.Record
		case LUpdate:
			// reuse Key string in db and Key in rlog will be GCed.
			r, ok := s.db[rlog.Key]
			if !ok {
				// record in db may be sometimes deleted. complete with rlog.Key for idempotency.
				r.Key = rlog.Key
			}
			r.Value = rlog.Value
			s.db[r.Key] = r
		case LDelete:
			delete(s.db, rlog.Key)
		}
	}
}
// SaveWAL appends logs plus a trailing commit entry to the WAL and fsyncs,
// making the transaction durable. muWAL keeps concurrent committers from
// interleaving their entries.
func (s *Storage) SaveWAL(logs []RecordLog) error {
	// prevent parallel WAL writing by unexpected context switch
	s.muWAL.Lock()
	defer s.muWAL.Unlock()
	var (
		// NOTE(review): i is never advanced, so buf[i:] is always the whole
		// buffer — looks like a leftover from a batching attempt.
		i   int
		buf [4096]byte
	)
	for _, rlog := range logs {
		n, err := rlog.Serialize(buf[i:])
		if err == ErrBufferShort {
			// TODO: use writev
			return err
		} else if err != nil {
			return err
		}
		// TODO: delay write and combine multi log into one buffer
		_, err = s.wal.Write(buf[:n])
		if err != nil {
			return err
		}
	}
	// write commit log
	n, err := (&RecordLog{Action: LCommit}).Serialize(buf[:])
	if err != nil {
		// commit log serialization must not fail
		log.Panic(err)
	}
	_, err = s.wal.Write(buf[:n])
	if err != nil {
		return err
	}
	// sync this transaction
	err = s.wal.Sync()
	if err != nil {
		return err
	}
	return nil
}
// LoadWAL replays the WAL from the start: data logs are buffered until a
// commit applies them (an abort discards them). It returns the number of
// logs read so the caller can tell whether recovery took place.
func (s *Storage) LoadWAL() (int, error) {
	if _, err := s.wal.Seek(0, io.SeekStart); err != nil {
		return 0, err
	}
	var (
		logs  []RecordLog
		buf   [4096]byte
		head  int
		size  int
		nlogs int
	)
	// redo all record logs in WAL file
	for {
		var rlog RecordLog
		n, err := rlog.Deserialize(buf[head:size])
		if err == ErrBufferShort {
			// compact: move unparsed data to the front of the buffer
			copy(buf[:], buf[head:size])
			size -= head
			// BUG FIX: head must be reset after compaction — the unparsed
			// data now starts at offset 0. Without this, the next
			// Deserialize read from a stale offset once more than one
			// buffer of WAL had been consumed.
			head = 0
			if size == 4096 {
				// buffer size (4096) is too short for this log
				// TODO: allocate and read directly to db buffer
				return 0, err
			}
			// read more log data to buffer
			n, err = s.wal.Read(buf[size:])
			size += n
			if err == io.EOF {
				break
			} else if err != nil {
				return 0, err
			}
			continue
		} else if err != nil {
			return 0, err
		}
		head += n
		nlogs++
		switch rlog.Action {
		case LInsert, LUpdate, LDelete:
			// append log
			logs = append(logs, rlog)
		case LCommit:
			// redo record logs
			s.ApplyLogs(logs)
			// clear logs
			logs = nil
		case LAbort:
			// clear logs
			logs = nil
		default:
			// skip
		}
	}
	return nlogs, nil
}
// ClearWAL rewinds and truncates the WAL, then fsyncs because it is not
// obvious that ftruncate(2) alone makes the change durable.
func (s *Storage) ClearWAL() error {
	if _, err := s.wal.Seek(0, io.SeekStart); err != nil {
		return err
	}
	if err := s.wal.Truncate(0); err != nil {
		return err
	}
	return s.wal.Sync()
}
// SaveCheckPoint atomically persists the in-memory db: it writes a
// temporary file (4-byte big-endian record count, then serialized
// records), fsyncs, and renames it over the data file. On failure the
// temporary file is removed and the write error is returned.
func (s *Storage) SaveCheckPoint() error {
	// create temporary checkout file
	f, err := os.Create(s.tmpPath)
	if err != nil {
		return err
	}
	defer f.Close()
	var buf [4096]byte
	// write header
	binary.BigEndian.PutUint32(buf[:4], uint32(len(s.db)))
	_, err = f.Write(buf[:4])
	if err != nil {
		goto ERROR
	}
	// write all data
	for _, r := range s.db {
		// FIXME: key order in map will be randomized
		// BUG FIX: assign with = (not :=) so the ERROR path returns the
		// real error; the shadowed loop-local err previously made
		// serialize/write failures return nil to the caller.
		var n int
		n, err = r.Serialize(buf[:])
		if err != nil {
			// TODO: use writev when the record exceeds the buffer
			goto ERROR
		}
		// TODO: delay write and combine multi log into one buffer
		_, err = f.Write(buf[:n])
		if err != nil {
			goto ERROR
		}
	}
	if err = f.Sync(); err != nil {
		goto ERROR
	}
	// swap dbfile and temporary file
	err = os.Rename(s.tmpPath, s.dbPath)
	if err != nil {
		goto ERROR
	}
	return nil
ERROR:
	if rerr := os.Remove(s.tmpPath); rerr != nil {
		log.Println("failed to remove temporary file for checkpoint :", rerr)
	}
	return err
}
// LoadCheckPoint loads the checkpoint file into the in-memory db. The
// file holds a 4-byte big-endian record count followed by serialized
// records; both the count and the total size are validated afterwards.
func (s *Storage) LoadCheckPoint() error {
	f, err := os.Open(s.dbPath)
	if err != nil {
		return err
	}
	defer f.Close()
	var buf [4096]byte
	// read and parse header
	n, err := f.Read(buf[:])
	if err != nil {
		return err
	} else if n < 4 {
		return fmt.Errorf("file header size is too short : %v", n)
	}
	total := binary.BigEndian.Uint32(buf[:4])
	if total == 0 {
		if n == 4 {
			return nil
		} else {
			return fmt.Errorf("total is 0. but db file have some data")
		}
	}
	var (
		head   = 4
		size   = n
		loaded uint32
	)
	// read all data
	for {
		var r Record
		n, err = r.Deserialize(buf[head:size])
		if err == ErrBufferShort {
			if size-head == 4096 {
				// buffer size (4096) is too short for this log
				// TODO: allocate and read directly to db buffer
				return err
			}
			// move data to head
			copy(buf[:], buf[head:size])
			size -= head
			// BUG FIX: reset head after compaction — unparsed data now
			// starts at offset 0. Without this, multi-buffer checkpoint
			// files were parsed from a stale offset and the size/loaded
			// validation below reported spurious corruption.
			head = 0
			// read more log data to buffer
			n, err = f.Read(buf[size:])
			size += n
			if err == io.EOF {
				break
			} else if err != nil {
				return err
			}
			continue
		} else if err != nil {
			return err
		}
		// set data
		s.db[r.Key] = r
		loaded++
		head += n
		if loaded > total {
			// records in checkpoint file is more than specified in header
			break
		}
	}
	if loaded != total {
		return fmt.Errorf("db file is broken : total %v records but actually %v records", total, loaded)
	} else if size != 0 {
		return fmt.Errorf("db file is broken : file size is larger than expected")
	}
	return nil
}
// Txn is a strict-2PL transaction: logs buffers pending writes, readSet
// caches records read under shared locks (nil marks a known-missing key),
// and writeSet maps each written key to its latest log index.
type Txn struct {
s *Storage
logs []RecordLog
readSet map[string]*Record
writeSet map[string]int
}
// NewTxn starts an empty transaction over s with fresh read/write sets.
func (s *Storage) NewTxn() *Txn {
	txn := Txn{
		s:        s,
		readSet:  map[string]*Record{},
		writeSet: map[string]int{},
	}
	return &txn
}
// Read returns the value of key as seen by this transaction: the cached
// readSet first, then pending writes, then the db (taking the record read
// lock, which is held until Commit/Abort). ErrNotExist is returned for
// missing or deleted keys.
func (txn *Txn) Read(key string) ([]byte, error) {
	if r, ok := txn.readSet[key]; ok {
		// nil entry caches a confirmed miss
		if r == nil {
			return nil, ErrNotExist
		}
		return r.Value, nil
	} else if idx, ok := txn.writeSet[key]; ok {
		// read own pending write
		rec := txn.logs[idx]
		if rec.Action == LDelete {
			return nil, ErrNotExist
		}
		return rec.Value, nil
	}
	// read lock
	txn.s.lock.RLock(key)
	txn.s.muDB.RLock()
	r, ok := txn.s.db[key]
	txn.s.muDB.RUnlock()
	if !ok {
		txn.readSet[key] = nil
		return nil, ErrNotExist
	}
	txn.readSet[r.Key] = &r
	return r.Value, nil
}
// clone returns an independent copy of v so later mutation by the caller
// cannot leak into the transaction log (and vice versa). A nil input
// yields an empty, non-nil slice.
// TODO: support NULL value
func clone(v []byte) []byte {
	dup := make([]byte, len(v))
	copy(dup, v)
	return dup
}
// ensureNotExist check readSet and writeSet step by step that there IS NOT the record.
// This method is used by Insert. On success it returns the canonical key
// string to store (possibly reallocated so the txn does not retain the
// caller's backing memory) with the write lock held; otherwise ErrExist
// or ErrDeadLock.
func (txn *Txn) ensureNotExist(key string) (string, error) {
	if r, ok := txn.readSet[key]; ok {
		if r != nil {
			return "", ErrExist
		}
		// reallocate string
		key = string(key)
		// promote the read lock taken by Read to a write lock
		if !txn.s.lock.Upgrade(key) {
			return "", ErrDeadLock
		}
		// move record from readSet to writeSet
		delete(txn.readSet, key)
	} else if idx, ok := txn.writeSet[key]; ok {
		rec := txn.logs[idx]
		// only a pending delete makes a re-insert legal
		if rec.Action != LDelete {
			return "", ErrExist
		}
		// reuse key in writeSet
		key = rec.Key
	} else {
		// lock record
		txn.s.lock.Lock(key)
		// reallocate string
		key = string(key)
		// check that the key not exists in db
		txn.s.muDB.RLock()
		r, ok := txn.s.db[key]
		txn.s.muDB.RUnlock()
		if ok {
			// found: cache it and drop back to a read lock
			txn.readSet[key] = &r
			txn.s.lock.Downgrade(key)
			return "", ErrExist
		}
	}
	return key, nil
}
// ensureExist check readSet and writeSet step by step that there IS the record.
// This method is used by Update, Delete. On success it returns the
// canonical key with the write lock held; otherwise ErrNotExist or
// ErrDeadLock.
func (txn *Txn) ensureExist(key string) (newKey string, err error) {
	if r, ok := txn.readSet[key]; ok {
		// nil entry caches a confirmed miss
		if r == nil {
			return "", ErrNotExist
		}
		// reuse key in readSet
		key = r.Key
		// promote the read lock taken by Read to a write lock
		if !txn.s.lock.Upgrade(key) {
			return "", ErrDeadLock
		}
		// move record from readSet to writeSet
		delete(txn.readSet, key)
	} else if idx, ok := txn.writeSet[key]; ok {
		rec := txn.logs[idx]
		if rec.Action == LDelete {
			return "", ErrNotExist
		}
		// reuse key in writeSet
		key = rec.Key
	} else {
		// lock record
		txn.s.lock.Lock(key)
		// check that the key exists in db
		txn.s.muDB.RLock()
		r, ok := txn.s.db[key]
		txn.s.muDB.RUnlock()
		if !ok {
			// miss: cache it and drop back to a read lock
			key = string(key)
			txn.readSet[key] = nil
			txn.s.lock.Downgrade(key)
			return "", ErrNotExist
		}
		// reuse key in db
		key = r.Key
	}
	return key, nil
}
// Insert stages an insert of key/value into the transaction. It returns
// ErrExist when the key is already visible to this txn, or ErrDeadLock
// when promoting a read lock would deadlock.
func (txn *Txn) Insert(key string, value []byte) error {
	newKey, err := txn.ensureNotExist(key)
	if err != nil {
		return err
	}
	// copy the value so the caller cannot mutate it after this call
	rlog := RecordLog{
		Action: LInsert,
		Record: Record{Key: newKey, Value: clone(value)},
	}
	txn.logs = append(txn.logs, rlog)
	// remember which log entry is the latest state for this key
	txn.writeSet[newKey] = len(txn.logs) - 1
	return nil
}
// Update stages an overwrite of key with value. It returns ErrNotExist
// when the key is not visible to this txn, or ErrDeadLock when promoting
// a read lock would deadlock.
func (txn *Txn) Update(key string, value []byte) error {
	newKey, err := txn.ensureExist(key)
	if err != nil {
		return err
	}
	// copy the value so the caller cannot mutate it after this call
	rlog := RecordLog{
		Action: LUpdate,
		Record: Record{Key: newKey, Value: clone(value)},
	}
	txn.logs = append(txn.logs, rlog)
	// remember which log entry is the latest state for this key
	txn.writeSet[newKey] = len(txn.logs) - 1
	return nil
}
// Delete stages a removal of key. It returns ErrNotExist when the key is
// not visible to this txn, or ErrDeadLock when promoting a read lock
// would deadlock.
func (txn *Txn) Delete(key string) error {
	newKey, err := txn.ensureExist(key)
	if err != nil {
		return err
	}
	rlog := RecordLog{
		Action: LDelete,
		Record: Record{Key: newKey},
	}
	txn.logs = append(txn.logs, rlog)
	// remember which log entry is the latest state for this key
	txn.writeSet[newKey] = len(txn.logs) - 1
	return nil
}
// Commit makes the transaction durable: read locks are released first
// (strict 2PL allows dropping shared locks before the commit point), the
// logs are fsynced to the WAL, applied to the in-memory db, and finally
// the write locks are released. On WAL failure the txn keeps its write
// locks and pending logs.
func (txn *Txn) Commit() error {
	// cleanup readSet before save WAL (S2PL)
	for key := range txn.readSet {
		txn.s.lock.RUnlock(key)
		delete(txn.readSet, key)
	}
	err := txn.s.SaveWAL(txn.logs)
	if err != nil {
		return err
	}
	// write back writeSet to db (in memory)
	txn.s.ApplyLogs(txn.logs)
	// cleanup writeSet
	for key := range txn.writeSet {
		txn.s.lock.Unlock(key)
		delete(txn.writeSet, key)
	}
	// clear logs
	// TODO: clear all key and value pointer and reuse logs memory
	txn.logs = nil
	return nil
}
// Abort discards all pending changes and releases every lock held by the
// transaction, leaving it reusable for new operations.
func (txn *Txn) Abort() {
	for key := range txn.readSet {
		delete(txn.readSet, key)
		txn.s.lock.RUnlock(key)
	}
	for key := range txn.writeSet {
		delete(txn.writeSet, key)
		txn.s.lock.Unlock(key)
	}
	txn.logs = nil
}
// HandleTxn runs a line-oriented REPL (insert/update/delete/read/commit/
// abort/keys/quit) over one transaction, reading commands from r and
// writing results to w. When closeOnExit is set (TCP connections) it
// closes w and signals wg on return; for the stdio handler both are
// skipped and wg may be nil.
func HandleTxn(r io.Reader, w io.WriteCloser, txn *Txn, storage *Storage, closeOnExit bool, wg *sync.WaitGroup) error {
	if closeOnExit {
		defer w.Close()
		defer wg.Done()
	}
	reader := bufio.NewReader(r)
	for {
		fmt.Fprintf(w, ">> ")
		txt, err := reader.ReadString('\n')
		if err != nil {
			fmt.Fprintf(w, "failed to read command : %v\n", err)
			return err
		}
		txt = strings.TrimSpace(txt)
		cmd := strings.Split(txt, " ")
		if len(cmd) == 0 || len(cmd[0]) == 0 {
			continue
		}
		switch strings.ToLower(cmd[0]) {
		case "insert":
			if len(cmd) != 3 {
				fmt.Fprintf(w, "invalid command : insert <key> <value>\n")
			} else if err = txn.Insert(cmd[1], []byte(cmd[2])); err != nil {
				fmt.Fprintf(w, "failed to insert : %v\n", err)
			} else {
				fmt.Fprintf(w, "success to insert %q\n", cmd[1])
			}
		case "update":
			if len(cmd) != 3 {
				fmt.Fprintf(w, "invalid command : update <key> <value>\n")
			} else if err = txn.Update(cmd[1], []byte(cmd[2])); err != nil {
				fmt.Fprintf(w, "failed to update : %v\n", err)
			} else {
				fmt.Fprintf(w, "success to update %q\n", cmd[1])
			}
		case "delete":
			if len(cmd) != 2 {
				fmt.Fprintf(w, "invalid command : delete <key>\n")
			} else if err = txn.Delete(cmd[1]); err != nil {
				fmt.Fprintf(w, "failed to delete : %v\n", err)
			} else {
				fmt.Fprintf(w, "success to delete %q\n", cmd[1])
			}
		case "read":
			if len(cmd) != 2 {
				fmt.Fprintf(w, "invalid command : read <key>\n")
			} else if v, err := txn.Read(cmd[1]); err != nil {
				fmt.Fprintf(w, "failed to read : %v\n", err)
			} else {
				fmt.Fprintf(w, "%v\n", string(v))
			}
		case "commit":
			if len(cmd) != 1 {
				fmt.Fprintf(w, "invalid command : commit\n")
			} else if err = txn.Commit(); err != nil {
				fmt.Fprintf(w, "failed to commit : %v\n", err)
			} else {
				fmt.Fprintf(w, "committed\n")
			}
		case "abort":
			if len(cmd) != 1 {
				fmt.Fprintf(w, "invalid command : abort\n")
			} else {
				txn.Abort()
				fmt.Fprintf(w, "aborted\n")
			}
		case "keys":
			if len(cmd) != 1 {
				fmt.Fprintf(w, "invalid command : keys\n")
			} else {
				fmt.Fprintf(w, ">>> show keys commited <<<\n")
				// NOTE(review): storage.db is iterated without holding
				// storage.muDB — a concurrent commit can race this read.
				for k, _ := range storage.db {
					fmt.Fprintf(w, "%s\n", k)
				}
			}
		case "quit", "exit", "q":
			fmt.Fprintf(w, "byebye\n")
			txn.Abort()
			return nil
		default:
			fmt.Fprintf(w, "invalid command : not supported\n")
		}
	}
}
// main recovers state from the checkpoint and WAL, serves transactions on
// stdin or TCP, and checkpoints again on shutdown.
func main() {
	walPath := flag.String("wal", "./txngo.log", "file path of WAL file")
	dbPath := flag.String("db", "./txngo.db", "file path of data file")
	isInit := flag.Bool("init", true, "create data file if not exist")
	tcpaddr := flag.String("tcp", "", "tcp handler address (e.g. localhost:3000)")
	flag.Parse()
	wal, err := os.OpenFile(*walPath, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0600)
	if err != nil {
		log.Panic(err)
	}
	defer wal.Close()
	storage := NewStorage(wal, *dbPath, *dbPath+".tmp")
	log.Println("loading data file...")
	if err = storage.LoadCheckPoint(); os.IsNotExist(err) && *isInit {
		log.Println("db file is not found. this is initial start.")
	} else if err != nil {
		log.Printf("failed to load data file : %v\n", err)
		return
	}
	log.Println("loading WAL file...")
	if nlogs, err := storage.LoadWAL(); err != nil {
		log.Printf("failed to load WAL file : %v\n", err)
		return
	} else if nlogs != 0 {
		// a non-empty WAL means the previous run crashed before its
		// shutdown checkpoint: persist the replayed state now
		log.Println("previous shutdown is not success...")
		log.Println("update data file...")
		if err = storage.SaveCheckPoint(); err != nil {
			log.Printf("failed to save checkpoint %v\n", err)
			return
		}
		log.Println("clear WAL file...")
		if err = storage.ClearWAL(); err != nil {
			log.Printf("failed to clear WAL file %v\n", err)
			return
		}
	}
	log.Println("start transactions")
	if *tcpaddr == "" {
		// stdio handler
		txn := storage.NewTxn()
		err = HandleTxn(os.Stdin, os.Stdout, txn, storage, false, nil)
		if err != nil {
			log.Println("failed to handle", err)
		}
		log.Println("shutdown...")
	} else {
		// tcp handler
		l, err := net.Listen("tcp", *tcpaddr)
		if err != nil {
			log.Println("failed to listen tcp :", err)
			return
		}
		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				conn, err := l.Accept()
				if err != nil {
					log.Println("failed to accept tcp :", err)
					break
				}
				log.Println("accept new conn :", conn.RemoteAddr())
				txn := storage.NewTxn()
				wg.Add(1)
				go HandleTxn(conn, conn, txn, storage, true, &wg)
			}
		}()
		signal.Reset()
		// BUG FIX: signal.Notify delivers without blocking, so an
		// unbuffered channel can miss the signal entirely (go vet flags
		// this); a one-slot buffer guarantees delivery.
		chsig := make(chan os.Signal, 1)
		signal.Notify(chsig, os.Interrupt)
		<-chsig
		log.Println("shutdown...")
		l.Close()
		chDone := make(chan struct{})
		go func() {
			wg.Wait()
			chDone <- struct{}{}
		}()
		// give open connections 30s to finish before forcing exit
		select {
		case <-time.After(30 * time.Second):
			log.Println("connection not quit. shutdown forcibly.")
			return
		case <-chDone:
		}
	}
	log.Println("save checkpoint")
	if err = storage.SaveCheckPoint(); err != nil {
		log.Printf("failed to save data file : %v\n", err)
	} else if err = storage.ClearWAL(); err != nil {
		log.Printf("failed to clear WAL file : %v\n", err)
	} else {
		log.Println("success to save data")
	}
}
|
package main
import (
"github.com/deluan/bring"
"github.com/faiface/pixel/pixelgl"
)
var (
// keys maps special pixelgl buttons to the bring key codes sent to the
// remote desktop; populated once in init.
keys map[pixelgl.Button]bring.KeyCode
)
// collectKeyStrokes gathers this frame's key events from win as bring key
// codes. Special keys come from the keys table; with Ctrl/Alt held,
// printable ASCII is sampled from raw button state (win.Typed is empty
// then); otherwise typed characters are reported as press+release pairs.
// (pixelgl keyboard event handling is messy, hence the two paths.)
func collectKeyStrokes(win *pixelgl.Window) (pressed []bring.KeyCode, released []bring.KeyCode) {
	for k, v := range keys {
		key := v
		if win.JustPressed(k) || win.Repeated(k) {
			pressed = append(pressed, key)
		}
		if win.JustReleased(k) {
			released = append(released, key)
		}
	}
	controlPressed := win.Pressed(pixelgl.KeyLeftControl) || win.Pressed(pixelgl.KeyRightControl) ||
		win.Pressed(pixelgl.KeyLeftAlt) || win.Pressed(pixelgl.KeyRightAlt)
	if controlPressed {
		shiftPressed := win.Pressed(pixelgl.KeyLeftShift) || win.Pressed(pixelgl.KeyRightShift)
		// scan printable ASCII (32..126)
		for ch := 32; ch < 127; ch++ {
			isLetter := ch >= int('A') && ch <= int('Z')
			key := ch
			// +32 converts an ASCII uppercase letter to lowercase when
			// shift is not held (button codes use the uppercase letter)
			if isLetter && !shiftPressed {
				key = ch + 32
			}
			if win.JustPressed(pixelgl.Button(ch)) || win.Repeated(pixelgl.Button(ch)) {
				pressed = append(pressed, bring.KeyCode(key))
			}
			if win.JustReleased(pixelgl.Button(ch)) {
				released = append(released, bring.KeyCode(key))
			}
		}
	} else {
		for _, ch := range win.Typed() {
			pressed = append(pressed, bring.KeyCode(int(ch)))
			released = append(released, bring.KeyCode(int(ch)))
		}
	}
	return
}
// init builds the one-time mapping from pixelgl special buttons to bring
// key codes used by collectKeyStrokes.
func init() {
	keys = map[pixelgl.Button]bring.KeyCode{
		pixelgl.KeyLeftAlt:      bring.KeyLeftAlt,
		pixelgl.KeyRightAlt:     bring.KeyRightAlt,
		pixelgl.KeyLeftControl:  bring.KeyLeftControl,
		pixelgl.KeyRightControl: bring.KeyRightControl,
		pixelgl.KeyLeftShift:    bring.KeyLeftShift,
		pixelgl.KeyRightShift:   bring.KeyRightShift,
		pixelgl.KeyBackspace:    bring.KeyBackspace,
		pixelgl.KeyCapsLock:     bring.KeyCapsLock,
		pixelgl.KeyDelete:       bring.KeyDelete,
		pixelgl.KeyDown:         bring.KeyDown,
		pixelgl.KeyEnd:          bring.KeyEnd,
		pixelgl.KeyEnter:        bring.KeyEnter,
		pixelgl.KeyEscape:       bring.KeyEscape,
		pixelgl.KeyF1:           bring.KeyF1,
		pixelgl.KeyF2:           bring.KeyF2,
		pixelgl.KeyF3:           bring.KeyF3,
		pixelgl.KeyF4:           bring.KeyF4,
		pixelgl.KeyF5:           bring.KeyF5,
		pixelgl.KeyF6:           bring.KeyF6,
		pixelgl.KeyF7:           bring.KeyF7,
		pixelgl.KeyF8:           bring.KeyF8,
		pixelgl.KeyF9:           bring.KeyF9,
		pixelgl.KeyF10:          bring.KeyF10,
		pixelgl.KeyF11:          bring.KeyF11,
		pixelgl.KeyF12:          bring.KeyF12,
		pixelgl.KeyF13:          bring.KeyF13,
		pixelgl.KeyF14:          bring.KeyF14,
		pixelgl.KeyF15:          bring.KeyF15,
		pixelgl.KeyF16:          bring.KeyF16,
		pixelgl.KeyF17:          bring.KeyF17,
		pixelgl.KeyF18:          bring.KeyF18,
		pixelgl.KeyF19:          bring.KeyF19,
		pixelgl.KeyF20:          bring.KeyF20,
		pixelgl.KeyF21:          bring.KeyF21,
		pixelgl.KeyF22:          bring.KeyF22,
		pixelgl.KeyF23:          bring.KeyF23,
		pixelgl.KeyF24:          bring.KeyF24,
		pixelgl.KeyHome:         bring.KeyHome,
		pixelgl.KeyInsert:       bring.KeyInsert,
		pixelgl.KeyLeft:         bring.KeyLeft,
		pixelgl.KeyNumLock:      bring.KeyNumLock,
		pixelgl.KeyPageDown:     bring.KeyPageDown,
		pixelgl.KeyPageUp:       bring.KeyPageUp,
		pixelgl.KeyPause:        bring.KeyPause,
		pixelgl.KeyPrintScreen:  bring.KeyPrintScreen,
		pixelgl.KeyRight:        bring.KeyRight,
		pixelgl.KeyTab:          bring.KeyTab,
		pixelgl.KeyUp:           bring.KeyUp,
		// pixelgl.KeyMeta: bring.KeyMeta,
		// pixelgl.KeySuper: bring.KeySuper,
		// pixelgl.KeyWin: bring.KeyWin,
	}
}
|
package main
import "fmt"
// main seeds the first slot of three parallel student arrays and prints
// that student's name, age, and email.
func main() {
	var (
		studentName  [10]string
		studentAge   [10]int
		studentEmail [10]string
	)
	const first = 0
	studentName[first] = "Goku"
	studentAge[first] = 18
	studentEmail[first] = "Goku@super.saiya"
	fmt.Println(studentName[first], studentAge[first], studentEmail[first])
}
|
// Galang - Golang common utilities
// Copyright (c) 2020-present, gakkiiyomi@gamil.com
//
// gakkiyomi is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
// See the Mulan PSL v2 for more details.
package config
import (
"strings"
"testing"
)
// TestReadConfigFile loads the example config from disk and logs its
// string form.
func TestReadConfigFile(t *testing.T) {
	config, err := ReadConfigFile("./example.json")
	if err != nil {
		// Fatal, not Error: continuing with a nil config would nil-deref
		// on the ToString call below.
		t.Fatal(err.Error())
	}
	str, err := config.ToString()
	if err != nil {
		t.Fatal(err.Error())
	}
	t.Log(str)
}
// TestReadConfig parses an inline RPC config document and logs its string
// form.
func TestReadConfig(t *testing.T) {
	json := `{
		"rpcConfig":[
			{
				"path":"auth.RoleService",
				"host": "192.168.1.146:8187",
				"methods":["Create","Update","List","Delete","UpdateGroup"]
			},
			{
				"path":"auth.UserService",
				"host": "192.168.1.146:8187",
				"methods":["List","ListGroup"]
			},
			{
				"path":"cmdb.NodeService",
				"host": "192.168.1.146:8583",
				"methods":["Create","Update","List","Search","Delete","Get"]
			},
			{
				"path":"cmdb.ModelService",
				"host": "192.168.1.146:8583",
				"methods":["Create","Update","List","Delete","Get","GetByLabel","BindingProperty","DeleteBindingProperty","GetBindingProperties"]
			}
		]
	}
	`
	config, err := ReadConfig(strings.NewReader(json))
	if err != nil {
		// Fatal, not Error: continuing with a nil config would nil-deref
		// on the ToString call below.
		t.Fatal(err.Error())
	}
	str, err := config.ToString()
	if err != nil {
		t.Fatal(err.Error())
	}
	t.Log(str)
}
|
package main
// 改编自Twitter:
// https://github.com/twitter/snowflake/blob/snowflake-2010/src/main/scala/com/twitter/service/snowflake/IdWorker.scala
import (
"errors"
"sync"
"time"
)
// ID layout (64 bits), adapted from Twitter Snowflake:
// 0 - 00000000 00000000 00000000 00000000 00000000 0 - 00000000 00 - 00000000 0000
// 1 bit sign (always 0) | 41-bit ms timestamp | 10-bit machine id | 12-bit sequence
// 41 bits of millisecond offsets cover 2^41/(1000*60*60*24*365) ≈ 69.7 years;
// 10 bits give 1024 nodes (5-bit datacenterId + 5-bit workerId);
// 12 bits give 4096 ids per node per millisecond.
const (
// custom epoch in ms: 2006-01-02 15:04:05
epoch int64 = 1136214245000
// machine id field widths
workerIdBits uint = 5
datacenterIdBits uint = 5
// maximum field values
maxWorkerId int64 = -1 ^ (-1 << workerIdBits) // 31
maxDatacenterId int64 = -1 ^ (-1 << datacenterIdBits) // 31
// sequence field width
sequenceBits uint = 12
// maximum sequence value
//maxSequenceId int64 = -1 ^ (-1 << sequenceBits)
workerIdShift = sequenceBits //12
datacenterIdShift = sequenceBits + workerIdBits // 17
timestampLeftShift = sequenceBits + workerIdBits + datacenterIdBits // 22
sequenceMask = -1 ^ (-1 << sequenceBits) // 0x3ff 4095
)
// IdWorker generates Snowflake-style ids for one (datacenterId, workerId)
// node; mu serializes GenerateID so lastTimestamp/sequence stay coherent.
type IdWorker struct {
mu sync.Mutex
workerId int64
datacenterId int64
lastTimestamp int64
sequence int64
}
// NewIdWorker validates the worker and datacenter ids (0..31 each) and
// returns a generator with the last timestamp unset.
func NewIdWorker(workerId, datacenterId int64) (*IdWorker, error) {
	if workerId < 0 || workerId > maxWorkerId {
		return nil, errors.New("the workerId must between 0 and 31")
	}
	if datacenterId > maxDatacenterId || datacenterId < 0 {
		return nil, errors.New("the datacenterId must between 0 and 31")
	}
	return &IdWorker{
		workerId:      workerId,
		datacenterId:  datacenterId,
		lastTimestamp: -1,
	}, nil
}
// GenerateID returns the next unique id: 41-bit ms timestamp since epoch,
// then datacenter id, worker id, and the per-millisecond sequence. It
// fails if the clock moved backwards and spins to the next millisecond
// when a single millisecond's 4096 sequence values are exhausted.
func (node *IdWorker) GenerateID() (int64, error) {
	node.mu.Lock()
	defer node.mu.Unlock()
	// current time in ms
	now := time.Now().UnixNano() / 1000 / 1000
	if now < node.lastTimestamp {
		return 0, errors.New("error time")
	}
	if now == node.lastTimestamp {
		// same millisecond: advance the sequence, wrapping via the mask
		node.sequence = (node.sequence + 1) & sequenceMask // 1000000000000(4096) & 111111111111(4095) = 0
		// sequence exhausted for this ms (4096 ids): wait for the next ms
		if node.sequence == 0 {
			now = tilNextMillis(node.lastTimestamp)
		}
	} else {
		node.sequence = 0
	}
	node.lastTimestamp = now
	return (node.lastTimestamp-epoch)<<timestampLeftShift | node.datacenterId<<datacenterIdShift | node.workerId<<workerIdShift | node.sequence, nil
}
// tilNextMillis busy-waits until the wall clock advances strictly past
// lastTimestamp (in ms) and returns the new millisecond timestamp.
func tilNextMillis(lastTimestamp int64) int64 {
	now := time.Now().UnixNano() / 1000 / 1000
	for now <= lastTimestamp {
		now = time.Now().UnixNano() / 1000 / 1000
	}
	return now
}
|
// stream_check project m3u8Info.go
package m3u8
import (
"product_code/check_stream/public"
"strconv"
"strings"
"time"
)
// Download states for a single ts segment.
const (
	TsStatusDefault     = 0 // not yet scheduled
	TsStatusDownloading = 1 // fetch in progress
	TsStatusOk          = 2 // downloaded successfully
	TsStatusFail        = 3 // download failed
)
//#EXT-X-PROGRAM-DATE-TIME:2018-09-04T18:49:17Z
//#EXTINF:4.000,4.000000000 1212000
//269-8550-7020591.ts
//ts info
// TsInfo describes one ts segment parsed from a media playlist, e.g.:
//
//	#EXT-X-PROGRAM-DATE-TIME:2018-09-04T18:49:17Z
//	#EXTINF:4.000,4.000000000 1212000
//	269-8550-7020591.ts
type TsInfo struct {
	M3u8Name string
	ExtX     string
	ExtInf   string
	Name     string
	Duration int64 // milliseconds
	Status   int
}

// NewTs returns a segment record bound to the playlist it came from.
func NewTs(m3u8Name string) *TsInfo {
	ts := new(TsInfo)
	ts.M3u8Name = m3u8Name
	return ts
}

// M3u8Info holds a parsed playlist: a master playlist fills M3u8Array
// with variant names, a media playlist fills TsArray with segments.
type M3u8Info struct {
	Raw       string
	Name      string
	Version   string
	Sequence  int
	TsArray   []*TsInfo
	M3u8Array []string
}

// NewM3u8 returns an empty playlist with the given name.
func NewM3u8(name string) *M3u8Info {
	info := new(M3u8Info)
	info.Name = name
	return info
}
// Parse fills the playlist from the raw m3u8 text.
//
// It extracts EXT-X-VERSION and EXT-X-MEDIA-SEQUENCE, one TsInfo per
// #EXTINF entry (attaching the preceding #EXT-X-PROGRAM-DATE-TIME line
// when present and the following line as the segment name), and the
// variant playlist names that follow #EXT-X-STREAM-INF lines.
//
// Fixes over the previous version: `lines[i+1]` is now bounds-checked
// (a playlist ending on #EXTINF or #EXT-X-STREAM-INF used to panic),
// and an #EXTINF line without a comma no longer slices with index -1.
func (m3u8Info *M3u8Info) Parse(m3u8String string) {
	m3u8Info.Raw = m3u8String
	lines := public.SplitLine(m3u8String)
	for i := range lines {
		line := lines[i]
		//version
		{
			index := strings.LastIndex(line, "#EXT-X-VERSION:")
			if index >= 0 {
				m3u8Info.Version = line[index+len("#EXT-X-VERSION:"):]
			}
		}
		//sequence
		{
			index := strings.LastIndex(line, "#EXT-X-MEDIA-SEQUENCE:")
			if index >= 0 {
				m3u8Info.Sequence, _ = strconv.Atoi(line[index+len("#EXT-X-MEDIA-SEQUENCE:"):])
			}
		}
		//ts
		{
			index := strings.LastIndex(line, "#EXTINF:")
			if index >= 0 {
				start := index + len("#EXTINF:")
				commaIndex := strings.LastIndex(line, ",")
				// Duration before the comma, optional title after it.
				var durStr, title string
				if commaIndex >= start {
					durStr = line[start:commaIndex]
					title = line[commaIndex+1:]
				} else {
					durStr = line[start:]
				}
				d, _ := strconv.ParseFloat(durStr, 32)
				duration := int64(d * 1000)
				// A leading float in the title overrides the duration
				// (see the "#EXTINF:4.000,4.000000000 1212000" form).
				fields := strings.Fields(title)
				if len(fields) > 0 {
					if d, err := strconv.ParseFloat(fields[0], 32); err == nil {
						duration = int64(d * 1000)
					}
				}
				tsInfo := NewTs(m3u8Info.Name)
				if i > 0 {
					extx := lines[i-1]
					if strings.LastIndex(extx, "#EXT-X-PROGRAM-DATE-TIME") >= 0 {
						tsInfo.ExtX = extx
					}
				}
				tsInfo.ExtInf = line
				if i+1 < len(lines) { // segment name is on the next line
					tsInfo.Name = lines[i+1]
				}
				tsInfo.Duration = duration
				m3u8Info.TsArray = append(m3u8Info.TsArray, tsInfo)
			}
		}
		//m3u8Info
		{
			index := strings.LastIndex(line, "#EXT-X-STREAM-INF:")
			if index >= 0 && i+1 < len(lines) {
				m3u8Info.M3u8Array = append(m3u8Info.M3u8Array, lines[i+1])
			}
		}
	}
}
// IsTop reports whether this is a master playlist (it lists variants).
func (m3u8Info *M3u8Info) IsTop() bool {
	return len(m3u8Info.M3u8Array) != 0
}

// IsSecond reports whether this is a media playlist (it lists segments).
func (m3u8Info *M3u8Info) IsSecond() bool {
	return len(m3u8Info.TsArray) != 0
}
// GetTsLocalFile returns the local filesystem path for a ts segment.
//
// Segment names without a directory component are grouped under a folder
// named after the current date (YYYYMMDD); names that already contain a
// "/" are placed directly below localPath. The previous version built
// "localPath//tsName" (empty date folder, double slash) in that case.
func GetTsLocalFile(localPath, tsName string) string {
	if strings.Contains(tsName, "/") {
		return localPath + "/" + tsName
	}
	return localPath + "/" + time.Now().Format("20060102") + "/" + tsName
}
|
package limits
import (
"sync/atomic"
"unsafe"
)
// Lhits is a package-level counter; presumably counts limit-hit events —
// TODO confirm against the callers that increment it (not visible here).
var Lhits int

// Sysatomic_t is an int64 counter that is mutated atomically; see the
// Given/Taken/Take/Give methods.
type Sysatomic_t int64

// Syslimit_t bundles the system-wide resource limits.
type Syslimit_t struct {
	// protected by proclock
	Sysprocs int
	// protected by idmonl lock
	Vnodes int
	// protected by _allfutex lock
	Futexes int
	// protected by arptbl lock
	Arpents int
	// protected by routetbl lock
	Routes int
	// per TCP socket tx/rx segments to remember
	Tcpsegs int
	// socks includes pipes and all TCP connections in TIMEWAIT.
	Socks Sysatomic_t
	// total cached dirents
	// total pipes
	Pipes Sysatomic_t
	// additional memory filesystem per-page objects; each file gets one
	// freebie.
	Mfspgs Sysatomic_t
	// shared buffer space
	//shared Sysatomic_t
	// bdev blocks
	Blocks int
}
// Syslimit holds the active limits, initialized to the defaults below.
var Syslimit *Syslimit_t = MkSysLimit()

// MkSysLimit returns a Syslimit_t populated with the default limits.
func MkSysLimit() *Syslimit_t {
	l := new(Syslimit_t)
	l.Sysprocs = 1e4
	l.Futexes = 1024
	l.Arpents = 1024
	l.Routes = 32
	l.Tcpsegs = 16
	l.Socks = 1e5
	l.Vnodes = 20000 // 1e6,
	l.Pipes = 1e4
	// 8GB of block pages
	l.Blocks = 100000 // 1 << 21,
	return l
}
// _aptr reinterprets the counter as a *int64 for use with sync/atomic.
func (s *Sysatomic_t) _aptr() *int64 {
	return (*int64)(unsafe.Pointer(s))
}
// Given returns _n units to the counter.
func (s *Sysatomic_t) Given(_n uint) {
	n := int64(_n)
	// Guards against a uint large enough to wrap negative in int64.
	if n < 0 {
		panic("too mighty")
	}
	atomic.AddInt64(s._aptr(), n)
}

// Taken attempts to reserve _n units; it reports false — and restores the
// counter — when the reservation would drive it below zero.
func (s *Sysatomic_t) Taken(_n uint) bool {
	n := int64(_n)
	if n < 0 {
		panic("too mighty")
	}
	if atomic.AddInt64(s._aptr(), -n) >= 0 {
		return true
	}
	// Went negative: undo the reservation.
	atomic.AddInt64(s._aptr(), n)
	return false
}
// Take reserves a single unit; returns false if the limit has been reached.
func (s *Sysatomic_t) Take() bool {
	return s.Taken(1)
}

// Give returns a single unit previously reserved via Take.
func (s *Sysatomic_t) Give() {
	s.Given(1)
}
|
package zerointerface
// Describer is implemented by types that can describe themselves.
type Describer interface {
	Describe()
}
|
//go:build !go1.18
// +build !go1.18
package gflag_test
// any is an alias of interface{}, used for Go toolchains older than 1.18
// (which added `any` as a predeclared alias). Guarded by the !go1.18
// build constraint above.
type any = interface{}
|
package klusterlet
import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"time"

	utilrand "k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/klog/v2"

	"github.com/mdelder/failover/pkg/helpers"
	"github.com/open-cluster-management/registration/pkg/spoke/hubclientcert"
	"github.com/openshift/library-go/pkg/controller/controllercmd"
	"github.com/openshift/library-go/pkg/operator/events"
	"github.com/spf13/pflag"
)
const (
	// agentNameLength is the length of the spoke agent name which is generated automatically.
	agentNameLength = 5
	// defaultSpokeComponentNamespace is the default namespace in which the spoke agent is deployed
	// when the in-cluster namespace file cannot be read (see Complete).
	defaultSpokeComponentNamespace = "open-cluster-management"
)
// FailoverAgentOptions holds configuration for spoke cluster agent
type FailoverAgentOptions struct {
	ComponentNamespace       string
	ClusterName              string
	AgentName                string
	BootstrapKubeconfig      string
	HubKubeconfigSecret      string
	HubKubeconfigDir         string
	SpokeExternalServerURLs  []string
	ClusterHealthCheckPeriod time.Duration
	MaxCustomClusterClaims   int
}

// NewFailoverAgentOptions returns a FailoverAgentOptions carrying the
// default secret name, kubeconfig mount path, health-check period and
// cluster-claim cap; everything else starts at its zero value.
func NewFailoverAgentOptions() *FailoverAgentOptions {
	opts := new(FailoverAgentOptions)
	opts.HubKubeconfigSecret = "hub-kubeconfig-secret"
	opts.HubKubeconfigDir = "/spoke/hub-kubeconfig"
	opts.ClusterHealthCheckPeriod = time.Minute
	opts.MaxCustomClusterClaims = 20
	return opts
}
// RunSpokeAgent starts the controllers on spoke agent to register to the hub.
//
// The spoke agent uses three kubeconfigs for different concerns:
// - The 'spoke' kubeconfig: used to communicate with the spoke cluster where
// the agent is running.
// - The 'bootstrap' kubeconfig: used to communicate with the hub in order to
// submit a CertificateSigningRequest, begin the join flow with the hub, and
// to write the 'hub' kubeconfig.
// - The 'hub' kubeconfig: used to communicate with the hub using a signed
// certificate from the hub.
//
// RunSpokeAgent handles the following scenarios:
// #1. Bootstrap kubeconfig is valid and there is no valid hub kubeconfig in secret
// #2. Both bootstrap kubeconfig and hub kubeconfig are valid
// #3. Bootstrap kubeconfig is invalid (e.g. certificate expired) and hub kubeconfig is valid
// #4. Neither bootstrap kubeconfig nor hub kubeconfig is valid
//
// A temporary ClientCertForHubController with bootstrap kubeconfig is created
// and started if the hub kubeconfig does not exist or is invalid and used to
// create a valid hub kubeconfig. Once the hub kubeconfig is valid, the
// temporary controller is stopped and the main controllers are started.
//
// NOTE(review): the comment above describes the full registration flow, but
// this implementation only runs the hub-kubeconfig secret controller and then
// blocks on ctx — confirm whether the remaining controllers are intentional
// future work.
func (o *FailoverAgentOptions) RunFailoverAgent(ctx context.Context, controllerContext *controllercmd.ControllerContext) error {
	klog.Infof("Let's get to work!")
	// // create kube client
	agentKubeClient, err := kubernetes.NewForConfig(controllerContext.KubeConfig)
	if err != nil {
		return err
	}
	// Complete/Validate abort the process on failure (klog.Fatal).
	if err := o.Complete(agentKubeClient.CoreV1(), ctx, controllerContext.EventRecorder); err != nil {
		klog.Fatal(err)
	}
	if err := o.Validate(); err != nil {
		klog.Fatal(err)
	}
	klog.Infof("Cluster name is %q and agent name is %q", o.ClusterName, o.AgentName)
	// // create shared informer factory for spoke cluster
	// agentKubeInformerFactory := informers.NewSharedInformerFactory(agentKubeClient, 10*time.Minute)
	namespacedAgentKubeInformerFactory := informers.NewSharedInformerFactoryWithOptions(agentKubeClient, 10*time.Minute, informers.WithNamespace(o.ComponentNamespace))
	// Keeps the on-disk hub kubeconfig in sync with the secret.
	hubKubeconfigSecretController := hubclientcert.NewHubKubeconfigSecretController(
		o.HubKubeconfigDir, o.ComponentNamespace, o.HubKubeconfigSecret,
		agentKubeClient.CoreV1(),
		namespacedAgentKubeInformerFactory.Core().V1().Secrets(),
		controllerContext.EventRecorder,
	)
	go hubKubeconfigSecretController.Run(ctx, 1)
	// Block until the context is cancelled.
	<-ctx.Done()
	return nil
}
// AddFlags registers flags for Agent. The current option values are used
// as flag defaults, so call this after NewFailoverAgentOptions.
func (o *FailoverAgentOptions) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&o.ClusterName, "cluster-name", o.ClusterName,
		"If non-empty, will use as cluster name instead of generated random name.")
	fs.StringVar(&o.BootstrapKubeconfig, "bootstrap-kubeconfig", o.BootstrapKubeconfig,
		"The path of the kubeconfig file for agent bootstrap.")
	fs.StringVar(&o.HubKubeconfigSecret, "hub-kubeconfig-secret", o.HubKubeconfigSecret,
		"The name of secret in component namespace storing kubeconfig for hub.")
	fs.StringVar(&o.HubKubeconfigDir, "hub-kubeconfig-dir", o.HubKubeconfigDir,
		"The mount path of hub-kubeconfig-secret in the container.")
	fs.StringArrayVar(&o.SpokeExternalServerURLs, "spoke-external-server-urls", o.SpokeExternalServerURLs,
		"A list of reachable spoke cluster api server URLs for hub cluster.")
	fs.DurationVar(&o.ClusterHealthCheckPeriod, "cluster-healthcheck-period", o.ClusterHealthCheckPeriod,
		"The period to check managed cluster kube-apiserver health")
	fs.IntVar(&o.MaxCustomClusterClaims, "max-custom-cluster-claims", o.MaxCustomClusterClaims,
		"The max number of custom cluster claims to expose.")
}
// Validate verifies the inputs. It requires a bootstrap kubeconfig,
// non-empty cluster/agent names (normally filled in by Complete), HTTPS
// spoke external server URLs when any are given, and a positive
// health-check period.
func (o *FailoverAgentOptions) Validate() error {
	if o.BootstrapKubeconfig == "" {
		return errors.New("bootstrap-kubeconfig is required")
	}
	if o.ClusterName == "" {
		return errors.New("cluster name is empty")
	}
	if o.AgentName == "" {
		return errors.New("agent name is empty")
	}
	// if SpokeExternalServerURLs is specified we validate every URL in it, we expect the spoke external server URL is https
	if len(o.SpokeExternalServerURLs) != 0 {
		for _, serverURL := range o.SpokeExternalServerURLs {
			if !helpers.IsValidHTTPSURL(serverURL) {
				// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028).
				return fmt.Errorf("%q is invalid", serverURL)
			}
		}
	}
	if o.ClusterHealthCheckPeriod <= 0 {
		return errors.New("cluster healthcheck period must greater than zero")
	}
	return nil
}
// Complete fills in missing values.
//
// The component namespace is read from the in-cluster service-account
// mount, falling back to defaultSpokeComponentNamespace when the file is
// unreadable (e.g. running outside a pod). coreV1Client, ctx and recorder
// are currently unused — the secret dump they served is commented out.
func (o *FailoverAgentOptions) Complete(coreV1Client corev1client.CoreV1Interface, ctx context.Context, recorder events.Recorder) error {
	// get component namespace of spoke agent
	// NOTE(review): ioutil.ReadFile is deprecated since Go 1.16; os.ReadFile
	// is the drop-in replacement (left as-is here).
	nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
	if err != nil {
		o.ComponentNamespace = defaultSpokeComponentNamespace
	} else {
		o.ComponentNamespace = string(nsBytes)
	}
	// // dump data in hub kubeconfig secret into file system if it exists
	// err = hubclientcert.DumpSecret(coreV1Client, o.ComponentNamespace, o.HubKubeconfigSecret,
	// o.HubKubeconfigDir, ctx, recorder)
	// if err != nil {
	// return err
	// }
	// load or generate cluster/agent names
	o.ClusterName, o.AgentName = o.getOrGenerateClusterAgentNames()
	return nil
}
// getOrGenerateClusterAgentNames returns cluster name and agent name.
// Rules for picking up cluster name:
// 1. Use cluster name from input arguments if 'cluster-name' is specified;
// 2. Parse cluster name from the common name of the certification subject if the certification exists;
// 3. Fallback to cluster name in the mounted secret if it exists;
// 4. TODO: Read cluster name from openshift struct if the agent is running in an openshift cluster;
// 5. Generate a random cluster name then;
// Rules for picking up agent name:
// 1. Parse agent name from the common name of the certification subject if the certification exists;
// 2. Fallback to agent name in the mounted secret if it exists;
// 3. Generate a random agent name then;
//
// NOTE(review): only rules 1 and 5 (cluster) and 3 (agent) are implemented;
// the certificate/secret fallbacks above are still TODO.
func (o *FailoverAgentOptions) getOrGenerateClusterAgentNames() (string, string) {
	// Honor an explicitly supplied --cluster-name (rule 1); the previous
	// version unconditionally overwrote it with a random UUID.
	clusterName := o.ClusterName
	if clusterName == "" {
		clusterName = generateClusterName()
	}
	// generate random agent name
	agentName := generateAgentName()
	return clusterName, agentName
}
// generateClusterName generates a name for spoke cluster (a random UUID).
func generateClusterName() string {
	return string(uuid.NewUUID())
}

// generateAgentName generates a random name for spoke cluster agent
// (agentNameLength random lowercase alphanumeric characters).
func generateAgentName() string {
	return utilrand.String(agentNameLength)
}
|
package 数组
import "sort"
// ------------------------ sorting solution ------------------------

// canMakeArithmeticProgression reports whether arr can be reordered into
// an arithmetic progression. It sorts arr in place and then checks that
// consecutive elements share one common difference.
// Time complexity: O(n * log n).
func canMakeArithmeticProgression(arr []int) bool {
	sort.Ints(arr)
	return isArithmeticProgression(arr)
}

// isArithmeticProgression reports whether arr, in its current order, has
// a constant difference between consecutive elements. Slices of length
// 0 or 1 are trivially arithmetic.
func isArithmeticProgression(arr []int) bool {
	if len(arr) <= 1 {
		return true
	}
	step := arr[1] - arr[0]
	for i := 1; i+1 < len(arr); i++ {
		if arr[i+1]-arr[i] != step {
			return false
		}
	}
	return true
}
/*
题目链接: https://leetcode-cn.com/problems/can-make-arithmetic-progression-from-sequence/
*/
|
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package benchstat
import (
"fmt"
"io"
"unicode/utf8"
)
// FormatText appends a fixed-width text formatting of the tables to w.
//
// Column widths (in runes) are computed across ALL tables first, so
// every table lines up; single-column rows are treated as group headers
// and excluded from the width calculation.
func FormatText(w io.Writer, tables []*Table) {
	var textTables [][]*textRow
	for _, t := range tables {
		textTables = append(textTables, toText(t))
	}
	// Pass 1: find the max rune width of each column over every table.
	var max []int
	for _, table := range textTables {
		for _, row := range table {
			if len(row.cols) == 1 {
				// Header row
				continue
			}
			for len(max) < len(row.cols) {
				max = append(max, 0)
			}
			for i, s := range row.cols {
				n := utf8.RuneCountInString(s)
				if max[i] < n {
					max[i] = n
				}
			}
		}
	}
	// Pass 2: print, blank line between tables.
	for ti, table := range textTables {
		if ti > 0 {
			fmt.Fprintf(w, "\n")
		}
		// headings
		row := table[0]
		for i, s := range row.cols {
			switch i {
			case 0:
				fmt.Fprintf(w, "%-*s", max[i], s)
			default:
				fmt.Fprintf(w, " %-*s", max[i], s)
			}
		}
		fmt.Fprintln(w, "")
		// data
		for _, row := range table[1:] {
			for i, s := range row.cols {
				switch {
				case len(row.cols) == 1:
					// Single statistics
					fmt.Fprint(w, s)
				case i == 0:
					// Test name
					fmt.Fprintf(w, "%-*s", max[i], s)
				default:
					// Is this a delta, or text?
					isnote := tables[ti].OldNewDelta && ((len(row.cols) > 5 && i%3 == 0) || i == len(row.cols)-1)
					if isnote {
						// Left-align notes
						fmt.Fprintf(w, " %-*s", max[i], s)
						break
					}
					fmt.Fprintf(w, " %*s", max[i], s)
				}
			}
			fmt.Fprintf(w, "\n")
		}
	}
}
// A textRow is a row of printed text columns.
type textRow struct {
	cols []string
}

// newTextRow builds a row from the given column values.
func newTextRow(cols ...string) *textRow {
	r := new(textRow)
	r.cols = cols
	return r
}

// add appends a single column to the row.
func (r *textRow) add(col string) {
	r.cols = append(r.cols, col)
}

// trim drops trailing empty columns.
func (r *textRow) trim() {
	n := len(r.cols)
	for n > 0 && r.cols[n-1] == "" {
		n--
	}
	r.cols = r.cols[:n]
}
// toText converts the Table to a textual grid of cells,
// which can then be printed in fixed-width output.
//
// The heading shape depends on the number of configs: 1 config → name +
// metric; 2 configs → old/new/delta; more → one column per config, with
// delta/note columns appended when OldNewDelta is set. Rows belonging to
// a new group are preceded by a single-column group-header row.
func toText(t *Table) []*textRow {
	var textRows []*textRow
	switch len(t.Configs) {
	case 1:
		textRows = append(textRows, newTextRow("name", t.Metric))
	case 2:
		textRows = append(textRows, newTextRow("name", "old "+t.Metric, "new "+t.Metric, "delta"))
	default:
		// headings: name \ metric, then one column per config
		row := newTextRow("name \\ " + t.Metric)
		for _, config := range t.Configs {
			row.cols = append(row.cols, config)
			if t.OldNewDelta {
				row.cols = append(row.cols, "delta", "note")
			}
		}
		textRows = append(textRows, row)
	}
	var group string
	for _, row := range t.Rows {
		if row.Group != group {
			// Emit a single-column header row when the group changes.
			group = row.Group
			textRows = append(textRows, newTextRow(group))
		}
		text := newTextRow(row.Benchmark)
		for i, m := range row.Metrics {
			text.cols = append(text.cols, m.Format(row.Scaler))
			if t.OldNewDelta && (len(row.Metrics) > 2 || i > 0) {
				delta := row.Deltas[i]
				if delta == "~" {
					// Pad "~" so it right-aligns like numeric deltas.
					delta = "~ "
				}
				text.cols = append(text.cols, delta)
				text.cols = append(text.cols, row.Notes[i])
			}
		}
		textRows = append(textRows, text)
	}
	for _, r := range textRows {
		r.trim()
	}
	return textRows
}
|
package goSolution
// minDistance returns the minimum number of single-character deletions
// (applied to either string) required to make word1 and word2 equal
// (LeetCode 583). It computes the longest common subsequence via dynamic
// programming and returns len(word1) + len(word2) - 2*LCS.
func minDistance(word1 string, word2 string) int {
	n, m := len(word1), len(word2)
	// lcs[i][j] = LCS length of word1[:i] and word2[:j].
	lcs := make([][]int, n+1)
	for i := range lcs {
		lcs[i] = make([]int, m+1)
	}
	for i := 1; i <= n; i++ {
		for j := 1; j <= m; j++ {
			lcs[i][j] = max(lcs[i-1][j], lcs[i][j-1])
			if word1[i-1] == word2[j-1] {
				lcs[i][j] = max(lcs[i][j], lcs[i-1][j-1]+1)
			}
		}
	}
	return n + m - (lcs[n][m] << 1)
}
package slice_1
import "testing"
// TestSolve drives Solve with table-driven cases. From the expectations,
// Solve(str, left) presumably returns the index of the bracket matching
// the one at position left, or -1 when left does not point at an opening
// bracket — TODO confirm against Solve's definition (not in this file).
func TestSolve(t *testing.T) {
	arr := []struct {
		str      string
		left     int
		expected int
	}{
		{"((1)23(45))(aB)", 0, 10},
		{"((1)23(45))(aB)", 1, 3},
		{"((1)23(45))(aB)", 2, -1},
		{"((1)23(45))(aB)", 6, 9},
		{"((1)23(45))(aB)", 11, 14},
		{"((>)|?(*'))(yZ)", 11, 14},
	}
	for _, s := range arr {
		actual := Solve(s.str, s.left)
		if actual != s.expected {
			t.Errorf("slice_1_test has error")
		}
	}
}
|
package slicerdicer
import (
"image"
"testing"
)
var (
testRect = image.Rect(0, 0, 1000, 1000)
testImage = image.NewRGBA(testRect)
)
// assert fails the test when val differs from expected. Both sides are
// compared as interface values, so operands must be comparable and of
// the same dynamic type.
func assert(t *testing.T, val, expected interface{}) {
	t.Helper()
	if val != expected {
		t.Errorf("value (%+v) was not like expected (%+v)", val, expected)
	}
}
// TestCrop exercises Crop's error cases (origin outside the image,
// non-positive or overflowing dimensions) and one successful crop whose
// bounds are checked.
func TestCrop(t *testing.T) {
	// x beyond the 1000px image width.
	_, err := Crop(testImage, 1005, 0, 1, 1)
	if err != ErrPointOutOfBounds {
		t.Error("should have returned ErrOutOfBounds, but was ", err)
	}
	// negative y origin.
	_, err = Crop(testImage, 0, -1, 1, 1)
	if err != ErrPointOutOfBounds {
		t.Error("should have returned ErrOutOfBounds, but was ", err)
	}
	// zero width.
	_, err = Crop(testImage, 0, 0, 0, 1)
	if err != ErrDimensionOutOfBounds {
		t.Error("should have returned ErrDimensionOutOfBounds, but was ", err)
	}
	// 100 + 950 exceeds the image height.
	_, err = Crop(testImage, 0, 100, 1, 950)
	if err != ErrDimensionOutOfBounds {
		t.Error("should have returned ErrDimensionOutOfBounds, but was ", err)
	}
	// Valid crop: result must be exactly 200x250.
	res, err := Crop(testImage, 50, 50, 200, 250)
	if err != nil {
		t.Error(err)
	}
	bounds := res.Bounds()
	assert(t, bounds.Dx(), 200)
	assert(t, bounds.Dy(), 250)
}
// TestSlice checks that Slice cuts the 1000x1000 test image into a 5x5
// grid whose tiles all have equal dimensions.
func TestSlice(t *testing.T) {
	const slices = 5
	res, err := Slice(testImage, slices)
	if err != nil {
		t.Error(err)
	}
	if len(res) != 5 {
		t.Errorf("array size was %d instead of 5", len(res))
	}
	// Every tile should be 1000/5 = 200 px on each side.
	expectedSizeX := testRect.Dx() / slices
	expectedSizeY := testRect.Dy() / slices
	for _, row := range res {
		if len(row) != 5 {
			t.Errorf("row size was %d instead of 5", len(row))
		}
		for _, slice := range row {
			bounds := slice.Bounds()
			assert(t, bounds.Dx(), expectedSizeX)
			assert(t, bounds.Dy(), expectedSizeY)
		}
	}
}
|
package model
import (
"github.com/astaxie/beego/orm"
)
// UserModel maps a row of the "user" table (see TableName).
type UserModel struct {
	Id       int64  `json:"id" orm:"column(id);pk;auto;unique"`
	Phone    string `json:"phone" orm:"column(phone);unique;size(11)"`
	Nickname string `json:"nickname" orm:"column(nickname);unique;size(40);"`
	Password string `json:"-" orm:"column(password);size(40)"`
	Created  int    `json:"created" orm:"column(created)"`
	Updated  int    `json:"-" orm:"column(updated)"`
}

// TableName tells the ORM which table this model is stored in.
func (u *UserModel) TableName() string {
	return "user"
}
// Insert persists usr through the "default" database alias, returning
// any error from the underlying ORM.
//
// The previous version overwrote usr.Phone with a hard-coded value
// ("129379") before saving — apparently debug leftover — clobbering the
// caller's data on every insert.
func (usr *UserModel) Insert() error {
	if _, err := Select("default").Insert(usr); err != nil {
		return err
	}
	return nil
}
// Read loads the matching record from the database into usr; fields, if
// given, restricts the lookup to those columns.
func (usr *UserModel) Read(fields ...string) error {
	return orm.NewOrm().Read(usr, fields...)
}
|
package main
import (
"crypto/tls"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"net"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"gonum.org/v1/gonum/mat"
"gonum.org/v1/gonum/optimize"
)
// `http-max-rps` is designed to tell you the maximum rps that
// either an http server or an intermediary can provide. It does
// this using the Universal Scalability Law.
//
// Thanks to @brendantracey for the go playground snippet least squared regression
// code that I borrowed verbatim.
func main() {
	var (
		address           = flag.String("address", "http://localhost:4140", "URL of http server or intermediary")
		host              = flag.String("host", "", "value of Host header to set")
		concurrencyLevels = flag.String("concurrencyLevels", "1,5,10,20,30", "levels of concurrency to test with")
		timePerLevel      = flag.Duration("timePerLevel", 1*time.Second, "how much time to spend testing each concurrency level")
		debug             = flag.Bool("debug", false, "print out some extra information for debugging")
	)
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s [flags]\n", path.Base(os.Args[0]))
		flag.PrintDefaults()
	}
	flag.Parse()
	if *timePerLevel < time.Second {
		log.Fatalf("timePerLevel cannot be less than 1 second.")
	}
	// Run a load test at each requested concurrency level, collecting
	// (level, throughput) pairs as a flat row-major slice for mat.NewDense.
	levels := strings.Split(*concurrencyLevels, ",")
	var denseLatency [](float64)
	for _, l := range levels {
		level, err := strconv.Atoi(l)
		if err != nil {
			log.Fatalf("unknown concurrency level: %s, %s", l, err)
		}
		throughput := runLoadTests(address, host, level, timePerLevel)
		if *debug {
			fmt.Printf("%d %d\n", level, throughput)
		}
		denseLatency = append(denseLatency, float64(level))
		denseLatency = append(denseLatency, float64(throughput))
	}
	latency := mat.NewDense(len(denseLatency)/2, 2, denseLatency)
	concurrency := mat.Col(nil, 0, latency)
	throughput := mat.Col(nil, 1, latency)
	// `f` and `grad` were borrowed from https://play.golang.org/p/wWUH4E5LhP
	// f is the least-squares objective: sum of squared differences between
	// measured throughput and the USL prediction.
	f := func(x []float64) float64 {
		sigma, kappa, lambda := optvarsToGreek(x)
		var mismatch float64
		for i, N := range concurrency {
			pred := concurrencyToThroughput(N, sigma, kappa, lambda)
			truth := throughput[i]
			mismatch += (pred - truth) * (pred - truth)
		}
		return mismatch
	}
	// grad is the analytic gradient of f via the chain rule.
	grad := func(grad, x []float64) {
		for i := range grad {
			grad[i] = 0
		}
		sigma, kappa, lambda := optvarsToGreek(x)
		dSigmaDX, dKappaDX, dLambdaDX := optvarsToGreekDeriv(x)
		for i, N := range concurrency {
			pred := concurrencyToThroughput(N, sigma, kappa, lambda)
			truth := throughput[i]
			dMismatchDPred := 2 * (pred - truth)
			dPredDSigma, dPredDKappa, dPredDLambda := concurrencyToThroughputDeriv(N, sigma, kappa, lambda)
			grad[0] += dMismatchDPred * dPredDSigma * dSigmaDX
			grad[1] += dMismatchDPred * dPredDKappa * dKappaDX
			grad[2] += dMismatchDPred * dPredDLambda * dLambdaDX
		}
	}
	problem := optimize.Problem{
		Func: f,
		Grad: grad,
	}
	settings := optimize.DefaultSettings()
	settings.GradientThreshold = 1e-2 // Looser tolerance because using FD derivative
	initX := []float64{0, -1, -3}     // make sure they all start positive
	result, err := optimize.Local(problem, initX, nil, nil)
	if err != nil {
		fmt.Println("Optimization error:", err)
	}
	sigmaOpt, kappaOpt, lambdaOpt := optvarsToGreek(result.X)
	fmt.Println("sigma (the overhead of contention): ", sigmaOpt)
	fmt.Println("kappa (the overhead of crosstalk): ", kappaOpt)
	fmt.Println("lambda (unloaded performance): ", lambdaOpt)
	if *debug {
		for i, v := range throughput {
			N := concurrency[i]
			pred := concurrencyToThroughput(N, sigmaOpt, kappaOpt, lambdaOpt)
			fmt.Println("true", v, "pred", pred)
		}
	}
	// The USL predicts peak throughput at N* = sqrt((1-sigma)/kappa).
	maxConcurrency := math.Floor(math.Sqrt((1 - sigmaOpt) / kappaOpt))
	fmt.Printf("maxConcurrency: %f\n", maxConcurrency)
	maxRps := throughputAtConcurrency(float64(maxConcurrency), kappaOpt, lambdaOpt, sigmaOpt)
	fmt.Printf("maxRps: %f\n", maxRps)
}
// exUsage prints a formatted message plus a help hint to stderr, then
// exits with status 64 (BSD EX_USAGE).
func exUsage(msg string, args ...interface{}) {
	formatted := fmt.Sprintf(msg, args...)
	fmt.Fprintln(os.Stderr, formatted)
	fmt.Fprintln(os.Stderr, "Try --help for help.")
	os.Exit(64)
}
// throughputAtConcurrency evaluates the Universal Scalability Law:
// X(n) = lambda*n / (1 + sigma*(n-1) + kappa*n*(n-1)).
func throughputAtConcurrency(n, kappa, lambda, sigma float64) float64 {
	contention := sigma * (n - 1)
	crosstalk := kappa * n * (n - 1)
	return (lambda * n) / (1 + contention + crosstalk)
}
// These math functions were borrowed from https://play.golang.org/p/wWUH4E5LhP

// optvarsToGreek maps the optimizer's unconstrained variables onto the
// strictly-positive USL parameters via exponentiation.
func optvarsToGreek(x []float64) (sigma, kappa, lambda float64) {
	sigma = math.Exp(x[0])
	kappa = math.Exp(x[1])
	lambda = math.Exp(x[2])
	return
}

// optvarsToGreekDeriv gives d(greek)/d(x); since greek = exp(x), each
// derivative equals the value itself.
func optvarsToGreekDeriv(x []float64) (dSigmaDX, dKappaDX, dLambdaDX float64) {
	dSigmaDX = math.Exp(x[0])
	dKappaDX = math.Exp(x[1])
	dLambdaDX = math.Exp(x[2])
	return
}
// concurrencyToThroughput is the USL model: predicted throughput at the
// given concurrency for parameters sigma, kappa, lambda.
func concurrencyToThroughput(concurrency, sigma, kappa, lambda float64) float64 {
	N := concurrency
	denom := 1 + sigma*(N-1) + kappa*N*(N-1)
	return lambda * N / denom
}
// concurrencyToThroughputDeriv returns the partial derivatives of the USL
// throughput X(N) = lambda*N / (1 + sigma*(N-1) + kappa*N*(N-1)) with
// respect to sigma, kappa and lambda (quotient rule).
func concurrencyToThroughputDeriv(concurrency, sigma, kappa, lambda float64) (dSigma, dKappa, dLambda float64) {
	N := concurrency
	num := lambda * N
	denom := 1 + sigma*(N-1) + kappa*N*(N-1)
	// Shared factor -num/denom^2 from differentiating the denominator.
	scale := -(num / (denom * denom))
	dSigma = scale * (N - 1)
	dKappa = scale * (N - 1) * N
	dLambda = N / denom
	return dSigma, dKappa, dLambda
}
// chansToSlice drains each channel in cs and records the LAST value
// received from cs[i] at index i of the returned slice (length size).
// Every channel must eventually be closed or this blocks forever.
func chansToSlice(cs []<-chan int, size int) []int {
	out := make([]int, size)
	for i, ch := range cs {
		for v := range ch {
			out[i] = v
		}
	}
	return out
}
// newClient builds an http.Client tuned for load generation:
// compress enables gzip, https disables TLS verification (load-test
// targets commonly use self-signed certs), noreuse turns off keep-alive,
// and maxConn bounds the idle connections kept per host. All timeouts
// are fixed: 5s dial, 5s TLS handshake, 10s per request.
func newClient(
	compress bool,
	https bool,
	noreuse bool,
	maxConn int,
) *http.Client {
	tr := http.Transport{
		DisableCompression:  !compress,
		DisableKeepAlives:   noreuse,
		MaxIdleConnsPerHost: maxConn,
		Proxy:               http.ProxyFromEnvironment,
		// DialContext replaces the deprecated Transport.Dial field.
		DialContext: (&net.Dialer{
			Timeout: 5 * time.Second,
		}).DialContext,
		TLSHandshakeTimeout: 5 * time.Second,
	}
	if https {
		tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
	}
	return &http.Client{
		Timeout:   10 * time.Second,
		Transport: &tr,
	}
}
// sendRequest issues one GET to url (optionally overriding the Host
// header) and drains the response body through bodyBuffer so keep-alive
// connections can be reused. It returns the request-construction or
// transport error, if any.
func sendRequest(
	client *http.Client,
	url *url.URL,
	host *string,
	bodyBuffer []byte,
) error {
	req, err := http.NewRequest("GET", url.String(), nil)
	if err != nil {
		// The previous version set req.Close before this check and then
		// fell through to client.Do, dereferencing a nil req on error.
		fmt.Fprintln(os.Stderr, err.Error())
		fmt.Fprintf(os.Stderr, "\n")
		return err
	}
	req.Close = false
	if *host != "" {
		req.Host = *host
	}
	response, err := client.Do(req)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	// Drain the body fully so the transport can reuse the connection.
	io.CopyBuffer(ioutil.Discard, response.Body, bodyBuffer)
	return nil
}
// Runs a single load test, returns how many requests were sent in a second.
//
// The returned channel receives exactly one value — this worker's
// requests-per-second over timePerLevel — and is then closed. startWg
// gates the start so all workers begin at roughly the same moment, and
// wg is signalled when the goroutine exits.
func runLoadTest(client *http.Client, destURL *url.URL, host *string, wg *sync.WaitGroup, startWg *sync.WaitGroup, timePerLevel *time.Duration) <-chan int {
	out := make(chan int, 1)
	// Scratch buffer reused for draining response bodies.
	bodyBuffer := make([]byte, 50000)
	go func() {
		defer wg.Done()
		// Roughly synchronize the start of all our load test goroutines
		startWg.Wait()
		start := time.Now()
		requests := 0
		for ; time.Now().Sub(start) <= *timePerLevel; requests++ {
			err := sendRequest(client, destURL, host, bodyBuffer)
			if err != nil {
				// TODO: have an err channel so we can report the # of errs
				log.Printf("Error issuing request %v", err)
				continue
			}
		}
		// Normalize the raw count to requests per second.
		rps := requests / int(timePerLevel.Seconds())
		out <- rps
		close(out)
	}()
	return out
}
// returns how many requests were sent in one second at concurrencyLevel
//
// It spawns concurrencyLevel workers sharing one client, releases them
// simultaneously via startWg, waits for completion, and sums each
// worker's per-second request count.
func runLoadTests(address *string, host *string, concurrencyLevel int, timePerLevel *time.Duration) int {
	// FIXME: wire these options through flags if needed or remove.
	client := newClient(false, false, false, concurrencyLevel)
	destURL, err := url.Parse(*address)
	if err != nil {
		exUsage("invalid URL: '%s': %s\n", address, err.Error())
	}
	var wg sync.WaitGroup
	var startWg sync.WaitGroup
	// a slice of channels containing throughput per goroutine
	var requests []<-chan int
	startWg.Add(1)
	wg.Add(concurrencyLevel)
	for i := 0; i < concurrencyLevel; i++ {
		// NOTE(review): err here is the url.Parse error already handled
		// above; this re-check inside the loop looks like dead code.
		if err != nil {
			log.Fatalf("did not connect: %v", err)
		}
		request := runLoadTest(client, destURL, host, &wg, &startWg, timePerLevel)
		requests = append(requests, request)
	}
	// Release all workers at once, then wait for them to finish.
	startWg.Done()
	wg.Wait()
	requestsPerWorker := chansToSlice(requests, concurrencyLevel)
	totalRequests := 0
	for _, requests := range requestsPerWorker {
		totalRequests += requests
	}
	return totalRequests
}
|
package controllers
import (
"github.com/astaxie/beego"
"gowechatsubscribe/models"
"strconv"
"gowechatsubscribe/dblite"
)
// PoetryController serves the admin poetry-detail page and tag edits.
type PoetryController struct {
	beego.Controller
}
// Get renders the poetry-detail admin page. With op=deltag it instead
// removes a tag from the poetry entry and redirects back. Requires a
// logged-in session; otherwise redirects to the login page.
func (c *PoetryController) Get() {
	login := checkAccount(c.Ctx)
	c.Data["IsLogin"] = login
	if !login {
		c.Redirect("/mis/login", 302)
		return
	}
	op := c.Input().Get("op")
	switch op {
	case "deltag":
		// Delete a tag association, then bounce back to the detail page.
		id := c.Input().Get("id")
		poetryId := c.Input().Get("poetry_id")
		beego.Debug("delete poetry tag", id)
		if len(id) == 0 {
			break
		}
		tid, err := strconv.ParseInt(id, 10, 32)
		if err != nil {
			beego.Error(err)
		}
		err = models.DelPoetryTag(int(tid))
		if err != nil {
			beego.Error(err)
		}
		c.Redirect("/mis/poetry/"+poetryId, 301)
		return
	}
	// Default path: load the poetry entry and its tag state for display.
	id := c.Ctx.Input.Param(":id")
	beego.Info("poetryId:", id)
	c.Data["PoetryId"] = id
	pid, err := strconv.ParseInt(id, 10, 64)
	if err != nil {
		beego.Error(err)
	}
	poetry, err := models.GetPoetry(pid)
	if err != nil {
		beego.Error(err)
	}
	poetryTags, err := models.GetPoetryTagState(pid)
	if err != nil {
		beego.Error(err)
	}
	if poetry != nil {
		// Convert stored line breaks into HTML for the template.
		poetry.Content = dblite.RenderContent(poetry.Content, "<br/>")
	}
	c.Data["PoetryTags"] = poetryTags
	c.Data["Poetry"] = poetry
	c.TplName = "poetry_view.html"
}
// Post handles the admin form that attaches a tag (with optional "best
// lines") to a poetry entry, then redirects back to the detail page.
// Requires a logged-in session.
//
// The previous version shadowed the first ParseInt error with the second
// and never checked either, so malformed ids were silently passed to
// SetPoetryTag as zero values; parse failures now log and abort.
func (c *PoetryController) Post() {
	login := checkAccount(c.Ctx)
	c.Data["IsLogin"] = login
	if !login {
		c.Redirect("/mis/login", 302)
		return
	}
	// for add tag to poetry
	poetryId := c.Input().Get("poetry_id")
	tagId := c.Input().Get("tag_id")
	bestLines := c.Input().Get("best_lines")
	if len(poetryId) == 0 || len(tagId) == 0 {
		return
	}
	pid, err := strconv.ParseInt(poetryId, 10, 64)
	if err != nil {
		beego.Error(err)
		return
	}
	tid, err := strconv.ParseInt(tagId, 10, 32)
	if err != nil {
		beego.Error(err)
		return
	}
	if err = models.SetPoetryTag(pid, int(tid), bestLines); err != nil {
		beego.Error(err)
	}
	c.Redirect("/mis/poetry/"+poetryId, 301)
}
|
package iowriter
import (
"fmt"
"io"
"os"
"github.com/popodidi/log"
"github.com/popodidi/log/handlers"
"github.com/popodidi/log/handlers/codec"
)
// Config defines the writer handler config.
type Config struct {
	// Codec encodes each log entry to bytes; New falls back to
	// codec.Default(false) when nil.
	Codec handlers.Codec
	// Writer receives the encoded entries; New falls back to os.Stdout
	// when nil.
	Writer io.Writer
}
// Stdout returns a handler that encodes with default codec and writes to
// os.Stdout. color toggles colorized output in the codec.
func Stdout(color bool) log.CloseHandler {
	return &handler{
		Config: Config{
			Codec:  codec.Default(color),
			Writer: os.Stdout,
		},
	}
}
// New returns a writer handler with config. Unset fields fall back to
// defaults: os.Stdout for the writer and codec.Default(false) for the
// codec, with a notice emitted in either case.
func New(conf Config) log.CloseHandler {
	h := &handler{
		Config: conf,
	}
	if h.Writer == nil {
		fmt.Println("no writer found. use os.Stdout")
		h.Writer = os.Stdout
	}
	if h.Codec == nil {
		// Message typo fixed: was "not codec found".
		h.Writer.Write( // nolint: errcheck,gosec
			[]byte("no codec found. use codec.Default"))
		h.Codec = codec.Default(false)
	}
	return h
}
// Compile-time check that *handler satisfies log.CloseHandler.
var _ log.CloseHandler = (*handler)(nil)

// handler writes encoded log entries to the configured writer.
type handler struct {
	Config
}
// Handle encodes the entry and writes it to the configured writer.
// Write failures are reported to stdout rather than propagated, since
// the logging pipeline has nowhere to return an error.
func (h *handler) Handle(entry *log.Entry) {
	b := h.Codec.Encode(entry)
	_, err := h.Writer.Write(b)
	if err != nil {
		fmt.Println("Failed to write log to writer. err:", err)
	}
}
// Close closes the underlying writer when it implements io.Closer;
// otherwise it is a no-op.
func (h *handler) Close() error {
	closer, ok := h.Writer.(io.Closer)
	if !ok {
		return nil
	}
	return closer.Close()
}
|
package blob
import (
"io/ioutil"
"os"
"strings"
"github.com/iotaledger/wasp/packages/hashing"
"github.com/iotaledger/wasp/tools/wasp-cli/config"
"github.com/iotaledger/wasp/tools/wasp-cli/log"
"github.com/spf13/pflag"
)
// InitCommands registers the top-level "blob" command. flags is accepted
// to match the registration signature; no blob-specific flags exist.
func InitCommands(commands map[string]func([]string), flags *pflag.FlagSet) {
	commands["blob"] = blobCmd
}

// subcmds maps each blob subcommand name to its handler.
var subcmds = map[string]func([]string){
	"put": putBlobCmd,
	"get": getBlobCmd,
	"has": hasBlobCmd,
}
// blobCmd dispatches "blob <subcommand> ..." to the matching handler.
// Missing or unknown subcommands print the usage line — presumably
// log.Usage terminates the process, since execution falls through to a
// map/slice access otherwise; TODO confirm in the log package.
func blobCmd(args []string) {
	if len(args) < 1 {
		usage()
	}
	subcmd, ok := subcmds[args[0]]
	if !ok {
		usage()
	}
	subcmd(args[1:])
}
// usage prints the blob command synopsis listing all subcommands.
// NOTE(review): map iteration order is random, so the subcommand list
// appears in nondeterministic order; sort cmdNames for stable output.
func usage() {
	cmdNames := make([]string, 0)
	for k := range subcmds {
		cmdNames = append(cmdNames, k)
	}
	log.Usage("%s blob [%s]\n", os.Args[0], strings.Join(cmdNames, "|"))
}
// putBlobCmd uploads the contents of a local file to the node's blob
// store and prints the resulting hash. Errors abort via log.Check.
func putBlobCmd(args []string) {
	if len(args) != 1 {
		log.Usage("%s blob put <filename>\n", os.Args[0])
	}
	data, err := ioutil.ReadFile(args[0])
	log.Check(err)
	hash, err := config.WaspClient().PutBlob(data)
	log.Check(err)
	log.Printf("Blob uploaded. Hash: %s\n", hash)
}
// getBlobCmd fetches the blob with the given base58 hash and writes its
// raw bytes to stdout. Errors abort via log.Check.
func getBlobCmd(args []string) {
	if len(args) != 1 {
		log.Usage("%s blob get <hash>\n", os.Args[0])
	}
	hash, err := hashing.HashValueFromBase58(args[0])
	log.Check(err)
	data, err := config.WaspClient().GetBlob(hash)
	log.Check(err)
	_, err = os.Stdout.Write(data)
	log.Check(err)
}
// hasBlobCmd prints whether a blob with the given base58 hash exists in
// the node's blob store. Errors abort via log.Check.
func hasBlobCmd(args []string) {
	if len(args) != 1 {
		log.Usage("%s blob has <hash>\n", os.Args[0])
	}
	hash, err := hashing.HashValueFromBase58(args[0])
	log.Check(err)
	ok, err := config.WaspClient().HasBlob(hash)
	log.Check(err)
	log.Printf("%v", ok)
}
|
package main
import (
"manager"
"manager/stmanager"
"flag"
"fmt"
)
// main runs one of the stock-list jobs selected by the -t flag:
// 0 (default) fetches the complete stock list; 1 fetches the newly
// listed stocks from the Shanghai (SHSE) and Shenzhen (SZSE) exchanges.
// Any other value does nothing.
func main() {
	t := flag.Int("t", 0, "stock list type")
	flag.Parse()
	switch (*t) {
	case 0:
		fmt.Println("Get all the stock list")
		m := manager.NewStockListManager()
		m.Process()
		fmt.Println("Stock list run complete!")
	case 1:
		fmt.Println("Get new stock list")
		shm := stmanager.NewSHSEListManager()
		shm.Process()
		szm := stmanager.NewSZSEListManager()
		szm.Process()
		fmt.Println("New stock list run complete!")
	}
}
|
/*
Copyright 2019 Cornelius Weig.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"bytes"
"context"
"io"
"os"
"os/exec"
"strings"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// UpdateAndCleanUntracked will fetch origin and set HEAD to origin/HEAD
// and also will create a pristine working directory by removing
// untracked files and directories. When updateIndex is false it is a no-op.
func UpdateAndCleanUntracked(ctx context.Context, updateIndex bool, destinationPath string) error {
	if !updateIndex {
		logrus.Infof("Skipping index update")
		return nil
	}
	// Shallow-fetch master, hard-reset onto the upstream tip, then drop
	// everything untracked (including ignored files, -x).
	if err := git(ctx, destinationPath, "fetch", "origin", "master", "--verbose", "--depth", "1"); err != nil {
		return errors.Wrapf(err, "fetch index at %q failed", destinationPath)
	}
	if err := git(ctx, destinationPath, "reset", "--hard", "@{upstream}"); err != nil {
		return errors.Wrapf(err, "reset index at %q failed", destinationPath)
	}
	if err := git(ctx, destinationPath, "clean", "-xfd"); err != nil {
		return errors.Wrapf(err, "clean index at %q failed", destinationPath)
	}
	return nil
}
// git runs a git command in directory pwd, capturing combined output for the
// error message. At verbosity above Info the output is also mirrored to stderr.
func git(ctx context.Context, pwd string, args ...string) error {
	logrus.Infof("Going to run git %s", strings.Join(args, " "))
	var buf bytes.Buffer
	var sink io.Writer = &buf
	if logrus.GetLevel() > logrus.InfoLevel {
		sink = io.MultiWriter(sink, os.Stderr)
	}
	cmd := exec.CommandContext(ctx, "git", args...)
	cmd.Dir = pwd
	cmd.Stdout = sink
	cmd.Stderr = sink
	if err := cmd.Run(); err != nil {
		return errors.Wrapf(err, "command execution failure, output=%q", buf.String())
	}
	return nil
}
|
package davepdf
import "fmt"
// ImportPage imports page `pageno` from sourceFile via gofpdi, clipped to the
// given PDF box name (e.g. "/MediaBox" — gofpdi box naming, confirm against
// the fpdi docs), copies the imported PDF objects into this document, and
// returns the gofpdi template id for later use with UseImportedTemplate.
func (pdf *Pdf) ImportPage(sourceFile string, pageno int, box string) int {
	var tplid int
	pdf.fpdi.SetSourceFile(sourceFile)
	// Imported objects are numbered starting right after this document's
	// current highest object number pdf.n.
	pdf.fpdi.SetNextObjectID(pdf.n + 1)
	tplid = pdf.fpdi.ImportPage(pageno, box)
	// write imported objects: remember each template's object id so it can
	// be referenced from page resources later.
	for tplName, objId := range pdf.fpdi.PutFormXobjects() {
		pdf.tplObjIds[tplName] = objId
	}
	// write objects: gofpdi keys imported objects by object number; walk the
	// numbers reserved above and emit each non-empty body. Note pdf.n is
	// advanced by newObjId() inside the loop.
	objs := pdf.fpdi.GetImportedObjects()
	for i := pdf.n; i < len(objs)+pdf.n; i++ {
		if objs[i] != "" {
			pdf.newObjId()
			pdf.newObj(pdf.n)
			pdf.outln(objs[i])
		}
	}
	return tplid
}
// UseImportedTemplate draws a previously imported template (see ImportPage)
// at position (x, y) scaled to w×h on the current page.
func (pdf *Pdf) UseImportedTemplate(tplid int, x, y, w, h float64) {
	// get gofpdi template values; y is shifted by the page height because
	// gofpdi works in a coordinate system with the origin at the bottom.
	// NOTE(review): assumes pdf.h is the page height — confirm.
	tplName, scaleX, scaleY, tX, tY := pdf.fpdi.UseTemplate(tplid, x, y-pdf.h, w, h)
	pdf.page.instructions.add(fmt.Sprintf("q 0 J 1 w 0 j 0 G 0 g q %.4F 0 0 %.4F %.4F %.4F cm %s Do Q Q", scaleX, scaleY, tX, tY, tplName), "draw template")
}
|
package main
import (
"daemon"
"errors"
"fmt"
"log"
"net"
"os"
)
// parseCommand maps a command word to its daemon command constant, returning
// -1 and an error for anything unrecognized.
func parseCommand(str string) (int, error) {
	switch str {
	case "get":
		return daemon.CMD_GET, nil
	case "put":
		return daemon.CMD_PUT, nil
	case "pin":
		return daemon.CMD_PIN, nil
	case "unpin":
		return daemon.CMD_UNPIN, nil
	default:
		return -1, errors.New("Invalid command")
	}
}
// main parses "<command> <argument>" from the CLI, sends the command to the
// local daemon over UDP at 127.0.0.1:40000, and prints the daemon's response.
func main() {
	if len(os.Args) < 3 {
		fmt.Printf("Usage: %s <command> <filename>\n", os.Args[0])
		return
	}
	command, err := parseCommand(os.Args[1])
	if err != nil {
		fmt.Printf("Invalid command entered: %s\n", os.Args[1])
		// Fixed: previously listed only get/put even though pin/unpin are accepted.
		fmt.Printf("Valid commands are: get, put, pin, unpin\n")
		return
	}
	arg := os.Args[2]
	addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:40000")
	// Fixed: this error was previously unchecked, so a resolution failure
	// would pass a nil addr to DialUDP.
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	conn, err := net.DialUDP("udp", nil, addr)
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	// Fixed: the connection was never closed.
	defer conn.Close()
	log.Printf("Sending command: %d, %s\n", command, arg)
	daemon.SendCommand(conn, nil, command, arg)
	response, err := daemon.ReadResponse(conn)
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Printf("%d, %s\n", response.ResultCode, response.Result)
}
|
package main
import (
"fmt"
)
// main demonstrates an if-statement with an initializer: x is scoped to the
// if, and the condition matches, so "009" is printed.
func main() {
	// An earlier variant compared x against 2, which never matched.
	if x := 42; x == 42 {
		fmt.Println("009")
	}
}
|
package main
import (
"encoding/csv"
"fmt"
"os"
"sort"
"sync"
"time"
)
// Homework 4.6 of CS215.
// Uses the csv package, bufio, and assorted file opening/manipulation,
// and applies Go's concurrency to speed up the centrality computation.
// Graph represents the bipartite actor/movie graph as an adjacency map;
// g[node][neighbor] == 1 marks an edge (both directions are stored).
type Graph map[string]map[string]int

// Imdb pairs an actor with their computed average-centrality score.
type Imdb struct {
	actor string
	score float64
}

// ImdbRanks implements sort.Interface, ordering by ascending score.
type ImdbRanks []Imdb

func (s ImdbRanks) Len() int { return len(s) }
func (s ImdbRanks) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ImdbRanks) Less(i, j int) bool { return s[i].score < s[j].score }
// g1 streams successive multiples of 2 (2, 4, 6, ...) into ch forever,
// logging each send.
func g1(ch chan<- int) {
	n := 2
	for {
		ch <- n // Send 'n' to channel 'ch'
		fmt.Printf("sending %d from g1\n", n)
		n += 2
	}
}
// g2 streams 2, 5, 8, ... (step 3) into ch forever, logging each send.
func g2(ch chan<- int) {
	n := 2
	for {
		ch <- n // Send 'n' to channel 'ch'
		fmt.Printf("sending %d from g2\n", n)
		n += 3
	}
}
// make_link records an undirected edge between actor and movie in the
// bipartite graph g, allocating each node's adjacency map on first use.
func make_link(g Graph, actor, movie string) {
	for _, edge := range [][2]string{{actor, movie}, {movie, actor}} {
		from, to := edge[0], edge[1]
		if g[from] == nil {
			g[from] = make(map[string]int)
		}
		g[from][to] = 1
	}
}
// average_centrality runs a BFS from node over the bipartite graph g and
// returns the mean distance from node to every reachable node (including
// node itself at distance 0).
func average_centrality(g Graph, node string) float64 {
	distance := map[string]int{node: 0}
	queue := []string{node}
	for len(queue) > 0 {
		current := queue[0]
		queue = queue[1:]
		for neighbor := range g[current] {
			if _, seen := distance[neighbor]; !seen {
				distance[neighbor] = distance[current] + 1
				queue = append(queue, neighbor)
			}
		}
	}
	var total float64
	for _, d := range distance {
		total += float64(d)
	}
	return total / float64(len(distance))
}
// test is a sleeping stand-in workload used to study goroutine scheduling.
func test() float64 {
	const pause = 100 * time.Millisecond
	time.Sleep(pause)
	return 0
}
// main builds the actor–movie bipartite graph from a TSV file, computes each
// actor's average BFS distance (centrality) in its own goroutine, then prints
// the 20 actors with the smallest scores plus the total elapsed time.
func main() {
	t1 := time.Now()
	fp, err := os.Open("/Users/dluna/Downloads/file.tsv")
	if err != nil {
		fmt.Println(err.Error())
	}
	defer fp.Close()
	// Each record is three tab-separated fields (actor, title, year).
	r := csv.NewReader(fp)
	r.Comma, r.FieldsPerRecord = '\t', 3
	all_recs, err := r.ReadAll() // gives a 2d slice of the file r, each slice is a line split into 3 sections
	if err != nil {
		fmt.Println(err.Error())
	}
	g := make(Graph)
	actors := make(map[string]bool) // create a map of actors, used later on to compute centralities
	for _, line := range all_recs {
		// Movie node key is "title year" so same-titled films stay distinct.
		make_link(g, line[0], line[1]+" "+line[2])
		actors[line[0]] = true
	}
	// make the slice, ch and wg
	top20k := make(ImdbRanks, len(actors))
	wg := &sync.WaitGroup{}
	i := 0
	// find centrality concurrently
	for node, _ := range actors {
		wg.Add(1)
		// Each goroutine writes only its own slot arr[i], and g is read-only
		// at this point, so the writes do not race with each other.
		go func(arr ImdbRanks, i int, node string) {
			defer wg.Done()
			sc := average_centrality(g, node)
			// sc := test() // func to test is sleep blocks, this is not the case
			arr[i] = Imdb{node, sc}
		}(top20k, i, node)
		i++
		// when running average_centrality it shows 3 goroutines up at the same time
		// when running test() number goes up all the way to len(actors)
		// fmt.Println(wg, i, runtime.NumGoroutine())
	}
	wg.Wait()
	// unload all the Imdb into the array
	// for i := 0; i < len(actors); i++ {
	// top20k[i] = <-ch
	// fmt.Println(top20k[i])
	// }
	// Ascending sort: the smallest average distances are the most central.
	sort.Sort(top20k)
	for i := 0; i < 20; i++ {
		fmt.Println(top20k[i].actor, top20k[i].score)
	}
	fmt.Println(time.Since(t1))
}
|
package gfuns
import (
"bytes"
"encoding/json"
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"strconv"
)
// Split cuts the media at url into 60-second segments (stream copy, no
// re-encode) inside a working directory derived from the probed filename,
// returning the produced file paths, ffmpeg's output, and any error.
func Split(url string, ffprobe FFprobe) ([]string, string, error) {
	dir, _ := createDir(ffprobe.Format.Filename)
	pattern := dir + "/" + getBaseName(GetName(ffprobe.Format.Filename)) + "_%d" + filepath.Ext(GetName(url))
	message, err := ffmpeg("-y", "-v", "error", "-i", url, "-f", "segment", "-codec:", "copy", "-segment_time", "60", pattern)
	return walk(dir), message, err
}
// Probe runs ffprobe (expected under os.TempDir()) against url and decodes
// its JSON output into an FFprobe value. Returns the zero value and an error
// when the probe fails.
func Probe(url string) (FFprobe, error) {
	cmd := exec.Command(os.TempDir()+"/ffprobe", "-v", "quiet", "-print_format", "json", "-show_error", "-show_format", "-show_streams", url)
	fmt.Println(cmd)
	var out bytes.Buffer
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		// Fixed: log.Fatal here called os.Exit, making the error return
		// unreachable; log and return so callers can handle the failure.
		log.Println(err)
		return FFprobe{}, err
	}
	var app FFprobe
	return app, json.Unmarshal(out.Bytes(), &app)
}
// ffmpeg runs the ffmpeg binary (expected under os.TempDir()) with the given
// arguments, returning stdout on success or stderr plus the error on failure.
func ffmpeg(arg ...string) (string, error) {
	cmd := exec.Command(os.TempDir()+"/ffmpeg", arg...)
	fmt.Println(cmd)
	var out bytes.Buffer
	var stderr bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		// Fixed: log.Fatal here called os.Exit, making the error return
		// unreachable; log and return so callers can handle the failure.
		log.Println(stderr.String())
		return stderr.String(), err
	}
	return out.String(), nil
}
// Mp4 transcodes the media at url to an MP4 at the resolution requested in
// gcsEvent, writing into the event's working directory.
func Mp4(url string, gcsEvent GCSEvent) ([]string, string, error) {
	dir, _ := createDir(gcsEvent.Pattern + "/" + GetName(gcsEvent.Name))
	size := strconv.FormatInt(int64(gcsEvent.Resolution.Width), 10) + "x" + strconv.FormatInt(int64(gcsEvent.Resolution.Height), 10)
	target := dir + "/" + getBaseName(GetName(gcsEvent.Name)) + ".mp4"
	message, err := ffmpeg("-y", "-v", "error", "-i", url, "-f", "mp4", "-s", size, target)
	return walk(dir), message, err
}
// Png grabs a single frame around the 10-second mark, scaled to 320px wide,
// and writes it as a PNG thumbnail into the event's working directory.
func Png(url string, gcsEvent GCSEvent) ([]string, string, error) {
	dir, _ := createDir(gcsEvent.Pattern + "/" + GetName(gcsEvent.Name))
	target := dir + "/" + GetRealName(gcsEvent.Name) + ".png"
	message, err := ffmpeg("-y", "-v", "error", "-i", url, "-vframes", "1", "-vf", "select='gte(n\\,10)',scale=320:-1", "-ss", "00:00:10", target)
	return walk(dir), message, err
}
// Jpeg extracts a sequence of 144px-wide JPEG frames (from frame 10 onward)
// into the event's working directory.
func Jpeg(url string, gcsEvent GCSEvent) ([]string, string, error) {
	dir, _ := createDir(gcsEvent.Pattern + "/" + GetName(gcsEvent.Name))
	pattern := dir + "/" + GetRealName(gcsEvent.Name) + "_%d.jpeg"
	message, err := ffmpeg("-y", "-v", "error", "-i", url, "-vf", "select='gte(n\\,10)',scale=144:-1", pattern)
	return walk(dir), message, err
}
// Webp renders a looping 3-second, 320px-wide WebP preview starting at the
// 10-second mark into the event's working directory.
func Webp(url string, gcsEvent GCSEvent) ([]string, string, error) {
	dir, _ := createDir(gcsEvent.Pattern + "/" + GetName(gcsEvent.Name))
	target := dir + "/" + GetRealName(gcsEvent.Name) + ".webp"
	message, err := ffmpeg("-y", "-v", "error", "-i", url, "-loop", "0", "-vf", "select='gte(n\\,10)',scale=320:-1", "-ss", "00:00:10", "-t", "00:00:03", target)
	return walk(dir), message, err
}
// Hls concatenates the segment URLs from List(url) (written to files.txt)
// and packages them as an AES-encrypted HLS playlist (<name>.m3u8 plus
// encrypted MPEG-TS segments) in the event's working directory. The
// encryption key and IV come from gcsEvent.Meta.
func Hls(url string, gcsEvent GCSEvent) ([]string, string, error) {
	dir, _ := createDir(gcsEvent.Pattern + "/" + GetName(gcsEvent.Name))
	// NOTE(review): bucket name "asrevo-video" is hard-coded here.
	urls := generateMultipleUrl("asrevo-video", List(url))
	write(dir+"/files.txt", urls)
	name := GetRealName(gcsEvent.Name)
	// Argument order matters: the whitelist/format flags must precede -i.
	message, err := ffmpeg("-y", "-v", "error", "-safe", "0", "-protocol_whitelist", "file,http,https,tcp,tls,concat,crypto", "-f", "concat", "-i", dir+"/files.txt", "-hls_segment_type", "mpegts", "-f", "hls", "-codec:", "copy", "-start_number", "0", "-hls_time", "2", "-hls_list_size", "0", "-hls_enc", "1", "-hls_enc_key", gcsEvent.Meta.Key, "-hls_enc_key_url", dir+"/"+name+".key", "-hls_enc_iv", gcsEvent.Meta.Iv, "-master_pl_name", name+".m3u8", dir+"/"+name+"_"+".m3u8")
	return walk(dir), message, err
}
|
package main
import (
"fmt"
"log"
"time"
tm "github.com/buger/goterm"
"github.com/goburrow/modbus"
)
// normal is created in main but never used afterwards; it looks like a
// leftover shutdown-signal channel — TODO(review): confirm before removing.
var normal chan bool

// main continuously polls a Modbus RTU device on /dev/ttyUSB0 (9600 8N2,
// slave id 1) and renders the decoded electrical readings to the terminal.
// Registers are decoded big-endian: 16-bit values from 1 register, 32-bit
// values from 2 registers (2 bytes each).
func main() {
	normal = make(chan bool, 1)
	SlaveId := byte(1)
	addr := uint16(0x59) // start register of the 6-register date/time block
	// handler := modbus.NewTCPClientHandler("127.0.0.1:502")
	handler := modbus.NewRTUClientHandler("/dev/ttyUSB0")
	handler.BaudRate = 9600
	handler.DataBits = 8
	handler.Parity = "N"
	handler.StopBits = 2
	handler.SlaveId = SlaveId
	handler.Timeout = 1 * time.Second
	err := handler.Connect()
	if err != nil {
		fmt.Println(err)
	}
	defer handler.Close()
	client := modbus.NewClient(handler)
	for {
		var output string
		// Date/time: 6 registers -> 12 bytes, one 16-bit field each.
		results, err := client.ReadHoldingRegisters(addr, 6)
		if err != nil {
			fmt.Println(err)
			break
		}
		year := (int32(results[0])<<8 | int32(results[1]))
		month := (int32(results[2])<<8 | int32(results[3]))
		date := (int32(results[4])<<8 | int32(results[5]))
		hour := (int32(results[6])<<8 | int32(results[7]))
		minute := (int32(results[8])<<8 | int32(results[9]))
		second := (int32(results[10])<<8 | int32(results[11]))
		output = fmt.Sprintf("%d-%02d-%02d %02d:%02d:%02d", year, month, date, hour, minute, second)
		// Frequency, scaled by 1/100 Hz.
		results, err = client.ReadHoldingRegisters(0x01, 1)
		if err != nil {
			log.Println(err)
			break
		}
		freq := (int32(results[0])<<8 | int32(results[1]))
		output += fmt.Sprintf("\n Freq : %0.2f Hz", float64(freq)/100)
		// Average phase voltage, scaled by 1/10 V.
		results, err = client.ReadHoldingRegisters(0x02, 2)
		if err != nil {
			log.Println(err)
			break
		}
		avgU := (int32(results[0])<<24 | int32(results[1])<<16 | int32(results[2])<<8 | int32(results[3]))
		output += fmt.Sprintf("\n U avg : %0.1f V", float64(avgU)/10)
		// Average line voltage, scaled by 1/10 V.
		results, err = client.ReadHoldingRegisters(0x04, 2)
		if err != nil {
			log.Println(err)
			break
		}
		avgLU := (int32(results[0])<<24 | int32(results[1])<<16 | int32(results[2])<<8 | int32(results[3]))
		output += fmt.Sprintf("\n UL avg : %0.1f V", float64(avgLU)/10)
		// Average current, scaled by 1/1000 A.
		results, err = client.ReadHoldingRegisters(0x06, 2)
		if err != nil {
			log.Println(err)
			break
		}
		avgI := (int32(results[0])<<24 | int32(results[1])<<16 | int32(results[2])<<8 | int32(results[3]))
		output += fmt.Sprintf("\n I avg : %0.3f A", float64(avgI)/1000)
		// Neutral current, scaled by 1/1000 A.
		results, err = client.ReadHoldingRegisters(0x08, 2)
		if err != nil {
			log.Println(err)
			break
		}
		In := (int32(results[0])<<24 | int32(results[1])<<16 | int32(results[2])<<8 | int32(results[3]))
		output += fmt.Sprintf("\n In : %0.3f A", float64(In)/1000)
		// Total active power in W.
		results, err = client.ReadHoldingRegisters(0x0A, 2)
		if err != nil {
			log.Println(err)
			break
		}
		Psum := (int32(results[0])<<24 | int32(results[1])<<16 | int32(results[2])<<8 | int32(results[3]))
		output += fmt.Sprintf("\n Psum : %d W", Psum)
		// Total reactive power in VAR.
		results, err = client.ReadHoldingRegisters(0x0C, 2)
		if err != nil {
			log.Println(err)
			break
		}
		Qsum := (int32(results[0])<<24 | int32(results[1])<<16 | int32(results[2])<<8 | int32(results[3]))
		output += fmt.Sprintf("\n Qsum : %d VAR", Qsum)
		// NOTE(review): this re-reads 0x0C for apparent power; the commented
		// blocks below suggest the next block may live at a later address —
		// confirm against the device register map.
		results, err = client.ReadHoldingRegisters(0x0C, 2)
		if err != nil {
			log.Println(err)
			break
		}
		Ssum := (int32(results[0])<<24 | int32(results[1])<<16 | int32(results[2])<<8 | int32(results[3]))
		output += fmt.Sprintf("\n Ssum : %d VA", Ssum)
		// Average power factor, scaled by 1/1000.
		results, err = client.ReadHoldingRegisters(0x10, 2)
		if err != nil {
			log.Println(err)
			break
		}
		PFavg := (int32(results[0])<<24 | int32(results[1])<<16 | int32(results[2])<<8 | int32(results[3]))
		output += fmt.Sprintf("\n PF avg : %.3f VA", float64(PFavg)/1000)
		// Active energy, scaled by 1/10 kWh.
		results, err = client.ReadHoldingRegisters(0x12, 2)
		if err != nil {
			log.Println(err)
			break
		}
		ea := (int32(results[0])<<24 | int32(results[1])<<16 | int32(results[2])<<8 | int32(results[3]))
		output += fmt.Sprintf("\n Ea : %0.1f kWh", float64(ea)/10)
		// results, err = client.ReadHoldingRegisters(0x14, 2)
		// if err != nil {
		// log.Println(err)
		// break
		// }
		// er := (int32(results[0])<<24 | int32(results[1])<<16 | int32(results[2])<<8 | int32(results[3]))
		// log.Printf("Er : %0.1f kVARh", float64(er)/10)
		// results, err = client.ReadHoldingRegisters(0x16, 2)
		// if err != nil {
		// log.Println(err)
		// break
		// }
		// cost := (int32(results[0])<<24 | int32(results[1])<<16 | int32(results[2])<<8 | int32(results[3]))
		// log.Printf("Cost : %0.1f ฿", float64(cost)/10)
		// results, err = client.ReadHoldingRegisters(0x18, 2)
		// if err != nil {
		// log.Println(err)
		// break
		// }
		// co2 := (int32(results[0])<<24 | int32(results[1])<<16 | int32(results[2])<<8 | int32(results[3]))
		// log.Printf("CO2 : %0.1f kg", float64(co2)/10)
		tm.Clear() // Clear current screen
		tm.MoveCursor(1, 1)
		tm.Print(output)
		tm.Flush() // Call it every time at the end of rendering
		// time.Sleep(time.Second)
	}
}
|
package main
import (
"os"
"fmt"
"path/filepath"
)
// Student is a sample struct used by the demos in main.
type Student struct {
	Age int
	Name string
}

// my_string is a defined type over string; note it is NOT assignable to/from
// string without an explicit conversion.
type my_string string
// fs doubles its input, then appends "sh" and "ce": fs(s) == s+s+"sh"+"ce".
// It operates on a copy, so the caller's string is untouched.
func fs(str string) (r_str string) {
	doubled := str + str + "sh"
	r_str = doubled + "ce"
	return
}
// main is a scratch program exercising os.Args, filepath helpers, structs,
// string concatenation and lengths, pointer/format verbs, arrays vs slices,
// and range loops over strings and struct slices.
func main() {
	fmt.Println(os.Args)
	fmt.Println(len(os.Args))
	fmt.Println(filepath.Base(os.Args[0]))
	fmt.Println(filepath.FromSlash("\\name\\file\\"))
	student1 := Student{}
	var student2 Student
	fmt.Println(student1)
	fmt.Println(student2)
	str := "hellow"
	fmt.Println(str)
	fmt.Println(len(str))
	str += " world"
	fmt.Println(str)
	fmt.Println(len(str))
	str += " li_xiaolong"
	fmt.Println(str)
	fmt.Println(len(str))
	r_str := fs(str)
	fmt.Println(str) // fs works on a copy, so str is unchanged
	fmt.Println(r_str)
	a := 237
	fmt.Printf("%p,%#p,%x,%#x,%X,%#X", &a, &a, a, a, a, a)
	fmt.Printf("%p,%#p,%x,%#x,%X,%#X", &a, &a, a, a, a, a)
	b := new(int)
	c := &[]int{10, 11}
	fmt.Println()
	fmt.Println(b)
	fmt.Printf("%p", c)
	a1 := []int{1, 2, 3, 4}
	a2 := [...]int{1, 2, 3, 4}
	fmt.Printf("\n%T\n", a1)
	fmt.Println(len(a1), cap(a1), a1)
	fmt.Printf("\n%T\n", a2)
	fmt.Println(len(a2), cap(a2), a2)
	str1 := "abcdefg"
	for i := range str1 {
		fmt.Println(i)
	}
	for _, i := range str1 {
		fmt.Println(i)
	}
	sts := []Student{{10, "abc"}, {20, "def"}, {30, "ghi"}}
	for _, i := range sts {
		fmt.Println(i)
	}
	sts1 := []*Student{{10, "abc"}, {20, "def"}, {30, "ghi"}}
	for _, i := range sts1 {
		fmt.Println(i)
	}
	sts2 := []*Student{&student1, &student2}
	for _, i := range sts2 {
		fmt.Println(i)
	}
	var m_str my_string = "test"
	fmt.Println(m_str)
	// Fixed: my_string and string are distinct defined types, so fs(m_str)
	// (and assigning its string result back to m_str) did not compile.
	// Convert explicitly in both directions.
	m_str = my_string(fs(string(m_str)))
	fmt.Println(m_str)
}
|
/*
* Copyright 2017 StreamSets Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package common
import (
"github.com/streamsets/datacollector-edge/api"
"github.com/streamsets/datacollector-edge/api/validation"
)
// BaseStage provides the default lifecycle implementation shared by all
// stages: it stores the stage context on Init and exposes it via getters.
type BaseStage struct {
	stageContext api.StageContext
}

// GetStageContext returns the context captured by Init.
func (b *BaseStage) GetStageContext() api.StageContext {
	return b.stageContext
}

// Init stores the stage context and reports no validation issues.
// Embedding stages may extend this with their own checks.
func (b *BaseStage) Init(stageContext api.StageContext) []validation.Issue {
	issues := make([]validation.Issue, 0)
	b.stageContext = stageContext
	return issues
}

// Destroy is intentionally a no-op; stages with resources override it.
func (b *BaseStage) Destroy() error {
	//No OP Destroy
	return nil
}

// GetStageConfig exposes the stage configuration.
// NOTE(review): this type-asserts stageContext to *StageContextImpl and will
// panic for other implementations — confirm that is always the case.
func (b *BaseStage) GetStageConfig() *StageConfiguration {
	return b.stageContext.(*StageContextImpl).StageConfig
}
|
package segtree
/*
Verified:
RMQ: https://onlinejudge.u-aizu.ac.jp/solutions/problem/DSL_2_A/review/5807307/numacci/Go
RSQ: https://onlinejudge.u-aizu.ac.jp/solutions/problem/DSL_2_B/review/5805828/numacci/Go
*/
// SegTree is a segment tree over an arbitrary monoid, usable for RMQ and RSQ
// with point Update (overwrite, not add). To add x to a[k] on update, adapt
// Update or switch to a lazy segment tree.
type SegTree struct {
	Size int            // number of leaves: smallest power of two >= n
	Ex   X              // identity element of monoid X: (x,e)=(e,x)=x
	Fx   func(a, b X) X // binary operation of monoid X: (X,X)->X
	Dat  []X            // heap layout: children of node k are 2k+1 and 2k+2
}

// X wraps the monoid value; swap val's type (e.g. to float64) to build a
// segment tree over a different element type.
type X struct {
	val int
}

// NewSegTree builds a tree able to hold n leaves, all initialized to the
// identity ex. fx is the monoid operation (Min, Max, Add, ...).
func NewSegTree(n int, ex X, fx func(X, X) X) *SegTree {
	size := 1
	for size < n {
		size *= 2
	}
	dat := make([]X, 2*size)
	for i := range dat {
		dat[i] = ex
	}
	return &SegTree{
		Size: size,
		Ex:   ex,
		Fx:   fx,
		Dat:  dat,
	}
}

// Set writes x into leaf k without propagating; pair with Build for O(n)
// bulk construction.
func (sg *SegTree) Set(k int, x X) {
	sg.Dat[k+sg.Size-1] = x
}

// Build recomputes every internal node bottom-up after a series of Set calls.
func (sg *SegTree) Build() {
	for k := sg.Size - 2; k >= 0; k-- {
		sg.Dat[k] = sg.Fx(sg.Dat[2*k+1], sg.Dat[2*k+2])
	}
}

// Update overwrites a[k] with x and repairs all ancestors in O(log n).
func (sg *SegTree) Update(k int, x X) {
	node := k + sg.Size - 1
	sg.Dat[node] = x
	for node > 0 {
		node = (node - 1) / 2
		sg.Dat[node] = sg.Fx(sg.Dat[2*node+1], sg.Dat[2*node+2])
	}
}

// Query folds Fx over the half-open interval [s,t).
func (sg *SegTree) Query(s, t int) X {
	return sg.query(s, t, 0, 0, sg.Size)
}

// query evaluates [s,t) against node k, which covers [l,r).
func (sg *SegTree) query(s, t, k, l, r int) X {
	switch {
	case r <= s || t <= l: // disjoint: contribute the identity
		return sg.Ex
	case s <= l && r <= t: // fully contained: use the precomputed node
		return sg.Dat[k]
	}
	mid := (l + r) / 2
	left := sg.query(s, t, 2*k+1, l, mid)
	right := sg.query(s, t, 2*k+2, mid, r)
	return sg.Fx(left, right)
}
|
package output
// Outputs is a collection of Output pointers.
type Outputs []*Output

// Params concatenates the Params of every contained Output, in order.
func (outs Outputs) Params() []string {
	var params []string
	for _, out := range outs {
		params = append(params, out.Params()...)
	}
	return params
}
|
//go:generate mockgen -destination=./mock/types_mock.go os FileInfo
package os
|
/*
* Copyright (c) 2020. Ant Group. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*/
package stargz
import "testing"
// Test_digest_Sha256 checks that Sha256 strips the "sha256:" prefix from a
// digest string.
func Test_digest_Sha256(t *testing.T) {
	cases := []struct {
		name string
		d    digest
		want string
	}{
		{name: "testdigest", d: digest("sha256:12345"), want: "12345"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := tc.d.Sha256()
			if got != tc.want {
				t.Errorf("Sha256() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
package crypto_test
import (
"crypto-performance-compare/crypto"
"crypto-performance-compare/fakes"
"crypto-performance-compare/utils"
"fmt"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"net/http"
"net/http/httptest"
"os"
"testing"
)
// TestUpdater verifies that UpdateAll fills the cache for every tracked
// currency (here just BTC) while untracked symbols stay absent.
func TestUpdater(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, fakes.SuccessResponse)
	}))
	os.Setenv("BASE_URL", server.URL)
	os.Setenv("TRACK_LIST", "BTC")
	logger := utils.NewLogger()
	cache := crypto.NewCache()
	updater := crypto.NewUpdater(logger, cache)
	require.NoError(t, updater.UpdateAll())
	for _, symbol := range utils.GetCurrencies() {
		value, err := cache.Read(symbol)
		require.NoError(t, err)
		assert.NotEmpty(t, value)
	}
	missing, err := cache.Read("ETH")
	assert.Error(t, err)
	assert.Empty(t, missing)
}
|
package attacher
import (
"errors"
"fmt"
"net"
"strings"
"github.com/Huawei/eSDK_K8S_Plugin/src/proto"
"github.com/Huawei/eSDK_K8S_Plugin/src/storage/oceanstor/client"
"github.com/Huawei/eSDK_K8S_Plugin/src/utils"
"github.com/Huawei/eSDK_K8S_Plugin/src/utils/log"
)
// AttacherPlugin is the per-product attach/detach contract implemented by
// the OceanStor and DoradoV6 attachers. ControllerAttach/ControllerDetach
// run on the controller side, NodeStage/NodeUnstage on the node side, and
// the two getTarget* helpers resolve usable storage portals.
type AttacherPlugin interface {
	ControllerAttach(string, map[string]interface{}) (string, error)
	ControllerDetach(string, map[string]interface{}) (string, error)
	NodeStage(string, map[string]interface{}) (string, error)
	NodeUnstage(string, map[string]interface{}) error
	getTargetISCSIPortals() ([]string, error)
	getTargetRoCEPortals() ([]string, error)
}

// Attacher bundles the storage client and connection settings shared by all
// product-specific attacher implementations.
type Attacher struct {
	cli      *client.Client         // REST client for the storage array
	protocol string                 // transport protocol; iscsi/fc/roce judging by the attach helpers — confirm with callers
	invoker  string                 // embedded into hostgroup/lungroup/mapping names
	portals  []string               // configured portal addresses
	alua     map[string]interface{} // desired initiator ALUA settings (may be nil)
}
// NewAttacher returns the attacher implementation matching the storage
// product: DoradoV6 gets its own attacher, everything else the OceanStor one.
func NewAttacher(
	product string,
	cli *client.Client,
	protocol, invoker string,
	portals []string,
	alua map[string]interface{}) AttacherPlugin {
	if product == "DoradoV6" {
		return newDoradoV6Attacher(cli, protocol, invoker, portals, alua)
	}
	return newOceanStorAttacher(cli, protocol, invoker, portals, alua)
}
// getHostName builds the array-side host name "k8s_<postfix>", truncated to
// the 31-character limit the array imposes.
func (p *Attacher) getHostName(postfix string) string {
	const maxLen = 31
	name := "k8s_" + postfix
	if len(name) > maxLen {
		name = name[:maxLen]
	}
	return name
}
// getHostGroupName builds the hostgroup name for the given postfix.
func (p *Attacher) getHostGroupName(postfix string) string {
	return "k8s_" + p.invoker + "_hostgroup_" + postfix
}

// getLunGroupName builds the lungroup name for the given postfix.
func (p *Attacher) getLunGroupName(postfix string) string {
	return "k8s_" + p.invoker + "_lungroup_" + postfix
}

// getMappingName builds the mapping name for the given postfix.
func (p *Attacher) getMappingName(postfix string) string {
	return "k8s_" + p.invoker + "_mapping_" + postfix
}
// getHost looks up the array host object for this node (name from the
// "HostName" parameter, falling back to the OS hostname). With toCreate set,
// a missing host is created; otherwise nil is returned without error.
func (p *Attacher) getHost(parameters map[string]interface{}, toCreate bool) (map[string]interface{}, error) {
	hostname, ok := parameters["HostName"].(string)
	if !ok {
		var err error
		hostname, err = utils.GetHostName()
		if err != nil {
			log.Errorf("Get hostname error: %v", err)
			return nil, err
		}
	}
	name := p.getHostName(hostname)
	host, err := p.cli.GetHostByName(name)
	if err != nil {
		log.Errorf("Get host %s error: %v", name, err)
		return nil, err
	}
	if host == nil && toCreate {
		if host, err = p.cli.CreateHost(name); err != nil {
			log.Errorf("Create host %s error: %v", name, err)
			return nil, err
		}
	}
	if host != nil {
		return host, nil
	}
	if toCreate {
		return nil, fmt.Errorf("cannot create host %s", name)
	}
	return nil, nil
}
// createMapping returns the ID of the per-host mapping, creating the mapping
// on the array if it does not exist yet.
func (p *Attacher) createMapping(hostID string) (string, error) {
	name := p.getMappingName(hostID)
	mapping, err := p.cli.GetMappingByName(name)
	if err != nil {
		log.Errorf("Get mapping by name %s error: %v", name, err)
		return "", err
	}
	if mapping == nil {
		if mapping, err = p.cli.CreateMapping(name); err != nil {
			log.Errorf("Create mapping %s error: %v", name, err)
			return "", err
		}
	}
	return mapping["ID"].(string), nil
}
// createHostGroup ensures the host belongs to its per-host hostgroup and that
// the hostgroup is associated with the given mapping. Refactored to remove
// the goto: membership check, group creation, and mapping association are
// now sequential structured steps with identical behavior.
func (p *Attacher) createHostGroup(hostID, mappingID string) error {
	hostGroupName := p.getHostGroupName(hostID)

	// Step 1: is the host already in the wanted hostgroup? (type 21 = host)
	hostGroupsByHostID, err := p.cli.QueryAssociateHostGroup(21, hostID)
	if err != nil {
		log.Errorf("Query associated hostgroups of host %s error: %v", hostID, err)
		return err
	}
	var hostGroupID string
	alreadyInGroup := false
	for _, i := range hostGroupsByHostID {
		group := i.(map[string]interface{})
		if group["NAME"].(string) == hostGroupName {
			hostGroupID = group["ID"].(string)
			alreadyInGroup = true
			break
		}
	}

	// Step 2: if not, find or create the hostgroup and add the host to it.
	if !alreadyInGroup {
		hostGroup, err := p.cli.GetHostGroupByName(hostGroupName)
		if err != nil {
			log.Errorf("Get hostgroup by name %s error: %v", hostGroupName, err)
			return err
		}
		if hostGroup == nil {
			hostGroup, err = p.cli.CreateHostGroup(hostGroupName)
			if err != nil {
				log.Errorf("Create hostgroup %s error: %v", hostGroupName, err)
				return err
			}
		}
		hostGroupID = hostGroup["ID"].(string)
		if err := p.cli.AddHostToGroup(hostID, hostGroupID); err != nil {
			log.Errorf("Add host %s to hostgroup %s error: %v", hostID, hostGroupID, err)
			return err
		}
	}

	// Step 3: associate the hostgroup with the mapping, unless it already is
	// (type 245 = mapping; object type 14 = hostgroup).
	hostGroupsByMappingID, err := p.cli.QueryAssociateHostGroup(245, mappingID)
	if err != nil {
		log.Errorf("Query associated hostgroups of mapping %s error: %v", mappingID, err)
		return err
	}
	for _, i := range hostGroupsByMappingID {
		group := i.(map[string]interface{})
		if group["NAME"].(string) == hostGroupName {
			return nil
		}
	}
	if err := p.cli.AddGroupToMapping(14, hostGroupID, mappingID); err != nil {
		log.Errorf("Add hostgroup %s to mapping %s error: %v", hostGroupID, mappingID, err)
		return err
	}
	return nil
}
// createLunGroup ensures the LUN belongs to the per-host lungroup and that
// the lungroup is associated with the given mapping. Refactored to remove
// the goto, mirroring createHostGroup; behavior is unchanged.
func (p *Attacher) createLunGroup(lunID, hostID, mappingID string) error {
	lunGroupName := p.getLunGroupName(hostID)

	// Step 1: is the LUN already in the wanted lungroup? (type 11 = lun)
	lunGroupsByLunID, err := p.cli.QueryAssociateLunGroup(11, lunID)
	if err != nil {
		log.Errorf("Query associated lungroups of lun %s error: %v", lunID, err)
		return err
	}
	var lunGroupID string
	alreadyInGroup := false
	for _, i := range lunGroupsByLunID {
		group := i.(map[string]interface{})
		if group["NAME"].(string) == lunGroupName {
			lunGroupID = group["ID"].(string)
			alreadyInGroup = true
			break
		}
	}

	// Step 2: if not, find or create the lungroup and add the LUN to it.
	if !alreadyInGroup {
		lunGroup, err := p.cli.GetLunGroupByName(lunGroupName)
		if err != nil {
			log.Errorf("Get lungroup by name %s error: %v", lunGroupName, err)
			return err
		}
		if lunGroup == nil {
			lunGroup, err = p.cli.CreateLunGroup(lunGroupName)
			if err != nil {
				log.Errorf("Create lungroup %s error: %v", lunGroupName, err)
				return err
			}
		}
		lunGroupID = lunGroup["ID"].(string)
		if err := p.cli.AddLunToGroup(lunID, lunGroupID); err != nil {
			log.Errorf("Add lun %s to group %s error: %v", lunID, lunGroupID, err)
			return err
		}
	}

	// Step 3: associate the lungroup with the mapping, unless it already is
	// (type 245 = mapping; object type 256 = lungroup).
	lunGroupsByMappingID, err := p.cli.QueryAssociateLunGroup(245, mappingID)
	if err != nil {
		log.Errorf("Query associated lungroups of mapping %s error: %v", mappingID, err)
		return err
	}
	for _, i := range lunGroupsByMappingID {
		group := i.(map[string]interface{})
		if group["NAME"].(string) == lunGroupName {
			return nil
		}
	}
	if err := p.cli.AddGroupToMapping(256, lunGroupID, mappingID); err != nil {
		log.Errorf("Add lungroup %s to mapping %s error: %v", lunGroupID, mappingID, err)
		return err
	}
	return nil
}
// needUpdateInitiatorAlua reports whether the initiator's ALUA settings on
// the array differ from the configured ones. With no configuration, or when
// the multipath type already matches the default, no update is needed.
func (p *Attacher) needUpdateInitiatorAlua(initiator map[string]interface{}) bool {
	if p.alua == nil {
		return false
	}
	multiPathType, ok := p.alua["MULTIPATHTYPE"]
	if !ok {
		return false
	}
	if multiPathType != initiator["MULTIPATHTYPE"] {
		return true
	}
	if initiator["MULTIPATHTYPE"] == MULTIPATHTYPE_DEFAULT {
		return false
	}
	// Compare the remaining configured ALUA fields in the original order.
	for _, key := range []string{"FAILOVERMODE", "SPECIALMODETYPE", "PATHTYPE"} {
		if want, ok := p.alua[key]; ok && want != initiator[key] {
			return true
		}
	}
	return false
}
// getTargetISCSIPortals filters the configured portals down to those whose
// IP appears in the array's ISCSI target port IQNs, erroring when none match.
func (p *Attacher) getTargetISCSIPortals() ([]string, error) {
	ports, err := p.cli.GetIscsiTgtPort()
	if err != nil {
		log.Errorf("Get ISCSI tgt port error: %v", err)
		return nil, err
	}
	if ports == nil {
		msg := "No ISCSI tgt port exist"
		log.Errorln(msg)
		return nil, errors.New(msg)
	}
	// Collect the IPs embedded in the target port IQNs (6th colon field).
	validIPs := make(map[string]bool)
	for _, i := range ports {
		port := i.(map[string]interface{})
		iqn := strings.Split(port["ID"].(string), ",")[0]
		fields := strings.Split(iqn, ":")
		if len(fields) < 6 {
			continue
		}
		validIPs[fields[5]] = true
	}
	var available []string
	for _, portal := range p.portals {
		ip := net.ParseIP(portal).String()
		if !validIPs[ip] {
			log.Warningf("ISCSI portal %s is not valid", ip)
			continue
		}
		available = append(available, ip)
	}
	if available == nil {
		msg := fmt.Sprintf("All config portal %s is not valid", p.portals)
		log.Errorln(msg)
		return nil, errors.New(msg)
	}
	return available, nil
}
// getTargetRoCEPortals filters the configured portals down to those that
// exist on the array and support NVMe, erroring when none qualify.
func (p *Attacher) getTargetRoCEPortals() ([]string, error) {
	var available []string
	for _, portal := range p.portals {
		ip := net.ParseIP(portal).String()
		rocePortal, err := p.cli.GetRoCEPortalByIP(ip)
		if err != nil {
			log.Errorf("Get RoCE tgt portal error: %v", err)
			return nil, err
		}
		if rocePortal == nil {
			log.Warningf("the config portal %s does not exit.", ip)
			continue
		}
		supportProtocol, ok := rocePortal["SUPPORTPROTOCOL"].(string)
		if !ok {
			msg := "current storage does not support NVMe"
			log.Errorln(msg)
			return nil, errors.New(msg)
		}
		// 64 means NVME protocol
		if supportProtocol != "64" {
			log.Warningf("the config portal %s does not support NVME.", ip)
			continue
		}
		available = append(available, ip)
	}
	if available == nil {
		msg := fmt.Sprintf("All config portal %s is not valid", p.portals)
		log.Errorln(msg)
		return nil, errors.New(msg)
	}
	return available, nil
}
// attachISCSI ensures this node's ISCSI initiator exists on the array and is
// bound to hostID, returning the initiator object. It errors when the
// initiator is already bound to a different host.
func (p *Attacher) attachISCSI(hostID string) (map[string]interface{}, error) {
	name, err := proto.GetISCSIInitiator()
	if err != nil {
		// Fixed: this previously logged `name` instead of the error.
		log.Errorf("Get ISCSI initiator name error: %v", err)
		return nil, err
	}
	initiator, err := p.cli.GetIscsiInitiator(name)
	if err != nil {
		log.Errorf("Get ISCSI initiator %s error: %v", name, err)
		return nil, err
	}
	if initiator == nil {
		initiator, err = p.cli.AddIscsiInitiator(name)
		if err != nil {
			log.Errorf("Add initiator %s error: %v", name, err)
			return nil, err
		}
	}
	isFree, freeExist := initiator["ISFREE"].(string)
	parent, parentExist := initiator["PARENTID"].(string)
	if freeExist && isFree == "true" {
		// Free initiator: bind it to this host.
		err := p.cli.AddIscsiInitiatorToHost(name, hostID)
		if err != nil {
			log.Errorf("Add ISCSI initiator %s to host %s error: %v", name, hostID, err)
			return nil, err
		}
	} else if parentExist && parent != hostID {
		msg := fmt.Sprintf("ISCSI initiator %s is already associated to another host %s", name, parent)
		log.Errorln(msg)
		return nil, errors.New(msg)
	}
	return initiator, nil
}
// attachFC binds every free, online FC initiator of this node to hostID and
// returns the online initiator objects. It errors when an initiator is
// already bound to a different host.
func (p *Attacher) attachFC(hostID string) ([]map[string]interface{}, error) {
	fcInitiators, err := proto.GetFCInitiator()
	if err != nil {
		log.Errorf("Get fc initiator error: %v", err)
		return nil, err
	}
	var toBind []string
	var hostInitiators []map[string]interface{}
	for _, wwn := range fcInitiators {
		initiator, err := p.cli.GetFCInitiator(wwn)
		if err != nil {
			log.Errorf("Get FC initiator %s error: %v", wwn, err)
			return nil, err
		}
		if initiator == nil {
			log.Warningf("FC initiator %s does not exist", wwn)
			continue
		}
		// "27" is the array's online running status for initiators.
		if status, ok := initiator["RUNNINGSTATUS"].(string); !ok || status != "27" {
			log.Warningf("FC initiator %s is not online", wwn)
			continue
		}
		isFree, freeExist := initiator["ISFREE"].(string)
		parent, parentExist := initiator["PARENTID"].(string)
		switch {
		case freeExist && isFree == "true":
			toBind = append(toBind, wwn)
		case parentExist && parent != hostID:
			msg := fmt.Sprintf("FC initiator %s is already associated to another host %s", wwn, parent)
			log.Errorln(msg)
			return nil, errors.New(msg)
		}
		hostInitiators = append(hostInitiators, initiator)
	}
	for _, wwn := range toBind {
		if err := p.cli.AddFCInitiatorToHost(wwn, hostID); err != nil {
			log.Errorf("Add initiator %s to host %s error: %v", wwn, hostID, err)
			return nil, err
		}
	}
	return hostInitiators, nil
}
// attachRoCE ensures this node's RoCE initiator exists on the array and is
// bound to hostID, returning the initiator object. It errors when the
// initiator is already bound to a different host.
func (p *Attacher) attachRoCE(hostID string) (map[string]interface{}, error) {
	name, err := proto.GetRoCEInitiator()
	if err != nil {
		// Fixed: this previously logged `name` instead of the error.
		log.Errorf("Get RoCE initiator name error: %v", err)
		return nil, err
	}
	initiator, err := p.cli.GetRoCEInitiator(name)
	if err != nil {
		log.Errorf("Get RoCE initiator %s error: %v", name, err)
		return nil, err
	}
	if initiator == nil {
		initiator, err = p.cli.AddRoCEInitiator(name)
		if err != nil {
			log.Errorf("Add initiator %s error: %v", name, err)
			return nil, err
		}
	}
	isFree, freeExist := initiator["ISFREE"].(string)
	parent, parentExist := initiator["PARENTID"].(string)
	if freeExist && isFree == "true" {
		// Free initiator: bind it to this host.
		err := p.cli.AddRoCEInitiatorToHost(name, hostID)
		if err != nil {
			log.Errorf("Add RoCE initiator %s to host %s error: %v", name, hostID, err)
			return nil, err
		}
	} else if parentExist && parent != hostID {
		msg := fmt.Sprintf("RoCE initiator %s is already associated to another host %s", name, parent)
		log.Errorln(msg)
		return nil, errors.New(msg)
	}
	return initiator, nil
}
// doMapping exposes the named LUN to the host: it resolves the LUN, creates
// (or reuses) the host's mapping view together with its host group and LUN
// group, and returns the LUN's protocol-specific unique identifier.
func (p *Attacher) doMapping(hostID, lunName string) (string, error) {
	lun, err := p.cli.GetLunByName(lunName)
	if err != nil {
		log.Errorf("Get lun %s error: %v", lunName, err)
		return "", err
	}
	if lun == nil {
		msg := fmt.Sprintf("Lun %s not exist for attaching", lunName)
		log.Errorln(msg)
		return "", errors.New(msg)
	}
	lunID := lun["ID"].(string)

	viewID, err := p.createMapping(hostID)
	if err != nil {
		log.Errorf("Create mapping for host %s error: %v", hostID, err)
		return "", err
	}
	if err = p.createHostGroup(hostID, viewID); err != nil {
		log.Errorf("Create host group for host %s error: %v", hostID, err)
		return "", err
	}
	if err = p.createLunGroup(lunID, hostID, viewID); err != nil {
		log.Errorf("Create lun group for host %s error: %v", hostID, err)
		return "", err
	}

	uniqueID, err := utils.GetLunUniqueId(p.protocol, lun)
	if err != nil {
		return "", err
	}
	return uniqueID, nil
}
// doUnmapping removes the named LUN from this host's LUN group (if present)
// and returns the LUN's protocol-specific unique identifier. A missing LUN
// is not an error during detach.
func (p *Attacher) doUnmapping(hostID, lunName string) (string, error) {
	lun, err := p.cli.GetLunByName(lunName)
	if err != nil {
		log.Errorf("Get lun %s info error: %v", lunName, err)
		return "", err
	}
	if lun == nil {
		log.Infof("LUN %s doesn't exist while detaching", lunName)
		return "", nil
	}
	lunID := lun["ID"].(string)

	associatedGroups, err := p.cli.QueryAssociateLunGroup(11, lunID)
	if err != nil {
		log.Errorf("Query associated lungroups of lun %s error: %v", lunID, err)
		return "", err
	}

	// Only remove the LUN from the group owned by this host; other hosts'
	// groups are left untouched.
	wantedName := p.getLunGroupName(hostID)
	for _, item := range associatedGroups {
		group := item.(map[string]interface{})
		if group["NAME"].(string) != wantedName {
			continue
		}
		groupID := group["ID"].(string)
		if err = p.cli.RemoveLunFromGroup(lunID, groupID); err != nil {
			log.Errorf("Remove lun %s from group %s error: %v", lunID, groupID, err)
			return "", err
		}
	}

	uniqueID, err := utils.GetLunUniqueId(p.protocol, lun)
	if err != nil {
		return "", err
	}
	return uniqueID, nil
}
// NodeUnstage detaches the LUN on the controller side and then tears down the
// host-side device by WWN. An empty WWN means the device may be left over on
// the node; that is logged but not treated as an error.
func (p *Attacher) NodeUnstage(lunName string, parameters map[string]interface{}) error {
	wwn, err := p.ControllerDetach(lunName, parameters)
	switch {
	case err != nil:
		return err
	case wwn == "":
		log.Warningf("Cannot get WWN of LUN %s, the dev may leftover", lunName)
		return nil
	default:
		return disConnectVolume(wwn, p.protocol)
	}
}
// ControllerDetach unmaps the named LUN from this node's host object on the
// array and returns the LUN's WWN. A missing host means there is nothing to
// detach, reported as ("", nil).
func (p *Attacher) ControllerDetach(lunName string, parameters map[string]interface{}) (string, error) {
	host, err := p.getHost(parameters, false)
	if err != nil {
		log.Infof("Get host ID error: %v", err)
		return "", err
	}
	if host == nil {
		log.Infof("Host doesn't exist while detaching %s", lunName)
		return "", nil
	}

	id := host["ID"].(string)
	wwn, unmapErr := p.doUnmapping(id, lunName)
	if unmapErr != nil {
		log.Errorf("Unmapping LUN %s from host %s error: %v", lunName, id, unmapErr)
		return "", unmapErr
	}
	return wwn, nil
}
|
package goSolution
// minCostClimbingStairs returns the minimum total cost to reach the top of
// the staircase (LeetCode 746): you may start at step 0 or step 1, pay
// cost[i] when leaving step i, and climb one or two steps at a time.
//
// Runs in O(n) time and O(1) space via two rolling minima. Slices with fewer
// than two steps return 0 (the original indexed cost[0] and cost[1]
// unconditionally and panicked on short input).
func minCostClimbingStairs(cost []int) int {
	if len(cost) < 2 {
		// With 0 or 1 steps the top is reachable without paying anything.
		return 0
	}
	prev, curr := cost[0], cost[1]
	for _, c := range cost[2:] {
		// curr becomes the cheapest way to leave this step; prev trails it.
		prev, curr = curr, c+min(curr, prev)
	}
	return min(curr, prev)
}
|
package manifestgen
import (
"github.com/iLLeniumStudios/FiveMCarsMerger/pkg/dft"
"github.com/iLLeniumStudios/FiveMCarsMerger/pkg/flags"
log "github.com/sirupsen/logrus"
"io/ioutil"
"os"
"strings"
"text/template"
)
// Manifest records which data-file categories were found under the merged
// output's data/ directory; the fxmanifest template reads these flags to
// decide which entries to emit.
type Manifest struct {
	HasCarcols bool
	HasCarvariations bool
	HasContentUnlocks bool
	HasHandling bool
	HasVehicleLayouts bool
	HasVehicleModelsets bool
	HasVehicles bool
	HasWeaponsFile bool
}
// Generator renders the fxmanifest.lua for a merged resource.
type Generator interface {
	// Generate writes fxmanifest.lua into the configured output path.
	Generate() error
}
// generator is the default Generator implementation, driven by CLI flags.
type generator struct {
	Flags flags.Flags
}
// New returns a Generator bound to the given CLI flags.
func New(_flags flags.Flags) Generator {
	g := generator{Flags: _flags}
	return &g
}
// Generate scans <OutputPath>/data for known data-file folders, fills a
// Manifest with the matching flags, and renders manifestTemplate into
// <OutputPath>/fxmanifest.lua.
func (g *generator) Generate() error {
	tmpl, err := template.New("manifestTemplate").Parse(manifestTemplate)
	if err != nil {
		return err
	}

	folders, err := ioutil.ReadDir(g.Flags.OutputPath + "/data")
	if err != nil {
		return err
	}

	var manifest Manifest
	for _, folder := range folders {
		// A switch replaces the original if/else ladder: one comparison
		// chain, one place to add future categories.
		switch folder.Name() {
		case strings.ToLower(dft.CARCOLS.String()):
			manifest.HasCarcols = true
		case strings.ToLower(dft.CARVARIATIONS.String()):
			manifest.HasCarvariations = true
		case strings.ToLower(dft.CONTENTUNLOCKS.String()):
			manifest.HasContentUnlocks = true
		case strings.ToLower(dft.HANDLING.String()):
			manifest.HasHandling = true
		case strings.ToLower(dft.VEHICLELAYOUTS.String()):
			manifest.HasVehicleLayouts = true
		case strings.ToLower(dft.VEHICLEMODELSETS.String()):
			manifest.HasVehicleModelsets = true
		case strings.ToLower(dft.VEHICLES.String()):
			manifest.HasVehicles = true
		case strings.ToLower(dft.WEAPONSFILE.String()):
			manifest.HasWeaponsFile = true
		default:
			// BUG FIX: logrus Warn("Invalid folder name", name) joined both
			// strings without a separator ("...namevehicles"); Warnf formats
			// the name readably.
			log.Warnf("Invalid folder name: %s", folder.Name())
		}
	}

	fxManifest, err := os.Create(g.Flags.OutputPath + "/fxmanifest.lua")
	if err != nil {
		return err
	}
	defer fxManifest.Close()

	return tmpl.Execute(fxManifest, manifest)
}
|
package util
import (
"go_web/pkg/logger"
"strconv"
)
// Int64ToString renders num as its base-10 decimal representation.
func Int64ToString(num int64) string {
	return string(strconv.AppendInt(nil, num, 10))
}
// StringToInt64 parses str as a base-10 signed 64-bit integer. A parse
// failure is logged and the zero value is returned.
func StringToInt64(str string) int64 {
	value, parseErr := strconv.ParseInt(str, 10, 64)
	if parseErr != nil {
		logger.LogError(parseErr)
	}
	return value
}
|
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"time"
"go.mongodb.org/mongo-driver/bson"
)
func GetPeopleEndpoint(response http.ResponseWriter, request *http.Request) {
fmt.Println("GetPeopleEndpoint - start")
response.Header().Set("content-type", "application/json")
var people []Person
fmt.Println("GetPeopleEndpoint - before client.Database().Collection()")
collection := client.Database("thepolyglotdeveloper").Collection("people")
fmt.Println("GetPeopleEndpoint - after client.Database().Collection()")
fmt.Println("GetPeopleEndpoint - before context.WithTimeout()")
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
fmt.Println("GetPeopleEndpoint - after context.WithTimeout()")
fmt.Println("GetPeopleEndpoint - before collection.Find()")
cursor, errFind := collection.Find(ctx, bson.M{})
fmt.Println("GetPeopleEndpoint - after collection.Find()")
if errFind != nil {
response.WriteHeader(http.StatusInternalServerError)
_, errWrite := response.Write([]byte(`{ "message": "` + errFind.Error() + `" }`))
if errWrite != nil {
log.Fatal(errWrite)
}
return
}
defer cursor.Close(ctx)
fmt.Println("GetPeopleEndpoint - before cursor.Next()")
for cursor.Next(ctx) {
fmt.Println("GetPeopleEndpoint - cursor.Next()")
var person Person
errDecode := cursor.Decode(&person)
if errDecode != nil {
log.Fatal(errDecode)
}
people = append(people, person)
}
fmt.Println("GetPeopleEndpoint - after cursor.Next()")
if errCursor := cursor.Err(); errCursor != nil {
response.WriteHeader(http.StatusInternalServerError)
_, errWrite := response.Write([]byte(`{ "message": "` + errCursor.Error() + `" }`))
if errWrite != nil {
log.Fatal(errWrite)
}
return
}
errEncode := json.NewEncoder(response).Encode(people)
if errEncode != nil {
log.Fatal(errEncode)
}
fmt.Println("GetPeopleEndpoint - finish")
}
|
package dalmodel
import (
"context"
"github.com/gremlinsapps/avocado_server/session"
"github.com/jinzhu/gorm"
)
// Hashtag is a GORM model for a uniquely named hashtag. It carries the
// standard gorm.Model columns plus the created/updated/deleted-by audit
// fields from AuditModel.
type Hashtag struct {
	gorm.Model
	AuditModel
	Name string `gorm:"not null;unique_index"`
}
// AuditModel records which user created, last updated, and deleted a row.
// Each association resolves through its matching *ByID foreign key into the
// User table's ID column.
type AuditModel struct {
	CreatedBy User `gorm:"foreignkey:CreatedByID;association_foreignkey:ID"`
	CreatedByID int
	UpdatedBy User `gorm:"foreignkey:UpdatedByID;association_foreignkey:ID"`
	UpdatedByID int
	DeletedBy User `gorm:"foreignkey:DeletedByID;association_foreignkey:ID"`
	DeletedByID int
}
// CreateAuditModel builds an AuditModel stamped with the session's user as
// both creator and last updater. It fails if no user is bound to ctx.
func CreateAuditModel(ctx context.Context) (*AuditModel, error) {
	userID, err := session.GetUserId(ctx)
	if err != nil {
		return nil, err
	}
	audit := AuditModel{}
	audit.CreatedByID = userID
	audit.UpdatedByID = userID
	return &audit, nil
}
// UpdateAuditModel stamps the session's user into data["UpdatedByID"].
// A nil map is treated as a no-op rather than an error.
func UpdateAuditModel(ctx context.Context, data map[string]interface{}) error {
	if data == nil {
		return nil
	}
	userID, err := session.GetUserId(ctx)
	if err != nil {
		return err
	}
	data["UpdatedByID"] = userID
	return nil
}
|
package main
import (
"log"
"net/http"
"github.com/Khamliuk/testsCI/controller"
"github.com/Khamliuk/testsCI/handler"
"github.com/Khamliuk/testsCI/mongo"
)
// main wires the Mongo-backed controller into the HTTP handler and serves on
// port 8080. Startup aborts if the database connection cannot be established.
func main() {
	db, err := mongo.New()
	if err != nil {
		log.Fatalf("could not create new db connection: %v", err)
	}
	log.Fatal(http.ListenAndServe(":8080", handler.New(controller.New(db))))
}
|
package server
import (
"context"
"log"
"time"
"github.com/asishshaji/startup/apps/auth/controller"
"github.com/asishshaji/startup/apps/auth/delivery"
"github.com/asishshaji/startup/apps/auth/repository"
"github.com/asishshaji/startup/apps/auth/usecase"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// App bundles the HTTP router, the auth controller, and the listen port for
// the auth service.
type App struct {
	httpRouter delivery.Router
	controller controller.AuthController
	port       string
}
// NewApp is the constructor: it connects to the database, assembles the
// repository/use-case/controller chain, and returns the app ready to Run.
//
// NOTE(review): repository name, signing key, and token TTL are hard-coded
// placeholders here — presumably meant to come from configuration.
func NewApp(router *delivery.Router, port string) *App {
	database := initDB()
	repo := repository.NewUserRepository(database, "asd")
	useCase := usecase.NewAuthUseCase(*repo, "ASDS", []byte("asd"), time.Hour*45)
	return &App{
		httpRouter: *router,
		port:       port,
		controller: controller.NewAuthController(useCase),
	}
}
// Run starts the server: it registers the signup/signin routes on the router
// and then blocks serving on the configured port.
func (app *App) Run() {
	log.Println("running server")
	app.httpRouter.POST("/signup", app.controller.Signup)
	app.httpRouter.POST("/signin", app.controller.Signin)
	// SERVE blocks for the lifetime of the server.
	app.httpRouter.SERVE(app.port)
}
// initDB connects to the local MongoDB instance, verifies the connection
// with a ping, and returns a handle to the "DB" database. Any failure is
// fatal: the service cannot run without its database.
func initDB() *mongo.Database {
	client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		// BUG FIX: the original Fatalf dropped err entirely (and misspelled
		// "occurred"), making connection failures undiagnosable.
		log.Fatalf("Error occurred while establishing connection to mongoDB: %v", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	if err = client.Connect(ctx); err != nil {
		log.Fatal(err)
	}
	// BUG FIX: ping under the same bounded context instead of
	// context.Background(), so an unresponsive server cannot hang startup
	// forever.
	if err = client.Ping(ctx, nil); err != nil {
		log.Fatal(err)
	}

	log.Println("Connected to MongoDB ")
	return client.Database("DB")
}
|
package zfs
// #include <stdlib.h>
// #include <libzfs.h>
// #include "common.h"
// #include "zpool.h"
// #include "zfs.h"
import "C"
import (
"encoding/json"
"fmt"
"strconv"
"time"
"errors"
)
// Reverse lookup tables (property name -> enum), populated in init and used
// by the JSON unmarshalers below.
var stringToDatasetPropDic = make(map[string]DatasetProp)
var stringToPoolPropDic = make(map[string]PoolProp)
// Effective upper bounds on the property enums: the smaller of what this
// binding declares and what the linked libzfs reports (set in init).
var zfsMaxDatasetProp DatasetProp
var zfsMaxPoolProp PoolProp
// init caps the property enumerations to what both this binding and the
// linked libzfs support, then builds the name -> enum lookup tables.
func init() {
	// Never iterate past what the linked libzfs version actually knows:
	// take the minimum of the Go-side and C-side property counts.
	if C.ZFS_NUM_PROPS > DatasetNumProps {
		zfsMaxDatasetProp = DatasetNumProps
	} else {
		zfsMaxDatasetProp = DatasetProp(C.ZFS_NUM_PROPS)
	}
	if C.ZPOOL_NUM_PROPS > PoolNumProps {
		zfsMaxPoolProp = PoolNumProps
	} else {
		zfsMaxPoolProp = PoolProp(C.ZPOOL_NUM_PROPS)
	}
	// Build the reverse maps used by the UnmarshalJSON implementations.
	for i := DatasetPropType; i < zfsMaxDatasetProp; i++ {
		stringToDatasetPropDic[i.String()] = i
	}
	for i := PoolPropName; i < zfsMaxPoolProp; i++ {
		stringToPoolPropDic[i.String()] = i
	}
}
// String returns the canonical libzfs name of the dataset property.
func (p DatasetProp) String() string {
	return C.GoString(C.zfs_prop_to_name((C.zfs_prop_t)(p)))
}
// MarshalJSON encodes the property as its quoted libzfs name.
func (p DatasetProp) MarshalJSON() ([]byte, error) {
	return json.Marshal(p.String())
}
// UnmarshalJSON decodes a quoted libzfs property name back into its enum
// value, rejecting names that are not in the lookup table.
func (p *DatasetProp) UnmarshalJSON(b []byte) error {
	var name string
	if err := json.Unmarshal(b, &name); err != nil {
		return err
	}
	prop, known := stringToDatasetPropDic[name]
	if !known {
		return fmt.Errorf("prop \"%s\" not exists", name)
	}
	*p = prop
	return nil
}
// String returns the canonical libzfs name of the pool property.
func (p PoolProp) String() string {
	return C.GoString(C.zpool_prop_to_name((C.zpool_prop_t)(p)))
}
// MarshalJSON encodes the property as its quoted libzfs name.
func (p PoolProp) MarshalJSON() ([]byte, error) {
	return json.Marshal(p.String())
}
// UnmarshalJSON decodes a quoted libzfs pool-property name back into its
// enum value, rejecting names that are not in the lookup table.
func (p *PoolProp) UnmarshalJSON(b []byte) error {
	var name string
	if err := json.Unmarshal(b, &name); err != nil {
		return err
	}
	prop, known := stringToPoolPropDic[name]
	if !known {
		return fmt.Errorf("prop \"%s\" not exists", name)
	}
	*p = prop
	return nil
}
//{"guid": {"value":"16859519823695578253", "source":"-"}}
// MarshalJSON serializes the property map keyed by property name, skipping
// entries whose value is "none" or the UINT64_MAX sentinel, and rendering
// the creation property as a formatted timestamp.
func (p DatasetProperties) MarshalJSON() ([]byte, error) {
	out := make(map[string]PropertyValue)
	sentinel := strconv.FormatUint(C.UINT64_MAX, 10)
	for prop, value := range p {
		if value.Value == sentinel || value.Value == "none" {
			continue
		}
		if prop == DatasetPropCreation {
			// Creation is stored as unix seconds; render it human-readably.
			seconds, _ := strconv.ParseInt(value.Value, 10, 64)
			value.Value = time.Unix(seconds, 0).Format("2006-01-02T15:04:05-0700")
		}
		out[prop.String()] = value
	}
	return json.Marshal(out)
}
// UnmarshalJSON fills the (pre-made) map from a name-keyed JSON object,
// rejecting any property name not known to the lookup table.
func (p *DatasetProperties) UnmarshalJSON(b []byte) error {
	if p == nil {
		return errors.New("map is nil. use make")
	}
	var decoded map[string]PropertyValue
	if err := json.Unmarshal(b, &decoded); err != nil {
		return err
	}
	for name, value := range decoded {
		prop, known := stringToDatasetPropDic[name]
		if !known {
			return fmt.Errorf("property \"%s\" not exist", name)
		}
		(*p)[prop] = value
	}
	return nil
}
// String renders the properties as JSON, or "" if marshaling fails.
func (p DatasetProperties) String() string {
	if data, err := json.Marshal(p); err == nil {
		return string(data)
	}
	return ""
}
// MarshalJSON serializes the pool properties keyed by property name,
// skipping entries whose value is "none" or the UINT64_MAX sentinel.
func (p PoolProperties) MarshalJSON() ([]byte, error) {
	out := make(map[string]PropertyValue)
	sentinel := strconv.FormatUint(C.UINT64_MAX, 10)
	for prop, value := range p {
		if value.Value == sentinel || value.Value == "none" {
			continue
		}
		out[prop.String()] = value
	}
	return json.Marshal(out)
}
// UnmarshalJSON fills the (pre-made) map from a name-keyed JSON object,
// rejecting any property name not known to the lookup table.
func (p *PoolProperties) UnmarshalJSON(b []byte) error {
	if p == nil {
		return errors.New("map is nil. use make")
	}
	var decoded map[string]PropertyValue
	if err := json.Unmarshal(b, &decoded); err != nil {
		return err
	}
	for name, value := range decoded {
		prop, known := stringToPoolPropDic[name]
		if !known {
			return fmt.Errorf("property \"%s\" not exist", name)
		}
		(*p)[prop] = value
	}
	return nil
}
// String renders the properties as JSON, or "" if marshaling fails.
func (p PoolProperties) String() string {
	if data, err := json.Marshal(p); err == nil {
		return string(data)
	}
	return ""
}
|
package match
import (
mesh_proto "github.com/kumahq/kuma/api/mesh/v1alpha1"
"github.com/kumahq/kuma/pkg/core/policy"
"github.com/kumahq/kuma/pkg/core/resources/model"
)
// ToConnectionPolicies casts a ResourceList to a slice of ConnectionPolicy.
func ToConnectionPolicies(policies model.ResourceList) []policy.ConnectionPolicy {
	items := policies.GetItems()
	result := make([]policy.ConnectionPolicy, len(items))
	for idx, item := range items {
		result[idx] = item.(policy.ConnectionPolicy)
	}
	return result
}
// RankedPolicy is a policy that matches some set of tags, together
// with the rank of the match. Higher ranks indicate more specific
// selector matches.
type RankedPolicy struct {
	Rank mesh_proto.TagSelectorRank
	Policy policy.ConnectionPolicy
}
// ConnectionPoliciesBySource finds all the connection policies that have a
// matching `Sources` selector. The resulting matches are not ordered.
func ConnectionPoliciesBySource(
	sourceTags map[string]string,
	policies []policy.ConnectionPolicy,
) []RankedPolicy {
	var matched []RankedPolicy
	for _, candidate := range policies {
		rank, ok := policy.MatchSelector(sourceTags, candidate.Sources())
		if !ok {
			continue
		}
		matched = append(matched, RankedPolicy{Rank: rank, Policy: candidate})
	}
	return matched
}
|
package collectors
import (
"encoding/json"
"os"
"sync"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
)
// Map of all available metric collectors: config-file collector name to a
// fresh (uninitialized) instance. Init looks configured collectors up here
// and skips unknown names.
var AvailableCollectors = map[string]MetricCollector{
	"likwid": new(LikwidCollector),
	"loadavg": new(LoadavgCollector),
	"memstat": new(MemstatCollector),
	"netstat": new(NetstatCollector),
	"ibstat": new(InfinibandCollector),
	"lustrestat": new(LustreCollector),
	"cpustat": new(CpustatCollector),
	"topprocs": new(TopProcsCollector),
	"nvidia": new(NvidiaCollector),
	"customcmd": new(CustomCmdCollector),
	"iostat": new(IOstatCollector),
	"diskstat": new(DiskstatCollector),
	"tempstat": new(TempCollector),
	"ipmistat": new(IpmiCollector),
	"gpfs": new(GpfsCollector),
	"cpufreq": new(CPUFreqCollector),
	"cpufreq_cpuinfo": new(CPUFreqCpuInfoCollector),
	"nfs3stat": new(Nfs3Collector),
	"nfs4stat": new(Nfs4Collector),
	"numastats": new(NUMAStatsCollector),
	"beegfs_meta": new(BeegfsMetaCollector),
	"beegfs_storage": new(BeegfsStorageCollector),
	"rapl": new(RAPLCollector),
	"rocm_smi": new(RocmSmiCollector),
	"self": new(SelfCollector),
	"schedstat": new(SchedstatCollector),
}
// Metric collector manager data structure. It owns the ticker-driven read
// loop started by Start and is shut down via the done channel (see Close).
type collectorManager struct {
	collectors []MetricCollector // List of metric collectors to read in parallel
	serial []MetricCollector // List of metric collectors to read serially
	output chan lp.CCMetric // Output channels
	done chan bool // channel to finish / stop metric collector manager
	ticker mct.MultiChanTicker // periodically ticking once each interval
	duration time.Duration // duration (for metrics that measure over a given duration)
	wg *sync.WaitGroup // wait group for all goroutines in cc-metric-collector
	config map[string]json.RawMessage // json encoded config for collector manager
	collector_wg sync.WaitGroup // internally used wait group for the parallel reading of collector
	parallel_run bool // Flag whether the collectors are currently read in parallel
}
// Metric collector manager access functions. Call Init, then AddOutput,
// then Start; Close blocks until the manager goroutine has finished.
type CollectorManager interface {
	Init(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfigFile string) error
	AddOutput(output chan lp.CCMetric)
	Start()
	Close()
}
// Init initializes a new metric collector manager by setting up:
// * output channel
// * done channel
// * wait group synchronization for goroutines (from variable wg)
// * ticker (from variable ticker)
// * configuration (read from config file in variable collectConfigFile)
// Initialization is done for all configured collectors
func (cm *collectorManager) Init(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfigFile string) error {
	cm.collectors = make([]MetricCollector, 0)
	cm.serial = make([]MetricCollector, 0)
	cm.output = nil
	cm.done = make(chan bool)
	cm.wg = wg
	cm.ticker = ticker
	cm.duration = duration

	// Read collector config file
	configFile, err := os.Open(collectConfigFile)
	if err != nil {
		cclog.Error(err.Error())
		return err
	}
	defer configFile.Close()
	jsonParser := json.NewDecoder(configFile)
	err = jsonParser.Decode(&cm.config)
	if err != nil {
		cclog.Error(err.Error())
		return err
	}

	// Initialize configured collectors
	for collectorName, collectorCfg := range cm.config {
		// Single comma-ok lookup instead of the original existence check
		// followed by a second map access.
		collector, found := AvailableCollectors[collectorName]
		if !found {
			cclog.ComponentError("CollectorManager", "SKIP unknown collector", collectorName)
			continue
		}
		// A collector that fails to initialize is skipped; the manager
		// keeps running with the remaining collectors.
		err = collector.Init(collectorCfg)
		if err != nil {
			cclog.ComponentError("CollectorManager", "Collector", collectorName, "initialization failed:", err.Error())
			continue
		}
		cclog.ComponentDebug("CollectorManager", "ADD COLLECTOR", collector.Name())
		// Parallel-safe collectors are read concurrently each tick; the
		// rest are read one after another.
		if collector.Parallel() {
			cm.collectors = append(cm.collectors, collector)
		} else {
			cm.serial = append(cm.serial, collector)
		}
	}
	return nil
}
// Start starts the metric collector manager
//
// It launches one goroutine that, on every tick, first reads all parallel
// collectors concurrently (tracked by collector_wg), waits for them, and then
// reads the serial collectors one by one. The goroutine exits when a value
// arrives on cm.done; its done() helper closes collectors, then closes the
// cm.done channel, which is what unblocks Close().
func (cm *collectorManager) Start() {
	tick := make(chan time.Time)
	cm.ticker.AddChannel(tick)
	cm.wg.Add(1)
	go func() {
		defer cm.wg.Done()
		// Collector manager is done
		done := func() {
			// close all metric collectors
			// If a parallel read phase is still in flight, wait for it so
			// collectors are not closed while being read.
			if cm.parallel_run {
				cm.collector_wg.Wait()
				cm.parallel_run = false
			}
			for _, c := range cm.collectors {
				c.Close()
			}
			close(cm.done)
			cclog.ComponentDebug("CollectorManager", "DONE")
		}
		// Wait for done signal or timer event
		for {
			select {
			case <-cm.done:
				done()
				return
			case t := <-tick:
				// Phase 1: kick off all parallel collectors for this tick.
				cm.parallel_run = true
				for _, c := range cm.collectors {
					// Wait for done signal or execute the collector
					select {
					case <-cm.done:
						done()
						return
					default:
						// Read metrics from collector c via goroutine
						cclog.ComponentDebug("CollectorManager", c.Name(), t)
						cm.collector_wg.Add(1)
						go func(myc MetricCollector) {
							myc.Read(cm.duration, cm.output)
							cm.collector_wg.Done()
						}(c)
					}
				}
				cm.collector_wg.Wait()
				cm.parallel_run = false
				// Phase 2: read the serial collectors one after another.
				for _, c := range cm.serial {
					// Wait for done signal or execute the collector
					select {
					case <-cm.done:
						done()
						return
					default:
						// Read metrics from collector c
						cclog.ComponentDebug("CollectorManager", c.Name(), t)
						c.Read(cm.duration, cm.output)
					}
				}
			}
		}
	}()
	// Collector manager is started
	cclog.ComponentDebug("CollectorManager", "STARTED")
}
// AddOutput adds the output channel to the metric collector manager.
// Call this before Start; collectors write their metrics to this channel.
func (cm *collectorManager) AddOutput(output chan lp.CCMetric) {
	cm.output = output
}
// Close finishes / stops the metric collector manager
//
// The send asks the Start goroutine to shut down; that goroutine closes
// cm.done when cleanup is complete, so the receive below blocks until the
// manager has fully stopped.
func (cm *collectorManager) Close() {
	cclog.ComponentDebug("CollectorManager", "CLOSE")
	cm.done <- true
	// wait for close of channel cm.done
	<-cm.done
}
// New creates a new initialized metric collector manager
func New(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfigFile string) (CollectorManager, error) {
	manager := &collectorManager{}
	if err := manager.Init(ticker, duration, wg, collectConfigFile); err != nil {
		return nil, err
	}
	return manager, nil
}
|
package abstract_factory_test
import (
"design_pattern/creational/abstract_factory"
"testing"
)
// TestAdidas verifies the adidas factory produces a shoe with the expected
// logo and size.
func TestAdidas(t *testing.T) {
	factory, _ := abstract_factory.GetSportsFactory("adidas")
	shoe := factory.MakeShoe()
	if got := shoe.GetLogo(); got != "adidas" {
		t.Errorf("adidas shoe logo = %v, want %v", got, "adidas")
	} else {
		t.Logf("adidas shoe logo = %v, want %v", got, "adidas")
	}
	if got := shoe.GetSize(); got != 14 {
		t.Errorf("adidas shoe size = %v, want %v", got, 14)
	} else {
		t.Logf("adidas shoe size = %v, want %v", got, 14)
	}
}
// TestNike verifies the nike factory produces a shoe with the expected logo
// and size.
func TestNike(t *testing.T) {
	factory, _ := abstract_factory.GetSportsFactory("nike")
	shoe := factory.MakeShoe()
	if got := shoe.GetLogo(); got != "nike" {
		t.Errorf("nike shoe logo = %v, want %v", got, "nike")
	} else {
		t.Logf("nike shoe logo = %v, want %v", got, "nike")
	}
	if got := shoe.GetSize(); got != 14 {
		t.Errorf("nike shoe size = %v, want %v", got, 14)
	} else {
		t.Logf("nike shoe size = %v, want %v", got, 14)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.