text stringlengths 11 4.05M |
|---|
package main
import (
"fmt"
"math"
)
// main prints the decimal digit sum of each power of two from 2^1
// through 2^15, one per line.
func main() {
	for exp := 1.0; exp < 16.0; exp++ {
		fmt.Println(sumDigits(math.Pow(2, exp)))
	}
}
// sumDigits returns the sum of the decimal digits of n.
//
// n is truncated toward zero first, so sumDigits(123.9) == 6. The helper
// is intended for small non-negative integers stored in a float64 (the
// callers pass powers of two); values above 2^53 lose precision before
// they reach this function.
func sumDigits(n float64) float64 {
	// Work on an integer copy: the original repeatedly divided the float
	// by 10, which relies on truncation of an inexact quotient; integer
	// arithmetic is exact.
	sum := 0
	for v := int64(n); v > 0; v /= 10 {
		sum += int(v % 10)
	}
	return float64(sum)
}
|
package wiki
import (
"testing"
"sort"
"io/ioutil"
"os"
)
// setupPageStore creates a diskStore backed by a fresh temporary
// directory. It panics on failure, since no test can proceed without
// a working store.
func setupPageStore() *diskStore {
	dir, err := ioutil.TempDir("", "wikitest")
	if err != nil {
		panic(err)
	}
	return &diskStore{path: dir}
}
// cleanPageStore removes every page from the store and then its backing
// directory. Errors are deliberately ignored: this is best-effort
// cleanup at test teardown.
func cleanPageStore(store *diskStore) {
	ids, _ := store.ListAll() // a failed listing just means nothing to delete
	for _, id := range ids {
		store.Delete(id)
	}
	os.Remove(store.path)
}
// TestStoreCreateRead verifies that a created page can be read back with
// the same id, title, and body.
func TestStoreCreateRead(t *testing.T) {
	store := setupPageStore()
	defer cleanPageStore(store)
	page := &Page{Title: "Sample Page", Body: "This is a sample page for testing purposes."}
	// Setup failures abort via t.Fatal; the original used t.Error+return,
	// which is the same control flow with more noise.
	id, err := store.Create(page)
	if err != nil {
		t.Fatal(err)
	}
	if page.Id != id {
		t.Fatalf("diskStore.Create: expected %q, found %q", page.Id, id)
	}
	pageRead, err := store.Read(id)
	if err != nil {
		t.Fatal(err)
	}
	// The field checks are independent, so report all mismatches instead
	// of stopping at the first.
	if pageRead.Id != page.Id {
		t.Errorf("diskStore.Read(%q): expected %q, found %q", id, page.Id, pageRead.Id)
	}
	if pageRead.Title != page.Title {
		t.Errorf("diskStore.Read(%q): expected %q, found %q", id, page.Title, pageRead.Title)
	}
	if pageRead.Body != page.Body {
		t.Errorf("diskStore.Read(%q): expected %q, found %q", id, page.Body, pageRead.Body)
	}
}
// TestStoreUpdate verifies that field changes written via Update are
// visible on a subsequent Read.
func TestStoreUpdate(t *testing.T) {
	store := setupPageStore()
	defer cleanPageStore(store)
	page := &Page{Title: "Sample Page", Body: "This is a sample page for testing purposes."}
	id, err := store.Create(page)
	if err != nil {
		t.Fatal(err)
	}
	page.Title = "Modified Page Title"
	page.Body = "This is a modified page body."
	if err := store.Update(page); err != nil {
		t.Fatal(err)
	}
	pageRead, err := store.Read(id)
	if err != nil {
		t.Fatal(err)
	}
	// Independent checks: report both mismatches if both fields are wrong.
	if pageRead.Title != page.Title {
		t.Errorf("diskStore.Read(%q): expected %q, found %q", id, page.Title, pageRead.Title)
	}
	if pageRead.Body != page.Body {
		t.Errorf("diskStore.Read(%q): expected %q, found %q", id, page.Body, pageRead.Body)
	}
}
// TestStoreDelete verifies that a page exists after Create and can no
// longer be read after Delete.
func TestStoreDelete(t *testing.T) {
	store := setupPageStore()
	defer cleanPageStore(store)
	page := &Page{Title: "Sample Page", Body: "This is a sample page for testing purposes."}
	id, err := store.Create(page)
	if err != nil {
		t.Fatal(err)
	}
	// Sanity-check the page is readable before deleting it.
	if _, err := store.Read(id); err != nil {
		t.Fatal(err)
	}
	if err := store.Delete(id); err != nil {
		t.Fatal(err)
	}
	if _, err := store.Read(id); err == nil {
		t.Errorf("diskStore.Delete(%q): page was not deleted", id)
	}
}
// TestStoreListAll verifies that ListAll returns exactly the ids of the
// created pages. expected is sorted before comparison; ListAll is assumed
// to return ids in sorted order — TODO confirm against the diskStore
// implementation.
func TestStoreListAll(t *testing.T) {
	store := setupPageStore()
	defer cleanPageStore(store)
	pages := []*Page{
		{Title: "Sample Page 1", Body: "This is a sample page for testing purposes."},
		{Title: "Sample Page 2", Body: "This is a sample page for testing purposes."},
		{Title: "Sample Page 3", Body: "This is a sample page for testing purposes."},
	}
	expected := make([]string, len(pages))
	for k, page := range pages {
		id, err := store.Create(page)
		if err != nil {
			t.Fatal(err)
		}
		expected[k] = string(id)
	}
	sort.Strings(expected)
	found, err := store.ListAll()
	if err != nil {
		t.Fatal(err)
	}
	if len(found) != len(expected) {
		t.Fatalf("diskStore.ListAll: expected %d pages, found %d", len(expected), len(found))
	}
	for i := range found {
		if expected[i] != string(found[i]) {
			t.Errorf("diskStore.ListAll: expected %q, found %q", expected[i], found[i])
		}
	}
}
// TestStoreFindByTitle verifies that FindByTitle resolves each created
// page's title to its id, and yields the empty id (with no error) for a
// title that does not exist.
func TestStoreFindByTitle(t *testing.T) {
	store := setupPageStore()
	defer cleanPageStore(store)
	pages := []*Page{
		{Title: "Sample Page 1", Body: "This is a sample page for testing purposes."},
		{Title: "Sample Page 2", Body: "This is a sample page for testing purposes."},
		{Title: "Sample Page 3", Body: "This is a sample page for testing purposes."},
	}
	for _, page := range pages {
		if _, err := store.Create(page); err != nil {
			t.Fatal(err)
		}
	}
	for _, page := range pages {
		pageId, err := store.FindByTitle(page.Title)
		if err != nil {
			t.Fatal(err)
		}
		if pageId != page.Id {
			t.Errorf("diskStore.FindByTitle: expected %q, found %q", page.Id, pageId)
		}
	}
	pageId, err := store.FindByTitle("unexistent page")
	if err != nil {
		t.Fatal(err)
	}
	if pageId != "" {
		// Message fixed: the assertion compares against the empty id, not nil.
		t.Errorf("diskStore.FindByTitle: expected empty id, found %q", pageId)
	}
}
// TestStoreUnexistentPageError verifies that reading a missing page fails
// with an UnexistentPageError carrying the requested id.
func TestStoreUnexistentPageError(t *testing.T) {
	store := setupPageStore()
	defer cleanPageStore(store)
	id := PageId("unexistent")
	_, err := store.Read(id)
	if err == nil {
		t.Fatal("diskStore.Read: an error was expected")
	}
	unexistentPageErr, ok := err.(UnexistentPageError)
	if !ok {
		t.Fatalf("diskStore.Read: UnexistentPageError was expected, got %T", err)
	}
	if unexistentPageErr.Id != id {
		// Bug fix: the original called t.Error with printf verbs, which
		// prints the verbs literally; t.Errorf is required for formatting.
		t.Errorf("UnexistentPageError.Id: expected %q, found %q", id, unexistentPageErr.Id)
	}
}
|
package main
// 扫雷游戏
// https://leetcode.com/problems/minesweeper/#/description
import (
"fmt"
)
// updatedBoard receives the revealed board produced by updateBoard;
// searchNode writes its results here.
var updatedBoard [][]byte

// visit marks cells already expanded by searchNode (1 = visited), which
// stops the recursion from revisiting 'B' regions forever.
var visit [][]byte
// updateBoard reveals the board cell at click following Minesweeper rules
// (LeetCode 529): clicking a mine marks it 'X'; clicking an empty cell
// flood-reveals its region via searchNode. The revealed board is stored
// in the package-level updatedBoard and returned.
func updateBoard(board [][]byte, click []int) [][]byte {
	// Deep-copy every row. The original used copy(updatedBoard, board),
	// which copies only the row slice headers, so writes to updatedBoard
	// silently mutated the caller's board.
	updatedBoard = make([][]byte, len(board))
	visit = make([][]byte, len(board))
	for i := range board {
		updatedBoard[i] = make([]byte, len(board[i]))
		copy(updatedBoard[i], board[i])
		visit[i] = make([]byte, len(board[i]))
	}
	x, y := click[0], click[1]
	if board[x][y] == 'M' {
		// A clicked mine ends the game: mark it and stop.
		updatedBoard[x][y] = 'X'
	} else {
		searchNode(board, x, y)
	}
	return updatedBoard
}
// searchNode flood-reveals cell (x, y), which is known not to be a mine.
// If the cell has adjacent mines it shows their count; otherwise it
// becomes 'B' and the search recurses into every unvisited neighbour.
// Results go to the package-level updatedBoard; visit guards recursion.
func searchNode(board [][]byte, x int, y int) {
	visit[x][y] = 1
	h := len(board)
	w := len(board[0])
	mineCount := searchMine(board, x, y)
	if mineCount > 0 {
		// Base case: adjacent mines exist — record the digit and stop.
		updatedBoard[x][y] = byte(mineCount&0xff) + '0'
	} else {
		updatedBoard[x][y] = 'B'
		// No adjacent mines: continue into the 8-neighbourhood, clamped
		// to the board edges.
		startX := 0
		startY := 0
		if x > 0 {
			startX = x - 1
		}
		if y > 0 {
			startY = y - 1
		}
		for i := startX; i < h && i <= x+1; i++ {
			for j := startY; j < w && j <= y+1; j++ {
				if visit[i][j] == 0 {
					searchNode(board, i, j)
				}
			}
		}
	}
}
// searchMine counts the mines ('M') in the 8-neighbourhood of (x, y),
// excluding the cell itself.
func searchMine(board [][]byte, x int, y int) int {
	rows, cols := len(board), len(board[0])
	lowX, lowY := x-1, y-1
	if lowX < 0 {
		lowX = 0
	}
	if lowY < 0 {
		lowY = 0
	}
	count := 0
	for i := lowX; i <= x+1 && i < rows; i++ {
		for j := lowY; j <= y+1 && j < cols; j++ {
			if (i != x || j != y) && board[i][j] == 'M' {
				count++
			}
		}
	}
	return count
}
// PrintMap writes board to stdout, one row per line, each row framed by
// '|' characters, with a blank line before and after.
func PrintMap(board [][]byte) {
	fmt.Print("\n")
	for _, row := range board {
		fmt.Print("|")
		for j, cell := range row {
			if j == 0 {
				fmt.Print(" " + string(cell) + " ")
			} else {
				fmt.Print(string(cell) + " ")
			}
		}
		fmt.Print("|\n")
	}
	fmt.Print("\n")
}
// main builds the sample 5x5 board, clicks cell (0, 0), and prints the
// board before and after the reveal, followed by the visit matrix.
func main() {
	board := [][]byte{
		{'E', 'E', 'E', 'E', 'E'},
		{'E', 'E', 'M', 'E', 'E'},
		{'E', 'E', 'E', 'E', 'E'},
		{'E', 'E', 'E', 'E', 'E'},
		{'E', 'E', 'E', 'E', 'E'},
	}
	PrintMap(board)
	updateBoard(board, []int{0, 0})
	PrintMap(updatedBoard)
	fmt.Println(visit)
}
|
package api
import (
"errors"
"fmt"
"os"
"sort"
"strings"
"time"
"github.com/sirupsen/logrus"
)
// URL path templates for the remote ticket API.
const (
	ticketurl    = "tickets/%d"
	ticketsurl   = "tickets"
	ticketsquery = "?per_page=100&page=%d&updated_since=%s"
	// NOTE(review): identifier is misspelled ("converstaions"); kept as-is
	// because other files in this package may reference it.
	converstaions = "tickets/%d/conversations?per_page=100"
	oldticketsurl = "search/tickets?query=\"created_at:<'%s' AND status:2\""
)
// Ticket statuses as numbered by the remote API (the scheme starts at 2).
// NOTE(review): Go style would prefer MixedCaps names (Open, Pending, ...),
// but renaming exported constants would break callers in other files.
const (
	OPEN = iota + 2
	PENDING
	RESOLVED
	CLOSED
)
// EncodePath percent-encodes the characters that break the ticket-search
// URL — single quotes and spaces — while leaving every other character
// (notably < and >) unchanged.
func EncodePath(path string) string {
	// strings.NewReplacer performs all substitutions in one deterministic
	// pass; the original iterated a map (random order, harmless here but
	// needlessly so) and rescanned the string once per entry.
	return strings.NewReplacer("'", "%27", " ", "%20").Replace(path)
}
// FormatDate converts date object into a RFC3339 compliant string
func FormatDate(t time.Time) string {
return time.Time.Format(t, time.RFC3339)
}
// LastMonday returns date object for last week's monday
func LastMonday() time.Time {
location, _ := time.LoadLocation(os.Getenv("API_LOC"))
now := time.Now()
var sundayOffset int
if now.Weekday() == 0 {
sundayOffset = 7
}
lastDay := now.AddDate(0, 0, -int(now.Weekday())+sundayOffset-6)
lastDateMidnight := time.Date(lastDay.Year(), lastDay.Month(), lastDay.Day(), 0, 0, 0, 0, location)
return lastDateMidnight
}
// filterTicketsBetweenDates returns the tickets whose CreatedAt falls
// within (d1, end of d2], i.e. d2 is inclusive of the whole day.
func filterTicketsBetweenDates(tickets []Ticket, d1, d2 time.Time) []Ticket {
	// AddDate once, outside the loop, to include the last day as well.
	// (The original also ran fmt.Println on every comparison — leftover
	// debug output removed.)
	end := d2.AddDate(0, 0, 1)
	var result []Ticket
	for _, e := range tickets {
		if e.CreatedAt.After(d1) && e.CreatedAt.Before(end) {
			result = append(result, e)
		}
	}
	return result
}
// SortTicketsByCount aggregates tickets by Subject into Counter values
// and returns them sorted by descending count, ties broken by descending
// name. Percent is the subject's share of all tickets, two decimals.
func SortTicketsByCount(tickets []Ticket) []Counter {
	var result []Counter
	counters := map[string]Counter{}
	for _, e := range tickets {
		// The subject's entry is overwritten on every occurrence; the
		// final write carries the final count and percentage.
		currentCounter := counters[e.Subject].Count + 1
		// Strip the fixed alarm prefix so Name keeps only the site part.
		name := strings.Replace(e.Subject, "Access Point(s) Down Domio - ", "", -1)
		counters[e.Subject] = Counter{
			ID:      e.ID,
			Name:    name,
			Count:   currentCounter,
			Percent: fmt.Sprintf("%.02f", float64(currentCounter)*100.0/float64(len(tickets))),
		}
	}
	for _, v := range counters {
		result = append(result, v)
	}
	// Sort by count (desc), keeping equal counts ordered by name (desc).
	// (The original comment said "by age", which was stale.)
	sort.SliceStable(result, func(i, j int) bool {
		if result[i].Count == result[j].Count {
			return result[i].Name > result[j].Name
		}
		return result[i].Count > result[j].Count
	})
	return result
}
// IsVR reports whether reqVR names one of the accepted VR properties.
func IsVR(reqVR string) bool {
	switch reqVR {
	case "domio", "minthouse", "frontdesk":
		return true
	default:
		return false
	}
}
// VR describes a vacation-rental brand: its lookup name, the numeric ID
// used when querying the ticket API, and its logo asset filename.
type VR struct {
	Name string
	ID   int64
	Logo string
}
// GetRequestedVR returns the VR whose Name equals name, logging the match
// at info level, or an error when the name is unknown.
func GetRequestedVR(name string, logger *logrus.Logger) (*VR, error) {
	vrs := []VR{
		{"domio", 14018909403, "domio.png"},
		{"minthouse", 14032290882, "minthouse.png"},
		{"frontdesk", 14034833809, "frontdesk.png"},
	}
	for i := range vrs {
		if vrs[i].Name == name {
			logger.Infof("Requested VR is %+v\n", vrs[i])
			// Return a pointer to a copy rather than to the range/backing
			// storage (silences gosec G601 and avoids aliasing surprises).
			vr := vrs[i]
			return &vr, nil
		}
	}
	return nil, errors.New("check the name, VR wasn't found")
}
|
package odoo
import (
"fmt"
)
// IrQwebFieldDuration represents ir.qweb.field.duration model.
type IrQwebFieldDuration struct {
	// NOTE(review): the tag option was previously misspelled "omptempty",
	// so empty fields were never actually omitted from the RPC payload;
	// fixed to "omitempty" on all three fields.
	LastUpdate  *Time   `xmlrpc:"__last_update,omitempty"`
	DisplayName *String `xmlrpc:"display_name,omitempty"`
	Id          *Int    `xmlrpc:"id,omitempty"`
}
// IrQwebFieldDurations represents an array of ir.qweb.field.duration models.
type IrQwebFieldDurations []IrQwebFieldDuration

// IrQwebFieldDurationModel is the odoo model name used in every RPC call below.
const IrQwebFieldDurationModel = "ir.qweb.field.duration"
// Many2One convert IrQwebFieldDuration to *Many2One.
// Only the id half of the pair is carried; the display name is left empty.
func (iqfd *IrQwebFieldDuration) Many2One() *Many2One {
	return NewMany2One(iqfd.Id.Get(), "")
}
// CreateIrQwebFieldDuration creates a new ir.qweb.field.duration model
// and returns its id. It returns -1 both on error and when the server
// reports no created id.
func (c *Client) CreateIrQwebFieldDuration(iqfd *IrQwebFieldDuration) (int64, error) {
	ids, err := c.CreateIrQwebFieldDurations([]*IrQwebFieldDuration{iqfd})
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		return -1, nil
	default:
		return ids[0], nil
	}
}
// CreateIrQwebFieldDurations creates new ir.qweb.field.duration models
// and returns their ids. (The original doc comment misnamed this
// function with the singular form.)
func (c *Client) CreateIrQwebFieldDurations(iqfds []*IrQwebFieldDuration) ([]int64, error) {
	// Pre-size the interface slice; the length is known up front.
	vv := make([]interface{}, 0, len(iqfds))
	for _, v := range iqfds {
		vv = append(vv, v)
	}
	return c.Create(IrQwebFieldDurationModel, vv)
}
// UpdateIrQwebFieldDuration updates an existing ir.qweb.field.duration record.
// The record is identified by iqfd.Id.
func (c *Client) UpdateIrQwebFieldDuration(iqfd *IrQwebFieldDuration) error {
	return c.UpdateIrQwebFieldDurations([]int64{iqfd.Id.Get()}, iqfd)
}

// UpdateIrQwebFieldDurations updates existing ir.qweb.field.duration records.
// All records (represented by ids) will be updated by iqfd values.
func (c *Client) UpdateIrQwebFieldDurations(ids []int64, iqfd *IrQwebFieldDuration) error {
	return c.Update(IrQwebFieldDurationModel, ids, iqfd)
}
// DeleteIrQwebFieldDuration deletes an existing ir.qweb.field.duration
// record identified by id.
func (c *Client) DeleteIrQwebFieldDuration(id int64) error {
	ids := []int64{id}
	return c.DeleteIrQwebFieldDurations(ids)
}

// DeleteIrQwebFieldDurations deletes the existing ir.qweb.field.duration
// records identified by ids.
func (c *Client) DeleteIrQwebFieldDurations(ids []int64) error {
	return c.Delete(IrQwebFieldDurationModel, ids)
}
// GetIrQwebFieldDuration gets the existing ir.qweb.field.duration record
// with the given id, or an error when it does not exist.
func (c *Client) GetIrQwebFieldDuration(id int64) (*IrQwebFieldDuration, error) {
	recs, err := c.GetIrQwebFieldDurations([]int64{id})
	if err != nil {
		return nil, err
	}
	if recs == nil || len(*recs) == 0 {
		return nil, fmt.Errorf("id %v of ir.qweb.field.duration not found", id)
	}
	return &(*recs)[0], nil
}
// GetIrQwebFieldDurations gets the existing ir.qweb.field.duration
// records with the given ids. Missing ids are simply absent from the
// result rather than reported as an error.
func (c *Client) GetIrQwebFieldDurations(ids []int64) (*IrQwebFieldDurations, error) {
	iqfds := &IrQwebFieldDurations{}
	if err := c.Read(IrQwebFieldDurationModel, ids, nil, iqfds); err != nil {
		return nil, err
	}
	return iqfds, nil
}
// FindIrQwebFieldDuration finds the first ir.qweb.field.duration record
// matching criteria, or an error when none matches.
func (c *Client) FindIrQwebFieldDuration(criteria *Criteria) (*IrQwebFieldDuration, error) {
	recs := &IrQwebFieldDurations{}
	if err := c.SearchRead(IrQwebFieldDurationModel, criteria, NewOptions().Limit(1), recs); err != nil {
		return nil, err
	}
	if len(*recs) == 0 {
		return nil, fmt.Errorf("ir.qweb.field.duration was not found with criteria %v", criteria)
	}
	return &(*recs)[0], nil
}
// FindIrQwebFieldDurations finds ir.qweb.field.duration records by
// querying with criteria and filtering with options. An empty result is
// not an error.
func (c *Client) FindIrQwebFieldDurations(criteria *Criteria, options *Options) (*IrQwebFieldDurations, error) {
	iqfds := &IrQwebFieldDurations{}
	if err := c.SearchRead(IrQwebFieldDurationModel, criteria, options, iqfds); err != nil {
		return nil, err
	}
	return iqfds, nil
}
// FindIrQwebFieldDurationIds finds record ids by querying with criteria
// and filtering with options. On error it returns a non-nil empty slice
// (the established contract of this generated API) alongside the error.
func (c *Client) FindIrQwebFieldDurationIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(IrQwebFieldDurationModel, criteria, options)
	if err != nil {
		return []int64{}, err
	}
	return ids, nil
}
// FindIrQwebFieldDurationId finds the first record id matching criteria
// and options; it returns -1 with an error when nothing matches.
func (c *Client) FindIrQwebFieldDurationId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(IrQwebFieldDurationModel, criteria, options)
	switch {
	case err != nil:
		return -1, err
	case len(ids) > 0:
		return ids[0], nil
	default:
		return -1, fmt.Errorf("ir.qweb.field.duration was not found with criteria %v and options %v", criteria, options)
	}
}
|
package main
import (
"flag"
"fmt"
"github.com/hyperhq/hyper/client"
"os"
)
// main is the hyper CLI entry point: it connects to the local hyper
// daemon over its unix socket, handles -help/-version, and dispatches the
// remaining arguments as a subcommand.
func main() {
	const (
		proto = "unix"
		addr  = "/var/run/hyper.sock"
	)
	cli := client.NewHyperClient(proto, addr, nil)

	flHelp := flag.Bool("help", false, "Help Message")
	flVersion := flag.Bool("version", false, "Version Message")
	flag.Usage = func() { cli.Cmd("help") }
	flag.Parse()

	if flag.NArg() == 0 {
		cli.Cmd("help")
		return
	}
	if *flHelp { // booleans compare directly; "== true" was redundant
		cli.Cmd("help")
	}
	if *flVersion {
		cli.Cmd("version")
	}
	if err := cli.Cmd(flag.Args()...); err != nil {
		// Errors go to stderr, and the exit status is 1: os.Exit(-1)
		// wraps to the nonstandard status 255 on Unix.
		fmt.Fprintf(os.Stderr, "%s ERROR: %s\n", os.Args[0], err.Error())
		os.Exit(1)
	}
}
|
package model
import (
"fmt"
"strings"
"github.com/docker/libcompose/utils"
"github.com/jinzhu/gorm"
"github.com/rancher/go-rancher/v2"
)
// Template is a catalog template: the persisted columns plus the derived
// fields (Categories, Labels, Versions, Category) that are populated from
// side tables and excluded from the template's own SQL mapping.
type Template struct {
	EnvironmentId string `json:"environmentId"`
	// CatalogId is a cascading foreign key: deleting a catalog deletes
	// its templates.
	CatalogId      uint   `sql:"type:integer REFERENCES catalog(id) ON DELETE CASCADE"`
	Name           string `json:"name"`
	IsSystem       string `json:"isSystem"`
	Description    string `json:"description"`
	DefaultVersion string `json:"defaultVersion" yaml:"default_version"`
	Path           string `json:"path"`
	Maintainer     string `json:"maintainer"`
	License        string `json:"license"`
	ProjectURL     string `json:"projectURL" yaml:"project_url"`
	UpgradeFrom    string `json:"upgradeFrom"`
	FolderName     string `json:"folderName"`
	// Catalog is the catalog's name; note it serializes as "catalogId".
	Catalog string `json:"catalogId"`
	Base    string `json:"templateBase"`
	Icon    string `json:"icon"`
	IconFilename string `json:"iconFilename"`
	Readme       string `json:"readme"`
	// Derived fields, filled in by fillInTemplate / LookupTemplates;
	// sql:"-" keeps gorm from mapping them to columns.
	Categories []string          `sql:"-" json:"categories"`
	Labels     map[string]string `sql:"-" json:"labels"`
	Versions   []Version         `sql:"-"`
	Category   string            `sql:"-"`
}
// TemplateModel is the database row shape: gorm persistence fields (Base)
// plus the template payload.
type TemplateModel struct {
	Base
	Template
}

// TemplateResource is the API representation of a template, adding
// resource metadata, per-version links, and the default version id.
type TemplateResource struct {
	client.Resource
	Template
	VersionLinks             map[string]string `json:"versionLinks"`
	DefaultTemplateVersionId string            `json:"defaultTemplateVersionId"`
}

// TemplateCollection is the API list wrapper around TemplateResource items.
type TemplateCollection struct {
	client.Collection
	Data []TemplateResource `json:"data,omitempty"`
}
// LookupTemplate finds the single template identified by catalog name,
// template base, and folder name that is visible to environmentId (or to
// "global"). It returns nil when the record does not exist.
//
// NOTE(review): errors other than ErrRecordNotFound are silently ignored
// and fall through to fillInTemplate with a zero-valued model — confirm
// whether such errors should instead be propagated.
func LookupTemplate(db *gorm.DB, environmentId, catalog, folderName, base string) *Template {
	var templateModel TemplateModel
	if err := db.Raw(`
SELECT catalog_template.*
FROM catalog_template, catalog
WHERE (catalog_template.environment_id = ? OR catalog_template.environment_id = ?)
AND catalog_template.catalog_id = catalog.id
AND catalog.name = ?
AND catalog_template.base = ?
AND catalog_template.folder_name = ?
`, environmentId, "global", catalog, base, folderName).Scan(&templateModel).Error; err == gorm.ErrRecordNotFound {
		return nil
	}
	fillInTemplate(db, &templateModel)
	return &templateModel.Template
}
// fillInTemplate populates the derived fields of a single template row:
// catalog name, categories, labels, and versions, each via its own query.
// (The batched *Map helpers below do the same for many templates at once.)
func fillInTemplate(db *gorm.DB, templateModel *TemplateModel) {
	catalog := GetCatalog(db, templateModel.CatalogId)
	if catalog != nil {
		templateModel.Catalog = catalog.Name
	}
	templateModel.Categories = lookupTemplateCategories(db, templateModel.ID)
	templateModel.Labels = lookupTemplateLabels(db, templateModel.ID)
	templateModel.Versions = lookupVersions(db, templateModel.ID)
}
// templateCategoryMap returns, for each template id in templateIDList,
// the list of category names attached to that template.
func templateCategoryMap(db *gorm.DB, templateIDList []int) map[int][]string {
	const categoriesQuery = `
SELECT template_id, category_id, name
FROM catalog_template_category tc
JOIN catalog_category c ON (tc.category_id = c.id)
WHERE tc.template_id IN ( ? )`
	rows := []CategoryAndTemplate{}
	db.Raw(categoriesQuery, templateIDList).Find(&rows)
	// Group category names by template id.
	result := make(map[int][]string)
	for _, row := range rows {
		result[row.TemplateID] = append(result[row.TemplateID], row.Name)
	}
	return result
}
// templateLabelMap returns, for each template id in templateIDList, the
// key/value labels attached to that template.
func templateLabelMap(db *gorm.DB, templateIDList []int) map[int]map[string]string {
	labelsQuery := "SELECT template_id, `key`, value FROM catalog_label cl WHERE cl.template_id IN ( ? )"
	var rows []TemplateLabelModel
	db.Raw(labelsQuery, templateIDList).Find(&rows)
	result := make(map[int]map[string]string)
	for _, row := range rows {
		tid := int(row.TemplateId)
		if result[tid] == nil {
			result[tid] = make(map[string]string)
		}
		result[tid][row.Key] = row.Value
	}
	return result
}
// templateVersionMap returns, for each template id in templateIDList, the
// template's versions with their labels and file contents attached. It
// runs three batched queries (versions, version labels, version files)
// and stitches the results together in memory.
func templateVersionMap(db *gorm.DB, templateIDList []int) map[int][]Version {
	var versionList []VersionModel
	// all versions with list of template IDs
	versionsQuery := `
SELECT *
FROM catalog_version
WHERE catalog_version.template_id IN ( ? )`
	db.Raw(versionsQuery, templateIDList).Find(&versionList)
	// Index versions by their own id so labels/files can be attached, and
	// collect the ids for the follow-up queries.
	versionMap := map[uint]VersionModel{}
	var versionIDs []int
	for _, version := range versionList {
		versionIDs = append(versionIDs, int(version.ID))
		versionMap[version.ID] = version
	}
	var versionLabelList []VersionLabelModel
	versionLabelsQuery := `
SELECT *
FROM catalog_version_label
WHERE catalog_version_label.version_id IN (?)`
	db.Raw(versionLabelsQuery, versionIDs).Find(&versionLabelList)
	for _, label := range versionLabelList {
		// Lazily create the label map; map values are copies, so the
		// modified struct must be written back into versionMap.
		if versionMap[label.VersionId].Labels == nil {
			version := versionMap[label.VersionId]
			version.Labels = make(map[string]string)
			versionMap[label.VersionId] = version
		}
		versionMap[label.VersionId].Labels[label.Key] = label.Value
	}
	var versionFiles []FileModel
	versionFilesQuery := `
SELECT *
FROM catalog_file
WHERE catalog_file.version_id IN ( ? )`
	db.Raw(versionFilesQuery, versionIDs).Find(&versionFiles)
	for _, file := range versionFiles {
		// Same copy-out / write-back dance for the Files slice.
		version := versionMap[file.VersionId]
		version.Files = append(versionMap[file.VersionId].Files, file.File)
		versionMap[file.VersionId] = version
	}
	// Finally, group the assembled versions under their template ids.
	templateVersionMap := map[int][]Version{}
	for _, version := range versionMap {
		templateVersionMap[int(version.TemplateId)] = append(templateVersionMap[int(version.TemplateId)], version.Version)
	}
	return templateVersionMap
}
// catalogMap returns a catalog-id → catalog-name map covering the
// catalogs that own the templates in templateIDList.
func catalogMap(db *gorm.DB, templateIDList []int) map[uint]string {
	const catalogQuery = `
SELECT catalog.*
FROM catalog
JOIN catalog_template ON (catalog.id = catalog_template.catalog_id)
WHERE catalog_template.id IN ( ? )`
	var catalogs []CatalogModel
	db.Raw(catalogQuery, templateIDList).Find(&catalogs)
	names := make(map[uint]string, len(catalogs))
	for _, cat := range catalogs {
		names[cat.ID] = cat.Name
	}
	return names
}
// LookupTemplates returns the templates visible to environmentId (or
// "global"), optionally restricted to one catalog name and one template
// base, then filtered by category: a template must carry every entry of
// categories and none of categoriesNe. Derived fields (categories,
// labels, versions, catalog name) are filled in via batched queries.
func LookupTemplates(db *gorm.DB, environmentId, catalog, templateBaseEq string, categories, categoriesNe []string) []Template {
	var templateModels []TemplateModel
	// params and query grow together: each optional filter appends both a
	// placeholder value and its WHERE clause.
	params := []interface{}{environmentId, "global"}
	if catalog != "" {
		params = append(params, catalog)
	}
	if templateBaseEq != "" {
		params = append(params, templateBaseEq)
	}
	query := `
SELECT catalog_template.*
FROM catalog_template, catalog
WHERE (catalog_template.environment_id = ? OR catalog_template.environment_id = ?)
AND catalog_template.catalog_id = catalog.id`
	if catalog != "" {
		query += `
AND catalog.name = ?`
	}
	if templateBaseEq != "" {
		query += `
AND catalog_template.base = ?`
	}
	db.Raw(query, params...).Find(&templateModels)
	var templateIDList []int
	for _, template := range templateModels {
		templateIDList = append(templateIDList, int(template.ID))
	}
	// Batch-load the derived data for every template at once.
	templateCategoryMap := templateCategoryMap(db, templateIDList)
	templateLabelMap := templateLabelMap(db, templateIDList)
	templateVersionMap := templateVersionMap(db, templateIDList)
	catalogMap := catalogMap(db, templateIDList)
	var templates []Template
	for _, templateModel := range templateModels {
		templateModel.Categories = templateCategoryMap[int(templateModel.ID)]
		templateModel.Labels = templateLabelMap[int(templateModel.ID)]
		templateModel.Versions = templateVersionMap[int(templateModel.ID)]
		templateModel.Catalog = catalogMap[templateModel.CatalogId]
		// Category filtering: require all of categories, reject any of
		// categoriesNe.
		skip := false
		for _, category := range categories {
			if !utils.Contains(templateModel.Categories, category) {
				skip = true
				break
			}
		}
		for _, categoryNe := range categoriesNe {
			if utils.Contains(templateModel.Categories, categoryNe) {
				skip = true
				break
			}
		}
		if !skip {
			templates = append(templates, templateModel.Template)
		}
	}
	return templates
}
// listQuery builds a SQL placeholder list of the form "( ? , ? , ? )"
// containing size placeholders; size 0 yields "()".
func listQuery(size int) string {
	// strings.Repeat + TrimSuffix replaces the original quadratic
	// string-concatenation loop with a single allocation.
	return "(" + strings.TrimSuffix(strings.Repeat(" ? ,", size), ",") + ")"
}
|
package main
import (
"crypto/tls"
"fmt"
"github.com/bigbank-as/go_camunda_client/rest"
"github.com/bigbank-as/go_camunda_client/rest/dto"
"net/http"
)
// main demonstrates the camunda REST client: it starts a demo process
// with two variables and reads it back by id.
func main() {
	// InsecureSkipVerify disables TLS certificate checks — acceptable only
	// against a local dev engine, never in production.
	httpTransport := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
	}
	httpClient := http.Client{Transport: httpTransport}
	camunda := rest.Construct("https://localhost:6002/engine-rest", "admin", "admin", httpClient)
	camunda.HandleErrors(func(err error) {
		fmt.Printf("\nError: %#v", err.Error())
	})

	fmt.Print("StartProcess..")
	processStarted, err := camunda.StartProcess("my-demo-process", dto.ProcessStartRequest{
		Variables: []dto.Variable{
			{"id", 123, "Integer"},
			{"firstName", "John", "String"},
		},
	})
	if err != nil {
		// The original discarded this error and then dereferenced the
		// (possibly nil) result, panicking whenever the engine was down.
		fmt.Printf("\nStartProcess failed: %s\n", err)
		return
	}
	fmt.Printf("\nProcess: %#v\n", processStarted)

	fmt.Print("GetProcess..")
	processLater, err := camunda.GetProcess(processStarted.GetId())
	if err != nil {
		fmt.Printf("\nGetProcess failed: %s\n", err)
		return
	}
	fmt.Printf("\nProcess: %#v\n", processLater)
}
|
package main
import (
"fmt"
"math/rand"
"time"
)
// init seeds the global PRNG once at startup so each run produces
// different strings and numbers. (Go 1.20+ seeds automatically and
// deprecates rand.Seed; kept here for compatibility with older toolchains.)
func init() {
	rand.Seed(time.Now().UnixNano())
}
// letterRunes is the alphabet RandStringRunes draws from: ASCII letters,
// lower- then uppercase.
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")

// RandStringRunes generates random strings of length n.
func RandStringRunes(n int) string {
	out := make([]rune, n)
	for i := 0; i < n; i++ {
		out[i] = letterRunes[rand.Intn(len(letterRunes))]
	}
	return string(out)
}
// randomString sends one random string (length 1..10) on messages;
// it blocks until a receiver is ready.
func randomString(messages chan string) {
	n := rand.Intn(10) + 1
	messages <- RandStringRunes(n)
}
// randomInt sends one random int in [0, 100) on numbers; it blocks until
// a receiver is ready.
func randomInt(numbers chan int) {
	v := rand.Intn(100)
	numbers <- v
}
// main starts five random-string producers and five random-number
// producers, then performs a single non-blocking select: it prints at
// most one received value, or "None" when no goroutine has managed to
// send yet (the common case, since the channels are unbuffered and main
// races the freshly started goroutines).
//
// NOTE(review): the unreceived goroutines stay blocked on their sends and
// leak; harmless in a demo that exits immediately, but not a pattern for
// long-lived programs.
func main() {
	messages := make(chan string)
	numbers := make(chan int)
	for i := 0; i < 5; i++ {
		go randomString(messages)
		go randomInt(numbers)
	}
	select {
	case num := <-numbers:
		fmt.Println("Number: ", num)
	case msg := <-messages:
		fmt.Println("Message: ", msg)
	default:
		fmt.Println("None")
	}
}
|
package nsq
// Config holds the connection settings for an NSQ endpoint.
type Config struct {
	Host   string // host name or IP of the nsqd instance
	Port   string // TCP port, kept as a string
	Prefix string // name prefix — presumably for topics/channels; confirm against callers
}
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-2020 Datadog, Inc.
package flare
import (
"bufio"
"bytes"
"fmt"
"io"
"regexp"
"strings"
)
// replacer pairs a credential-matching regex with its replacement: either
// a static repl pattern or a replFunc callback (exactly one is used).
type replacer struct {
	regex *regexp.Regexp
	// hints is an optional fast path: if none of these substrings occur in
	// the input, the regex cannot match and is skipped entirely.
	hints    []string
	repl     []byte
	replFunc func(b []byte) []byte
}
var (
	// commentRegex matches whole-line '#' comments; such lines are dropped
	// from the scrubbed output.
	commentRegex = regexp.MustCompile(`^\s*#.*$`)
	// blankRegex matches whitespace-only lines, also dropped.
	blankRegex = regexp.MustCompile(`^\s*$`)
	// Populated in init: single-line rules run per input line, multi-line
	// rules run once over the reassembled output.
	singleLineReplacers, multiLineReplacers []replacer
)
// init builds the scrubbing rule sets. Key/app-key rules keep the last
// five hex characters visible so operators can still identify which
// credential was redacted.
func init() {
	// Datadog API keys: 32 hex chars; keep the trailing 5.
	apiKeyReplacer := replacer{
		regex: regexp.MustCompile(`\b[a-fA-F0-9]{27}([a-fA-F0-9]{5})\b`),
		repl:  []byte(`***************************$1`),
	}
	// Datadog app keys: 40 hex chars; keep the trailing 5.
	appKeyReplacer := replacer{
		regex: regexp.MustCompile(`\b[a-fA-F0-9]{35}([a-fA-F0-9]{5})\b`),
		repl:  []byte(`***********************************$1`),
	}
	// user:password@ in URIs; the scheme and user are preserved.
	uriPasswordReplacer := replacer{
		regex: regexp.MustCompile(`([A-Za-z]+\:\/\/|\b)([A-Za-z0-9_]+)\:([^\s-]+)\@`),
		repl:  []byte(`$1$2:********@`),
	}
	// YAML keys containing pass/password/pwd.
	passwordReplacer := replacer{
		regex: matchYAMLKeyPart(`(pass(word)?|pwd)`),
		hints: []string{"pass", "pwd"},
		repl:  []byte(`$1 ********`),
	}
	// YAML keys containing token.
	tokenReplacer := replacer{
		regex: matchYAMLKeyPart(`token`),
		hints: []string{"token"},
		repl:  []byte(`$1 ********`),
	}
	// PEM blocks span lines, so this one runs as a multi-line rule.
	certReplacer := replacer{
		regex: matchCert(),
		hints: []string{"BEGIN"},
		repl:  []byte(`********`),
	}
	singleLineReplacers = []replacer{apiKeyReplacer, appKeyReplacer, uriPasswordReplacer, passwordReplacer, tokenReplacer}
	multiLineReplacers = []replacer{certReplacer}
}
// matchYAMLKeyPart builds a regex matching a "key: value" line whose key
// contains part (part may itself be a regex fragment), capturing the
// "key:" prefix as group 1 so replacements can keep it.
func matchYAMLKeyPart(part string) *regexp.Regexp {
	pattern := fmt.Sprintf(`(\s*(\w|_)*%s(\w|_)*\s*:).+`, part)
	return regexp.MustCompile(pattern)
}
// matchCert returns a regex matching a PEM-encoded block from its BEGIN
// line through its END line.
//
// This approximates RFC 7468's ABNF as closely as possible: Go's regexp
// has no backreferences, so it cannot verify that the BEGIN label equals
// the END label.
func matchCert() *regexp.Regexp {
	const pemPattern = `-----BEGIN (?:.*)-----[A-Za-z0-9=\+\/\s]*-----END (?:.*)-----`
	return regexp.MustCompile(pemPattern)
}
// credentialsCleanerBytes scrubs credentials from a byte slice by running
// it through credentialsCleaner.
func credentialsCleanerBytes(data []byte) ([]byte, error) {
	return credentialsCleaner(bytes.NewReader(data))
}
// credentialsCleaner reads file line by line and scrubs credentials:
// single-line replacers are applied to each surviving line, multi-line
// replacers (certificates) to the reassembled output. Comment lines and
// blank lines are dropped entirely.
func credentialsCleaner(file io.Reader) ([]byte, error) {
	var cleanedFile []byte
	scanner := bufio.NewScanner(file)
	first := true
	for scanner.Scan() {
		b := scanner.Bytes()
		if commentRegex.Match(b) || blankRegex.Match(b) || string(b) == "" {
			continue
		}
		for _, repl := range singleLineReplacers {
			// Hints are a cheap pre-filter: if no hint occurs in the line,
			// the regex cannot match. Convert the line once per replacer
			// instead of once per hint as the original did.
			line := string(b)
			containsHint := false
			for _, hint := range repl.hints {
				if strings.Contains(line, hint) {
					containsHint = true
					break
				}
			}
			if len(repl.hints) == 0 || containsHint {
				if repl.replFunc != nil {
					b = repl.regex.ReplaceAllFunc(b, repl.replFunc)
				} else {
					b = repl.regex.ReplaceAll(b, repl.repl)
				}
			}
		}
		if !first {
			cleanedFile = append(cleanedFile, '\n')
		}
		cleanedFile = append(cleanedFile, b...)
		first = false
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	// Apply the multi-line replacers to the fully reassembled file.
	for _, repl := range multiLineReplacers {
		// One whole-buffer conversion per replacer; the original converted
		// the entire file once per hint.
		content := string(cleanedFile)
		containsHint := false
		for _, hint := range repl.hints {
			if strings.Contains(content, hint) {
				containsHint = true
				break
			}
		}
		if len(repl.hints) == 0 || containsHint {
			if repl.replFunc != nil {
				cleanedFile = repl.regex.ReplaceAllFunc(cleanedFile, repl.replFunc)
			} else {
				cleanedFile = repl.regex.ReplaceAll(cleanedFile, repl.repl)
			}
		}
	}
	return cleanedFile, nil
}
|
package main
import (
"net"
"log"
"flag"
"fmt"
"bufio"
"strings"
"strconv"
)
// conn holds the per-client state of one FTP control connection.
type conn struct {
	rw           net.Conn     // the control (command) connection
	dataHostPort string       // peer address for active-mode (PORT) transfers
	prevCmd      string       // last command processed — TODO confirm use; unset in this file
	pasvListener net.Listener // listener for passive-mode (PASV) transfers
	cmdErr       error        // sticky error from the command connection
	binary       bool         // TYPE I (binary) vs ASCII transfer mode
}
// NewConn wraps an accepted control connection in a conn with all
// transfer state zero-valued (ASCII mode, no passive listener).
func NewConn(cmdConn net.Conn) *conn {
	return &conn{rw: cmdConn}
}
// hostPortToFTP converts a "host:port" address into the comma-separated
// h1,h2,h3,h4,p1,p2 form used in FTP PORT commands and PASV replies.
func hostPortToFTP(hostport string) (string, error) {
	host, portStr, err := net.SplitHostPort(hostport)
	if err != nil {
		return "", err
	}
	ipAddr, err := net.ResolveIPAddr("ip4", host)
	if err != nil {
		return "", err
	}
	port, err := strconv.ParseInt(portStr, 10, 64)
	if err != nil {
		return "", err
	}
	ip := ipAddr.IP.To4()
	// The port is split big-endian into two bytes: p1*256 + p2.
	return fmt.Sprintf("%d,%d,%d,%d,%d,%d",
		ip[0], ip[1], ip[2], ip[3], port/256, port%256), nil
}
// hostPortFromFTP converts an FTP h1,h2,h3,h4,p1,p2 address string back
// into the usual "a.b.c.d:port" form.
func hostPortFromFTP(address string) (string, error) {
	var a, b, c, d byte
	var p1, p2 int
	// Bug fix: the original format string contained "%d%,d" (a stray '%'
	// before the comma), so every parse failed with a format error.
	_, err := fmt.Sscanf(address, "%d,%d,%d,%d,%d,%d", &a, &b, &c, &d, &p1, &p2)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%d.%d.%d.%d:%d", a, b, c, d, 256*p1+p2), nil
}
// run drives the FTP command loop for one control connection: it reads
// commands line by line and dispatches them until QUIT or EOF.
func (c *conn) run() {
	// Bug fix: every reply must be written to the control connection. The
	// original called fmt.Fprintln without a writer argument, which does
	// not even compile (Fprintln requires an io.Writer first).
	fmt.Fprintln(c.rw, "220 Ready.")
	s := bufio.NewScanner(c.rw)
	for s.Scan() {
		if c.CmdErr() != nil {
			log.Print("err:", fmt.Errorf("command connection: %s", c.CmdErr()))
		}
		fields := strings.Fields(s.Text())
		if len(fields) == 0 {
			continue
		}
		// FTP commands are case-insensitive; arguments keep their case.
		cmd := strings.ToUpper(fields[0])
		var args []string
		if len(fields) > 1 {
			args = fields[1:]
		}
		switch cmd {
		case "LIST":
			c.list(args)
		case "NOOP":
			fmt.Fprintln(c.rw, "200 Ready.")
		case "PASV":
			c.pasv(args)
		case "PORT":
			c.port(args)
		case "QUIT":
			fmt.Fprintln(c.rw, "221 Goodbye.")
			return
		case "RETR":
			c.retr(args)
		case "STOR":
			c.stor(args)
		case "STRU":
			c.stru(args)
		case "SYST":
			fmt.Fprintln(c.rw, "215 UNIX Type: L8")
		case "TYPE":
			c.type_(args)
		case "USER":
			fmt.Fprintln(c.rw, "230 Login successful.")
		default:
			fmt.Fprintln(c.rw, "502 Command not implemented")
		}
	}
}
// main listens on -port (default 8000) and serves each FTP control
// connection in its own goroutine.
func main() {
	var port int
	flag.IntVar(&port, "port", 8000, "listen port")
	// Bug fix: the original never called flag.Parse, so -port was ignored.
	flag.Parse()
	ln, err := net.Listen("tcp4", fmt.Sprintf(":%d", port))
	if err != nil {
		// Include the underlying error; the original dropped it.
		log.Fatalf("Opening main listener: %v", err)
	}
	for {
		c, err := ln.Accept()
		if err != nil {
			// Bug fix: keep serving. The original fell through with a nil
			// conn, which would panic inside run.
			log.Print("Accepting new connection:", err)
			continue
		}
		go NewConn(c).run()
	}
}
|
package main
import (
"bufio"
"fmt"
"net"
)
// main dials the local server on :8080 and prints every line it sends
// until the connection closes.
func main() {
	conn, err := net.Dial("tcp", ":8080")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// writeToServer(conn) // disabled in the original; kept for reference
	readFromServer(conn)
}
func writeToServer(conn net.Conn) {
fmt.Fprintf(conn, "Hello from client")
}
// readFromServer prints each line received on conn, prefixed with
// "Server said", until the connection closes.
func readFromServer(conn net.Conn) {
	scanner := bufio.NewScanner(conn)
	for scanner.Scan() {
		fmt.Println("Server said", scanner.Text())
	}
}
|
package problem0003
import (
"fmt"
"strings"
)
// lengthOfLongestSubstring returns the length of the longest substring of
// s without repeating bytes (LeetCode 3), using a sliding window plus a
// map from byte to its last seen index.
func lengthOfLongestSubstring(s string) int {
	if len(s) == 0 {
		return 0
	}
	maxLen := 0
	last := map[byte]int{} // last position at which each byte was seen
	// Slide the window [start, i]; on a repeat inside the window, move
	// start just past the previous occurrence of s[i].
	for start, i := 0, 0; i < len(s); i++ {
		// The map's comma-ok form distinguishes "never seen" from index 0;
		// prev must also lie inside the current window to count.
		if prev, ok := last[s[i]]; ok && prev >= start && i != start {
			start = prev + 1
		}
		// Renamed from `len`, which shadowed the builtin in the original.
		if cur := i - start + 1; cur > maxLen {
			maxLen = cur
		}
		last[s[i]] = i
	}
	return maxLen
}
/*
lengthOfLongestSubstring2 replaces the map cache with strings.IndexByte
over the current window: the window starts empty, its right edge advances
with the loop index, and whenever the incoming byte already appears in
the window the left edge jumps to just past that occurrence. The maximum
window length seen is the answer.
*/
func lengthOfLongestSubstring2(s string) int {
	if len(s) == 0 {
		return 0
	}
	maxLen := 0
	for start, i := 0, 0; i < len(s); i++ {
		// A hit inside s[start:i] means s[i] repeats in the window; the
		// returned index is relative to start.
		if lastCurr := strings.IndexByte(s[start:i], s[i]); lastCurr != -1 {
			start = start + lastCurr + 1
		}
		// Renamed from `len`, which shadowed the builtin in the original.
		cur := i - start + 1
		// NOTE(review): leftover debug trace, retained to preserve output
		// behavior (and the file's only use of fmt); remove it together
		// with the fmt import once confirmed unneeded.
		fmt.Println(i, string(s[i]), start, string(s[start]))
		if cur > maxLen {
			maxLen = cur
		}
	}
	return maxLen
}
// lengthOfLongestSubstring3 keeps the current duplicate-free window as a
// substring: when s[right] already appears in the window, the left edge
// jumps past the duplicate; the longest window ever held is returned.
func lengthOfLongestSubstring3(s string) int {
	best := 0
	left := 0
	window := ""
	for right := 0; right < len(s); right++ {
		if dup := strings.IndexByte(window, s[right]); dup != -1 {
			left += dup + 1
		}
		window = s[left : right+1]
		if len(window) > best {
			best = len(window)
		}
	}
	return best
}
|
package config
// Jwt_Signing_Key is the HMAC secret used to sign JWTs.
//
// SECURITY NOTE(review): a signing secret hard-coded in source ends up in
// version control and every build artifact. It should be loaded from an
// environment variable or a secret store instead — confirm with the owners
// before rotating, since changing it invalidates all existing tokens.
// (The name also departs from Go's MixedCaps convention, but renaming it
// would break every caller.)
const Jwt_Signing_Key string = "jdsfhdsjkhsfjkhwqieyhncxmfhsu6353%$^&%&G"
|
package main
import (
"github.com/moshloop/fireviz/cmd"
"github.com/moshloop/fireviz/pkg"
"github.com/spf13/cobra"
)
// main wires up the fireviz root command with its export/list/ping
// subcommands and runs it.
func main() {
	pkg.LogError("fireviz " + pkg.VERSION)
	var rootCmd = &cobra.Command{
		Use: "fireviz",
		Run: func(cmd *cobra.Command, args []string) {},
	}
	rootCmd.AddCommand(&cmd.Export, &cmd.List, &cmd.Ping)
	// BUG FIX: Execute's error was silently discarded; surface it through
	// the same logger the program already uses.
	if err := rootCmd.Execute(); err != nil {
		pkg.LogError(err.Error())
	}
}
|
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cm
import (
"math"
"math/rand"
"testing"
"github.com/m3db/m3x/pool"
"github.com/stretchr/testify/require"
)
// Cadences (in number of Add calls) used by the test variants that enable
// periodic insert/compress and periodic flushing.
const (
	testInsertAndCompressEvery = 100
	testFlushEvery = 1000
)
// testQuantiles are the quantiles every stream under test computes.
var (
	testQuantiles = []float64{0.5, 0.9, 0.99}
)
// testStreamOptions returns the options shared by all stream tests:
// an epsilon of 1% and an initial sample capacity of 16.
func testStreamOptions() Options {
	opts := NewOptions()
	opts = opts.SetEps(0.01)
	return opts.SetCapacity(16)
}
// TestEmptyStream verifies a stream with no samples reports zero for min,
// max, and every quantile.
func TestEmptyStream(t *testing.T) {
	opts := testStreamOptions()
	s := NewStream(testQuantiles, opts)
	require.Equal(t, 0.0, s.Min())
	require.Equal(t, 0.0, s.Max())
	for _, q := range testQuantiles {
		require.Equal(t, 0.0, s.Quantile(q))
	}
}
// TestStreamWithOnePositiveSample verifies a single positive sample is
// reported as min, max, and every quantile after a flush.
func TestStreamWithOnePositiveSample(t *testing.T) {
	opts := testStreamOptions()
	s := NewStream(testQuantiles, opts)
	s.Add(100.0)
	s.Flush()
	require.Equal(t, 100.0, s.Min())
	require.Equal(t, 100.0, s.Max())
	for _, q := range testQuantiles {
		require.Equal(t, 100.0, s.Quantile(q))
	}
}
// TestStreamWithOneNegativeSample is the negative-value counterpart of the
// single-sample test above.
func TestStreamWithOneNegativeSample(t *testing.T) {
	opts := testStreamOptions()
	s := NewStream(testQuantiles, opts)
	s.Add(-100.0)
	s.Flush()
	require.Equal(t, -100.0, s.Min())
	require.Equal(t, -100.0, s.Max())
	for _, q := range testQuantiles {
		require.Equal(t, -100.0, s.Quantile(q))
	}
}
// TestStreamWithThreeSamples checks exact quantile values over the samples
// {100, 200, 300}; expected holds the value for each of testQuantiles
// (0.5, 0.9, 0.99) in order.
func TestStreamWithThreeSamples(t *testing.T) {
	opts := testStreamOptions()
	s := NewStream(testQuantiles, opts)
	for _, val := range []float64{100.0, 200.0, 300.0} {
		s.Add(val)
	}
	s.Flush()
	require.Equal(t, 100.0, s.Min())
	require.Equal(t, 300.0, s.Max())
	expected := []float64{200.0, 300.0, 300.0}
	for i, q := range testQuantiles {
		require.Equal(t, expected[i], s.Quantile(q))
	}
}
// The tests below run each sample pattern (increasing, decreasing, random,
// skewed) under the four combinations of periodic insert/compress and
// periodic flush settings; the shared assertions live in the
// testStreamWith* helpers.
func TestStreamWithIncreasingSamplesNoPeriodicInsertCompressNoPeriodicFlush(t *testing.T) {
	opts := testStreamOptions()
	testStreamWithIncreasingSamples(t, opts)
}
func TestStreamWithIncreasingSamplesPeriodicInsertCompressNoPeriodicFlush(t *testing.T) {
	opts := testStreamOptions().SetInsertAndCompressEvery(testInsertAndCompressEvery)
	testStreamWithIncreasingSamples(t, opts)
}
func TestStreamWithIncreasingSamplesNoPeriodicInsertCompressPeriodicFlush(t *testing.T) {
	opts := testStreamOptions().SetFlushEvery(testFlushEvery)
	testStreamWithIncreasingSamples(t, opts)
}
func TestStreamWithIncreasingSamplesPeriodicInsertCompressPeriodicFlush(t *testing.T) {
	opts := testStreamOptions().
		SetInsertAndCompressEvery(testInsertAndCompressEvery).
		SetFlushEvery(testFlushEvery)
	testStreamWithIncreasingSamples(t, opts)
}
func TestStreamWithDecreasingSamplesNoPeriodicInsertCompressNoPeriodicFlush(t *testing.T) {
	opts := testStreamOptions()
	testStreamWithDecreasingSamples(t, opts)
}
func TestStreamWithDecreasingSamplesPeriodicInsertCompressNoPeriodicFlush(t *testing.T) {
	opts := testStreamOptions().SetInsertAndCompressEvery(testInsertAndCompressEvery)
	testStreamWithDecreasingSamples(t, opts)
}
func TestStreamWithDecreasingSamplesNoPeriodicInsertCompressPeriodicFlush(t *testing.T) {
	opts := testStreamOptions().SetFlushEvery(testFlushEvery)
	testStreamWithDecreasingSamples(t, opts)
}
func TestStreamWithDecreasingSamplesPeriodicInsertCompressPeriodicFlush(t *testing.T) {
	opts := testStreamOptions().
		SetInsertAndCompressEvery(testInsertAndCompressEvery).
		SetFlushEvery(testFlushEvery)
	testStreamWithDecreasingSamples(t, opts)
}
func TestStreamWithRandomSamplesNoPeriodicInsertCompressNoPeriodicFlush(t *testing.T) {
	opts := testStreamOptions()
	testStreamWithRandomSamples(t, opts)
}
func TestStreamWithRandomSamplesPeriodicInsertCompressNoPeriodicFlush(t *testing.T) {
	opts := testStreamOptions().SetInsertAndCompressEvery(testInsertAndCompressEvery)
	testStreamWithRandomSamples(t, opts)
}
func TestStreamWithRandomSamplesNoPeriodicInsertCompressPeriodicFlush(t *testing.T) {
	opts := testStreamOptions().SetFlushEvery(testFlushEvery)
	testStreamWithRandomSamples(t, opts)
}
func TestStreamWithRandomSamplesPeriodicInsertCompressPeriodicFlush(t *testing.T) {
	opts := testStreamOptions().
		SetInsertAndCompressEvery(testInsertAndCompressEvery).
		SetFlushEvery(testFlushEvery)
	testStreamWithRandomSamples(t, opts)
}
func TestStreamWithSkewedDistributionNoPeriodicInsertCompressNoPeriodicFlush(t *testing.T) {
	opts := testStreamOptions()
	testStreamWithSkewedDistribution(t, opts)
}
func TestStreamWithSkewedDistributionPeriodicInsertCompressNoPeriodicFlush(t *testing.T) {
	opts := testStreamOptions().SetInsertAndCompressEvery(testInsertAndCompressEvery)
	testStreamWithSkewedDistribution(t, opts)
}
func TestStreamWithSkewedDistributionNoPeriodicInsertCompressPeriodicFlush(t *testing.T) {
	opts := testStreamOptions().SetFlushEvery(testFlushEvery)
	testStreamWithSkewedDistribution(t, opts)
}
func TestStreamWithSkewedDistributionPeriodicInsertCompressPeriodicFlush(t *testing.T) {
	opts := testStreamOptions().
		SetInsertAndCompressEvery(testInsertAndCompressEvery).
		SetFlushEvery(testFlushEvery)
	testStreamWithSkewedDistribution(t, opts)
}
// TestStreamClose verifies Close marks the stream closed and that closing
// twice is a no-op.
func TestStreamClose(t *testing.T) {
	opts := testStreamOptions()
	s := NewStream(testQuantiles, opts).(*stream)
	require.False(t, s.closed)
	// Close the stream.
	s.Close()
	require.True(t, s.closed)
	// Close the stream again, should be a no-op.
	s.Close()
	require.True(t, s.closed)
}
// TestStreamAddToMinHeap verifies addToMinHeap grows a pooled heap's
// backing slice once its initial capacity (1) is exceeded.
func TestStreamAddToMinHeap(t *testing.T) {
	floatsPool := pool.NewFloatsPool(
		[]pool.Bucket{
			{Capacity: 1, Count: 1},
			{Capacity: 2, Count: 1},
		}, nil)
	floatsPool.Init()
	opts := testStreamOptions().SetFloatsPool(floatsPool)
	s := NewStream(testQuantiles, opts).(*stream)
	heap := minHeap(floatsPool.Get(1))
	require.Equal(t, 1, cap(heap))
	inputs := []float64{1.0, 2.0}
	// Push one value to the heap, still under capacity.
	s.addToMinHeap(&heap, inputs[0])
	require.Equal(t, inputs[:1], []float64(heap))
	require.Equal(t, 1, cap(heap))
	// Push another value to the heap, which causes the capacity to grow.
	s.addToMinHeap(&heap, inputs[1])
	require.Equal(t, inputs, []float64(heap))
	require.Equal(t, 2, cap(heap))
}
// testStreamWithIncreasingSamples feeds 0..numSamples-1 in ascending order
// and checks min, max, and that each quantile is within the stream's
// epsilon-derived error margin.
func testStreamWithIncreasingSamples(t *testing.T, opts Options) {
	numSamples := 100000
	s := NewStream(testQuantiles, opts)
	for i := 0; i < numSamples; i++ {
		s.Add(float64(i))
	}
	s.Flush()
	require.Equal(t, 0.0, s.Min())
	require.Equal(t, float64(numSamples-1), s.Max())
	// Allowed absolute error: eps * number of samples.
	margin := float64(numSamples) * opts.Eps()
	for _, q := range testQuantiles {
		val := s.Quantile(q)
		require.True(t, val >= float64(numSamples)*q-margin && val <= float64(numSamples)*q+margin)
	}
}
// testStreamWithDecreasingSamples is the same check with the samples fed in
// descending order.
func testStreamWithDecreasingSamples(t *testing.T, opts Options) {
	numSamples := 100000
	s := NewStream(testQuantiles, opts)
	for i := numSamples - 1; i >= 0; i-- {
		s.Add(float64(i))
	}
	s.Flush()
	require.Equal(t, 0.0, s.Min())
	require.Equal(t, float64(numSamples-1), s.Max())
	margin := float64(numSamples) * opts.Eps()
	for _, q := range testQuantiles {
		val := s.Quantile(q)
		require.True(t, val >= float64(numSamples)*q-margin && val <= float64(numSamples)*q+margin)
	}
}
// testStreamWithRandomSamples feeds uniform random values in [0, MaxInt64)
// with a fixed seed and checks quantiles against the uniform expectation.
// NOTE(review): rand.Seed is deprecated since Go 1.20 — confirm toolchain
// before modernizing to rand.New(rand.NewSource(...)).
func testStreamWithRandomSamples(t *testing.T, opts Options) {
	numSamples := 100000
	maxInt64 := int64(math.MaxInt64)
	s := NewStream(testQuantiles, opts)
	min := math.MaxFloat64
	max := -1.0
	rand.Seed(100)
	for i := 0; i < numSamples; i++ {
		v := float64(rand.Int63n(maxInt64))
		min = math.Min(min, v)
		max = math.Max(max, v)
		s.Add(v)
	}
	s.Flush()
	require.Equal(t, min, s.Min())
	require.Equal(t, max, s.Max())
	// For a uniform distribution the q-quantile is approximately q*MaxInt64.
	margin := float64(maxInt64) * opts.Eps()
	for _, q := range testQuantiles {
		val := s.Quantile(q)
		require.True(t, val >= float64(maxInt64)*q-margin && val <= float64(maxInt64)*q+margin)
	}
}
// testStreamWithSkewedDistribution adds 10000 copies of 1.0 plus a single
// 10M outlier and verifies the outlier moves the max but not the quantiles.
func testStreamWithSkewedDistribution(t *testing.T, opts Options) {
	s := NewStream(testQuantiles, opts)
	for i := 0; i < 10000; i++ {
		s.Add(1.0)
	}
	// Add a huge sample value (10M).
	s.Add(10000000.0)
	s.Flush()
	require.Equal(t, 1.0, s.Min())
	require.Equal(t, 10000000.0, s.Max())
	for _, q := range testQuantiles {
		require.Equal(t, 1.0, s.Quantile(q))
	}
}
|
package buqi
import (
"errors"
"io"
"log"
"math/rand"
"net"
"time"
)
// BufSize is the size in bytes of each relay copy buffer.
const BufSize = 1024
// Socket is a TCP relay endpoint; traffic passing through it is encrypted
// or decrypted with Cipher.
type Socket struct {
	Cipher *Cipher
	ListenAddr *net.TCPAddr
	RemoteAddr *net.TCPAddr
}
// init seeds the global math/rand source so generated passwords differ
// between runs.
// NOTE(review): rand.Seed is deprecated since Go 1.20; newer code would use
// math/rand/v2 or a locally constructed source — confirm the toolchain
// version before changing.
func init() {
	// 更新随机种子 (refresh the random seed)
	rand.Seed(time.Now().Unix())
}
// Start loads the configuration. If no config file exists it writes a
// template (with a freshly generated password) and terminates the process
// via log.Fatal so the operator can review the file before restarting.
func Start() (config *Config) {
	config = &Config{}
	if !config.ReadConfig() {
		pwd := RandPassword().String()
		config = &Config{
			Local: "127.0.0.1:12345",
			Server: ":59386",
			Current: "Server",
			Password: pwd,
		}
		config.SaveConfig()
		// Intentionally fatal: the template must be checked by a human.
		log.Fatal("已为您创建配置文件模板,请检查!")
	}
	return
}
// DecodeRead reads ciphertext from conn into bs and decrypts the bytes that
// were actually read, in place. It returns the byte count and any read error.
func (s *Socket) DecodeRead(conn *net.TCPConn, bs []byte) (n int, err error) {
	count, readErr := conn.Read(bs)
	if readErr != nil {
		return count, readErr
	}
	s.Cipher.decrypt(bs[:count])
	return count, nil
}
// EncodeWrite encrypts bs in place and writes the result to conn,
// returning conn.Write's byte count and error.
// Note: the caller's buffer is mutated by the encryption.
func (s *Socket) EncodeWrite(conn *net.TCPConn, bs []byte) (int, error) {
	s.Cipher.encrypt(bs)
	return conn.Write(bs)
}
// EncodeCopy reads plaintext from src, encrypts it, and writes it to dst,
// looping until src reaches EOF (returned as nil) or an error occurs.
func (s *Socket) EncodeCopy(dst *net.TCPConn, src *net.TCPConn) error {
	buf := make([]byte, BufSize)
	for {
		readCount, errRead := src.Read(buf)
		if errRead != nil {
			// EOF means the peer closed cleanly; report success.
			if errRead != io.EOF {
				return errRead
			}
			return nil
		}
		if readCount > 0 {
			writeCount, errWrite := s.EncodeWrite(dst, buf[0:readCount])
			if errWrite != nil {
				return errWrite
			}
			// A short write means bytes were lost in transit to dst.
			if readCount != writeCount {
				return io.ErrShortWrite
			}
		}
	}
}
// DecodeCopy reads ciphertext from src, decrypts it, and writes the
// plaintext to dst, looping until src reaches EOF (returned as nil) or an
// error occurs. It mirrors EncodeCopy in the opposite direction.
func (s *Socket) DecodeCopy(dst *net.TCPConn, src *net.TCPConn) error {
	buf := make([]byte, BufSize)
	for {
		readCount, errRead := s.DecodeRead(src, buf)
		if errRead != nil {
			// EOF means the peer closed cleanly; report success.
			if errRead != io.EOF {
				return errRead
			}
			return nil
		}
		if readCount > 0 {
			writeCount, errWrite := dst.Write(buf[0:readCount])
			if errWrite != nil {
				return errWrite
			}
			// A short write means bytes were lost in transit to dst.
			if readCount != writeCount {
				return io.ErrShortWrite
			}
		}
	}
}
// DialRemote dials the configured remote server address and returns the
// established TCP connection.
func (s *Socket) DialRemote() (*net.TCPConn, error) {
	conn, err := net.DialTCP("tcp", nil, s.RemoteAddr)
	if err != nil {
		// FIX: the underlying dial error was previously discarded, making
		// failures impossible to diagnose; append its text to the message.
		return nil, errors.New("警告:连接远程服务器失败: " + err.Error())
	}
	return conn, nil
}
|
package models
import (
"context"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
)
//
// CompanyData is the company-management table record.
type CompanyData struct {
	ComId int64 `json:"com_id" bson:"com_id"`
	ComName string `json:"com_name" bson:"com_name"`
	ExpirationDate string `json:"expiration_date" bson:"expiration_date"`
	//Delivery []DeliveryData `json:"delivery" bson:"delivery"`
	Units string `json:"units" bson:"units"`
	Payment []string `json:"payment" bson:"payment"` // settlement methods
	Module string `json:"module" bson:"module"` // platform name
	Developer string `json:"developer" bson:"developer"` // developer name
	Admin string `json:"admin" bson:"admin"`
	Telephone string `json:"telephone" bson:"telephone"`
}
// Company is the company collection document.
type Company struct {
	ComId int64 `json:"com_id" bson:"com_id"`
	ComName string `json:"com_name" bson:"com_name"`
	ExpireAt int64 `json:"expire_at" bson:"expire_at"` // expiry time
	CreateAt int64 `json:"create_at" bson:"create_at"` // creation time
	Units interface{} `json:"units" bson:"units"` // units of measure
	Payment interface{} `json:"payment" bson:"payment"` // settlement methods
	Module string `json:"module" bson:"module"` // platform name
	Developer string `json:"developer" bson:"developer"` // developer name
	Position interface{} `bson:"position" json:"position"` // job title
	DefaultProfitMargin float64 `json:"default_profit_margin" bson:"default_profit_margin"` // default profit margin
	Admin string `json:"admin" bson:"admin"`
	// NOTE(review): tagged "phone" here while CompanyData and the queries
	// below use "telephone" — confirm this asymmetry is intentional.
	Telephone string `json:"phone" bson:"phone"`
	Password string `json:"password" bson:"password"`
}
// getCompanyCollection returns the "company" collection handle.
func getCompanyCollection() *mongo.Collection {
	return Client.Collection("company")
}
// Add inserts c as a new company document.
func (c *Company) Add() error {
	_, err := getCompanyCollection().InsertOne(context.TODO(), c)
	return err
}
// SelectCompanyByComID returns the single company document whose com_id
// equals comID; the driver's error (e.g. ErrNoDocuments) is returned when
// no document matches.
func SelectCompanyByComID(comID int64) (*Company, error) {
	company := new(Company)
	if err := getCompanyCollection().FindOne(context.TODO(), bson.M{"com_id": comID}).Decode(company); err != nil {
		return nil, err
	}
	return company, nil
}
// UpdateCompanyByComID applies updateData as a $set to the company whose
// com_id equals comID.
func UpdateCompanyByComID(comID int64, updateData bson.M) error {
	_, err := getCompanyCollection().UpdateOne(context.TODO(), bson.M{"com_id": comID}, bson.M{
		"$set": updateData,
	})
	return err
}
// UpdateAdminPwdByTelPhone sets password on every company document whose
// telephone field matches.
func UpdateAdminPwdByTelPhone(telephone, password string) error {
	_, err := getCompanyCollection().UpdateMany(context.TODO(), bson.D{{"telephone", telephone}}, bson.M{"$set": bson.M{"password": password}})
	return err
}
// UpdateCompanyExpireTime sets expire_at on the company whose com_id
// equals comID.
func UpdateCompanyExpireTime(comID, expireTime int64) error {
	_, err := getCompanyCollection().UpdateOne(context.TODO(), bson.D{{"com_id", comID}}, bson.M{"$set": bson.M{"expire_at": expireTime}})
	return err
}
|
package tests
import (
"testing"
"github.com/ravendb/ravendb-go-client"
"github.com/stretchr/testify/assert"
)
// nextAndSeedIdentitiesTestNextIdentityFor stores a user with a server-side
// identity ("users|"), bumps the identity once via NextIdentityForCommand,
// stores a second user, and verifies the documents landed on ids 1 and 3
// (id 2 was consumed by the explicit bump; id 4 was never issued).
func nextAndSeedIdentitiesTestNextIdentityFor(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	{
		session := openSessionMust(t, store)
		user := &User{}
		user.setLastName("Adi")
		// The "users|" suffix asks the server for the next identity.
		err = session.StoreWithID(user, "users|")
		assert.NoError(t, err)
		err = session.SaveChanges()
		assert.NoError(t, err)
		session.Close()
	}
	// Advance the identity counter without storing a document.
	command := ravendb.NewNextIdentityForCommand("users")
	err = store.GetRequestExecutor("").ExecuteCommand(command, nil)
	assert.NoError(t, err)
	{
		session := openSessionMust(t, store)
		user := &User{}
		user.setLastName("Avivi")
		err = session.StoreWithID(user, "users|")
		assert.NoError(t, err)
		err = session.SaveChanges()
		assert.NoError(t, err)
		session.Close()
	}
	{
		var entityWithId1, entityWithId2, entityWithId3, entityWithId4 *User
		session := openSessionMust(t, store)
		err = session.Load(&entityWithId1, "users/1")
		assert.NoError(t, err)
		err = session.Load(&entityWithId2, "users/2")
		assert.NoError(t, err)
		err = session.Load(&entityWithId3, "users/3")
		assert.NoError(t, err)
		err = session.Load(&entityWithId4, "users/4")
		assert.NoError(t, err)
		assert.NotNil(t, entityWithId1)
		assert.NotNil(t, entityWithId3)
		assert.Nil(t, entityWithId2)
		assert.Nil(t, entityWithId4)
		assert.Equal(t, *entityWithId1.LastName, "Adi")
		assert.Equal(t, *entityWithId3.LastName, "Avivi")
		session.Close()
	}
}
// nextAndSeedIdentitiesTestSeedIdentityFor seeds the "users" identity to
// 1990, verifies the next server-assigned document takes id 1991, then
// checks that seeding backwards to 1975 (without force) is rejected and the
// counter stays at 1991.
func nextAndSeedIdentitiesTestSeedIdentityFor(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	{
		session := openSessionMust(t, store)
		user := &User{}
		user.setLastName("Adi")
		err = session.StoreWithID(user, "users|")
		assert.NoError(t, err)
		err = session.SaveChanges()
		assert.NoError(t, err)
		session.Close()
	}
	// Seed the identity counter to 1990 (forced=false).
	command, err := ravendb.NewSeedIdentityForCommand("users", 1990, false)
	assert.NoError(t, err)
	err = store.GetRequestExecutor("").ExecuteCommand(command, nil)
	assert.NoError(t, err)
	result := command.Result
	assert.Equal(t, result, 1990)
	{
		session := openSessionMust(t, store)
		user := &User{}
		user.setLastName("Avivi")
		err = session.StoreWithID(user, "users|")
		assert.NoError(t, err)
		err = session.SaveChanges()
		assert.NoError(t, err)
		session.Close()
	}
	{
		session := openSessionMust(t, store)
		var entityWithId1, entityWithId2, entityWithId1990, entityWithId1991, entityWithId1992 *User
		err = session.Load(&entityWithId1, "users/1")
		assert.NoError(t, err)
		err = session.Load(&entityWithId2, "users/2")
		assert.NoError(t, err)
		err = session.Load(&entityWithId1990, "users/1990")
		assert.NoError(t, err)
		err = session.Load(&entityWithId1991, "users/1991")
		assert.NoError(t, err)
		err = session.Load(&entityWithId1992, "users/1992")
		assert.NoError(t, err)
		// Only users/1 (pre-seed) and users/1991 (post-seed) exist.
		assert.NotNil(t, entityWithId1)
		assert.NotNil(t, entityWithId1991)
		assert.Nil(t, entityWithId2)
		assert.Nil(t, entityWithId1990)
		assert.Nil(t, entityWithId1992)
		assert.Equal(t, *entityWithId1.LastName, "Adi")
		assert.Equal(t, *entityWithId1991.LastName, "Avivi")
		session.Close()
	}
	// Seeding to a lower value without force must not move the counter back.
	command, err = ravendb.NewSeedIdentityForCommand("users", 1975, false)
	assert.NoError(t, err)
	err = store.GetRequestExecutor("").ExecuteCommand(command, nil)
	assert.NoError(t, err)
	assert.Equal(t, command.Result, 1991)
	{
		op := ravendb.NewGetIdentitiesOperation()
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
		identites := op.Command.Result
		n := identites["users|"]
		assert.Equal(t, n, 1991)
	}
}
// TestNextAndSeedIdentities runs both identity sub-tests against a shared
// test driver, tearing the driver down (and recovering panics) afterwards.
func TestNextAndSeedIdentities(t *testing.T) {
	driver := createTestDriver(t)
	destroy := func() { destroyDriver(t, driver) }
	defer recoverTest(t, destroy)
	nextAndSeedIdentitiesTestNextIdentityFor(t, driver)
	nextAndSeedIdentitiesTestSeedIdentityFor(t, driver)
}
|
package sort_test
import (
"github.com/ashwinrrao/algorithms/sort"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Mergesort specs: each case feeds a fixed input slice to sort.Mergesort in
// BeforeEach and asserts on the produced output.
var _ = Describe("Mergesort", func() {
	When("an empty input is given", func() {
		var (
			numbers = []int{}
			output []int
		)
		BeforeEach(func() {
			output = sort.Mergesort(numbers)
		})
		It("should return an empty result", func() {
			Expect(output).To(Equal(numbers))
		})
	})
	When("an input with a single number is given", func() {
		var (
			numbers = []int{42}
			// NOTE(review): this initializer is immediately overwritten in
			// BeforeEach and appears redundant.
			output = []int{42}
		)
		BeforeEach(func() {
			output = sort.Mergesort(numbers)
		})
		It("should return the same slice as the output", func() {
			Expect(output).To(Equal(numbers))
		})
	})
	When("an input with an odd number of elements is given", func() {
		var (
			numbers = []int{42, 2, 21}
			expectedOutput = []int{2, 21, 42}
			output []int
		)
		BeforeEach(func() {
			output = sort.Mergesort(numbers)
		})
		It("should return the sorted slice as the output", func() {
			Expect(output).To(Equal(expectedOutput))
		})
	})
	When("an input with an even number of elements is given", func() {
		var (
			numbers = []int{42, 2, 21, 32, 98, 6, -29, 0}
			expectedOutput = []int{-29, 0, 2, 6, 21, 32, 42, 98}
			output []int
		)
		BeforeEach(func() {
			output = sort.Mergesort(numbers)
		})
		It("should return the sorted slice as the output", func() {
			Expect(output).To(Equal(expectedOutput))
		})
	})
})
|
package odoo
import (
"fmt"
)
// MailMassMailingList represents mail.mass_mailing.list model.
// NOTE(review): the tags spell "omptempty" rather than "omitempty". The
// typo appears consistently in these generated bindings, so the xmlrpc
// encoder may rely on (or ignore) it — confirm against the generator
// before correcting.
type MailMassMailingList struct {
	LastUpdate *Time `xmlrpc:"__last_update,omptempty"`
	Active *Bool `xmlrpc:"active,omptempty"`
	ContactNbr *Int `xmlrpc:"contact_nbr,omptempty"`
	CreateDate *Time `xmlrpc:"create_date,omptempty"`
	CreateUid *Many2One `xmlrpc:"create_uid,omptempty"`
	DisplayName *String `xmlrpc:"display_name,omptempty"`
	Id *Int `xmlrpc:"id,omptempty"`
	Name *String `xmlrpc:"name,omptempty"`
	WriteDate *Time `xmlrpc:"write_date,omptempty"`
	WriteUid *Many2One `xmlrpc:"write_uid,omptempty"`
}
// MailMassMailingLists represents array of mail.mass_mailing.list model.
type MailMassMailingLists []MailMassMailingList
// MailMassMailingListModel is the odoo model name.
const MailMassMailingListModel = "mail.mass_mailing.list"
// Many2One convert MailMassMailingList to *Many2One.
func (mml *MailMassMailingList) Many2One() *Many2One {
	return NewMany2One(mml.Id.Get(), "")
}
// CreateMailMassMailingList creates a new mail.mass_mailing.list model and returns its id.
func (c *Client) CreateMailMassMailingList(mml *MailMassMailingList) (int64, error) {
	ids, err := c.CreateMailMassMailingLists([]*MailMassMailingList{mml})
	if err != nil {
		return -1, err
	}
	// No error but nothing created: signal with -1 and a nil error.
	if len(ids) == 0 {
		return -1, nil
	}
	return ids[0], nil
}
// CreateMailMassMailingLists creates new mail.mass_mailing.list models and returns their ids.
func (c *Client) CreateMailMassMailingLists(mmls []*MailMassMailingList) ([]int64, error) {
	var vv []interface{}
	for _, v := range mmls {
		vv = append(vv, v)
	}
	return c.Create(MailMassMailingListModel, vv)
}
// UpdateMailMassMailingList updates an existing mail.mass_mailing.list record.
func (c *Client) UpdateMailMassMailingList(mml *MailMassMailingList) error {
	return c.UpdateMailMassMailingLists([]int64{mml.Id.Get()}, mml)
}
// UpdateMailMassMailingLists updates existing mail.mass_mailing.list records.
// All records (represented by ids) will be updated by mml values.
func (c *Client) UpdateMailMassMailingLists(ids []int64, mml *MailMassMailingList) error {
	return c.Update(MailMassMailingListModel, ids, mml)
}
// DeleteMailMassMailingList deletes an existing mail.mass_mailing.list record.
func (c *Client) DeleteMailMassMailingList(id int64) error {
	return c.DeleteMailMassMailingLists([]int64{id})
}
// DeleteMailMassMailingLists deletes existing mail.mass_mailing.list records.
func (c *Client) DeleteMailMassMailingLists(ids []int64) error {
	return c.Delete(MailMassMailingListModel, ids)
}
// GetMailMassMailingList gets mail.mass_mailing.list existing record.
func (c *Client) GetMailMassMailingList(id int64) (*MailMassMailingList, error) {
	mmls, err := c.GetMailMassMailingLists([]int64{id})
	if err != nil {
		return nil, err
	}
	if mmls != nil && len(*mmls) > 0 {
		return &((*mmls)[0]), nil
	}
	return nil, fmt.Errorf("id %v of mail.mass_mailing.list not found", id)
}
// GetMailMassMailingLists gets mail.mass_mailing.list existing records.
func (c *Client) GetMailMassMailingLists(ids []int64) (*MailMassMailingLists, error) {
	mmls := &MailMassMailingLists{}
	if err := c.Read(MailMassMailingListModel, ids, nil, mmls); err != nil {
		return nil, err
	}
	return mmls, nil
}
// FindMailMassMailingList finds mail.mass_mailing.list record by querying it with criteria.
// Only the first match is returned (query limit 1).
func (c *Client) FindMailMassMailingList(criteria *Criteria) (*MailMassMailingList, error) {
	mmls := &MailMassMailingLists{}
	if err := c.SearchRead(MailMassMailingListModel, criteria, NewOptions().Limit(1), mmls); err != nil {
		return nil, err
	}
	if mmls != nil && len(*mmls) > 0 {
		return &((*mmls)[0]), nil
	}
	return nil, fmt.Errorf("mail.mass_mailing.list was not found with criteria %v", criteria)
}
// FindMailMassMailingLists finds mail.mass_mailing.list records by querying it
// and filtering it with criteria and options.
func (c *Client) FindMailMassMailingLists(criteria *Criteria, options *Options) (*MailMassMailingLists, error) {
	mmls := &MailMassMailingLists{}
	if err := c.SearchRead(MailMassMailingListModel, criteria, options, mmls); err != nil {
		return nil, err
	}
	return mmls, nil
}
// FindMailMassMailingListIds finds records ids by querying it
// and filtering it with criteria and options.
func (c *Client) FindMailMassMailingListIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(MailMassMailingListModel, criteria, options)
	if err != nil {
		return []int64{}, err
	}
	return ids, nil
}
// FindMailMassMailingListId finds record id by querying it with criteria.
func (c *Client) FindMailMassMailingListId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(MailMassMailingListModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) > 0 {
		return ids[0], nil
	}
	return -1, fmt.Errorf("mail.mass_mailing.list was not found with criteria %v and options %v", criteria, options)
}
|
package FlatFS
import (
"flag"
"log"
"github.com/sarpk/go-fuse/fuse"
"github.com/sarpk/go-fuse/fuse/nodefs"
"github.com/sarpk/go-fuse/fuse/pathfs"
"os"
"path/filepath"
"strings"
"fmt"
"bytes"
)
// AttrMapperManagerInjector holds the registry of attribute-mapper backends
// used by Start/StartWithDebug; Prepare must run before it is used.
var (
	AttrMapperManagerInjector AttrMapperManager
)
// Prepare registers the available attribute-mapper backends: "default"
// (in-memory) and "sqlite".
func Prepare() {
	AttrMapperManagerInjector = *NewAttrMapperManager()
	AttrMapperManagerInjector.Set("default", NewMemAttrMapper())
	AttrMapperManagerInjector.Set("sqlite", NewSQLiteAttrMapper())
}
// StartWithDebug parses the command line (MOUNTPOINT FLATSTORAGE [backend]),
// wires up the selected attribute-mapper backend, mounts the FUSE
// filesystem at the mount point, and serves requests until unmounted.
// nfsDebug and serverDebug toggle debug logging on the path node fs and the
// FUSE server respectively.
func StartWithDebug(nfsDebug, serverDebug bool) {
	Prepare()
	flag.Parse()
	if len(flag.Args()) < 3 {
		log.Print("Given flags are: ", flag.Args())
		log.Fatal("Usage:\n FlatFS MOUNTPOINT FLATSTORAGE [backend] \n [backend] can be 'default' (in memory) or 'sqlite' ")
	}
	attrMapperFromManager := AttrMapperManagerInjector.Get(flag.Arg(2))
	defer attrMapperFromManager.Close()
	flatFs := &FlatFs{
		FileSystem: pathfs.NewDefaultFileSystem(),
		attrMapper: attrMapperFromManager,
		flatStorage: flag.Arg(1),
	}
	nfs := pathfs.NewPathNodeFs(flatFs, nil)
	nfs.SetDebug(nfsDebug)
	server, _, err := nodefs.MountRoot(flag.Arg(0), nfs.Root(), nil)
	// BUG FIX: err must be checked before using server — the original called
	// server.SetDebug first and would nil-deref when the mount failed.
	if err != nil {
		log.Fatalf("Mount fail: %v\n", err)
	}
	server.SetDebug(serverDebug)
	server.Serve()
}
// Start runs the filesystem with both debug modes enabled.
func Start() {
	StartWithDebug(true, true)
}
// NewQueryKeyValue returns an empty, ready-to-use query key/value store.
func NewQueryKeyValue() *QueryKeyValue {
	kv := map[string]string{}
	return &QueryKeyValue{keyValue: kv}
}
// ParseQuery strips the query-type prefix from raw and parses the remainder
// as comma-separated key:value pairs; entries that are not exactly one
// "key:value" pair are ignored.
func ParseQuery(raw string) (*QueryKeyValue, QueryType) {
	rest, queryType := handleQueryType(raw)
	result := NewQueryKeyValue()
	for _, entry := range strings.Split(rest, ",") {
		parts := strings.Split(entry, ":")
		if len(parts) != 2 {
			continue
		}
		result.keyValue[parts[0]] = parts[1]
	}
	return result, queryType
}
// handleQueryType inspects raw's first character to classify the query
// ('?' query, '+' add, '-' delete, '=' replace, empty, or plain file spec),
// returning raw with any recognized prefix removed plus the flags.
func handleQueryType(raw string) (string, QueryType) {
	queryType := createQueryType()
	switch {
	case len(raw) == 0:
		queryType.emptyType = true
	case raw[0] == '?':
		raw = raw[1:]
		queryType.querySpec = true
	case raw[0] == '+':
		raw = raw[1:]
		queryType.addSpec = true
	case raw[0] == '-':
		raw = raw[1:]
		queryType.deleteSpec = true
	case raw[0] == '=':
		raw = raw[1:]
		queryType.replaceSpec = true
	default:
		queryType.fileSpec = true
	}
	return raw, queryType
}
// createFileSpecQueryType returns a QueryType with only fileSpec set.
func createFileSpecQueryType() QueryType {
	qt := createQueryType()
	qt.fileSpec = true
	return qt
}
// createQueryType returns a QueryType with every flag cleared. Go's zero
// value for bool is false, so the zero struct is exactly that.
func createQueryType() QueryType {
	return QueryType{}
}
// OpenFileAsLoopback opens the stored file named fileName with the given
// open flags and wraps it as a loopback FUSE file; errors are translated to
// FUSE status codes.
func (flatFs *FlatFs) OpenFileAsLoopback(fileName string, flags int) (file nodefs.File, code fuse.Status) {
	f, err := os.OpenFile(flatFs.GetPath(fileName), flags, 0)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	return nodefs.NewLoopbackFile(f), fuse.OK
}
// GetPath resolves relPath inside the flat storage directory.
func (flatFs *FlatFs) GetPath(relPath string) string {
	return filepath.Join(flatFs.flatStorage, relPath)
}
// QueryOrAdd reports whether queryType is an add spec or a query spec.
func QueryOrAdd(queryType QueryType) bool {
	return queryType.addSpec || queryType.querySpec
}

// ConvertToString renders query's pairs as "key:value" joined by commas.
// NOTE(review): map iteration order is random, so the pair order can differ
// between calls — confirm no caller relies on a stable order.
func ConvertToString(query QueryKeyValue) string {
	parts := make([]string, 0, len(query.keyValue))
	for key, value := range query.keyValue {
		parts = append(parts, key+":"+value)
	}
	return strings.Join(parts, ",")
}
// GetFileInfoFromUUID stats the stored file named by uuid, returning nil if
// it cannot be opened or statted.
func (flatFs *FlatFs) GetFileInfoFromUUID(uuid string) os.FileInfo {
	file, oErr := os.Open(flatFs.GetPath(uuid))
	if oErr != nil {
		return nil
	}
	// BUG FIX: the handle was never closed, leaking a file descriptor on
	// every call.
	defer file.Close()
	fInfo, sErr := file.Stat()
	if sErr != nil {
		return nil
	}
	return fInfo
}
// AddKeyValuePairToUUIDMap records key=value under uuid in uuidMap,
// allocating the inner map on first use.
func AddKeyValuePairToUUIDMap(key, value, uuid string, uuidMap map[string]map[string]string) {
	inner := uuidMap[uuid]
	if inner == nil {
		inner = make(map[string]string)
		uuidMap[uuid] = inner
	}
	inner[key] = value
}
|
/*
* Copyright (c) 2019 Entrust Datacard Corporation.
* All rights reserved.
*/
package main
import (
"context"
"fmt"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
// opWriteConfigProfile fetches a certificate profile from CAGW and caches
// it in the backend's storage under config/<caId>/profiles/<id>, returning
// a summary of the stored profile.
func (b *backend) opWriteConfigProfile(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	caId := data.Get("caId").(string)
	id := data.Get("profile").(string)
	profileID := CAGWConfigProfileID{id, ""}
	profile, err := profileID.Profile(ctx, req, data)
	if err != nil {
		// BUG FIX: fmt.Sprint ignores formatting verbs, so the message was
		// emitted with a literal "%s"; Sprintf is required here (go vet
		// flags the original call).
		return logical.ErrorResponse(fmt.Sprintf("Error retrieving the profile properties from CAGW: %s", err)), err
	}
	storageEntry, err := logical.StorageEntryJSON("config/"+caId+"/profiles/"+id, profile)
	if err != nil {
		return logical.ErrorResponse("error creating config storage entry for profile"), err
	}
	err = req.Storage.Put(ctx, storageEntry)
	if err != nil {
		return logical.ErrorResponse("could not store configuration"), err
	}
	respData := map[string]interface{}{
		"Message": "Configuration successful",
		"Profile ID": profile.Id,
		"Profile Name": profile.Name,
		"Subject Variable Requirements": profile.SubjectVariableRequirements,
		"Subject Alt Name Requirements": profile.SubjectAltNameRequirements,
	}
	return &logical.Response{
		Data: respData,
	}, nil
}
// opReadConfigProfile returns the cached profile configuration stored under
// config/<caId>/profiles/<profileID>, or an error response when the id is
// missing, the read fails, or no entry exists.
func (b *backend) opReadConfigProfile(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	caId := data.Get("caId").(string)
	profileID := data.Get("profile").(string)
	if len(profileID) == 0 {
		return logical.ErrorResponse("missing the profile ID"), nil
	}
	storageEntry, err := req.Storage.Get(ctx, "config/"+caId+"/profiles/"+profileID)
	if err != nil {
		return logical.ErrorResponse("could not read configuration"), err
	}
	// A nil entry with a nil error means the key simply does not exist.
	if storageEntry == nil {
		return logical.ErrorResponse("could not find configuration"), nil
	}
	var rawData map[string]interface{}
	err = storageEntry.DecodeJSON(&rawData)
	if err != nil {
		return logical.ErrorResponse("json decoding failed"), err
	}
	resp := &logical.Response{
		Data: rawData,
	}
	return resp, nil
}
|
package store_test
import (
"testing"
"github.com/golang/mock/gomock"
"github.com/smartcontractkit/chainlink/internal/cltest"
"github.com/smartcontractkit/chainlink/store"
"github.com/smartcontractkit/chainlink/store/mock_store"
"github.com/stretchr/testify/assert"
)
// TestStore_Start verifies Start registers the store with its TxManager.
func TestStore_Start(t *testing.T) {
	t.Parallel()
	app, cleanup := cltest.NewApplicationWithKeyStore()
	defer cleanup()
	store := app.Store
	ctrl := gomock.NewController(t)
	txmMock := mock_store.NewMockTxManager(ctrl)
	store.TxManager = txmMock
	txmMock.EXPECT().Register(gomock.Any())
	assert.NoError(t, store.Start())
}
// TestStore_Close verifies queued run requests are still receivable before
// Close, and that after Close the channel yields a zero RunRequest and
// reports closed.
func TestStore_Close(t *testing.T) {
	t.Parallel()
	s, cleanup := cltest.NewStore()
	defer cleanup()
	s.RunChannel.Send("whatever")
	s.RunChannel.Send("whatever")
	rr, open := <-s.RunChannel.Receive()
	assert.True(t, open)
	rr, open = <-s.RunChannel.Receive()
	assert.True(t, open)
	assert.NoError(t, s.Close())
	rr, open = <-s.RunChannel.Receive()
	assert.Equal(t, store.RunRequest{}, rr)
	assert.False(t, open)
}
// TestQueuedRunChannel_Send verifies a sent request can be received.
func TestQueuedRunChannel_Send(t *testing.T) {
	t.Parallel()
	rq := store.NewQueuedRunChannel()
	assert.NoError(t, rq.Send("first"))
	rr1 := <-rq.Receive()
	assert.NotNil(t, rr1)
}
// TestQueuedRunChannel_Send_afterClose verifies sending on a closed channel
// returns an error instead of panicking.
func TestQueuedRunChannel_Send_afterClose(t *testing.T) {
	t.Parallel()
	rq := store.NewQueuedRunChannel()
	rq.Close()
	assert.Error(t, rq.Send("first"))
}
|
// Package httperror provides what is needed to build an HTTPError.
package httperror
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package arc
import (
"context"
"fmt"
"time"
"chromiumos/tast/common/perf"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/bundles/cros/arc/audio"
"chromiumos/tast/local/cpu"
"chromiumos/tast/local/power"
"chromiumos/tast/local/power/setup"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers PowerAudioPlaybackPerf with the tast framework. Each
// parameter variant pairs an audio performance mode (none / low-latency /
// power-saving) with a battery-discharge mode, for both ARC container
// ("android_p") and ARC VM ("android_vm") targets. Variants that force
// battery discharge require hardware support (hwdep.ForceDischarge); the
// *_nobatterymetrics variants cover devices where that is not possible.
func init() {
	testing.AddTest(&testing.Test{
		Func:         PowerAudioPlaybackPerf,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Measures the battery drain during audio playback with different performance flags",
		Contacts: []string{
			"judyhsiao@chromium.org",         // Author
			"cychiang@chromium.org",          // Media team
			"paulhsia@chromium.org",          // Media team
			"chromeos-audio-bugs@google.com", // Media team
		},
		SoftwareDeps: []string{"chrome"},
		Fixture:      "arcBootedWithDisableSyncFlags",
		Attr:         []string{"group:crosbolt", "crosbolt_nightly"},
		Params: []testing.Param{
			{
				Name: "default",
				Val: audio.TestParameters{
					PerformanceMode:      audio.PerformanceModeNone,
					BatteryDischargeMode: setup.ForceBatteryDischarge,
				},
				ExtraSoftwareDeps: []string{"android_p"},
				ExtraHardwareDeps: hwdep.D(hwdep.ForceDischarge()),
			},
			{
				Name: "default_vm",
				Val: audio.TestParameters{
					PerformanceMode:      audio.PerformanceModeNone,
					BatteryDischargeMode: setup.ForceBatteryDischarge,
				},
				ExtraSoftwareDeps: []string{"android_vm"},
				ExtraHardwareDeps: hwdep.D(hwdep.ForceDischarge()),
			},
			{
				Name: "low_latency",
				Val: audio.TestParameters{
					PerformanceMode:      audio.PerformanceModeLowLatency,
					BatteryDischargeMode: setup.ForceBatteryDischarge,
				},
				ExtraSoftwareDeps: []string{"android_p"},
				ExtraHardwareDeps: hwdep.D(hwdep.ForceDischarge()),
			},
			{
				Name: "low_latency_vm",
				Val: audio.TestParameters{
					PerformanceMode:      audio.PerformanceModeLowLatency,
					BatteryDischargeMode: setup.ForceBatteryDischarge,
				},
				ExtraSoftwareDeps: []string{"android_vm"},
				ExtraHardwareDeps: hwdep.D(hwdep.ForceDischarge()),
			},
			{
				Name: "power_saving",
				Val: audio.TestParameters{
					PerformanceMode:      audio.PerformanceModePowerSaving,
					BatteryDischargeMode: setup.ForceBatteryDischarge,
				},
				ExtraSoftwareDeps: []string{"android_p"},
				ExtraHardwareDeps: hwdep.D(hwdep.ForceDischarge()),
			},
			{
				Name: "power_saving_vm",
				Val: audio.TestParameters{
					PerformanceMode:      audio.PerformanceModePowerSaving,
					BatteryDischargeMode: setup.ForceBatteryDischarge,
				},
				ExtraSoftwareDeps: []string{"android_vm"},
				ExtraHardwareDeps: hwdep.D(hwdep.ForceDischarge()),
			},
			{
				Name: "default_nobatterymetrics",
				Val: audio.TestParameters{
					PerformanceMode:      audio.PerformanceModeNone,
					BatteryDischargeMode: setup.NoBatteryDischarge,
				},
				ExtraSoftwareDeps: []string{"android_p"},
				ExtraHardwareDeps: hwdep.D(hwdep.NoForceDischarge()),
			},
			{
				Name: "default_vm_nobatterymetrics",
				Val: audio.TestParameters{
					PerformanceMode:      audio.PerformanceModeNone,
					BatteryDischargeMode: setup.NoBatteryDischarge,
				},
				ExtraSoftwareDeps: []string{"android_vm"},
				ExtraHardwareDeps: hwdep.D(hwdep.NoForceDischarge()),
			},
			{
				Name: "low_latency_nobatterymetrics",
				Val: audio.TestParameters{
					PerformanceMode:      audio.PerformanceModeLowLatency,
					BatteryDischargeMode: setup.NoBatteryDischarge,
				},
				ExtraSoftwareDeps: []string{"android_p"},
				ExtraHardwareDeps: hwdep.D(hwdep.NoForceDischarge()),
			},
			{
				Name: "low_latency_vm_nobatterymetrics",
				Val: audio.TestParameters{
					PerformanceMode:      audio.PerformanceModeLowLatency,
					BatteryDischargeMode: setup.NoBatteryDischarge,
				},
				ExtraSoftwareDeps: []string{"android_vm"},
				ExtraHardwareDeps: hwdep.D(hwdep.NoForceDischarge()),
			},
			{
				Name: "power_saving_nobatterymetrics",
				Val: audio.TestParameters{
					PerformanceMode:      audio.PerformanceModePowerSaving,
					BatteryDischargeMode: setup.NoBatteryDischarge,
				},
				ExtraSoftwareDeps: []string{"android_p"},
				ExtraHardwareDeps: hwdep.D(hwdep.NoForceDischarge()),
			},
			{
				Name: "power_saving_vm_nobatterymetrics",
				Val: audio.TestParameters{
					PerformanceMode:      audio.PerformanceModePowerSaving,
					BatteryDischargeMode: setup.NoBatteryDischarge,
				},
				ExtraSoftwareDeps: []string{"android_vm"},
				ExtraHardwareDeps: hwdep.D(hwdep.NoForceDischarge()),
			},
		},
		Timeout: 10 * time.Minute,
	})
}
// PowerAudioPlaybackPerf measures the battery drain during audio playback
// with different performance flags. It sets up a power-test environment,
// installs and launches the ARC audio test activity with the requested
// performance mode, then records power metrics for a fixed interval and
// saves them to the test output directory.
func PowerAudioPlaybackPerf(ctx context.Context, s *testing.State) {
	const (
		testActivity           = "org.chromium.arc.testapp.arcaudiotest.PlaybackPerformanceActivity"
		audioWarmupDuration    = 10 * time.Second
		measureDuration        = 60 * time.Second
		keyPerformanceMode     = "perf_mode"
		keyDuration            = "duration"
		playbackDurationSecond = audioWarmupDuration + measureDuration + 10*time.Second // Add 10 seconds buffer.
	)
	param := s.Param().(audio.TestParameters)
	// FIX: corrected typo in the log message ("Measuing" -> "Measuring").
	s.Logf("Measuring power consumption of audio playback with flag: %#x", param.PerformanceMode)
	// Give cleanup actions a minute to run, even if we fail by exceeding our
	// deadline.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, time.Minute)
	defer cancel()
	cr := s.FixtValue().(*arc.PreData).Chrome
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to create Test API connection: ", err)
	}
	// Power-test setup: wifi and night light disabled, battery discharge per
	// the test parameter. Cleanup runs on the original (unshortened) context.
	sup, cleanup := setup.New(fmt.Sprintf("audio playback with flag: %#x.", param.PerformanceMode))
	defer func(ctx context.Context) {
		if err := cleanup(ctx); err != nil {
			s.Error("Cleanup failed: ", err)
		}
	}(cleanupCtx)
	sup.Add(setup.PowerTest(ctx, tconn,
		setup.PowerTestOptions{Wifi: setup.DisableWifiInterfaces, NightLight: setup.DisableNightLight},
		setup.NewBatteryDischargeFromMode(param.BatteryDischargeMode),
	))
	// Install testing app.
	a := s.FixtValue().(*arc.PreData).ARC
	sup.Add(setup.InstallApp(ctx, a, arc.APKPath(audio.Apk), audio.Pkg))
	// Wait until CPU is cooled down so thermal state does not skew the measurement.
	if _, err := cpu.WaitUntilCoolDown(ctx, cpu.DefaultCoolDownConfig(cpu.CoolDownPreserveUI)); err != nil {
		s.Fatal("CPU failed to cool down: ", err)
	}
	powerMetrics, err := perf.NewTimeline(ctx, power.TestMetrics(), perf.Interval(measureDuration))
	if err != nil {
		s.Fatal("Failed to build metrics: ", err)
	}
	if err := powerMetrics.Start(ctx); err != nil {
		s.Fatal("Failed to start metrics: ", err)
	}
	// Start testing activity, passing the performance mode and playback
	// duration (in seconds) as int extras.
	// TODO(b/203214749): Maybe need to make another field of ActivityStartOptions that can support uint64 types and pass them as int extras
	sup.Add(
		setup.StartActivity(ctx, tconn, a, audio.Pkg, testActivity,
			arc.WithExtraIntUint64(keyPerformanceMode, uint64(param.PerformanceMode)),
			arc.WithExtraIntUint64(keyDuration, uint64(playbackDurationSecond/time.Second))),
	)
	if err := sup.Check(ctx); err != nil {
		s.Fatal("Setup failed: ", err)
	}
	s.Log("Warmup: Waiting a bit before starting the measurement")
	if err := testing.Sleep(ctx, audioWarmupDuration); err != nil {
		s.Fatal("Failed to sleep: ", err)
	}
	// Keep audio playback and record power usage.
	s.Log("Starting measurement")
	if err := powerMetrics.StartRecording(ctx); err != nil {
		s.Fatal("Failed to start recording: ", err)
	}
	if err := testing.Sleep(ctx, measureDuration); err != nil {
		s.Fatal("Failed to sleep: ", err)
	}
	p, err := powerMetrics.StopRecording(ctx)
	if err != nil {
		s.Fatal("Error while recording power metrics: ", err)
	}
	if err := p.Save(s.OutDir()); err != nil {
		s.Error("Failed saving perf data: ", err)
	}
}
|
package udwSqlite3
import (
"github.com/tachyon-protocol/udw/udwFile"
"github.com/tachyon-protocol/udw/udwLog"
"github.com/tachyon-protocol/udw/udwStrings"
)
// setExecReq bundles the inputs for Db.setExec: one write statement plus the
// bookkeeping needed to retry it when the table is missing or the database
// file is corrupt.
type setExecReq struct {
	k1           string                       // key/table identifier passed to createTable when the table does not exist yet
	sql          string                       // SQL statement to execute
	valueBuf     [][]byte                     // positional arguments bound to the statement
	respStatusCb func(status QueryRespStatus) // optional callback receiving the query response status
	UseStmtCache bool                         // reuse a cached prepared statement when true
}
// setExec runs a write statement with automatic recovery from two failure
// classes: a missing table (created via req.k1, then retried) and a corrupt
// database file (optionally emptied by handleEmptyDatabaseWhenCorrupt, then
// retried). It gives up after three attempts. Returns "" on success,
// otherwise a non-empty error message.
func (db *Db) setExec(req setExecReq) (errMsg string) {
	for i := 0; i < 3; i++ {
		errMsg = db.Query(QueryReq{
			Query:        req.sql,
			Args:         req.valueBuf,
			RespStatusCb: req.respStatusCb,
			UseStmtCache: req.UseStmtCache,
		})
		if errMsg != "" {
			if db.handleEmptyDatabaseWhenCorrupt(errMsg) {
				continue
			}
			if errorIsTableNotExist(errMsg) {
				// Create the missing table, then retry the original statement.
				errMsg = createTable(db, req.k1)
				if errMsg != "" {
					if db.handleEmptyDatabaseWhenCorrupt(errMsg) {
						continue
					}
					return "err: " + errMsg + " sql: " + req.sql
				}
				continue
			}
			return "err: " + errMsg + " sql: " + req.sql
		}
		return ""
	}
	// FIX: the tag previously said "[mustSetExec]", which is a different function.
	return "[setExec]try too many times sql: " + req.sql + " err: " + errMsg
}
// mustSetExec is the panic-on-failure variant of setExec, using the default
// request options (no status callback, no statement cache).
func (db *Db) mustSetExec(k1 string, sql string, valueBuf [][]byte) {
	req := setExecReq{
		k1:       k1,
		sql:      sql,
		valueBuf: valueBuf,
	}
	if errMsg := db.setExec(req); errMsg != "" {
		panic(errMsg)
	}
}
// exec runs a statement without reading its results, retrying once when the
// database file is corrupt (and emptying it is enabled). Returns "" on
// success, otherwise an error message.
func (db *Db) exec(sql string) (errMsg string) {
	for i := 0; i < 2; i++ {
		// FIX: was `errMsg := ...`, which shadowed the named return value, so
		// the "try too many times" message below always reported an empty err.
		errMsg = db.querySkipResult(sql)
		if errMsg != "" {
			if db.handleEmptyDatabaseWhenCorrupt(errMsg) {
				continue
			}
			return errMsg
		}
		return ""
	}
	return "[exec]try too many times sql:[" + sql + "] err:[" + errMsg + "]"
}
// mustExec is the panic-on-failure variant of exec.
func (db *Db) mustExec(sql string) {
	if errMsg := db.exec(sql); errMsg != "" {
		panic(errMsg)
	}
}
// handleEmptyDatabaseWhenCorrupt reacts to a database-corruption error: it
// notifies the corruption callback (when set) and, when the Db is configured
// to empty a corrupt database, deletes the database files and reinitializes
// the Db. Returns true when the database was emptied and the caller should
// retry the failed operation.
func (db *Db) handleEmptyDatabaseWhenCorrupt(errMsg string) (ok bool) {
	if IsErrorDatabaseCorrupt(errMsg) {
		if db.req.DatabaseCorruptCallback != nil {
			db.req.DatabaseCorruptCallback()
		}
		if db.req.EmptyDatabaseIfDatabaseCorrupt {
			// FIX: log message typos corrected ("erorr" -> "error",
			// "emtry" -> "empty") and the tag now names this function.
			udwLog.Log("error", "[udwSqlite3.handleEmptyDatabaseWhenCorrupt]", "DatabaseCorrupt and empty database now.", db.req.FilePath, errMsg)
			db.cClose()
			MustDeleteSqliteDbFileByPath(db.req.FilePath)
			db.mustInitDbL1()
			return true
		}
	}
	return false
}
// MustDeleteSqliteDbFileByPath removes the sqlite database file along with
// its "-shm", "-wal" and "-journal" sidecar files, panicking on failure.
func MustDeleteSqliteDbFileByPath(path string) {
	for _, suffix := range []string{"", "-shm", "-wal", "-journal"} {
		udwFile.MustDelete(path + suffix)
	}
}
// stoB converts a string to a byte slice via the no-alloc helper (per the
// helper's name, no copy is made, so the result must be treated as read-only).
func stoB(s string) []byte {
	return udwStrings.GetByteArrayFromStringNoAlloc(s)
}
// btoS converts a byte slice to a string via the no-alloc helper (per the
// helper's name, no copy is made, so b must not be mutated afterwards).
func btoS(b []byte) string {
	return udwStrings.GetStringFromByteArrayNoAlloc(b)
}
// getArgumentList returns a fresh argument buffer with needSize slots.
// Receiver renamed from c to db for consistency with the other Db methods.
func (db *Db) getArgumentList(needSize int) [][]byte {
	return make([][]byte, needSize)
}
|
package build
import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/dustin/go-humanize"
dockerapi "github.com/fsouza/go-dockerclient"
log "github.com/sirupsen/logrus"
"github.com/docker-slim/docker-slim/pkg/app"
"github.com/docker-slim/docker-slim/pkg/app/master/builder"
"github.com/docker-slim/docker-slim/pkg/app/master/commands"
"github.com/docker-slim/docker-slim/pkg/app/master/config"
"github.com/docker-slim/docker-slim/pkg/app/master/inspectors/image"
"github.com/docker-slim/docker-slim/pkg/command"
"github.com/docker-slim/docker-slim/pkg/consts"
"github.com/docker-slim/docker-slim/pkg/imagebuilder"
"github.com/docker-slim/docker-slim/pkg/imagebuilder/internalbuilder"
"github.com/docker-slim/docker-slim/pkg/report"
"github.com/docker-slim/docker-slim/pkg/util/errutil"
"github.com/docker-slim/docker-slim/pkg/util/fsutil"
v "github.com/docker-slim/docker-slim/pkg/version"
)
// inspectFatImage locates (optionally pulling) and inspects the target "fat"
// image. It exits the command early when the image is missing and pulling is
// disabled, when the image is already optimized, or when runtime analysis is
// requested for an unsupported onbuild base image. On success it returns the
// inspector plus the prepared local volume path, state path and state key.
func inspectFatImage(
	xc *app.ExecutionContext,
	targetRef string,
	doPull bool,
	doShowPullLogs bool,
	rtaOnbuildBaseImage bool,
	dockerConfigPath string,
	registryAccount string,
	registrySecret string,
	paramsStatePath string,
	client *dockerapi.Client,
	logger *log.Entry,
	cmdReport *report.BuildCommand,
) (*image.Inspector, string, string, string) {
	imageInspector, err := image.NewInspector(client, targetRef)
	xc.FailOn(err)
	// Image not available locally: pull it when allowed, otherwise exit
	// with an image-not-found error code.
	if imageInspector.NoImage() {
		if doPull {
			xc.Out.Info("target.image",
				ovars{
					"status":  "image.not.found",
					"image":   targetRef,
					"message": "trying to pull target image",
				})
			err := imageInspector.Pull(doShowPullLogs, dockerConfigPath, registryAccount, registrySecret)
			xc.FailOn(err)
		} else {
			xc.Out.Info("target.image.error",
				ovars{
					"status":  "image.not.found",
					"image":   targetRef,
					"message": "make sure the target image already exists locally (use --pull flag to auto-download it from registry)",
				})
			exitCode := commands.ECTCommon | commands.ECCImageNotFound
			xc.Out.State("exited",
				ovars{
					"exit.code": exitCode,
				})
			xc.Exit(exitCode)
		}
	}
	logger.Tracef("targetRef=%s ii.ImageRef=%s", targetRef, imageInspector.ImageRef)
	cmdReport.TargetReference = imageInspector.ImageRef
	xc.Out.State("image.inspection.start")
	logger.Info("inspecting 'fat' image metadata...")
	err = imageInspector.Inspect()
	xc.FailOn(err)
	// Prepare the state directories keyed by the inspected image ID.
	localVolumePath, artifactLocation, statePath, stateKey := fsutil.PrepareImageStateDirs(paramsStatePath, imageInspector.ImageInfo.ID)
	imageInspector.ArtifactLocation = artifactLocation
	logger.Debugf("localVolumePath=%v, artifactLocation=%v, statePath=%v, stateKey=%v", localVolumePath, artifactLocation, statePath, stateKey)
	xc.Out.Info("image",
		ovars{
			"id":         imageInspector.ImageInfo.ID,
			"size.bytes": imageInspector.ImageInfo.VirtualSize,
			"size.human": humanize.Bytes(uint64(imageInspector.ImageInfo.VirtualSize)),
		})
	// Refuse to re-optimize: the container label marks already-optimized images.
	if imageInspector.ImageInfo.Config != nil &&
		len(imageInspector.ImageInfo.Config.Labels) > 0 {
		for labelName := range imageInspector.ImageInfo.Config.Labels {
			if labelName == consts.ContainerLabelName {
				xc.Out.Info("target.image.error",
					ovars{
						"status":  "image.already.optimized",
						"image":   targetRef,
						"message": "the target image is already optimized",
					})
				exitCode := commands.ECTBuild | ecbImageAlreadyOptimized
				xc.Out.State("exited",
					ovars{
						"exit.code": exitCode,
					})
				cmdReport.Error = "image.already.optimized"
				xc.Exit(exitCode)
			}
		}
	}
	logger.Info("processing 'fat' image info...")
	err = imageInspector.ProcessCollectedData()
	xc.FailOn(err)
	// Report Dockerfile-derived details (users, image stack, exposed ports)
	// when they were reconstructed during inspection.
	if imageInspector.DockerfileInfo != nil {
		if imageInspector.DockerfileInfo.ExeUser != "" {
			xc.Out.Info("image.users",
				ovars{
					"exe": imageInspector.DockerfileInfo.ExeUser,
					"all": strings.Join(imageInspector.DockerfileInfo.AllUsers, ","),
				})
		}
		if len(imageInspector.DockerfileInfo.ImageStack) > 0 {
			cmdReport.ImageStack = imageInspector.DockerfileInfo.ImageStack
			for idx, layerInfo := range imageInspector.DockerfileInfo.ImageStack {
				xc.Out.Info("image.stack",
					ovars{
						"index": idx,
						"name":  layerInfo.FullName,
						"id":    layerInfo.ID,
					})
			}
		}
		if len(imageInspector.DockerfileInfo.ExposedPorts) > 0 {
			xc.Out.Info("image.exposed_ports",
				ovars{
					"list": strings.Join(imageInspector.DockerfileInfo.ExposedPorts, ","),
				})
		}
		if !rtaOnbuildBaseImage && imageInspector.DockerfileInfo.HasOnbuild {
			xc.Out.Info("target.image.error",
				ovars{
					"status":  "onbuild.base.image",
					"image":   targetRef,
					"message": "Runtime analysis for onbuild base images is not supported",
				})
			exitCode := commands.ECTBuild | ecbOnbuildBaseImage
			xc.Out.State("exited",
				ovars{
					"exit.code": exitCode,
				})
			cmdReport.Error = "onbuild.base.image"
			xc.Exit(exitCode)
		}
	}
	xc.Out.State("image.inspection.done")
	return imageInspector, localVolumePath, statePath, stateKey
}
// buildFatImage builds the unoptimized ("fat") image from the provided
// container build options and returns its repo:tag name. The name comes from
// (in priority order) the explicit fat-image tag, a ".fat" variant of the
// user's custom slim tag, or an auto-generated temporary name. Exits the
// command on a malformed custom tag or a build failure.
func buildFatImage(
	xc *app.ExecutionContext,
	targetRef string,
	customImageTag string,
	cbOpts *config.ContainerBuildOptions,
	doShowBuildLogs bool,
	client *dockerapi.Client,
	cmdReport *report.BuildCommand,
) (fatImageRepoNameTag string) {
	xc.Out.State("building",
		ovars{
			"message": "building basic image",
		})
	//create a fat image name:
	//* use the explicit fat image tag if provided
	//* or create one based on the user provided (slim image) custom tag if it's available
	//* otherwise auto-generate a name
	if cbOpts.Tag != "" {
		fatImageRepoNameTag = cbOpts.Tag
	} else if customImageTag != "" {
		citParts := strings.Split(customImageTag, ":")
		switch len(citParts) {
		case 1:
			fatImageRepoNameTag = fmt.Sprintf("%s.fat", customImageTag)
		case 2:
			fatImageRepoNameTag = fmt.Sprintf("%s.fat:%s", citParts[0], citParts[1])
		default:
			xc.Out.Info("param.error",
				ovars{
					"status": "malformed.custom.image.tag",
					"value":  customImageTag,
				})
			exitCode := commands.ECTBuild | ecbBadCustomImageTag
			xc.Out.State("exited",
				ovars{
					"exit.code": exitCode,
					"version":   v.Current(),
					"location":  fsutil.ExeDir(),
				})
			cmdReport.Error = "malformed.custom.image.tag"
			xc.Exit(exitCode)
		}
	} else {
		fatImageRepoNameTag = fmt.Sprintf("slim-tmp-fat-image.%v.%v",
			os.Getpid(), time.Now().UTC().Format("20060102150405"))
	}
	cbOpts.Tag = fatImageRepoNameTag
	xc.Out.Info("basic.image.info",
		ovars{
			"tag":        cbOpts.Tag,
			"dockerfile": cbOpts.Dockerfile,
			"context":    targetRef,
		})
	fatBuilder, err := builder.NewBasicImageBuilder(
		client,
		cbOpts,
		targetRef,
		doShowBuildLogs)
	xc.FailOn(err)
	err = fatBuilder.Build()
	if doShowBuildLogs || err != nil {
		xc.Out.LogDump("regular.image.build", fatBuilder.BuildLog.String(),
			ovars{
				"tag": cbOpts.Tag,
			})
	}
	if err != nil {
		xc.Out.Info("build.error",
			ovars{
				"status": "standard.image.build.error",
				"value":  err,
			})
		exitCode := commands.ECTBuild | ecbImageBuildError
		xc.Out.State("exited",
			ovars{
				"exit.code": exitCode,
				"version":   v.Current(),
				"location":  fsutil.ExeDir(),
			})
		// FIX: record the failure in the command report like every other
		// error exit path in this file does.
		cmdReport.Error = "standard.image.build.error"
		xc.Exit(exitCode)
	}
	xc.Out.State("basic.image.build.completed")
	return fatImageRepoNameTag
}
// buildOutputImage builds the optimized output image using the selected
// build engine (none / internal / buildkit / docker) and returns its name.
// The image configuration is seeded from the source image, then adjusted by
// the runtime overrides and finally by the (higher-precedence) new image
// instructions. Exits the command on any build error.
func buildOutputImage(
	xc *app.ExecutionContext,
	customImageTag string,
	additionalTags []string,
	cbOpts *config.ContainerBuildOptions,
	overrides *config.ContainerOverrides,
	imageOverrideSelectors map[string]bool,
	instructions *config.ImageNewInstructions,
	doDeleteFatImage bool,
	doShowBuildLogs bool,
	imageInspector *image.Inspector,
	client *dockerapi.Client,
	logger *log.Entry,
	cmdReport *report.BuildCommand,
	imageBuildEngine string,
	imageBuildArch string,
) string {
	// onError reports a build failure and terminates the command.
	onError := func(e error) {
		xc.Out.Info("build.error",
			ovars{
				"status": "optimized.image.build.error",
				"error":  e,
			})
		exitCode := commands.ECTBuild | ecbImageBuildError
		xc.Out.State("exited",
			ovars{
				"exit.code": exitCode,
				"version":   v.Current(),
				"location":  fsutil.ExeDir(),
			})
		cmdReport.Error = "optimized.image.build.error"
		xc.Exit(exitCode)
	}
	if customImageTag == "" {
		customImageTag = imageInspector.SlimImageRepo
	}
	cmdReport.ImageBuildEngine = imageBuildEngine
	logger.Debugf("image build engine - %v", imageBuildEngine)
	xc.Out.State("building",
		ovars{
			"message": "building optimized image",
			"engine":  imageBuildEngine,
		})
	var outputImageName string
	var hasData bool
	var imageCreated bool
	switch imageBuildEngine {
	case IBENone:
		// Intentionally no image build.
	case IBEInternal:
		engine, err := internalbuilder.New(doShowBuildLogs,
			true, //pushToDaemon - TODO: have a param to control this &
			//output image tar (if not 'saving' to daemon)
			false)
		xc.FailOn(err)
		opts := imagebuilder.SimpleBuildOptions{
			ImageConfig: imagebuilder.ImageConfig{
				Architecture: imageBuildArch,
				Config: imagebuilder.RunConfig{
					ExposedPorts: map[string]struct{}{},
					Volumes:      map[string]struct{}{},
					Labels:       map[string]string{},
				},
			},
		}
		if customImageTag != "" {
			//must be first
			opts.Tags = append(opts.Tags, customImageTag)
		}
		if len(additionalTags) > 0 {
			opts.Tags = append(opts.Tags, additionalTags...)
		}
		UpdateBuildOptionsWithSrcImageInfo(&opts, imageInspector.ImageInfo)
		UpdateBuildOptionsWithOverrides(&opts, imageOverrideSelectors, overrides)
		if imageInspector.ImageRef != "" {
			opts.ImageConfig.Config.Labels[consts.SourceImageLabelName] = imageInspector.ImageRef
		}
		opts.ImageConfig.Config.Labels[consts.ContainerLabelName] = v.Current()
		//(new) instructions have higher value precedence over the runtime overrides
		UpdateBuildOptionsWithNewInstructions(&opts, instructions)
		// Prefer the artifacts tar; fall back to the artifacts directory.
		dataTar := filepath.Join(imageInspector.ArtifactLocation, "files.tar")
		if fsutil.Exists(dataTar) &&
			fsutil.IsRegularFile(dataTar) &&
			fsutil.IsTarFile(dataTar) {
			layerInfo := imagebuilder.LayerDataInfo{
				Type:   imagebuilder.TarSource,
				Source: dataTar,
				Params: &imagebuilder.DataParams{
					TargetPath: "/",
				},
			}
			opts.Layers = append(opts.Layers, layerInfo)
			hasData = true
		} else {
			dataDir := filepath.Join(imageInspector.ArtifactLocation, "files")
			// FIX: was fsutil.Exists(dataTar), which made this fallback check
			// the (absent) tar file instead of the directory it falls back to.
			if fsutil.Exists(dataDir) && fsutil.IsDir(dataDir) {
				layerInfo := imagebuilder.LayerDataInfo{
					Type:   imagebuilder.DirSource,
					Source: dataDir,
					Params: &imagebuilder.DataParams{
						TargetPath: "/",
					},
				}
				opts.Layers = append(opts.Layers, layerInfo)
				hasData = true
			} else {
				logger.Info("WARNING - no data artifacts")
			}
		}
		err = engine.Build(opts)
		if err != nil {
			onError(err)
		}
		outputImageName = customImageTag // engine.RepoName
		imageCreated = true
	case IBEBuildKit:
		// NOTE(review): buildkit engine not implemented here; selecting it
		// currently produces no image.
	case IBEDocker:
		engine, err := builder.NewImageBuilder(client,
			customImageTag,
			additionalTags,
			imageInspector.ImageInfo,
			imageInspector.ArtifactLocation,
			doShowBuildLogs,
			imageOverrideSelectors,
			overrides,
			instructions,
			imageInspector.ImageRef)
		xc.FailOn(err)
		if !engine.HasData {
			logger.Info("WARNING - no data artifacts")
		}
		err = engine.Build()
		if doShowBuildLogs || err != nil {
			xc.Out.LogDump("optimized.image.build", engine.BuildLog.String(),
				ovars{
					"tag": customImageTag,
				})
		}
		if err != nil {
			onError(err)
		}
		// When the fat image was built from a Dockerfile, optionally delete it.
		if cbOpts.Dockerfile != "" {
			if doDeleteFatImage {
				xc.Out.Info("Dockerfile", ovars{
					"image.name":        cbOpts.Tag,
					"image.fat.deleted": "true",
				})
				var err = client.RemoveImage(cbOpts.Tag)
				errutil.WarnOn(err)
			} else {
				xc.Out.Info("Dockerfile", ovars{
					"image.name":        cbOpts.Tag,
					"image.fat.deleted": "false",
				})
			}
		}
		outputImageName = engine.RepoName
		hasData = engine.HasData
		imageCreated = true
	default:
		logger.Errorf("bad image build engine - %v", imageBuildEngine)
		onError(fmt.Errorf("bad image build engine - %v", imageBuildEngine))
	}
	cmdReport.State = command.StateCompleted
	cmdReport.MinifiedImage = outputImageName
	cmdReport.MinifiedImageHasData = hasData
	cmdReport.ImageCreated = imageCreated
	xc.Out.State("completed")
	return outputImageName
}
// NOTE: lots of C&P from image_builder (TODO: refactor)
const (
	// docker-slim sensor ports; these are excluded when copying exposed-port
	// overrides into the output image (see UpdateBuildOptionsWithOverrides).
	dsCmdPortInfo = "65501/tcp"
	dsEvtPortInfo = "65502/tcp"
)
// UpdateBuildOptionsWithNewInstructions applies the "new image instructions"
// to the simple-build image configuration: set the workdir, append env vars,
// add/remove exposed ports, volumes and labels, and replace entrypoint/cmd.
// New instructions take precedence over runtime overrides because they are
// applied after UpdateBuildOptionsWithOverrides.
func UpdateBuildOptionsWithNewInstructions(
	options *imagebuilder.SimpleBuildOptions,
	instructions *config.ImageNewInstructions) {
	if instructions == nil {
		return
	}
	log.Debugf("NewImageBuilder: Using new image instructions => %+v", instructions)
	if instructions.Workdir != "" {
		options.ImageConfig.Config.WorkingDir = instructions.Workdir
	}
	if len(instructions.Env) > 0 {
		options.ImageConfig.Config.Env = append(options.ImageConfig.Config.Env, instructions.Env...)
	}
	for k, v := range instructions.ExposedPorts {
		options.ImageConfig.Config.ExposedPorts[string(k)] = v
	}
	for k, v := range instructions.Volumes {
		options.ImageConfig.Config.Volumes[k] = v
	}
	for k, v := range instructions.Labels {
		options.ImageConfig.Config.Labels[k] = v
	}
	if len(instructions.Entrypoint) > 0 {
		options.ImageConfig.Config.Entrypoint = instructions.Entrypoint
	}
	if len(instructions.Cmd) > 0 {
		options.ImageConfig.Config.Cmd = instructions.Cmd
	}
	// delete() is a no-op for missing keys (and nil maps), so the previous
	// check-then-delete pattern and the outer length guards were redundant.
	for k := range instructions.RemoveExposedPorts {
		delete(options.ImageConfig.Config.ExposedPorts, string(k))
	}
	for k := range instructions.RemoveVolumes {
		delete(options.ImageConfig.Config.Volumes, k)
	}
	for k := range instructions.RemoveLabels {
		delete(options.ImageConfig.Config.Labels, k)
	}
	if len(instructions.RemoveEnvs) > 0 &&
		len(options.ImageConfig.Config.Env) > 0 {
		// Rebuild the env list without the removed variable names.
		var newEnv []string
		for _, envPair := range options.ImageConfig.Config.Env {
			envParts := strings.SplitN(envPair, "=", 2)
			if len(envParts) > 0 && envParts[0] != "" {
				if _, ok := instructions.RemoveEnvs[envParts[0]]; !ok {
					newEnv = append(newEnv, envPair)
				}
			}
		}
		options.ImageConfig.Config.Env = newEnv
	}
}
// UpdateBuildOptionsWithOverrides copies the container runtime overrides
// into the simple-build image configuration. Only the fields named in
// overrideSelectors are applied; the docker-slim sensor ports are never
// copied into the exposed-port set.
func UpdateBuildOptionsWithOverrides(
	options *imagebuilder.SimpleBuildOptions,
	overrideSelectors map[string]bool,
	overrides *config.ContainerOverrides) {
	if overrides == nil || len(overrideSelectors) == 0 {
		return
	}
	log.Debugf("UpdateBuildOptionsWithOverrides: Using container runtime overrides => %+v", overrideSelectors)
	cfg := &options.ImageConfig.Config
	for selector := range overrideSelectors {
		switch selector {
		case "entrypoint":
			if len(overrides.Entrypoint) > 0 {
				cfg.Entrypoint = overrides.Entrypoint
			}
		case "cmd":
			if len(overrides.Cmd) > 0 {
				cfg.Cmd = overrides.Cmd
			}
		case "workdir":
			if overrides.Workdir != "" {
				cfg.WorkingDir = overrides.Workdir
			}
		case "env":
			if len(overrides.Env) > 0 {
				cfg.Env = append(cfg.Env, overrides.Env...)
			}
		case "label":
			for name, value := range overrides.Labels {
				cfg.Labels[name] = value
			}
		case "volume":
			for name, value := range overrides.Volumes {
				cfg.Volumes[name] = value
			}
		case "expose":
			dsCmdPort := dockerapi.Port(dsCmdPortInfo)
			dsEvtPort := dockerapi.Port(dsEvtPortInfo)
			for port, value := range overrides.ExposedPorts {
				// Skip the docker-slim sensor ports.
				if port == dsCmdPort || port == dsEvtPort {
					continue
				}
				cfg.ExposedPorts[string(port)] = value
			}
		}
	}
}
// UpdateBuildOptionsWithSrcImageInfo seeds the simple-build options with the
// source image's configuration: labels (split when oversized), architecture,
// user, entrypoint, cmd, workdir, env, volumes, onbuild, stop signal and
// exposed ports.
//
// FIX: the nil-map checks used to run AFTER the copy loops, so a nil Labels
// or ExposedPorts destination map would have been written to (a nil-map
// write panics) whenever the source image had labels or exposed ports. The
// maps are now initialized before they are used.
func UpdateBuildOptionsWithSrcImageInfo(
	options *imagebuilder.SimpleBuildOptions,
	imageInfo *dockerapi.Image) {
	if options.ImageConfig.Config.Labels == nil {
		options.ImageConfig.Config.Labels = map[string]string{}
	}
	if options.ImageConfig.Config.ExposedPorts == nil {
		options.ImageConfig.Config.ExposedPorts = map[string]struct{}{}
	}
	labels := SourceToOutputImageLabels(imageInfo.Config.Labels)
	for k, v := range labels {
		options.ImageConfig.Config.Labels[k] = v
	}
	//note: not passing imageInfo.OS explicitly
	//because it gets "hardcoded" to "linux" internally
	//(other OS types are not supported)
	if options.ImageConfig.Architecture == "" {
		options.ImageConfig.Architecture = imageInfo.Architecture
	}
	options.ImageConfig.Config.User = imageInfo.Config.User
	options.ImageConfig.Config.Entrypoint = imageInfo.Config.Entrypoint
	options.ImageConfig.Config.Cmd = imageInfo.Config.Cmd
	options.ImageConfig.Config.WorkingDir = imageInfo.Config.WorkingDir
	options.ImageConfig.Config.Env = imageInfo.Config.Env
	// NOTE(review): this aliases the source image's Volumes map; later
	// instruction/override updates mutate imageInfo.Config.Volumes too —
	// consider cloning if source info must stay pristine.
	options.ImageConfig.Config.Volumes = imageInfo.Config.Volumes
	options.ImageConfig.Config.OnBuild = imageInfo.Config.OnBuild
	options.ImageConfig.Config.StopSignal = imageInfo.Config.StopSignal
	for k, v := range imageInfo.Config.ExposedPorts {
		options.ImageConfig.Config.ExposedPorts[string(k)] = v
	}
	if options.ImageConfig.Config.Volumes == nil {
		options.ImageConfig.Config.Volumes = map[string]struct{}{}
	}
}
// SourceToOutputImageLabels copies the source image labels into a new map,
// splitting any label whose serialized line would exceed the docker label
// line limit into 50000-byte chunks stored under "<name>.<index>" keys
// (cleans up non-standard oversized labels from buildpacks).
func SourceToOutputImageLabels(srcLabels map[string]string) map[string]string {
	const (
		maxLabelLineLen = 65535
		chunkLen        = 50000
		lineOverhead    = 7 // approximate extra bytes of label-line syntax
	)
	labels := map[string]string{}
	for name, value := range srcLabels {
		if len(name)+len(value)+lineOverhead <= maxLabelLineLen {
			labels[name] = value
			continue
		}
		//TODO: improve JSON data splitting
		for idx, offset := 0, 0; offset < len(value); idx++ {
			end := offset + chunkLen
			if end > len(value) {
				end = len(value)
			}
			labels[fmt.Sprintf("%s.%d", name, idx)] = value[offset:end]
			offset = end
		}
	}
	return labels
}
|
package capital
import (
"fmt"
"github.com/spf13/cobra"
)
// capitalCmd is the root of the "capital" command group; subcommands are
// attached to it in Cmd.
// NOTE(review): fmt.Sprint around a plain string literal is a no-op; it is
// kept as-is here because removing it would leave the fmt import unused.
var capitalCmd = &cobra.Command{
	Use:   "capital",
	Short: fmt.Sprint("这是capital命令很短的介绍"),
	Long:  fmt.Sprint("这是capital命令很长很长很长的介绍"),
}
// Cmd attaches the subcommands (anyCmd, defined elsewhere in this package)
// to capitalCmd and returns it for registration with the parent command.
func Cmd() *cobra.Command {
	capitalCmd.AddCommand(anyCmd())
	return capitalCmd
}
|
package dz2
import (
"testing"
)
// item is a single Unpack test case pairing an input with its expected output.
type item struct {
	in  string // packed input passed to Unpack
	out string // expected unpacked result
}
// TestUnpackOk runs Unpack over valid inputs (plain text, digit repeat
// counts and backslash escapes) and checks the exact expanded output.
func TestUnpackOk(t *testing.T) {
	items := []item{
		{in: "a4bc2d5e", out: "aaaabccddddde"},
		{in: "abcd", out: "abcd"},
		{in: `qwe\4\5`, out: `qwe45`},
		{in: `qwe\45`, out: `qwe44444`},
		{in: `qwe\\5`, out: `qwe\\\\\`},
	}
	for _, value := range items {
		result, err := Unpack(value.in)
		if err != nil {
			t.Error("Error:", err)
			continue
		}
		if result != value.out {
			// FIX: the old message omitted the actual result, hiding what
			// Unpack really returned.
			t.Errorf("Unpack(%q) = %q, want %q", value.in, result, value.out)
		}
	}
}
// TestUnpackErr feeds Unpack invalid inputs (leading digit, empty string)
// and checks that it reports an error and returns an empty result.
func TestUnpackErr(t *testing.T) {
	items := []string{
		"35",
		"",
	}
	for _, value := range items {
		result, err := Unpack(value)
		if err == nil {
			t.Errorf("Unpack(%q): expected an error, got none", value)
			continue
		}
		// FIX: the original checked `result == ""` and flagged a failure
		// precisely when the implementation behaved correctly (an error case
		// returning the empty string). An error should yield an empty result.
		if result != "" {
			t.Errorf("Unpack(%q): expected empty result on error, got %q", value, result)
		}
	}
}
|
package confirm_test
import (
"net/http"
"net/http/httptest"
"sync"
"testing"
"time"
"github.com/jrapoport/gothic/hosts/rest"
"github.com/jrapoport/gothic/hosts/rest/account/confirm"
"github.com/jrapoport/gothic/mail/template"
"github.com/jrapoport/gothic/test/tconf"
"github.com/jrapoport/gothic/test/tcore"
"github.com/jrapoport/gothic/test/thttp"
"github.com/jrapoport/gothic/test/tsrv"
"github.com/stretchr/testify/assert"
)
// REST routes exercised by the tests in this file.
const (
	confirmRt = confirm.Confirm + rest.Root    // confirm an account with a token
	sendRt    = confirm.Confirm + confirm.Send // (re)send a confirmation email
)
// testServer spins up a REST host with the confirm endpoints registered and
// auto-confirmation disabled, returning the host, its HTTP test server, and
// the SMTP mock that captures outgoing mail.
func testServer(t *testing.T) (*rest.Host, *httptest.Server, *tconf.SMTPMock) {
	srv, web, smtp := tsrv.RESTHost(t, []rest.RegisterServer{
		confirm.RegisterServer,
	}, true)
	srv.Config().Signup.AutoConfirm = false
	return srv, web, smtp
}
// TestConfirmServer_ConfirmUser exercises POST /confirm: malformed bodies,
// missing and bad tokens are rejected; a token captured from the
// confirmation email confirms the user and returns a JWT for that user.
func TestConfirmServer_ConfirmUser(t *testing.T) {
	t.Parallel()
	srv, web, smtp := testServer(t)
	// invalid req
	_, err := thttp.DoRequest(t, web, http.MethodPost, confirmRt, nil, []byte("\n"))
	assert.Error(t, err)
	// empty token
	req := new(confirm.Request)
	_, err = thttp.DoRequest(t, web, http.MethodPost, confirmRt, nil, req)
	assert.Error(t, err)
	// bad token
	req = &confirm.Request{
		Token: "bad",
	}
	_, err = thttp.DoRequest(t, web, http.MethodPost, confirmRt, nil, req)
	assert.Error(t, err)
	// FIX: the SMTP hook fires on another goroutine; guard tok with a mutex
	// like the other tests in this file do (the unguarded access was a race).
	var tok string
	var mu sync.Mutex
	smtp.AddHook(t, func(email string) {
		mu.Lock()
		defer mu.Unlock()
		tok = tconf.GetEmailToken(template.ConfirmUserAction, email)
	})
	u, _ := tcore.TestUser(t, srv.API, "", false)
	assert.False(t, u.IsConfirmed())
	assert.Eventually(t, func() bool {
		mu.Lock()
		defer mu.Unlock()
		return tok != ""
	}, 1*time.Second, 10*time.Millisecond)
	mu.Lock()
	req = &confirm.Request{
		Token: tok,
	}
	mu.Unlock()
	res, err := thttp.DoRequest(t, web, http.MethodPost, confirmRt, nil, req)
	assert.NoError(t, err)
	u, err = srv.GetUser(u.ID)
	assert.NoError(t, err)
	assert.True(t, u.IsConfirmed())
	_, claims := tsrv.UnmarshalTokenResponse(t, srv.Config().JWT, res)
	assert.Equal(t, u.ID.String(), claims.Subject())
}
// TestConfirmServer_SendConfirmUser exercises POST /confirm/send: malformed
// bodies and missing/bad emails are rejected, an unknown email succeeds
// silently (no account enumeration), and re-requesting a confirmation for an
// unconfirmed user re-sends the same token.
func TestConfirmServer_SendConfirmUser(t *testing.T) {
	t.Parallel()
	srv, web, smtp := testServer(t)
	// invalid req
	_, err := thttp.DoRequest(t, web, http.MethodPost, sendRt, nil, []byte("\n"))
	assert.Error(t, err)
	// empty email
	req := new(confirm.Request)
	_, err = thttp.DoRequest(t, web, http.MethodPost, sendRt, nil, req)
	assert.Error(t, err)
	// bad email
	req = &confirm.Request{
		Email: "bad",
	}
	_, err = thttp.DoRequest(t, web, http.MethodPost, sendRt, nil, req)
	assert.Error(t, err)
	// email not found
	req = &confirm.Request{
		Email: "i-dont-exist@example.com",
	}
	_, err = thttp.DoRequest(t, web, http.MethodPost, sendRt, nil, req)
	assert.NoError(t, err)
	// Disable rate limiting so the resend below is allowed immediately.
	srv.Config().Mail.SendLimit = 0
	// Capture the token from the signup confirmation email; the hook runs on
	// another goroutine, so access is guarded by mu.
	var tok1 string
	var mu sync.Mutex
	smtp.AddHook(t, func(email string) {
		mu.Lock()
		defer mu.Unlock()
		tok1 = tconf.GetEmailToken(template.ConfirmUserAction, email)
	})
	u, _ := tcore.TestUser(t, srv.API, "", false)
	assert.False(t, u.IsConfirmed())
	assert.Eventually(t, func() bool {
		return tok1 != ""
	}, 2*time.Second, 10*time.Millisecond)
	// Capture the token from the explicitly requested resend.
	var tok2 string
	smtp.AddHook(t, func(email string) {
		mu.Lock()
		defer mu.Unlock()
		tok2 = tconf.GetEmailToken(template.ConfirmUserAction, email)
	})
	req = &confirm.Request{
		Email: u.Email,
	}
	_, err = thttp.DoRequest(t, web, http.MethodPost, sendRt, nil, req)
	assert.NoError(t, err)
	assert.Eventually(t, func() bool {
		return tok2 != ""
	}, 2*time.Second, 10*time.Millisecond)
	// The resend reuses the same confirmation token.
	assert.Equal(t, tok1, tok2)
}
// TestConfirmServer_SendConfirmUser_RateLimit verifies that a resend request
// inside the send-limit window returns 425 Too Early and sends no email.
func TestConfirmServer_SendConfirmUser_RateLimit(t *testing.T) {
	t.Parallel()
	srv, web, smtp := testServer(t)
	srv.Config().Mail.SendLimit = 5 * time.Minute
	var sent string
	var mu sync.Mutex
	smtp.AddHook(t, func(email string) {
		mu.Lock()
		defer mu.Unlock()
		sent = email
	})
	// sent initial
	u, _ := tcore.TestUser(t, srv.API, "", false)
	assert.False(t, u.IsConfirmed())
	// FIX: all accesses to sent now take mu — the hook writes it from
	// another goroutine, so the unguarded reads/reset below were a race.
	assert.Eventually(t, func() bool {
		mu.Lock()
		defer mu.Unlock()
		return sent != ""
	}, 2*time.Second, 10*time.Millisecond)
	// resend
	mu.Lock()
	sent = ""
	mu.Unlock()
	req := &confirm.Request{
		Email: u.Email,
	}
	_, err := thttp.DoRequest(t, web, http.MethodPost, sendRt, nil, req)
	assert.EqualError(t, err, thttp.FmtError(http.StatusTooEarly).Error())
	assert.Never(t, func() bool {
		mu.Lock()
		defer mu.Unlock()
		return sent != ""
	}, 2*time.Second, 10*time.Millisecond)
}
|
/*
* Copyright 2020 zpxio (Jeff Sharpe)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package generator
import (
"github.com/stretchr/testify/suite"
"testing"
)
// SelectorSuite groups the selector parsing and token-matching tests under
// testify's suite runner.
type SelectorSuite struct {
	suite.Suite
}
// TestSelectorSuite is the go-test entry point for SelectorSuite.
func TestSelectorSuite(t *testing.T) {
	suite.Run(t, &SelectorSuite{})
}
// TestParseSelector_Simple parses a selector combining a required match, an
// exclusion and a bare existence flag, and checks each criteria bucket.
func (s *SelectorSuite) TestParseSelector_Simple() {
	const category = "animal"
	sel := ParseSelector(category, "type=mammal,env!=water,enabled")
	s.NotNil(sel)
	s.Equal(category, sel.Category)
	// "type=mammal" lands in Require.
	s.Contains(sel.Require, "type")
	s.Equal("mammal", sel.Require["type"])
	// "env!=water" lands in Exclude.
	s.Contains(sel.Exclude, "env")
	s.Equal("water", sel.Exclude["env"])
	// Bare "enabled" lands in Exists.
	s.Contains(sel.Exists, "enabled")
	s.True(sel.Exists["enabled"])
}
// An empty selector expression yields a "simple" selector.
func (s *SelectorSuite) TestIsSimple_Sure() {
	s.True(ParseSelector("test", "").IsSimple())
}
// A requirement clause makes the selector non-simple.
func (s *SelectorSuite) TestIsSimple_HasRequire() {
	s.False(ParseSelector("test", "x=y").IsSimple())
}
// An exclusion clause makes the selector non-simple.
func (s *SelectorSuite) TestIsSimple_HasExclude() {
	s.False(ParseSelector("test", "x!=y").IsSimple())
}
// A bare existence key makes the selector non-simple.
func (s *SelectorSuite) TestIsSimple_HasExists() {
	s.False(ParseSelector("test", "x").IsSimple())
}
// A category-only selector matches any token of that category.
func (s *SelectorSuite) TestMatchesToken_IdOnly() {
	sel := ParseSelector("animal", "")
	tok := BuildToken("animal", "Gemsbok", 1.0, Properties{})
	s.True(sel.MatchesToken(&tok))
}
// One satisfied requirement matches.
func (s *SelectorSuite) TestMatchesToken_SingleRequire() {
	props := Properties{"type": "mammal", "continent": "africa", "family": "antelope", "extant": ""}
	tok := BuildToken("animal", "Gemsbok", 1.0, props)
	sel := ParseSelector("animal", "type=mammal")
	s.True(sel.MatchesToken(&tok))
}
// Multiple satisfied requirements match.
func (s *SelectorSuite) TestMatchesToken_MultiRequire() {
	props := Properties{"type": "mammal", "continent": "africa", "family": "antelope", "extant": ""}
	tok := BuildToken("animal", "Gemsbok", 1.0, props)
	sel := ParseSelector("animal", "type=mammal,family=antelope")
	s.True(sel.MatchesToken(&tok))
}
// A requirement with the wrong value rejects the token.
func (s *SelectorSuite) TestMatchesToken_SingleRequire_Mismatch() {
	props := Properties{"type": "mammal", "continent": "africa", "family": "antelope", "extant": ""}
	tok := BuildToken("animal", "Gemsbok", 1.0, props)
	sel := ParseSelector("animal", "type=fish")
	s.False(sel.MatchesToken(&tok))
}
// One failing requirement among several rejects the token.
func (s *SelectorSuite) TestMatchesToken_MultiRequire_MixedMismatch() {
	props := Properties{"type": "mammal", "continent": "africa", "family": "antelope", "extant": ""}
	tok := BuildToken("animal", "Gemsbok", 1.0, props)
	sel := ParseSelector("animal", "type=mammal,family=prosimian")
	s.False(sel.MatchesToken(&tok))
}
// An exclusion on a non-matching value still accepts the token.
func (s *SelectorSuite) TestMatchesToken_SingleRestrict() {
	props := Properties{"type": "mammal", "continent": "africa", "family": "antelope", "extant": ""}
	tok := BuildToken("animal", "Gemsbok", 1.0, props)
	sel := ParseSelector("animal", "type!=fish")
	s.True(sel.MatchesToken(&tok))
}
// Multiple non-matching exclusions still accept the token.
func (s *SelectorSuite) TestMatchesToken_MultiRestrict() {
	props := Properties{"type": "mammal", "continent": "africa", "family": "antelope", "extant": ""}
	tok := BuildToken("animal", "Gemsbok", 1.0, props)
	sel := ParseSelector("animal", "type!=fish,family!=mustelid")
	s.True(sel.MatchesToken(&tok))
}
// An exclusion matching the token's value rejects it.
func (s *SelectorSuite) TestMatchesToken_SingleRestrict_Mismatch() {
	props := Properties{"type": "mammal", "continent": "africa", "family": "antelope", "extant": ""}
	tok := BuildToken("animal", "Gemsbok", 1.0, props)
	sel := ParseSelector("animal", "type!=mammal")
	s.False(sel.MatchesToken(&tok))
}
// One triggered exclusion among several rejects the token.
func (s *SelectorSuite) TestMatchesToken_MultiRestrict_MixedMismatch() {
	props := Properties{"type": "mammal", "continent": "africa", "family": "antelope", "extant": ""}
	tok := BuildToken("animal", "Gemsbok", 1.0, props)
	sel := ParseSelector("animal", "type!=fish,family!=antelope")
	s.False(sel.MatchesToken(&tok))
}
// An existence check on a present key accepts the token.
func (s *SelectorSuite) TestMatchesToken_SingleExists() {
	props := Properties{"type": "mammal", "continent": "africa", "family": "antelope", "extant": ""}
	tok := BuildToken("animal", "Gemsbok", 1.0, props)
	sel := ParseSelector("animal", "extant")
	s.True(sel.MatchesToken(&tok))
}
// Multiple existence checks on present keys accept the token.
func (s *SelectorSuite) TestMatchesToken_MultiExists() {
	props := Properties{"type": "mammal", "continent": "africa", "family": "antelope", "extant": ""}
	tok := BuildToken("animal", "Gemsbok", 1.0, props)
	sel := ParseSelector("animal", "family,extant")
	s.True(sel.MatchesToken(&tok))
}
// An existence check on a missing key rejects the token.
func (s *SelectorSuite) TestMatchesToken_SingleExists_Mismatch() {
	props := Properties{"type": "mammal", "continent": "africa", "family": "antelope", "extant": ""}
	tok := BuildToken("animal", "Gemsbok", 1.0, props)
	sel := ParseSelector("animal", "prey")
	s.False(sel.MatchesToken(&tok))
}
// One missing key among the existence checks rejects the token.
func (s *SelectorSuite) TestMatchesToken_MultiExists_MixedMismatch() {
	props := Properties{"type": "mammal", "continent": "africa", "family": "antelope", "extant": ""}
	tok := BuildToken("animal", "Gemsbok", 1.0, props)
	sel := ParseSelector("animal", "extant,robotic")
	s.False(sel.MatchesToken(&tok))
}
|
package middlewares
import (
"log"
"net/http"
"strconv"
"github.com/Anondo/graphql-and-go/conn"
"github.com/Anondo/graphql-and-go/database/repos"
"github.com/labstack/echo/v4"
)
// AuthMiddleware ...
// AuthMiddleware authenticates a request by its numeric "User-id" header:
// the referenced user must exist in the database, otherwise the request is
// rejected with 401 (or 500 on a lookup failure).
func AuthMiddleware(next echo.HandlerFunc) echo.HandlerFunc {
	return func(c echo.Context) error {
		// Fix: the Atoi error was silently discarded, so a missing or
		// malformed header fell through to a DB lookup for user 0.
		userID, err := strconv.Atoi(c.Request().Header.Get("User-id"))
		if err != nil {
			log.Println("Invalid or missing User-id header: ", err.Error())
			return c.JSON(http.StatusUnauthorized, map[string]string{
				"error": "User not authorized",
			})
		}
		repo := repos.NewUserRepo(conn.Default())
		user, err := repo.GetUser(userID)
		if err != nil {
			log.Println("Failed to fetch user info for authentication: ", err.Error())
			return c.JSON(http.StatusInternalServerError, map[string]string{
				"error": "something went wrong",
			})
		}
		if user == nil {
			log.Printf("User of user-id:%d is not authorized\n", userID)
			return c.JSON(http.StatusUnauthorized, map[string]string{
				"error": "User not authorized",
			})
		}
		return next(c)
	}
}
|
package nxrm
import (
"context"
"encoding/json"
"errors"
"fmt"
"sync"
"time"
"github.com/hashicorp/errwrap"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/sdk/database/dbplugin"
"github.com/hashicorp/vault/sdk/database/helper/credsutil"
"github.com/hashicorp/vault/sdk/database/helper/dbutil"
)
// New returns the nxrm plugin wrapped in Vault's error-sanitizer
// middleware so secret values never leak into error messages.
func New() (interface{}, error) {
	plugin := NewNxrm()
	return dbplugin.NewDatabaseErrorSanitizerMiddleware(plugin, plugin.SecretValues), nil
}
// Run serves the plugin over Vault's plugin TLS transport; it blocks
// for the lifetime of the plugin process.
func Run(apiTLSConfig *api.TLSConfig) error {
	plugin := NewNxrm()
	dbplugin.Serve(plugin, api.VaultPluginTLSProvider(apiTLSConfig))
	return nil
}
// NewNxrm constructs an Nxrm backed by a SQL-style credentials producer
// ("-"-separated display/role parts, usernames up to 100 chars).
func NewNxrm() *Nxrm {
	producer := &credsutil.SQLCredentialsProducer{
		DisplayNameLen: 15,
		RoleNameLen:    15,
		UsernameLen:    100,
		Separator:      "-",
	}
	return &Nxrm{credentialProducer: producer}
}
// Nxrm implements Vault's database plugin interface for Sonatype Nexus
// Repository Manager.
type Nxrm struct {
	credentialProducer credsutil.CredentialsProducer
	// mux guards config: readers take RLock, Init/RotateRootCredentials take Lock.
	mux    sync.RWMutex
	config map[string]interface{}
}
// Type reports the plugin's database type name to Vault.
func (nxrm *Nxrm) Type() (string, error) {
	return "nxrm", nil
}
// SecretValues maps each sensitive config value to a redaction
// placeholder, for use by the error-sanitizer middleware.
func (nxrm *Nxrm) SecretValues() map[string]interface{} {
	nxrm.mux.RLock()
	defer nxrm.mux.RUnlock()
	redactions := map[string]interface{}{}
	for _, key := range []string{"password", "client_key"} {
		raw, present := nxrm.config[key]
		if !present {
			continue
		}
		if value, isString := raw.(string); isString {
			redactions[value] = "[" + key + "]"
		}
	}
	return redactions
}
// Init validates the inbound configuration, optionally verifies EDSM
// connectivity with a test read, and stores the config for later use.
// Required string fields: username, password, url. Optional string
// fields: ca_cert, ca_path, client_cert, client_key, tls_server_name.
// Optional bool: insecure.
func (nxrm *Nxrm) Init(ctx context.Context, config map[string]interface{}, verifyConnection bool) (map[string]interface{}, error) {
	required := []string{"username", "password", "url"}
	for _, field := range required {
		raw, present := config[field]
		if !present {
			return nil, fmt.Errorf(`%q must be provided`, field)
		}
		if _, isString := raw.(string); !isString {
			return nil, fmt.Errorf(`%q must be a string`, field)
		}
	}
	optional := []string{"ca_cert", "ca_path", "client_cert", "client_key", "tls_server_name"}
	for _, field := range optional {
		raw, present := config[field]
		if !present {
			continue
		}
		if _, isString := raw.(string); !isString {
			return nil, fmt.Errorf(`%q must be a string`, field)
		}
	}
	if raw, present := config["insecure"]; present {
		if _, isBool := raw.(bool); !isBool {
			return nil, errors.New(`"insecure" must be a bool`)
		}
	}
	client, err := buildClient(config)
	if err != nil {
		return nil, errwrap.Wrapf("couldn't make client with inbound config: {{err}}", err)
	}
	if verifyConnection {
		// Reading the admin user doubles as a connectivity/credential check.
		if _, err := client.GetUser(ctx, "admin"); err != nil {
			return nil, errwrap.Wrapf("client test of getting a user failed: {{err}}", err)
		}
	}
	nxrm.mux.Lock()
	defer nxrm.mux.Unlock()
	nxrm.config = config
	return nxrm.config, nil
}
// CreateUser provisions a new Nexus user with generated credentials and
// the roles listed in the role's creation statements; it returns the
// generated username and password.
func (nxrm *Nxrm) CreateUser(ctx context.Context, statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, _ time.Time) (string, string, error) {
	username, err := nxrm.credentialProducer.GenerateUsername(usernameConfig)
	if err != nil {
		return "", "", errwrap.Wrapf(fmt.Sprintf("unable to generate username for %q: {{err}}", usernameConfig), err)
	}
	password, err := nxrm.credentialProducer.GeneratePassword()
	if err != nil {
		return "", "", errwrap.Wrapf("unable to generate password: {{err}}", err)
	}
	creation, err := newCreationStatement(statements)
	if err != nil {
		return "", "", errwrap.Wrapf("unable to read creation_statements: {{err}}", err)
	}
	newUser := &User{
		UserId:       username,
		FirstName:    "vault user",
		LastName:     "vault user",
		EmailAddress: "vaultuser@example.com",
		Status:       "active",
		Password:     password,
		Roles:        creation.NxrmRoles,
	}
	nxrm.mux.RLock()
	defer nxrm.mux.RUnlock()
	client, err := buildClient(nxrm.config)
	if err != nil {
		return "", "", errwrap.Wrapf("unable to get client: {{err}}", err)
	}
	if err := client.CreateUser(ctx, username, newUser); err != nil {
		return "", "", errwrap.Wrapf(fmt.Sprintf("unable to create user name %s, user %q: {{err}}", username, newUser), err)
	}
	return username, password, nil
}
// RenewUser is currently a no-op.
// FIXME: confirm whether Nexus needs any server-side action on lease renewal.
func (nxrm *Nxrm) RenewUser(_ context.Context, _ dbplugin.Statements, _ string, _ time.Time) error {
	// FIXME
	return nil
}
// RevokeUser deletes the named user from Nexus; a delete failure is
// returned (wrapped) rather than swallowed.
func (nxrm *Nxrm) RevokeUser(ctx context.Context, statements dbplugin.Statements, username string) error {
	nxrm.mux.RLock()
	defer nxrm.mux.RUnlock()
	client, err := buildClient(nxrm.config)
	if err != nil {
		return errwrap.Wrapf("unable to get client: {{err}}", err)
	}
	var errs error
	if err := client.DeleteUser(ctx, username); err != nil {
		// Fix: the message previously said "create" for a delete failure.
		errs = multierror.Append(errs, errwrap.Wrapf(fmt.Sprintf("unable to delete user name %s: {{err}}", username), err))
	}
	return errs
}
// RotateRootCredentials generates a new root password, applies it via the
// Nexus API, and records it in the stored config.
func (nxrm *Nxrm) RotateRootCredentials(ctx context.Context, _ []string) (map[string]interface{}, error) {
	newPassword, err := nxrm.credentialProducer.GeneratePassword()
	if err != nil {
		return nil, errwrap.Wrapf("unable to generate root password: {{err}}", err)
	}
	nxrm.mux.Lock()
	defer nxrm.mux.Unlock()
	client, err := buildClient(nxrm.config)
	if err != nil {
		return nil, errwrap.Wrapf("unable to get client: {{err}}", err)
	}
	// Fix: the placeholder was "{{}}", so errwrap never interpolated the
	// wrapped error; it must be "{{err}}".
	if err := client.ChangePassword(ctx, nxrm.config["username"].(string), newPassword); err != nil {
		return nil, errwrap.Wrapf("unable to change password: {{err}}", err)
	}
	nxrm.config["password"] = newPassword
	return nxrm.config, nil
}
// Close is a no-op; the plugin holds no persistent connections.
func (nxrm *Nxrm) Close() error {
	return nil
}
// Initialize implements the legacy initialization interface by delegating
// to Init and discarding the returned config.
func (nxrm *Nxrm) Initialize(ctx context.Context, config map[string]interface{}, verifyConnection bool) error {
	_, err := nxrm.Init(ctx, config, verifyConnection)
	return err
}
// newCreationStatement decodes the first creation statement as JSON into
// a creationStatement; an empty statement list is an error.
func newCreationStatement(statements dbplugin.Statements) (*creationStatement, error) {
	if len(statements.Creation) == 0 {
		return nil, dbutil.ErrEmptyCreationStatement
	}
	raw := []byte(statements.Creation[0])
	stmt := &creationStatement{}
	if err := json.Unmarshal(raw, stmt); err != nil {
		return nil, errwrap.Wrapf(fmt.Sprintf("unable to unmarshal %s: {{err}}", raw), err)
	}
	return stmt, nil
}
// creationStatement is the JSON shape of a role's creation_statements:
// the Nexus roles granted to a generated user.
type creationStatement struct {
	NxrmRoles []string `json:"nxrm-roles"`
}
// buildClient constructs a Nexus API client from the given config.
// Fix: the original used unchecked type assertions, which panic when the
// config was never validated by Init (e.g. an empty map); assert safely
// and return an error instead.
func buildClient(config map[string]interface{}) (*Client, error) {
	username, ok := config["username"].(string)
	if !ok {
		return nil, errors.New(`"username" must be a string`)
	}
	password, ok := config["password"].(string)
	if !ok {
		return nil, errors.New(`"password" must be a string`)
	}
	baseURL, ok := config["url"].(string)
	if !ok {
		return nil, errors.New(`"url" must be a string`)
	}
	clientConfig := &ClientConfig{
		Username: username,
		Password: password,
		BaseURL:  baseURL,
	}
	client, err := NewClient(clientConfig)
	if err != nil {
		return nil, err
	}
	return client, nil
}
// GenerateCredentials returns a freshly generated password.
func (nxrm *Nxrm) GenerateCredentials(ctx context.Context) (string, error) {
	return nxrm.credentialProducer.GeneratePassword()
}
// SetCredentials (static roles) is not supported by this plugin.
func (nxrm *Nxrm) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticConfig dbplugin.StaticUserConfig) (username string, password string, err error) {
	return "", "", dbutil.Unimplemented()
}
|
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package analyze
import (
"bytes"
"fmt"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/gonum/plot"
"github.com/gonum/plot/plotter"
"github.com/gonum/plot/plotutil"
"github.com/gonum/plot/vg"
"github.com/gyuho/dataframe"
"github.com/gyuho/psn/process"
"github.com/spf13/cobra"
)
var (
	// Command is the "analyze" cobra sub-command: it aggregates dbtester
	// monitor/benchmark CSVs, plots the results, and writes a README.
	Command = &cobra.Command{
		Use: "analyze",
		Short: "Analyzes test results specific to dbtester.",
		RunE: CommandFunc,
	}
	// configPath is bound to the --config/-c flag in init.
	configPath string
)
// init registers the --config/-c flag on the analyze command.
func init() {
	Command.PersistentFlags().StringVarP(&configPath, "config", "c", "", "YAML configuration file path.")
}
// CommandFunc runs the four analysis stages driven by the YAML config:
//  1. per-database aggregation of monitor CSVs with the benchmark CSV,
//  2. cross-database aggregation of the step-1 outputs,
//  3. plotting the aggregates,
//  4. writing a README embedding the plots.
// It returns the first error encountered; partial outputs may remain on disk.
func CommandFunc(cmd *cobra.Command, args []string) error {
	cfg, err := ReadConfig(configPath)
	if err != nil {
		return err
	}
	println()
	log.Println("Step 1: aggregating each database...")
	for step1Idx, elem := range cfg.Step1 {
		var (
			frames = []dataframe.Frame{}
			// the common time window shared by all monitor files:
			// latest start and earliest end
			maxCommonMinUnixTime int64
			maxCommonMaxUnixTime int64
		)
		for i, monitorPath := range elem.DataPathList {
			log.Printf("Step 1-%d-%d: creating dataframe from %s", step1Idx, i, monitorPath)
			// fill in missing timestamps
			tb, err := process.ReadCSVFillIn(monitorPath)
			if err != nil {
				return err
			}
			ext := filepath.Ext(monitorPath)
			cPath := strings.Replace(monitorPath, ext, "-filled-in"+ext, -1)
			if err := tb.ToCSV(cPath); err != nil {
				return err
			}
			fr, err := dataframe.NewFromCSV(nil, cPath)
			if err != nil {
				return err
			}
			// keep only timestamp, CPU and RSS columns
			nf := dataframe.New()
			c1, err := fr.GetColumn("unix_ts")
			if err != nil {
				return err
			}
			c2, err := fr.GetColumn("CpuUsageFloat64")
			if err != nil {
				return err
			}
			c3, err := fr.GetColumn("VmRSSBytes")
			if err != nil {
				return err
			}
			if err = nf.AddColumn(c1); err != nil {
				return err
			}
			if err = nf.AddColumn(c2); err != nil {
				return err
			}
			if err = nf.AddColumn(c3); err != nil {
				return err
			}
			frames = append(frames, nf)
			// first/last non-nil timestamps bound this file's window
			fv, ok := c1.FrontNonNil()
			if !ok {
				return fmt.Errorf("FrontNonNil %s has empty Unix time %v", monitorPath, fv)
			}
			fs, ok := fv.ToString()
			if !ok {
				return fmt.Errorf("cannot ToString %v", fv)
			}
			fd, err := strconv.ParseInt(fs, 10, 64)
			if err != nil {
				return err
			}
			bv, ok := c1.BackNonNil()
			if !ok {
				return fmt.Errorf("BackNonNil %s has empty Unix time %v", monitorPath, fv)
			}
			bs, ok := bv.ToString()
			if !ok {
				return fmt.Errorf("cannot ToString %v", bv)
			}
			bd, err := strconv.ParseInt(bs, 10, 64)
			if err != nil {
				return err
			}
			if i == 0 {
				maxCommonMinUnixTime = fd
				maxCommonMaxUnixTime = bd
			}
			if maxCommonMinUnixTime < fd {
				maxCommonMinUnixTime = fd
			}
			if maxCommonMaxUnixTime > bd {
				maxCommonMaxUnixTime = bd
			}
		}
		// make all columns have equal row number, based on the column unix_ts
		// truncate all rows before maxCommonMinUnixTime and after maxCommonMaxUnixTime
		minTS := fmt.Sprintf("%d", maxCommonMinUnixTime)
		maxTS := fmt.Sprintf("%d", maxCommonMaxUnixTime)
		frMonitor := dataframe.New()
		for i := range frames {
			uc, err := frames[i].GetColumn("unix_ts")
			if err != nil {
				return err
			}
			j, ok := uc.FindValue(dataframe.NewStringValue(minTS))
			if !ok {
				return fmt.Errorf("%v does not exist in %s", minTS, elem.DataPathList[i])
			}
			k, ok := uc.FindValue(dataframe.NewStringValue(maxTS))
			if !ok {
				return fmt.Errorf("%v does not exist in %s", maxTS, elem.DataPathList[i])
			}
			for _, hd := range frames[i].GetHeader() {
				// only the first frame contributes the shared unix_ts column
				if i > 0 && hd == "unix_ts" {
					continue
				}
				var col dataframe.Column
				col, err = frames[i].GetColumn(hd)
				if err != nil {
					return err
				}
				if err = col.KeepRows(j, k+1); err != nil {
					return err
				}
				if hd != "unix_ts" {
					switch hd {
					case "CpuUsageFloat64":
						hd = "cpu"
					case "VmRSSBytes":
						hd = "memory_mb"
						// to bytes to mb
						colN := col.RowNumber()
						for rowIdx := 0; rowIdx < colN; rowIdx++ {
							var rowV dataframe.Value
							rowV, err = col.GetValue(rowIdx)
							if err != nil {
								return err
							}
							fv, _ := rowV.ToNumber()
							frv := float64(fv) * 0.000001
							if err = col.SetValue(rowIdx, dataframe.NewStringValue(fmt.Sprintf("%.2f", frv))); err != nil {
								return err
							}
						}
					}
					// suffix with the 1-based monitor index, e.g. cpu_1
					col.UpdateHeader(fmt.Sprintf("%s_%d", hd, i+1))
				}
				if err = frMonitor.AddColumn(col); err != nil {
					return err
				}
			}
		}
		log.Printf("Step 1-%d-%d: creating dataframe from %s", step1Idx, len(elem.DataPathList), elem.DataBenchmarkPath)
		colMonitorUnixTs, err := frMonitor.GetColumn("unix_ts")
		if err != nil {
			return err
		}
		// need to combine frMonitor to frBench
		frBench, err := dataframe.NewFromCSV(nil, elem.DataBenchmarkPath)
		if err != nil {
			return err
		}
		colBenchUnixTs, err := frBench.GetColumn("unix_ts")
		if err != nil {
			return err
		}
		fv, ok := colBenchUnixTs.FrontNonNil()
		if !ok {
			return fmt.Errorf("FrontNonNil %s has empty Unix time %v", elem.DataBenchmarkPath, fv)
		}
		startRowMonitor, ok := colMonitorUnixTs.FindValue(fv)
		if !ok {
			return fmt.Errorf("%v is not found in monitor results %q", fv, elem.DataPathList)
		}
		bv, ok := colBenchUnixTs.BackNonNil()
		if !ok {
			return fmt.Errorf("BackNonNil %s has empty Unix time %v", elem.DataBenchmarkPath, bv)
		}
		endRowMonitor, ok := colMonitorUnixTs.FindValue(bv)
		if !ok { // monitor short of rows
			endRowMonitor = colMonitorUnixTs.RowNumber() - 1
		}
		// benchLastIdx: shortest column length across the benchmark frame
		var benchLastIdx int
		for _, col := range frBench.GetColumns() {
			if benchLastIdx == 0 {
				benchLastIdx = col.RowNumber()
			}
			if benchLastIdx > col.RowNumber() {
				benchLastIdx = col.RowNumber()
			}
		}
		benchLastIdx--
		if benchLastIdx+1 < endRowMonitor-startRowMonitor+1 { // benchmark is short of rows
			endRowMonitor = startRowMonitor + benchLastIdx
		} else { // monitor is short of rows
			benchLastIdx = endRowMonitor - startRowMonitor
		}
		for _, hd := range frMonitor.GetHeader() {
			if hd == "unix_ts" {
				continue
			}
			var col dataframe.Column
			col, err = frMonitor.GetColumn(hd)
			if err != nil {
				return err
			}
			if err = col.KeepRows(startRowMonitor, endRowMonitor); err != nil {
				return err
			}
			if err = frBench.AddColumn(col); err != nil {
				return err
			}
		}
		log.Printf("Step 1-%d-%d: calculating average values", step1Idx, len(elem.DataPathList)+1)
		var (
			sampleSize              = float64(len(elem.DataPathList))
			cumulativeThroughputCol = dataframe.NewColumn("cumulative_throughput")
			totalThrougput          int
			avgCpuCol               = dataframe.NewColumn("avg_cpu")
			avgMemCol               = dataframe.NewColumn("avg_memory_mb")
		)
		for i := 0; i < benchLastIdx; i++ {
			var (
				cpuTotal    float64
				memoryTotal float64
			)
			for _, col := range frBench.GetColumns() {
				var rv dataframe.Value
				rv, err = col.GetValue(i)
				if err != nil {
					return err
				}
				fv, _ := rv.ToNumber()
				switch {
				case strings.HasPrefix(col.GetHeader(), "cpu_"):
					cpuTotal += fv
				case strings.HasPrefix(col.GetHeader(), "memory_"):
					memoryTotal += fv
				case col.GetHeader() == "throughput":
					fv, _ := rv.ToNumber()
					totalThrougput += int(fv)
					cumulativeThroughputCol.PushBack(dataframe.NewStringValue(totalThrougput))
				}
			}
			avgCpuCol.PushBack(dataframe.NewStringValue(fmt.Sprintf("%.2f", cpuTotal/sampleSize)))
			avgMemCol.PushBack(dataframe.NewStringValue(fmt.Sprintf("%.2f", memoryTotal/sampleSize)))
		}
		log.Printf("Step 1-%d-%d: combine %s and %q", step1Idx, len(elem.DataPathList)+2, elem.DataBenchmarkPath, elem.DataPathList)
		unixTsCol, err := frBench.GetColumn("unix_ts")
		if err != nil {
			return err
		}
		latencyCol, err := frBench.GetColumn("avg_latency_ms")
		if err != nil {
			return err
		}
		throughputCol, err := frBench.GetColumn("throughput")
		if err != nil {
			return err
		}
		// NOTE(review): AddColumn errors are ignored below — presumably safe
		// for freshly constructed frames, but worth confirming.
		aggFr := dataframe.New()
		aggFr.AddColumn(unixTsCol)
		aggFr.AddColumn(latencyCol)
		aggFr.AddColumn(throughputCol)
		aggFr.AddColumn(cumulativeThroughputCol)
		for _, hd := range frBench.GetHeader() {
			col, err := frBench.GetColumn(hd)
			if err != nil {
				return err
			}
			switch {
			case strings.HasPrefix(hd, "cpu_"):
				aggFr.AddColumn(col)
			case strings.HasPrefix(hd, "memory_"):
				aggFr.AddColumn(col)
			}
		}
		aggFr.AddColumn(avgCpuCol)
		aggFr.AddColumn(avgMemCol)
		log.Printf("Step 1-%d-%d: saving to %s", step1Idx, len(elem.DataPathList)+3, elem.OutputPath)
		if err := aggFr.ToCSV(elem.OutputPath); err != nil {
			return err
		}
		println()
	}
	println()
	log.Println("Step 2: aggregating aggregates...")
	for step2Idx, elem := range cfg.Step2 {
		var (
			frames = []dataframe.Frame{}
			// maxSize: longest unix_ts column across all step-1 outputs
			maxSize int
		)
		for i, data := range elem.DataList {
			log.Printf("Step 2-%d-%d: creating dataframe from %s...", step2Idx, i, data.Path)
			fr, err := dataframe.NewFromCSV(nil, data.Path)
			if err != nil {
				return err
			}
			frames = append(frames, fr)
			col, err := fr.GetColumn("unix_ts")
			if err != nil {
				return err
			}
			rNum := col.RowNumber()
			if maxSize < rNum {
				maxSize = rNum
			}
		}
		// replace absolute timestamps with a shared 0-based "second" axis
		nf := dataframe.New()
		secondCol := dataframe.NewColumn("second")
		for i := 0; i < maxSize; i++ {
			secondCol.PushBack(dataframe.NewStringValue(i))
		}
		nf.AddColumn(secondCol)
		colsToKeep := []string{"avg_latency_ms", "throughput", "cumulative_throughput", "avg_cpu", "avg_memory_mb"}
		for i, fr := range frames {
			dbID := elem.DataList[i].Name
			log.Printf("Step 2-%d-%d: cleaning up %s...", step2Idx, i, dbID)
			for _, col := range fr.GetColumns() {
				toSkip := true
				for _, cv := range colsToKeep {
					if col.GetHeader() == cv {
						toSkip = false
						break
					}
				}
				if toSkip {
					continue
				}
				// pad shorter columns with nils up to maxSize
				if err := col.Appends(dataframe.NewStringValueNil(), maxSize); err != nil {
					return err
				}
				col.UpdateHeader(fmt.Sprintf("%s_%s", col.GetHeader(), dbID))
				nf.AddColumn(col)
			}
		}
		log.Printf("Step 2-%d: saving to %s...", step2Idx, elem.OutputPath)
		if err := nf.ToCSV(elem.OutputPath); err != nil {
			return err
		}
	}
	println()
	log.Println("Step 3: plotting...")
	plot.DefaultFont = "Helvetica"
	plotter.DefaultLineStyle.Width = vg.Points(1.5)
	plotter.DefaultGlyphStyle.Radius = vg.Points(2.0)
	var (
		plotWidth  = 12 * vg.Inch
		plotHeight = 8 * vg.Inch
	)
	for step3Idx, elem := range cfg.Step3 {
		fr, err := dataframe.NewFromCSV(nil, elem.DataPath)
		if err != nil {
			return err
		}
		log.Printf("Step 3-%d: %s with %q", step3Idx, elem.DataPath, fr.GetHeader())
		for i, pelem := range elem.PlotList {
			log.Printf("Step 3-%d-%d: %s at %q", step3Idx, i, pelem.YAxis, pelem.OutputPathList)
			pl, err := plot.New()
			if err != nil {
				return err
			}
			pl.Title.Text = fmt.Sprintf("%s, %s", cfg.Titles[step3Idx], pelem.YAxis)
			pl.X.Label.Text = pelem.XAxis
			pl.Y.Label.Text = pelem.YAxis
			pl.Legend.Top = true
			// args alternates legend string / point set, as AddLines expects
			var args []interface{}
			for _, line := range pelem.Lines {
				col, err := fr.GetColumn(line.Column)
				if err != nil {
					return err
				}
				pt, err := points(col)
				if err != nil {
					return err
				}
				args = append(args, line.Legend, pt)
			}
			if err = plotutil.AddLines(pl, args...); err != nil {
				return err
			}
			for _, outputPath := range pelem.OutputPathList {
				if err = pl.Save(plotWidth, plotHeight, outputPath); err != nil {
					return err
				}
			}
		}
	}
	println()
	log.Println("Step 4: writing README...")
	rdBuf := new(bytes.Buffer)
	rdBuf.WriteString("\n\n")
	rdBuf.WriteString(cfg.Step4.Preface)
	rdBuf.WriteString("\n\n\n")
	for i, result := range cfg.Step4.Results {
		rdBuf.WriteString(fmt.Sprintf("<br><br><hr>\n##### %s", cfg.Titles[i]))
		rdBuf.WriteString("\n\n")
		for _, img := range result.Images {
			imgPath := ""
			switch img.ImageType {
			case "local":
				imgPath = "./" + filepath.Base(img.ImagePath)
				rdBuf.WriteString(fmt.Sprintf("\n\n", img.ImageTitle, imgPath))
			case "remote":
				rdBuf.WriteString(fmt.Sprintf(`<img src="%s" alt="%s">`, img.ImagePath, img.ImageTitle))
				rdBuf.WriteString("\n\n")
			default:
				return fmt.Errorf("%s is not supported", img.ImageType)
			}
		}
		rdBuf.WriteString("\n\n")
	}
	if err := toFile(rdBuf.String(), cfg.Step4.OutputPath); err != nil {
		return err
	}
	println()
	log.Println("FINISHED!")
	return nil
}
func toFile(txt, fpath string) error {
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_TRUNC, 0777)
if err != nil {
f, err = os.Create(fpath)
if err != nil {
return err
}
}
defer f.Close()
if _, err := f.WriteString(txt); err != nil {
return err
}
return nil
}
// points converts a column into XY pairs: X is the row index and Y the
// numeric cell value, truncated at the last non-nil entry.
func points(col dataframe.Column) (plotter.XYs, error) {
	last, ok := col.BackNonNil()
	if !ok {
		return nil, fmt.Errorf("BackNonNil not found")
	}
	rowN, ok := col.FindLastValue(last)
	if !ok {
		return nil, fmt.Errorf("not found %v", last)
	}
	xys := make(plotter.XYs, rowN)
	for i := 0; i < rowN; i++ {
		cell, err := col.GetValue(i)
		if err != nil {
			return nil, err
		}
		y, _ := cell.ToNumber()
		xys[i].X = float64(i)
		xys[i].Y = y
	}
	return xys, nil
}
|
package edsm
import (
"encoding/json"
"errors"
"goed/edGalaxy"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"sync"
"time"
)
// edsmSysInfo2galaxyBriefSystemInfo converts EDSM system info into the
// galaxy-level brief info; a nil input yields nil.
func edsmSysInfo2galaxyBriefSystemInfo(info *EDSMSysInfo) *edGalaxy.BriefSystemInfo {
	if info == nil {
		return nil
	}
	brief := &edGalaxy.BriefSystemInfo{
		Allegiance:   info.Allegiance,
		Government:   info.Government,
		Faction:      info.Faction,
		FactionState: info.FactionState,
		Population:   info.Population,
		Reserve:      info.Reserve,
		Security:     info.Security,
		Economy:      info.Economy,
	}
	return brief
}
// edsmSystem2GalaxySummary converts an EDSM system record into the
// galaxy summary representation; a nil input yields nil.
func edsmSystem2GalaxySummary(eds *EDSMSystemV1) *edGalaxy.SystemSummary {
	if eds == nil {
		return nil
	}
	// Fix: eds.PrimaryStar was dereferenced unconditionally; a record
	// without a primary star would panic.
	var star *edGalaxy.StarInfo
	if eds.PrimaryStar != nil {
		star = &edGalaxy.StarInfo{
			Name:        eds.PrimaryStar.Name,
			Type:        eds.PrimaryStar.Type,
			IsScoopable: eds.PrimaryStar.IsScoopable,
		}
	}
	return &edGalaxy.SystemSummary{
		Name:        eds.Name,
		EDSMid:      eds.EDSMid,
		EDSMid64:    eds.EDSMid64,
		EDDBid:      0, // EDDB id is not known from EDSM data
		Coords:      eds.Coords,
		BriefInfo:   edsmSysInfo2galaxyBriefSystemInfo(eds.SystemInfo),
		PrimaryStar: star,
	}
}
// cachedEDSMSystemV1 is a cache entry: the system plus the time it was
// fetched (entries older than one hour are considered expired).
type cachedEDSMSystemV1 struct {
	system    *EDSMSystemV1
	timestamp time.Time
}
// FetchEDSMSystemReply is the answer to a GetSystemInfo request; exactly
// one of System or Err is meaningful.
type FetchEDSMSystemReply struct {
	RequestedSystemName string
	System              *EDSMSystemV1
	Err                 error
}
const (
	// max_concurrent_edsm_requests caps in-flight EDSM HTTP requests.
	// NOTE(review): Go convention would be maxConcurrentEDSMRequests;
	// left unchanged because other code references this name.
	max_concurrent_edsm_requests = 10
)
// EDSMConnector fetches system data from edsm.net with an in-memory
// cache and a cap on concurrent requests.
type EDSMConnector struct {
	tr *http.Transport
	// mtx guards systemsCache and aliveRequests.
	mtx          sync.RWMutex
	systemsCache map[string]*cachedEDSMSystemV1
	// aliveRequests counts in-flight EDSM fetches; bounded by maxRequests.
	aliveRequests int
	maxRequests   int
}
// normalizeSystemName canonicalizes a system name for cache lookups;
// EDSM system names are matched case-insensitively.
func normalizeSystemName(name string) string {
	return strings.ToUpper(name)
}
// NewEDSMConnector builds a connector with its own HTTP transport and an
// empty system cache. maxConnections is clamped to
// [1, max_concurrent_edsm_requests].
func NewEDSMConnector(maxConnections int) *EDSMConnector {
	switch {
	case maxConnections < 1:
		maxConnections = 1
	case maxConnections > max_concurrent_edsm_requests:
		maxConnections = max_concurrent_edsm_requests
	}
	transport := &http.Transport{
		MaxIdleConns:       10,
		IdleConnTimeout:    30 * time.Second,
		DisableCompression: true,
	}
	return &EDSMConnector{
		tr:            transport,
		systemsCache:  map[string]*cachedEDSMSystemV1{},
		aliveRequests: 0,
		maxRequests:   maxConnections,
	}
}
// Close releases idle HTTP connections held by the transport.
func (c *EDSMConnector) Close() {
	c.tr.CloseIdleConnections()
}
// SystemSummaryByName resolves a system (from cache or EDSM) and sends a
// SystemSummaryReply on rplChannel; it blocks until the reply is sent.
func (c *EDSMConnector) SystemSummaryByName(systemName string, rplChannel edGalaxy.SystemSummaryReplyChan) {
	fetchC := make(chan *FetchEDSMSystemReply)
	go c.GetSystemInfo(systemName, fetchC)
	fetched := <-fetchC
	reply := &edGalaxy.SystemSummaryReply{
		RequestedSystemName: fetched.RequestedSystemName,
		System:              edsmSystem2GalaxySummary(fetched.System),
		Err:                 fetched.Err,
	}
	rplChannel <- reply
}
// fetchSystem queries the EDSM api-v1/system endpoint for one system and
// decodes the response. An empty-array response means the system is
// unknown to EDSM.
func (c *EDSMConnector) fetchSystem(systemName string) (*EDSMSystemV1, error) {
	client := &http.Client{
		Transport: c.tr,
		Timeout:   20 * time.Second,
	}
	formData := url.Values{
		"showId":          {"1"},
		"showCoordinates": {"1"},
		"showInformation": {"1"},
		"showPrimaryStar": {"1"},
	}
	formData.Add("systemName", systemName)
	log.Printf("Getting info on %s\n", systemName)
	resp, err := client.PostForm("https://www.edsm.net/api-v1/system", formData)
	if err != nil {
		log.Printf("Failed post system %s : %v\n", systemName, err)
		return nil, err
	}
	// Fix: Close was skipped when ReadAll failed, leaking the connection;
	// defer guarantees it runs on every return path.
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Printf("Failed read system %s : %v\n", systemName, err)
		return nil, err
	}
	if len(body) < 10 {
		log.Printf("Failed parse system %s (data too short): %s\n", systemName, string(body))
		if string(body) == "[]" {
			return nil, errors.New("Unknown system.")
		}
		// Fix: message previously read "not known of EDSM failure".
		return nil, errors.New("System is not known or EDSM failure")
	}
	var si EDSMSystemV1_T
	if err = json.Unmarshal(body, &si); err != nil {
		log.Printf("Failed parse system %s data %s: %v\n", systemName, string(body), err)
		return nil, err
	}
	log.Println(string(body))
	log.Printf("Checking interface: its a %v\n", si.SI)
	return temp2publicSysteinfo(&si), nil
}
// temp2publicSysteinfo converts the wire-format record into the public
// EDSMSystemV1, decoding the raw "information" JSON when present;
// SystemInfo stays nil when that JSON does not parse.
func temp2publicSysteinfo(t *EDSMSystemV1_T) *EDSMSystemV1 {
	out := &EDSMSystemV1{
		Name:         t.Name,
		EDSMid:       t.EDSMid,
		EDSMid64:     t.EDSMid64,
		Coords:       t.Coords.Clone(),
		CoordsLocked: t.CoordsLocked,
		PrimaryStar:  &t.PrimaryStar,
	}
	info := new(EDSMSysInfo)
	if json.Unmarshal(t.SI, info) == nil {
		out.SystemInfo = info
	}
	return out
}
// GetSystemInfo answers on rplChannel with a cached system (fresh within
// one hour), an expired cached copy when all fetch slots are busy, or a
// freshly fetched record from EDSM.
func (c *EDSMConnector) GetSystemInfo(systemName string, rplChannel chan *FetchEDSMSystemReply) {
	usn := normalizeSystemName(systemName)
	c.mtx.RLock()
	cs, here := c.systemsCache[usn]
	// NOTE(review): this check and the later increment are not one atomic
	// step, so slightly more than maxRequests fetches can run concurrently
	// under contention — confirm whether that bound must be strict.
	mayAskEDSM := c.aliveRequests < c.maxRequests
	c.mtx.RUnlock() // must NOT defer: the fetch below can take seconds
	if here {
		if time.Now().Sub(cs.timestamp).Hours() < 1 {
			log.Printf("Returning cached system %s\n", systemName)
			rplChannel <- &FetchEDSMSystemReply{systemName, cs.system, nil}
			return
		}
		log.Printf("System %s is expired in the cache\n", systemName)
		if !mayAskEDSM {
			log.Printf("Returning EXPIRED cached system %s (no free slots)\n", systemName)
			rplChannel <- &FetchEDSMSystemReply{systemName, cs.system, nil}
			return
		}
	}
	if !mayAskEDSM {
		rplChannel <- &FetchEDSMSystemReply{systemName, nil, errors.New("all fetchers are busy")}
		return
	}
	c.incAliveRequestsCount()
	s, err := c.fetchSystem(systemName)
	c.decAliveRequestsCount()
	if err != nil {
		rplChannel <- &FetchEDSMSystemReply{systemName, nil, err}
		return
	}
	// cache under the name EDSM reports, which may differ in case
	usn = normalizeSystemName(s.Name)
	if len(usn) > 1 {
		c.mtx.Lock()
		c.systemsCache[usn] = &cachedEDSMSystemV1{s, time.Now()}
		c.mtx.Unlock()
		rplChannel <- &FetchEDSMSystemReply{systemName, s, nil}
		return
	}
	// Fix: the format string had two verbs but only one argument, which
	// printed "%!s(MISSING)"; log both the requested and returned names.
	log.Printf("Strange data for system %s: %s\n", systemName, s.Name)
	rplChannel <- &FetchEDSMSystemReply{systemName, nil, errors.New("Inconsistent data")}
}
// incAliveRequestsCount atomically bumps the in-flight fetch counter.
func (c *EDSMConnector) incAliveRequestsCount() {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.aliveRequests++
}
// decAliveRequestsCount atomically drops the in-flight fetch counter.
func (c *EDSMConnector) decAliveRequestsCount() {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.aliveRequests--
}
|
package main
import (
"io/ioutil"
"net/http"
"fmt"
)
// page is a wiki page persisted on disk as "<Title>.txt".
type page struct {
	Title string
	// Fix: gofmt formatting — "Body[]byte" lacked the separating space.
	Body []byte
}
// save writes the page body to "<Title>.txt" with 0600 permissions.
func (p *page) save() error {
	return ioutil.WriteFile(p.Title+".txt", p.Body, 0600)
}
// load reads "<title>.txt" from disk and returns it as a page.
func load(title string) (*page, error) {
	body, err := ioutil.ReadFile(title + ".txt")
	if err != nil {
		return nil, err
	}
	return &page{Title: title, Body: body}, nil
}
// view renders the page named by the URL path below /Test/.
func view(w http.ResponseWriter, r *http.Request) {
	title := r.URL.Path[len("/Test/"):]
	// Fix: the load error was discarded, leaving p nil when the page file
	// is missing and crashing on p.Title below; answer 404 instead.
	p, err := load(title)
	if err != nil {
		http.NotFound(w, r)
		return
	}
	fmt.Fprintf(w, "<h1>%s</h1><div>%s</div>", p.Title, p.Body)
}
// main seeds a demo page on disk and serves it on :8000 under /Test/.
func main() {
	p := &page{Title: "Test", Body: []byte("BRING IT ON.... ")}
	// errors ignored: demo code; a save/listen failure is visible at runtime
	p.save()
	http.HandleFunc("/Test/", view)
	http.ListenAndServe(":8000", nil)
}
|
// Map project main.go
package main
import (
"fmt"
)
// myMap holds a latitude/longitude pair, both kept as strings.
type myMap struct {
	lat, long string
}
// main demonstrates Go map operations: creation, literals, update,
// insert, delete, iteration and the comma-ok lookup idiom.
// Note: map iteration order is intentionally random in Go.
func main() {
	// declare then allocate with make
	var m map[string]myMap
	m = make(map[string]myMap)
	m["John"] = myMap{"40", "40"}
	// literal forms: with and without the redundant element type
	m2 := map[string]myMap{"Amy": {"10", "10"}, "Alex": {"20", "20"}}
	m3 := map[string]myMap{"Kyle": myMap{"30", "30"}}
	fmt.Println(m)
	fmt.Println(m2)
	fmt.Println(m3)
	// overwrite an existing key
	m2["Amy"] = myMap{"11", "11"}
	fmt.Println(m2)
	fmt.Println(m2["Amy"])
	// insert a new key
	m2["Zach"] = myMap{"50", "50"}
	for k, v := range m2 {
		fmt.Println(k, v)
	}
	// fmt.Println(len(m2), m2)
	delete(m2, "Alex")
	for k, v := range m2 {
		fmt.Println(k, v)
	}
	// comma-ok reports whether the key is present
	v, ok := m2["Zach"]
	fmt.Println("The value:", v, "Present?", ok)
	v, ok = m2["Alex"]
	fmt.Println("The value:", v, "Present?", ok)
	// fmt.Println(len(m2), m2)
	normap := make(map[string]int)
	normap["A"] = 1
	normap["B"] = 2
	fmt.Println(normap)
	normap2 := map[string]int{"C": 3, "D": 3}
	fmt.Println(normap2)
}
|
package ent_ex
// verifyInfo carries the credentials submitted for account verification.
// NOTE(review): Password is serialized in plain text via the json tag —
// confirm this payload is never logged or persisted.
type verifyInfo struct {
	Email    string `json:"email"`
	Tel      string `json:"tel"`
	Password string `json:"password"`
}
|
package flatten
//func flattenCloud(in v1beta1.Cloud) []interface{} {
// att := make(map[string]interface{})
//
// if len(in.Profile) > 0 {
// att["profile"] = in.Profile
// }
// if len(in.Region) > 0 {
// att["region"] = in.Region
// }
// att["secret_binding_ref"] = flattenLocalObjectReference(&in.SecretBindingRef)
// if in.Seed != nil {
// att["seed"] = in.Seed
// }
// if in.AWS != nil {
// att["aws"] = flattenCloudAWS(in.AWS)
// }
// if in.GCP != nil {
// att["gcp"] = flattenCloudGCP(in.GCP)
// }
// if in.Azure != nil {
// att["azure"] = flattenCloudAzure(in.Azure)
// }
// return []interface{}{att}
//}
//func flattenCloudAWS(in *v1beta1.AWSCloud) []interface{} {
// att := make(map[string]interface{})
//if in.MachineImage != nil {
// image := make(map[string]interface{})
// if len(in.MachineImage.Name) > 0 {
// image["name"] = in.MachineImage.Name
// }
// if len(in.MachineImage.Version) > 0 {
// image["version"] = in.MachineImage.Version
// }
// att["machine_image"] = []interface{}{image}
//}
//networks := make(map[string]interface{})
//if in.Networks.Nodes != nil {
// networks["nodes"] = in.Networks.Nodes
//}
//if in.Networks.Pods != nil {
// networks["pods"] = in.Networks.Pods
//}
//if in.Networks.Services != nil {
// networks["services"] = in.Networks.Services
//}
//vpc := make(map[string]interface{})
//if in.Networks.VPC.ID != nil {
// vpc["id"] = in.Networks.VPC.ID
//}
//if in.Networks.VPC.CIDR != nil {
// vpc["cidr"] = in.Networks.VPC.CIDR
//}
//networks["vpc"] = []interface{}{vpc}
//if in.Networks.Internal != nil {
// networks["internal"] = newStringSet(schema.HashString, in.Networks.Internal)
//}
//if in.Networks.Public != nil {
// networks["public"] = newStringSet(schema.HashString, in.Networks.Public)
//}
//if in.Networks.Workers != nil {
// networks["workers"] = newStringSet(schema.HashString, in.Networks.Workers)
//}
//att["networks"] = []interface{}{networks}
//if len(in.Workers) > 0 {
// workers := make([]interface{}, len(in.Workers))
// for i, v := range in.Workers {
// m := map[string]interface{}{}
//
// if v.Name != "" {
// m["name"] = v.Name
// }
// if v.MachineType != "" {
// m["machine_type"] = v.MachineType
// }
// if v.AutoScalerMin != 0 {
// m["auto_scaler_min"] = v.AutoScalerMin
// }
// if v.AutoScalerMax != 0 {
// m["auto_scaler_max"] = v.AutoScalerMax
// }
// if v.MaxSurge != nil {
// m["max_surge"] = v.MaxSurge.IntValue()
// }
// if v.MaxUnavailable != nil {
// m["max_unavailable"] = v.MaxUnavailable.IntValue()
// }
// if len(v.Annotations) > 0 {
// m["annotations"] = v.Annotations
// }
// if len(v.Labels) > 0 {
// m["labels"] = v.Labels
// }
// if len(v.Taints) > 0 {
// taints := make([]interface{}, len(v.Taints))
// for i, v := range v.Taints {
// m := map[string]interface{}{}
//
// if v.Key != "" {
// m["key"] = v.Key
// }
// // if v.Operator != "" {
// // m["operator"] = v.Operator
// // }
// if v.Value != "" {
// m["value"] = v.Value
// }
// if v.Effect != "" {
// m["effect"] = v.Effect
// }
// taints[i] = m
// }
// m["taints"] = taints
// }
// if len(v.VolumeType) > 0 {
// m["volume_type"] = v.VolumeType
// }
// if len(v.VolumeSize) > 0 {
// m["volume_size"] = v.VolumeSize
// }
// workers[i] = m
// }
// att["worker"] = workers
// if in.Zones != nil {
// att["zones"] = newStringSet(schema.HashString, in.Zones)
// }
//}
// return []interface{}{att}
//}
//func flattenCloudGCP(in *v1beta1.GCPCloud) []interface{} {
// att := make(map[string]interface{})
//if in.MachineImage != nil {
// image := make(map[string]interface{})
// if len(in.MachineImage.Name) > 0 {
// image["name"] = in.MachineImage.Name
// }
// if len(in.MachineImage.Version) > 0 {
// image["version"] = in.MachineImage.Version
// }
// att["machine_image"] = []interface{}{image}
//}
//networks := make(map[string]interface{})
//if in.Networks.Workers != nil {
// networks["workers"] = newStringSet(schema.HashString, in.Networks.Workers)
//}
//if in.Networks.Internal != nil {
// networks["internal"] = *in.Networks.Internal
//}
//att["networks"] = []interface{}{networks}
//if len(in.Workers) > 0 {
// workers := make([]interface{}, len(in.Workers))
// for i, v := range in.Workers {
// m := map[string]interface{}{}
//
// if v.Name != "" {
// m["name"] = v.Name
// }
// if v.MachineType != "" {
// m["machine_type"] = v.MachineType
// }
// if v.AutoScalerMin != 0 {
// m["auto_scaler_min"] = v.AutoScalerMin
// }
// if v.AutoScalerMax != 0 {
// m["auto_scaler_max"] = v.AutoScalerMax
// }
// if v.MaxSurge != nil {
// m["max_surge"] = v.MaxSurge.IntValue()
// }
// if v.MaxUnavailable != nil {
// m["max_unavailable"] = v.MaxUnavailable.IntValue()
// }
// if len(v.Annotations) > 0 {
// m["annotations"] = v.Annotations
// }
// if len(v.Labels) > 0 {
// m["labels"] = v.Labels
// }
// if len(v.Taints) > 0 {
// taints := make([]interface{}, len(v.Taints))
// for i, v := range v.Taints {
// m := map[string]interface{}{}
//
// if v.Key != "" {
// m["key"] = v.Key
// }
// // if v.Operator != "" {
// // m["operator"] = v.Operator
// // }
// if v.Value != "" {
// m["value"] = v.Value
// }
// if v.Effect != "" {
// m["effect"] = v.Effect
// }
// taints[i] = m
// }
// m["taints"] = taints
// }
// if len(v.VolumeType) > 0 {
// m["volume_type"] = v.VolumeType
// }
// if len(v.VolumeSize) > 0 {
// m["volume_size"] = v.VolumeSize
// }
// workers[i] = m
// }
// att["worker"] = workers
// if in.Zones != nil {
// att["zones"] = newStringSet(schema.HashString, in.Zones)
// }
//}
// return []interface{}{att}
//}
//func flattenCloudAzure(in *v1beta1.AzureCloud) []interface{} {
// att := make(map[string]interface{})
//
// if in.MachineImage != nil {
// image := make(map[string]interface{})
// if len(in.MachineImage.Name) > 0 {
// image["name"] = in.MachineImage.Name
// }
// if len(in.MachineImage.Version) > 0 {
// image["version"] = in.MachineImage.Version
// }
// att["machine_image"] = []interface{}{image}
// }
// networks := make(map[string]interface{})
//
// vnet := make(map[string]interface{})
// if in.Networks.VNet.Name != nil {
// vnet["id"] = in.Networks.VNet.Name
// }
// if in.Networks.VNet.CIDR != nil {
// vnet["cidr"] = in.Networks.VNet.CIDR
// }
// networks["vnet"] = []interface{}{vnet}
// if in.Networks.Workers != "" {
// networks["workers"] = in.Networks.Workers
// }
// att["networks"] = []interface{}{networks}
// if len(in.Workers) > 0 {
// workers := make([]interface{}, len(in.Workers))
// for i, v := range in.Workers {
// m := map[string]interface{}{}
//
// if v.Name != "" {
// m["name"] = v.Name
// }
// if v.MachineType != "" {
// m["machine_type"] = v.MachineType
// }
// if v.AutoScalerMin != 0 {
// m["auto_scaler_min"] = v.AutoScalerMin
// }
// if v.AutoScalerMax != 0 {
// m["auto_scaler_max"] = v.AutoScalerMax
// }
// if v.MaxSurge != nil {
// m["max_surge"] = v.MaxSurge.IntValue()
// }
// if v.MaxUnavailable != nil {
// m["max_unavailable"] = v.MaxUnavailable.IntValue()
// }
// if len(v.Annotations) > 0 {
// m["annotations"] = v.Annotations
// }
// if len(v.Labels) > 0 {
// m["labels"] = v.Labels
// }
// if len(v.Taints) > 0 {
// taints := make([]interface{}, len(v.Taints))
// for i, v := range v.Taints {
// m := map[string]interface{}{}
//
// if v.Key != "" {
// m["key"] = v.Key
// }
// // if v.Operator != "" {
// // m["operator"] = v.Operator
// // }
// if v.Value != "" {
// m["value"] = v.Value
// }
// if v.Effect != "" {
// m["effect"] = v.Effect
// }
// taints[i] = m
// }
// m["taints"] = taints
// }
// if len(v.VolumeType) > 0 {
// m["volume_type"] = v.VolumeType
// }
// if len(v.VolumeSize) > 0 {
// m["volume_size"] = v.VolumeSize
// }
// workers[i] = m
// }
// att["worker"] = workers
// }
//
// return []interface{}{att}
//}
|
package service
import (
"easynote/bean"
"easynote/mongodb"
"fmt"
"gopkg.in/mgo.v2/bson"
//"strings"
//"log"
)
// IsUserExist reports whether a user with the given name exists in the
// t_user collection of the test database. Any lookup error (including
// mgo's not-found) is treated as "does not exist".
//
// NOTE(review): the session from GetSession is shared and never
// copied/closed here — confirm GetSession's concurrency contract.
func IsUserExist(name string) bool {
	session := mongodb.GetSession()
	// Bug fix: the original used fmt.Println with a %s verb, which does not
	// format and printed the literal "name = %s" followed by the value.
	fmt.Printf("name = %s\n", name)
	c := session.DB("test").C("t_user")
	if c == nil {
		fmt.Println("get t_user fail")
		return false
	}
	fmt.Println("get t_user success")
	result := bean.UserBean{}
	err := c.Find(bson.M{"name": name}).One(&result)
	if err != nil {
		return false
	}
	fmt.Printf("query name = %s,password = %s\n", result.Name, result.Password)
	return true
}
func UserRegister(user *bean.UserBean) bool{
session := mongodb.GetSession()
c := session.DB("test").C("t_user")
if c == nil {
fmt.Println("get t_user fail");
return false
}
if IsUserExist(user.Name) {
fmt.Println("Insert fail,user already exist:" + user.Name);
return false;
}
err := c.Insert(user)
if err != nil {
fmt.Println("Insert fail:" + user.Name);
return false;
}
return true;
} |
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
)
// Statement contains a statement with optional expected result columns and metadata.
type Statement struct {
	parser.Statement

	// AnonymizedStr is an anonymized representation of the statement,
	// produced by anonymizeStmt in makeStatement.
	AnonymizedStr string
	// QueryID identifies this query cluster-wide.
	QueryID ClusterWideID
	// ExpectedTypes are the result columns the client expects, when known.
	ExpectedTypes colinfo.ResultColumns

	// Prepared is non-nil during the PREPARE phase, as well as during EXECUTE of
	// a previously prepared statement. The Prepared statement can be modified
	// during either phase; the PREPARE phase sets its initial state, and the
	// EXECUTE phase can re-prepare it. This happens when the original plan has
	// been invalidated by schema changes, session data changes, permission
	// changes, or other changes to the context in which the original plan was
	// prepared.
	//
	// Given that the PreparedStatement can be modified during planning, it is
	// not safe for use on multiple threads.
	Prepared *PreparedStatement
}
// makeStatement builds a Statement from a freshly parsed statement, tagging
// it with the given query ID and an anonymized rendering of its AST.
func makeStatement(parserStmt parser.Statement, queryID ClusterWideID) Statement {
	var stmt Statement
	stmt.Statement = parserStmt
	stmt.AnonymizedStr = anonymizeStmt(parserStmt.AST)
	stmt.QueryID = queryID
	return stmt
}
// makeStatementFromPrepared builds a Statement backed by an already-prepared
// statement, carrying over its expected columns and anonymized string.
func makeStatementFromPrepared(prepared *PreparedStatement, queryID ClusterWideID) Statement {
	var stmt Statement
	stmt.Statement = prepared.Statement
	stmt.Prepared = prepared
	stmt.ExpectedTypes = prepared.Columns
	stmt.AnonymizedStr = prepared.AnonymizedStr
	stmt.QueryID = queryID
	return stmt
}
// String returns the SQL text of the statement.
func (s Statement) String() string {
	// We have the original SQL, but we still use String() because it obfuscates
	// passwords.
	return s.AST.String()
}
|
package config
import (
"errors"
"fmt"
"sort"
"strings"
"github.com/spf13/viper"
)
// UsersKey is the viper configuration key under which users are stored.
const UsersKey = "users"

// Users maps a user ID (rendered as a decimal string, see AddUser) to its
// User entry.
type Users map[string]User

// User holds the configured identity and credentials of a single user.
type User struct {
	APIToken string
	Email    string
	ID       uint64
	Name     string
	Username string
}
// GetAPITokenForUser returns the configured API token for the user with the
// given ID. It fails when no users are configured at all or when the ID is
// not present.
func (Config) GetAPITokenForUser(id uint64) (string, error) {
	var users Users
	// Consistency: read through the shared UsersKey constant instead of a
	// repeated string literal.
	err := viper.UnmarshalKey(UsersKey, &users)
	if err != nil {
		return "", err
	}
	if len(users) == 0 {
		return "", errors.New("no configured users")
	}
	if user, ok := users[fmt.Sprint(id)]; ok {
		return user.APIToken, nil
	}
	return "", errors.New("provided user id not found")
}
// GetUsers returns all configured users sorted by username in ascending
// order. The result is an empty (non-nil) slice when nothing is configured.
func (Config) GetUsers() ([]User, error) {
	var raw Users
	// Consistency: use the shared UsersKey constant.
	err := viper.UnmarshalKey(UsersKey, &raw)
	if err != nil {
		return nil, err
	}
	// Pre-size: the final length is known. Map iteration order is random,
	// so the slice is sorted afterwards for deterministic output.
	users := make([]User, 0, len(raw))
	for _, user := range raw {
		users = append(users, user)
	}
	sort.Slice(users,
		func(i, j int) bool {
			return strings.Compare(users[i].Username, users[j].Username) < 0
		},
	)
	return users, nil
}
// AddUser inserts (or overwrites) the given user in the users map and writes
// the result back into viper. The change is in-memory only; callers must
// persist the configuration themselves if desired.
func (Config) AddUser(user User) error {
	var raw Users
	err := viper.UnmarshalKey(UsersKey, &raw)
	if err != nil {
		return err
	}
	// Bug fix: UnmarshalKey leaves raw nil when the key is absent, and
	// assigning into a nil map panics — allocate before the first insert.
	if raw == nil {
		raw = make(Users)
	}
	raw[fmt.Sprint(user.ID)] = user
	viper.Set(UsersKey, raw)
	return nil
}
|
package utils
import (
md52 "crypto/md5"
"fmt"
"github.com/astaxie/beego"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mysql"
"time"
)
// Txt holds the user-editable fields of a report entry.
type Txt struct {
	Project      string
	User         string
	Achievements string
	Goal         string
}

// Text is the GORM model persisted for a Txt entry (gorm.Model adds
// ID/timestamp columns).
type Text struct {
	gorm.Model
	Txt
}

// Usr holds the user-editable fields of an account.
type Usr struct {
	Username string
	Password string
	Status   int
}

// User is the GORM model persisted for a Usr entry.
type User struct {
	gorm.Model
	Usr
}

// DB is the shared database handle, assigned by InitMysql.
var DB *gorm.DB

// err is package-level so InitMysql can assign DB and err in one statement.
// NOTE(review): a package-level error is easy to clobber from other code —
// consider making it local to InitMysql.
var err error
// InitMysql opens the MySQL connection described by the beego app config
// (mysqluser, mysqlpwd, host, port, dbname) into the package-level DB handle
// and auto-migrates the User and Text tables. It panics when the connection
// cannot be established.
func InitMysql() {
	usr := beego.AppConfig.String("mysqluser")
	pwd := beego.AppConfig.String("mysqlpwd")
	host := beego.AppConfig.String("host")
	// NOTE(review): the port parse error is ignored; a missing or invalid
	// "port" key silently becomes 0 — confirm the config always sets it.
	port, _ := beego.AppConfig.Int("port")
	dbname := beego.AppConfig.String("dbname")
	timeout := "10s"
	dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8&parseTime=True&loc=Local&timeout=%s", usr, pwd, host, port, dbname, timeout)
	fmt.Println(dsn)
	DB, err = gorm.Open("mysql", dsn)
	if err != nil {
		panic("Failed to link database, error = " + err.Error())
	}
	// panic above terminates, so no else branch is needed (idiomatic
	// left-aligned happy path).
	DB.AutoMigrate(&User{}, &Text{})
}
func FormatTimeStamp(times time.Time) string {
timestamp := times.Format("2006-01-02 15:04:05")
return timestamp
}
// MD5 returns the lowercase hexadecimal MD5 digest of the given password.
// NOTE: MD5 is unsuitable for new password storage; kept for compatibility
// with existing records.
func MD5(passwd string) string {
	digest := md52.Sum([]byte(passwd))
	return fmt.Sprintf("%x", digest)
}
|
package main
import (
"clipper"
"flag"
"os"
)
// Command-line flags: numeric operation code, working path, and the master
// node's address.
var opType = flag.Uint("op", 0, "-op")
var path = flag.String("path", "", "-path")
var masterAddr = flag.String("master", "", "-master")
// main parses the flags and starts a clipper client against the configured
// master, forwarding this binary's invocation name.
func main() {
	flag.Parse()
	clipper.NewClient().StartUp(clipper.OpType(*opType), *path, *masterAddr, os.Args[0])
}
|
package flag
import (
"reflect"
"github.com/achilleasa/usrv/config/store"
)
// Map provides a thread-safe flag wrapping a map[string]string value. Its
// value can be dynamically updated via a watched configuration key or manually
// set using its Set method.
//
// The flag also provides a mechanism for listening for changes.
type Map struct {
	// flagImpl supplies the shared storage, watcher and change-event
	// machinery common to all flag types.
	flagImpl
}
// NewMap creates a map flag. When cfgPath is non-empty, the flag registers a
// watcher on the supplied configuration store and keeps its value in sync
// automatically.
//
// Passing a nil store together with a non-empty cfgPath causes a panic.
//
// Dynamic updates can be disabled by invoking CancelDynamicUpdates.
func NewMap(store *store.Store, cfgPath string) *Map {
	m := &Map{}
	m.flagImpl.checkEquality = equalMaps
	m.init(store, m.mapCfgValue, cfgPath)
	return m
}
// Get returns the stored flag value, blocking until an initial value has
// been set.
func (f *Map) Get() map[string]string {
	val := f.get()
	return val.(map[string]string)
}
// Set the stored flag value. Calling Set will also trigger a change event to
// be emitted.
func (f *Map) Set(val map[string]string) {
	// NOTE(review): flagImpl.set is not visible in this chunk — presumably
	// the arguments are (version, value, fromStore) with -1 marking a manual
	// update; confirm against flagImpl.
	f.set(-1, val, false)
}
// equalMaps is the equality check installed on Map flags; it compares two
// candidate values with reflect.DeepEqual.
func equalMaps(v1, v2 interface{}) bool {
	return reflect.DeepEqual(v1, v2)
}
// mapCfgValue validates and converts a dynamic config value into the expected type for this flag.
// For Map the raw configuration map is already the right type, so it is
// passed through unchanged and no error is ever returned.
func (f *Map) mapCfgValue(cfg map[string]string) (interface{}, error) {
	return cfg, nil
}
|
package main
// runeSorted implements sort.Interface over a slice of runes, ordering them
// by code point; pass a value to sort.Sort to sort runes in place.
type runeSorted []rune

func (s runeSorted) Len() int           { return len(s) }
func (s runeSorted) Less(i, j int) bool { return s[i] < s[j] }
func (s runeSorted) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
|
package main
// Leetcode 47. (medium)
// permuteUnique returns every distinct permutation of nums (which may
// contain duplicate values) by delegating to the backtracking helper.
func permuteUnique(nums []int) [][]int {
	var results [][]int
	return recursivePermuteUnique(nums, []int{}, results)
}
// recursivePermuteUnique backtracks over index choices: arr holds the indices
// chosen so far and res accumulates completed permutations. At each depth,
// `used` records values already tried for this position, so duplicate values
// produce each distinct permutation exactly once.
func recursivePermuteUnique(nums, arr []int, res [][]int) [][]int {
	// Full-length selection: materialize the index list into values.
	if len(nums) == len(arr) {
		tmp := make([]int, len(arr))
		for i, j := range arr {
			tmp[i] = nums[j]
		}
		res = append(res, tmp)
		return res
	}
	used := []int{}
	for i := 0; i < len(nums); i++ {
		// Skip values already placed at this depth (dedup) and indices
		// already consumed earlier in arr.
		// NOTE(review): `contains` is defined elsewhere in this file.
		if contains(used, nums[i]) || contains(arr, i) {
			continue
		}
		arr = append(arr, i)
		res = recursivePermuteUnique(nums, arr, res)
		arr = arr[:len(arr)-1] // backtrack
		used = append(used, nums[i])
	}
	return res
}
|
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fhir
import (
"encoding/json"
"fmt"
"strings"
)
// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND
// QuantityComparator is documented here http://hl7.org/fhir/ValueSet/quantity-comparator
type QuantityComparator int
const (
QuantityComparatorLessThan QuantityComparator = iota
QuantityComparatorLessOrEquals
QuantityComparatorGreaterOrEquals
QuantityComparatorGreaterThan
)
func (code QuantityComparator) MarshalJSON() ([]byte, error) {
return json.Marshal(code.Code())
}
func (code *QuantityComparator) UnmarshalJSON(json []byte) error {
s := strings.Trim(string(json), "\"")
switch s {
case "<":
*code = QuantityComparatorLessThan
case "<=":
*code = QuantityComparatorLessOrEquals
case ">=":
*code = QuantityComparatorGreaterOrEquals
case ">":
*code = QuantityComparatorGreaterThan
default:
return fmt.Errorf("unknown QuantityComparator code `%s`", s)
}
return nil
}
func (code QuantityComparator) String() string {
return code.Code()
}
func (code QuantityComparator) Code() string {
switch code {
case QuantityComparatorLessThan:
return "<"
case QuantityComparatorLessOrEquals:
return "<="
case QuantityComparatorGreaterOrEquals:
return ">="
case QuantityComparatorGreaterThan:
return ">"
}
return "<unknown>"
}
func (code QuantityComparator) Display() string {
switch code {
case QuantityComparatorLessThan:
return "Less than"
case QuantityComparatorLessOrEquals:
return "Less or Equal to"
case QuantityComparatorGreaterOrEquals:
return "Greater or Equal to"
case QuantityComparatorGreaterThan:
return "Greater than"
}
return "<unknown>"
}
func (code QuantityComparator) Definition() string {
switch code {
case QuantityComparatorLessThan:
return "The actual value is less than the given value."
case QuantityComparatorLessOrEquals:
return "The actual value is less than or equal to the given value."
case QuantityComparatorGreaterOrEquals:
return "The actual value is greater than or equal to the given value."
case QuantityComparatorGreaterThan:
return "The actual value is greater than the given value."
}
return "<unknown>"
}
|
package main
import ishell "gopkg.in/abiosoft/ishell.v2"
// cmdFiles lets the user interactively pick a subset of the files in d via a
// checklist (every entry pre-selected) and returns that subset.
func cmdFiles(c *ishell.Context, d Data) Data {
	names := d.Names()
	initIndexes := make([]int, len(names))
	for i := range initIndexes {
		initIndexes[i] = i
	}
	choices := c.Checklist(names, "Files", initIndexes)
	return d.Subset(choices)
}
|
package model
// Record is the ORM model for one row of the "record" table; struct tags
// define the JSON wire names.
type Record struct {
	RecordID  int     `json:"record_id"`
	UserID    int     `json:"uid"`
	UserScore float64 `json:"user_score"`
	KindName  string  `json:"kind_name"`
	GameName  string  `json:"game_name"`
	EnterTime string  `json:"enter_time"`
	LeaveTime string  `json:"leave_time"`
}

// TableName tells the ORM which table backs Record.
func (t *Record) TableName() string {
	return "record"
}
|
package models
import(
"encoding/json"
)
// TierType1Enum enumerates the Azure storage tier values.
type TierType1Enum int

// Value collection for TierType1Enum. Note the 1 + iota: the zero value is
// not a valid member, so an unset field is distinguishable from the first
// tier.
const (
	TierType1_KAZURETIERHOT TierType1Enum = 1 + iota
	TierType1_KAZURETIERCOOL
	TierType1_KAZURETIERARCHIVE
)

// MarshalJSON encodes the enum as its string value, e.g. "kAzureTierHot".
func (r TierType1Enum) MarshalJSON() ([]byte, error) {
	return json.Marshal(TierType1EnumToValue(r))
}

// UnmarshalJSON decodes a JSON string into the enum. Unknown strings map to
// the default TierType1_KAZURETIERHOT. Bug fix: the json.Unmarshal error was
// previously ignored, silently coercing malformed input to the default; it
// is now returned to the caller.
func (r *TierType1Enum) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	*r = TierType1EnumFromValue(s)
	return nil
}

// TierType1EnumToValue converts TierType1Enum to its string representation;
// unknown values fall back to "kAzureTierHot".
func TierType1EnumToValue(tierType1Enum TierType1Enum) string {
	switch tierType1Enum {
	case TierType1_KAZURETIERHOT:
		return "kAzureTierHot"
	case TierType1_KAZURETIERCOOL:
		return "kAzureTierCool"
	case TierType1_KAZURETIERARCHIVE:
		return "kAzureTierArchive"
	default:
		return "kAzureTierHot"
	}
}

// TierType1EnumArrayToValue converts a TierType1Enum slice to the parallel
// slice of string representations.
func TierType1EnumArrayToValue(tierType1Enum []TierType1Enum) []string {
	convArray := make([]string, len(tierType1Enum))
	for i := 0; i < len(tierType1Enum); i++ {
		convArray[i] = TierType1EnumToValue(tierType1Enum[i])
	}
	return convArray
}

// TierType1EnumFromValue converts a string to its enum representation;
// unknown strings fall back to TierType1_KAZURETIERHOT.
func TierType1EnumFromValue(value string) TierType1Enum {
	switch value {
	case "kAzureTierHot":
		return TierType1_KAZURETIERHOT
	case "kAzureTierCool":
		return TierType1_KAZURETIERCOOL
	case "kAzureTierArchive":
		return TierType1_KAZURETIERARCHIVE
	default:
		return TierType1_KAZURETIERHOT
	}
}
|
package types
import (
"math"
"math/big"
)
// Note, Zero and Max are functions just to make read-only values.
// We cannot define constants for structures, and global variables
// are unacceptable because it will be possible to change them.

// Int128Zero is the zero Int128 value.
func Int128Zero() Int128 {
	return Int128From64(0)
}

// Int128Max is the largest possible Int128 value (2^127 - 1).
func Int128Max() Int128 {
	return Int128{
		Lo: math.MaxUint64,
		Hi: math.MaxInt64,
	}
}

// Int128 is a signed 128-bit number in two's-complement form: Hi is the
// signed upper half, Lo holds the low 64 bits as an unsigned half.
// (The original comment said "unsigned", which contradicts Hi's int64 type,
// the sign extension in Int128From64 and the Neg method below.)
// All methods are immutable, working just like a standard int64.
type Int128 struct {
	Lo uint64 // lower 64-bit half
	Hi int64  // upper 64-bit half (carries the sign)
}

// Note, there in no New(lo, hi) just not to confuse
// which half goes first: lower or upper.
// Use structure initialization Int128{Lo: ..., Hi: ...} instead.

// Int128From64 sign-extends a 64-bit value v to an Int128 value.
func Int128From64(v int64) Int128 {
	var hi int64
	if v < 0 {
		hi = -1 // sign extension: all-ones upper half for negative inputs
	}
	return Int128{Lo: uint64(v), Hi: hi}
}

// Int128FromBig converts *big.Int to a 128-bit Int128 value ignoring overflows.
// A nil input yields Zero; an input wider than 128 bits yields Max.
// (The original comment claimed negatives return Zero, but negative inputs
// are in fact converted via negation in Int128FromBigEx.)
func Int128FromBig(i *big.Int) Int128 {
	u, _ := Int128FromBigEx(i)
	return u
}

// Int128FromBigEx converts *big.Int to a 128-bit Int128 value (eXtended version).
// Provides an ok flag as a second return value: ok=false only when the input
// magnitude needs more than 128 bits. A nil input yields zero with ok=true.
// NOTE(review): the check is BitLen() > 128, so magnitudes between 2^127 and
// 2^128-1 are accepted and wrap around the signed range — confirm intended.
func Int128FromBigEx(i *big.Int) (Int128, bool) {
	switch {
	case i == nil:
		return Int128Zero(), true // assuming nil === 0
	case i.BitLen() > 128:
		return Int128Max(), false // value overflows 128-bit!
	}
	// Work on the absolute value, then negate the result for negative input.
	neg := false
	if i.Sign() == -1 {
		i = new(big.Int).Neg(i)
		neg = true
	}
	// Note, actually result of big.Int.Uint64 is undefined
	// if stored value is greater than 2^64
	// but we assume that it just gets lower 64 bits.
	t := new(big.Int)
	lo := i.Uint64()
	hi := int64(t.Rsh(i, 64).Uint64())
	val := Int128{
		Lo: lo,
		Hi: hi,
	}
	if neg {
		return val.Neg(), true
	}
	return val, true
}

// Big returns the 128-bit value as a *big.Int.
func (u Int128) Big() *big.Int {
	// Hi is shifted as a signed value so the sign propagates; Lo's 64 bits
	// are then OR-ed into the (all-zero) low half.
	i := new(big.Int).SetInt64(u.Hi)
	i = i.Lsh(i, 64)
	i = i.Or(i, new(big.Int).SetUint64(u.Lo))
	return i
}

// Equals returns true if two 128-bit values are equal.
// Int128 values can be compared directly with == operator
// but use of the Equals method is preferred for consistency.
func (u Int128) Equals(v Int128) bool {
	return (u.Lo == v.Lo) && (u.Hi == v.Hi)
}

// Neg returns the additive inverse of an Int128: two's-complement negation —
// negate both halves, then borrow from Hi when the negated low half is
// non-zero (i.e. the original Lo was non-zero).
func (u Int128) Neg() (z Int128) {
	z.Hi = -u.Hi
	z.Lo = -u.Lo
	if z.Lo > 0 {
		z.Hi--
	}
	return z
}
|
// ===================================== //
// author: gavingqf //
// == Please don't change me by hand == //
//====================================== //
/*you have defined the following interface:
type IConfig interface {
// load interface
Load(path string) bool
// clear interface
Clear()
}
*/
package base
import (
"shared/utility/glog"
)
// CfgScorePassGroup is one row of the score-pass-group config table.
type CfgScorePassGroup struct {
	Id             int32
	PhaseId        int32
	StartDayOffset int32
	TaskGroupId    int32
}

// CfgScorePassGroupConfig holds all CfgScorePassGroup rows indexed by Id and
// implements the Load/Clear IConfig interface described in the file header.
type CfgScorePassGroupConfig struct {
	data map[int32]*CfgScorePassGroup
}
// NewCfgScorePassGroupConfig returns an empty config ready for Load.
func NewCfgScorePassGroupConfig() *CfgScorePassGroupConfig {
	return &CfgScorePassGroupConfig{
		data: make(map[int32]*CfgScorePassGroup),
	}
}
// Load parses the config file at filePath into c.data, one CfgScorePassGroup
// per data row. It returns false (after logging) on any parse failure or on
// a duplicated Id. This file is generated — fix issues in the generator.
func (c *CfgScorePassGroupConfig) Load(filePath string) bool {
	parse := NewParser()
	if err := parse.Load(filePath, true); err != nil {
		glog.Info("Load", filePath, "err: ", err)
		return false
	}
	// iterator all lines' content
	// NOTE(review): data rows start at index 2 — presumably rows 0-1 are
	// header/type rows; confirm against the parser format.
	for i := 2; i < parse.GetAllCount(); i++ {
		data := new(CfgScorePassGroup)
		/* parse Id field */
		vId, _ := parse.GetFieldByName(uint32(i), "id")
		var IdRet bool
		data.Id, IdRet = String2Int32(vId)
		if !IdRet {
			glog.Error("Parse CfgScorePassGroup.Id field error,value:", vId)
			return false
		}
		/* parse PhaseId field */
		vPhaseId, _ := parse.GetFieldByName(uint32(i), "phaseId")
		var PhaseIdRet bool
		data.PhaseId, PhaseIdRet = String2Int32(vPhaseId)
		if !PhaseIdRet {
			glog.Error("Parse CfgScorePassGroup.PhaseId field error,value:", vPhaseId)
			return false
		}
		/* parse StartDayOffset field */
		vStartDayOffset, _ := parse.GetFieldByName(uint32(i), "startDayOffset")
		var StartDayOffsetRet bool
		data.StartDayOffset, StartDayOffsetRet = String2Int32(vStartDayOffset)
		if !StartDayOffsetRet {
			glog.Error("Parse CfgScorePassGroup.StartDayOffset field error,value:", vStartDayOffset)
			return false
		}
		/* parse TaskGroupId field */
		vTaskGroupId, _ := parse.GetFieldByName(uint32(i), "taskGroupId")
		var TaskGroupIdRet bool
		data.TaskGroupId, TaskGroupIdRet = String2Int32(vTaskGroupId)
		if !TaskGroupIdRet {
			glog.Error("Parse CfgScorePassGroup.TaskGroupId field error,value:", vTaskGroupId)
			return false
		}
		// Reject duplicate primary keys rather than silently overwriting.
		if _, ok := c.data[data.Id]; ok {
			glog.Errorf("Find %d repeated", data.Id)
			return false
		}
		c.data[data.Id] = data
	}
	return true
}
// Clear implements the IConfig interface.
// NOTE(review): it currently leaves c.data intact — if Clear is expected to
// drop loaded rows, the map should be reset here (fix in the generator).
func (c *CfgScorePassGroupConfig) Clear() {
}
// Find returns the row with the given id and whether it exists.
func (c *CfgScorePassGroupConfig) Find(id int32) (*CfgScorePassGroup, bool) {
	v, ok := c.data[id]
	return v, ok
}

// GetAllData returns the underlying map; callers must not mutate it.
func (c *CfgScorePassGroupConfig) GetAllData() map[int32]*CfgScorePassGroup {
	return c.data
}

// Traverse logs every loaded row (debug helper).
func (c *CfgScorePassGroupConfig) Traverse() {
	for _, v := range c.data {
		glog.Info(v.Id, ",", v.PhaseId, ",", v.StartDayOffset, ",", v.TaskGroupId)
	}
}
|
package data
import (
"github.com/sirupsen/logrus"
"poetryAdmin/worker/core/define"
"reflect"
)
// ChanMaxLen bounds the buffered Data and ParseData channels.
const ChanMaxLen = 50000

// GraspResult fans crawl results, parse jobs and errors through channels to
// a single consumer loop (PrintMsg). (Original comment: 抓取结果处理 —
// "crawl result handling".)
type GraspResult struct {
	err       chan error              // error reports (send currently disabled in PushError)
	close     chan bool               // shutdown signal for PrintMsg
	Data      chan *define.HomeFormat // crawled pages awaiting storage
	ParseData chan *define.ParseData  // parse jobs awaiting execution
	storage   *Storage
}

// G_GraspResult is the shared singleton, assigned by NewGraspResult.
var G_GraspResult *GraspResult
// NewGraspResult builds a GraspResult with buffered data channels and
// publishes it through the G_GraspResult package variable.
func NewGraspResult() *GraspResult {
	g := &GraspResult{
		err:       make(chan error),
		close:     make(chan bool),
		Data:      make(chan *define.HomeFormat, ChanMaxLen),
		ParseData: make(chan *define.ParseData, ChanMaxLen),
		storage:   NewStorage(),
	}
	G_GraspResult = g
	return g
}
// PushError logs an error report; the actual channel send is currently
// disabled (see the commented-out code below), so this is log-only for now.
// (Original comment: 发送错误消息 — "send error message".)
// NOTE(review): params is accepted but unused.
func (g *GraspResult) PushError(err error, params ...interface{}) {
	logrus.Infoln("PushError err:", err)
	// reflect.ValueOf(err).IsValid() is false only for an untyped nil
	// interface; a typed-nil error still counts as valid here, so this is
	// intentionally NOT the same as `err == nil`.
	valid := reflect.ValueOf(err).IsValid()
	if !valid {
		logrus.Infoln("valid=false, err:=", err)
		return
	}
	//if err != nil {
	//	g.err <- err
	//}
}
// PushErrorAndClose reports the error and then signals the consumer loop to
// shut down. (Original comment: 发送错误消息并关闭协和 — "send error and close".)
func (g *GraspResult) PushErrorAndClose(err error) {
	g.PushError(err)
	g.PushCloseMark(true)
}
// PushCloseMark sends a shutdown mark to the consumer loop; the channel is
// unbuffered, so this blocks until PrintMsg receives it.
// (Original comment: 发送结束标志信息 — "send end-mark message".)
func (g *GraspResult) PushCloseMark(mark bool) {
	g.close <- mark
}
// SendData queues a crawled page for storage; blocks when the buffer is full.
// (Original comment: 发送数据 — "send data".)
func (g *GraspResult) SendData(data *define.HomeFormat) {
	g.Data <- data
}

// SendParseData queues a parse job; blocks when the buffer is full.
func (g *GraspResult) SendParseData(parseData *define.ParseData) {
	g.ParseData <- parseData
}
// PrintMsg is the single consumer loop: it drains errors, stored data, parse
// jobs and the close signal until shutdown. A close request received while
// Data still has entries sets autoClose, so the loop exits only after the
// ParseData backlog drains. (Original comment: 统一处理错误消息 — "handle
// error messages centrally".)
func (g *GraspResult) PrintMsg() {
	var (
		err       error
		close     bool
		data      *define.HomeFormat
		parseData *define.ParseData
		autoClose bool
	)
	for {
		// Deferred shutdown: leave once the parse backlog is empty.
		if autoClose == true && len(g.ParseData) == 0 {
			goto PRINTERR
		}
		select {
		case err = <-g.err:
			logrus.Debug("Execution error:", err)
			g.WriteErrLog(err)
		case data = <-g.Data:
			g.storage.LoadData(data)
		case parseData = <-g.ParseData:
			parseData.ParseFunc(parseData.Data, parseData.Params)
			logrus.Infoln("g.ParseData len :", len(g.ParseData))
		case close = <-g.close:
			logrus.Infoln("close:", close)
			// Data still pending: postpone shutdown until drained.
			if len(g.Data) > 0 {
				autoClose = true
				logrus.Info("data 还有数据,暂时不能退出")
				continue
			}
			if close == true {
				goto PRINTERR
			}
		}
	}
PRINTERR:
	logrus.Debug("PrintMsg 结果处理结束......")
	return
}
// WriteErrLog records a non-nil error. File-based logging is currently
// disabled (commented out below); only the logrus line remains active.
func (g *GraspResult) WriteErrLog(err error) {
	if err == nil {
		return
	}
	logrus.Infoln("WriteErrLog err:", err)
	//logFile := fmt.Sprintf("error-log-%d-%d-%d.log", time.Now().Year(), time.Now().Month(), time.Now().Hour())
	//file, err := os.OpenFile(logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, os.ModePerm)
	//defer file.Close()
	///*
	//	file.Write()
	//	file.WriteString()
	//	io.WriteString()
	//	ioutil.WriteFile()
	//*/
	//
	//writeObj := bufio.NewWriterSize(file, 4096)
	//buf := []byte(err)
	//if _, err := writeObj.Write(buf); err == nil {
	//	writeObj.Flush()
	//}
}
|
package main
import (
"bytes"
"fmt"
"io"
)
// debug toggles collection of output into a buffer (see main).
const debug = false

// A is an empty struct used to demonstrate typed-nil pointer comparisons.
type A struct{}
// main demonstrates the "typed nil inside a non-nil interface" pitfall:
// with debug false, buf stays a nil *bytes.Buffer, yet passing it to f wraps
// it in a NON-nil io.Writer, so f's nil guard does not protect it and the
// Write call presumably panics on the nil receiver.
func main() {
	var buf *bytes.Buffer
	var a *A
	var w io.Writer
	if debug {
		buf = new(bytes.Buffer) // enable collection of output
	}
	// All three print true: plain pointer / empty-interface comparisons.
	fmt.Println(a == nil)
	fmt.Println(buf == nil)
	fmt.Println(w == nil)
	f(buf) // note: subtly wrong
	if debug {
		fmt.Println(buf.String())
	}
}
// f writes a marker when out is non-nil. Because a typed-nil *bytes.Buffer
// arrives as a non-nil io.Writer, the guard compares the interface, not the
// pointer inside it — so a nil buffer still reaches Write.
func f(out io.Writer) {
	// <io.Writer(*bytes.Buffer)>)
	// out: { data:nil <*bytes.Buffer> }
	fmt.Println(out == nil)
	if out != nil {
		out.Write([]byte("done!\n"))
	}
}
|
package maximum_depth_of_binary_tree
import (
"LeetCodeGo/base"
"LeetCodeGo/utils"
)
/*
给定一个二叉树,找出其最大深度。
二叉树的深度为根节点到最远叶子节点的最长路径上的节点数。
说明: 叶子节点是指没有子节点的节点。
示例:
给定二叉树 [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/maximum-depth-of-binary-tree
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
*/
// maxDepth returns the number of nodes on the longest root-to-leaf path of
// the binary tree; an empty tree has depth 0.
func maxDepth(root *base.TreeNode) int {
	if root == nil {
		return 0
	}
	// A leaf yields Max(0, 0) + 1 == 1, so no separate leaf case is needed.
	return utils.Max(maxDepth(root.Left), maxDepth(root.Right)) + 1
}
|
package function
import (
"errors"
"log"
"os"
)
// Storage abstracts the persistence layer.
// NOTE(review): argument meaning inferred from the call sites below —
// presumably (subject, predicate, object); confirm with the implementation.
type Storage interface {
	AddEntityToOtherEntity(string, string, string) error
}

// Executor performs entity-linking operations against a Storage backend.
type Executor struct {
	Store Storage
}

// ExecutorLogger is the package logger for Executor operations.
var ExecutorLogger = log.New(os.Stdout, "Executor: ", log.Lshortfile)

var (
	// ErrCategoryCanNotBeAddedToProduct means that the category can't be added to product
	ErrCategoryCanNotBeAddedToProduct = errors.New("category can not be added to product")
	// ErrProductCanNotBeAddedToCategory means that the product can't be added to category
	ErrProductCanNotBeAddedToCategory = errors.New("product can not be added to category")
)
// AddCategoryToProduct links a product and a category in both directions:
// category --has_product--> product, then product --belongs_to_category-->
// category. The first failure is logged and mapped to a sentinel error.
func (executor *Executor) AddCategoryToProduct(productID, categoryID string) error {
	if err := executor.Store.AddEntityToOtherEntity(categoryID, "has_product", productID); err != nil {
		ExecutorLogger.Printf("Product with ID: %v can not be added to category with ID: %v", productID, categoryID)
		return ErrProductCanNotBeAddedToCategory
	}
	if err := executor.Store.AddEntityToOtherEntity(productID, "belongs_to_category", categoryID); err != nil {
		ExecutorLogger.Printf("Category with ID: %v can not be added to product with ID: %v", categoryID, productID)
		return ErrCategoryCanNotBeAddedToProduct
	}
	return nil
}
|
// Copyright 2013 - by Jim Lawless
// License: MIT / X11
// See: http://www.mailsend-online.com/license2013.php
//
// Bear with me ... I'm a Go noob.
package main
import (
"flag"
"fmt"
)
// connections is a repeatable string flag: each occurrence of the flag on
// the command line appends one value. It implements the flag.Value
// interface via String and Set. (The surrounding comments predate a rename
// from an int-slice example.)
type connections []string

// String renders the collected values for flag's default/usage printing.
// Bug fix: the original used %d, which is not a valid verb for a string
// slice and rendered as "%!d(string=...)"; %v prints the slice correctly.
func (i *connections) String() string {
	return fmt.Sprintf("%v", *i)
}

// Set appends one flag occurrence's value; it never returns an error.
func (i *connections) Set(value string) error {
	*i = append(*i, value)
	return nil
}
var myints connections
// main registers the repeatable -i flag; with no flags given it prints the
// defaults, otherwise it echoes each collected value.
func main() {
	flag.Var(&myints, "i", "List of integers")
	flag.Parse()
	if flag.NFlag() == 0 {
		flag.PrintDefaults()
		return
	}
	fmt.Println("Here are the values in 'myints'")
	for _, v := range myints {
		fmt.Printf("%s\n", v)
	}
}
|
package main
import "fmt"
// plusOne increments the decimal number represented by digits (most
// significant digit first) and returns the resulting digits. The input may
// be modified in place; a carry out of the top digit grows the result by one
// element. Robustness fix: an empty input is treated as 0 and yields [1]
// (the original indexed digits[0] unconditionally and panicked).
func plusOne(digits []int) []int {
	if len(digits) == 0 {
		return []int{1}
	}
	size := len(digits)
	for i := size - 1; i >= 0; i-- {
		digits[i] += 1
		if digits[i] > 9 && i > 0 {
			// Carry: reduce this digit and let the next iteration add 1.
			digits[i] %= 10
		} else {
			// No carry (or we reached the top digit): stop propagating.
			break
		}
	}
	// Carry out of the most significant digit: prepend a leading 1.
	if digits[0] == 10 {
		digits[0] = 0
		result := make([]int, 0, size+1)
		result = append(result, 1)
		result = append(result, digits...)
		digits = result
	}
	return digits
}
// main demonstrates plusOne. Fix: the builtin print writes to stderr and is
// implementation-defined; fmt.Printf writes the result to stdout.
func main() {
	fmt.Printf("%v", plusOne([]int{9, 9, 9}))
}
|
// Alex Ray 2011 <ajray@ncsu.edu>
// Reference:
// http://en.wikipedia.org/wiki/Bencode
package bencode
import (
	"fmt"
	"sort"
)
// EncInt bencodes an integer as "i<n>e", e.g. 42 -> "i42e".
func EncInt(i int) []byte {
	encoded := fmt.Sprintf("i%de", i)
	return []byte(encoded)
}
// EncBytes bencodes a byte string as "<len>:<bytes>", e.g. "spam" -> "4:spam".
func EncBytes(a []byte) []byte {
	out := []byte(fmt.Sprintf("%d:", len(a)))
	return append(out, a...)
}
// EncList bencodes a list of already-bencoded values: "l<items>e".
func EncList(list [][]byte) []byte {
	out := []byte{'l'}
	for _, item := range list {
		out = append(out, item...)
	}
	return append(out, 'e')
}
// EncDict bencodes a dictionary given as a flat list of already-bencoded
// key/value entries: "d<entries>e". Callers are responsible for key order.
func EncDict(dict [][]byte) []byte {
	out := []byte{'d'}
	for _, entry := range dict {
		out = append(out, entry...)
	}
	return append(out, 'e')
}
// EncDictMap bencodes a dictionary given as a map of already-bencoded keys
// to already-bencoded values. Bug fix: the bencode specification requires
// dictionary keys in sorted order, but map iteration order is random, so the
// original produced non-deterministic, spec-violating output. Keys are now
// emitted in sorted order.
// NOTE(review): keys are assumed to arrive already bencoded ("3:foo");
// sorting is applied to that encoded form.
func EncDictMap(dict map[string][]byte) []byte {
	keys := make([]string, 0, len(dict))
	for key := range dict {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	b := []byte{'d'}
	for _, key := range keys {
		b = append(b, []byte(key)...)
		b = append(b, dict[key]...)
	}
	return append(b, 'e')
}
|
package disc
import (
"github.com/diamondburned/arikawa/v2/discord"
"github.com/diamondburned/arikawa/v2/gateway"
)
// Help answers any message-create event with the bot's default help embed,
// including a footer pointing at the project repository.
func (b *Bot) Help(_ *gateway.MessageCreateEvent) (*discord.Embed, error) {
	embed := discord.Embed{
		Description: b.Ctx.Help(),
		Footer: &discord.EmbedFooter{
			Text: "https://github.com/Karitham/WaifuBot",
			Icon: "https://upload.wikimedia.org/wikipedia/commons/thumb/9/91/Octicons-mark-github.svg/1200px-Octicons-mark-github.svg.png",
		},
	}
	return &embed, nil
}
|
package netsync
import (
"github.com/constant-money/constant-chain/common"
"github.com/constant-money/constant-chain/metadata"
lru "github.com/hashicorp/golang-lru"
"github.com/patrickmn/go-cache"
"sync"
"sync/atomic"
"time"
"github.com/constant-money/constant-chain/blockchain"
"github.com/constant-money/constant-chain/mempool"
"github.com/constant-money/constant-chain/peer"
"github.com/constant-money/constant-chain/wire"
libp2p "github.com/libp2p/go-libp2p-peer"
)
// Tuning constants for the sync manager.
const (
	txCache             = 10000                 // capacity of the seen-transaction LRU cache
	workers             = 5                     // number of tx-hash caching worker goroutines
	MsgLiveTime         = 3 * time.Second       // lifetime of an entry in the block dedup cache
	MsgsCleanupInterval = 10 * time.Second      // sweep interval for expired dedup entries
)

// NetSync dispatches blocks, transactions and consensus messages received
// from peers to the appropriate handlers.
type NetSync struct {
	started   int32 // set atomically; makes Start idempotent
	shutdown  int32 // set atomically; makes Stop idempotent
	waitgroup sync.WaitGroup
	cMessage  chan interface{} // inbound messages queued by the Queue* methods
	cQuit     chan struct{}    // closed by Stop to terminate messageHandler
	config    *NetSyncConfig
	Cache     *NetSyncCache
	ShardIDConfig *ShardIDConfig
}

// ShardIDConfig tracks which shards this node relays for and its current
// committee role.
type ShardIDConfig struct {
	RelayShard        []byte // shard IDs this node always relays transactions for
	// RoleInCommittees is the shard this node is currently in committee for;
	// values <= -1 are treated as "no role" (see HandleTxWithRole).
	RoleInCommittees  int
	CRoleInCommittees chan int // feeds role updates to cacheLoop
	roleInCommitteesMtx sync.RWMutex // guards RoleInCommittees
}

// NetSyncConfig carries the collaborators NetSync needs; the Server and
// Consensus interfaces are satisfied by callbacks assigned from the Server
// struct.
type NetSyncConfig struct {
	BlockChain *blockchain.BlockChain
	ChainParam *blockchain.Params
	TxMemPool *mempool.TxPool
	ShardToBeaconPool blockchain.ShardToBeaconPool
	CrossShardPool map[byte]blockchain.CrossShardPool
	Server interface {
		// list functions callback which are assigned from Server struct
		PushMessageToPeer(wire.Message, libp2p.ID) error
		PushMessageToAll(wire.Message) error
	}
	Consensus interface {
		OnBFTMsg(wire.Message)
	}
}

// NetSyncCache deduplicates recently seen blocks and transactions.
type NetSyncCache struct {
	blockCache *cache.Cache // block hashes, expiring after MsgLiveTime
	txCache    *lru.Cache   // transaction hashes, LRU-bounded to txCache entries
	CTxCache   chan common.Hash // hashes queued for the caching workers
}
// New initializes the NetSync from cfg and returns a pointer to the
// configured instance. Note the value receiver: the method mutates its own
// copy of netSync and returns that copy's address.
func (netSync NetSync) New(cfg *NetSyncConfig, cTxCache chan common.Hash, cfgShardID *ShardIDConfig) *NetSync {
	netSync.config = cfg
	netSync.ShardIDConfig = cfgShardID
	netSync.cQuit = make(chan struct{})
	netSync.cMessage = make(chan interface{})
	// Block hashes live for MsgLiveTime; expired entries are swept every
	// MsgsCleanupInterval.
	blockCache := cache.New(MsgLiveTime, MsgsCleanupInterval)
	// The local txCache shadows the package-level size constant of the same
	// name. lru.New only fails for non-positive sizes, so the error can be
	// ignored here.
	txCache, _ := lru.New(txCache)
	netSync.Cache = &NetSyncCache{
		txCache: txCache,
		blockCache: blockCache,
	}
	netSync.Cache.CTxCache = cTxCache
	return &netSync
}
// Start launches the message handler and the cache maintenance loop.
// It is idempotent: only the first call has any effect.
func (netSync *NetSync) Start() {
	// Already started?
	if atomic.AddInt32(&netSync.started, 1) != 1 {
		return
	}
	Logger.log.Info("Starting sync manager")
	// The waitgroup tracks messageHandler only; cacheLoop never returns and
	// is not waited on — NOTE(review): confirm this is intentional.
	netSync.waitgroup.Add(1)
	go netSync.messageHandler()
	go netSync.cacheLoop()
}
// Stop gracefully shuts down the sync manager by stopping all asynchronous
// handlers and waiting for them to finish. Only the first call takes effect.
func (netSync *NetSync) Stop() {
	if atomic.AddInt32(&netSync.shutdown, 1) != 1 {
		Logger.log.Warn("Sync manager is already in the process of shutting down")
		// Bug fix: return here. Previously execution fell through and closed
		// cQuit a second time, which panics ("close of closed channel").
		return
	}
	Logger.log.Warn("Sync manager shutting down")
	close(netSync.cQuit)
}
// messageHandler is the main handler for the sync manager. It must be run as a
// goroutine. It processes block and inv messages in a separate goroutine
// from the peer handlers so the block (MsgBlock) messages are handled by a
// single thread without needing to lock memory data structures. This is
// important because the sync manager controls which blocks are needed and how
// the fetching should proceed.
func (netSync *NetSync) messageHandler() {
out:
	for {
		select {
		case msgChan := <-netSync.cMessage:
			{
				// Each message is dispatched on its own goroutine so a slow
				// handler cannot stall the queue.
				go func(msgC interface{}) {
					switch msg := msgC.(type) {
					case *wire.MessageTx, *wire.MessageTxToken, *wire.MessageTxPrivacyToken:
						{
							// Second switch narrows the combined tx case to
							// the concrete transaction type.
							switch msg := msgC.(type) {
							case *wire.MessageTx:
								{
									netSync.HandleMessageTx(msg)
								}
							case *wire.MessageTxToken:
								{
									netSync.HandleMessageTxToken(msg)
								}
							case *wire.MessageTxPrivacyToken:
								{
									netSync.HandleMessageTxPrivacyToken(msg)
								}
							}
						}
					// All BFT consensus message kinds share one handler.
					case *wire.MessageBFTPropose:
						{
							netSync.HandleMessageBFTMsg(msg)
						}
					case *wire.MessageBFTPrepare:
						{
							netSync.HandleMessageBFTMsg(msg)
						}
					case *wire.MessageBFTCommit:
						{
							netSync.HandleMessageBFTMsg(msg)
						}
					case *wire.MessageBFTReady:
						{
							netSync.HandleMessageBFTMsg(msg)
						}
					case *wire.MessageBFTReq:
						{
							netSync.HandleMessageBFTMsg(msg)
						}
					case *wire.MessageBlockBeacon:
						{
							netSync.HandleMessageBeaconBlock(msg)
						}
					case *wire.MessageBlockShard:
						{
							netSync.HandleMessageShardBlock(msg)
						}
					case *wire.MessageGetCrossShard:
						{
							netSync.HandleMessageGetCrossShard(msg)
						}
					case *wire.MessageCrossShard:
						{
							netSync.HandleMessageCrossShard(msg)
						}
					case *wire.MessageGetShardToBeacon:
						{
							netSync.HandleMessageGetShardToBeacon(msg)
						}
					case *wire.MessageShardToBeacon:
						{
							netSync.HandleMessageShardToBeacon(msg)
						}
					case *wire.MessageGetBlockBeacon:
						{
							netSync.HandleMessageGetBlockBeacon(msg)
						}
					case *wire.MessageGetBlockShard:
						{
							netSync.HandleMessageGetBlockShard(msg)
						}
					case *wire.MessagePeerState:
						{
							netSync.HandleMessagePeerState(msg)
						}
					default:
						Logger.log.Infof("Invalid message type in block "+"handler: %T", msg)
					}
				}(msgChan)
			}
		case msgChan := <-netSync.cQuit:
			{
				// cQuit is closed by Stop; the received zero value is logged
				// and the loop exits.
				Logger.log.Warn(msgChan)
				break out
			}
		}
	}
	netSync.waitgroup.Done()
	Logger.log.Info("Block handler done")
}
// QueueTx adds the passed transaction message and peer to the block handling
// queue. Responds to the done channel argument after the tx message is
// processed.
/*func (netSync *NetSync) QueueRegisteration(peer *peer.Peer, msg *wire.MessageRegistration, done chan struct{}) {
// Don't accept more transactions if we're shutting down.
if atomic.LoadInt32(&netSync.shutdown) != 0 {
done <- struct{}{}
return
}
netSync.cMessage <- msg
}*/
// QueueTx enqueues a normal-transaction message for the handler loop. During
// shutdown the message is dropped and done is signalled immediately.
func (netSync *NetSync) QueueTx(peer *peer.Peer, msg *wire.MessageTx, done chan struct{}) {
	if atomic.LoadInt32(&netSync.shutdown) == 0 {
		netSync.cMessage <- msg
		return
	}
	done <- struct{}{}
}

// QueueTxToken enqueues a token-transaction message for the handler loop.
// During shutdown the message is dropped and done is signalled immediately.
func (netSync *NetSync) QueueTxToken(peer *peer.Peer, msg *wire.MessageTxToken, done chan struct{}) {
	if atomic.LoadInt32(&netSync.shutdown) == 0 {
		netSync.cMessage <- msg
		return
	}
	done <- struct{}{}
}

// QueueTxPrivacyToken enqueues a privacy-token-transaction message for the
// handler loop. During shutdown the message is dropped and done is signalled
// immediately.
func (netSync *NetSync) QueueTxPrivacyToken(peer *peer.Peer, msg *wire.MessageTxPrivacyToken, done chan struct{}) {
	if atomic.LoadInt32(&netSync.shutdown) == 0 {
		netSync.cMessage <- msg
		return
	}
	done <- struct{}{}
}
// HandleMessageTx handles normal-transaction messages from all peers: the tx
// is filtered by shard role, deduplicated against the seen-cache, offered to
// the mempool and — when accepted — rebroadcast and marked as forwarded.
func (netSync *NetSync) HandleMessageTx(msg *wire.MessageTx) {
	Logger.log.Info("Handling new message tx")
	// Drop transactions for shards this node neither relays nor validates.
	if !netSync.HandleTxWithRole(msg.Transaction) {
		return
	}
	if isAdded := netSync.HandleCacheTx(msg.Transaction); !isAdded {
		hash, _, err := netSync.config.TxMemPool.MaybeAcceptTransaction(msg.Transaction)
		if err != nil {
			Logger.log.Error(err)
		} else {
			Logger.log.Infof("there is hash of transaction %s", hash.String())
			// Broadcast to network
			err := netSync.config.Server.PushMessageToAll(msg)
			if err != nil {
				Logger.log.Error(err)
			} else {
				netSync.config.TxMemPool.MarkFowardedTransaction(*msg.Transaction.Hash())
			}
		}
	}
}

// HandleMessageTxToken handles token-transaction messages from all peers,
// with the same filter/dedup/mempool/broadcast flow as HandleMessageTx.
func (netSync *NetSync) HandleMessageTxToken(msg *wire.MessageTxToken) {
	Logger.log.Info("Handling new message tx")
	if !netSync.HandleTxWithRole(msg.Transaction) {
		return
	}
	if isAdded := netSync.HandleCacheTx(msg.Transaction); !isAdded {
		hash, _, err := netSync.config.TxMemPool.MaybeAcceptTransaction(msg.Transaction)
		if err != nil {
			Logger.log.Error(err)
		} else {
			Logger.log.Infof("there is hash of transaction %s", hash.String())
			// Broadcast to network
			err := netSync.config.Server.PushMessageToAll(msg)
			if err != nil {
				Logger.log.Error(err)
			} else {
				netSync.config.TxMemPool.MarkFowardedTransaction(*msg.Transaction.Hash())
			}
		}
	}
}

// HandleMessageTxPrivacyToken handles privacy-token-transaction messages from
// all peers, with the same filter/dedup/mempool/broadcast flow as
// HandleMessageTx.
func (netSync *NetSync) HandleMessageTxPrivacyToken(msg *wire.MessageTxPrivacyToken) {
	Logger.log.Info("Handling new message tx")
	if !netSync.HandleTxWithRole(msg.Transaction) {
		return
	}
	if isAdded := netSync.HandleCacheTx(msg.Transaction); !isAdded {
		hash, _, err := netSync.config.TxMemPool.MaybeAcceptTransaction(msg.Transaction)
		if err != nil {
			Logger.log.Error(err)
		} else {
			Logger.log.Infof("Node got hash of transaction %s", hash.String())
			// Broadcast to network
			err := netSync.config.Server.PushMessageToAll(msg)
			if err != nil {
				Logger.log.Error(err)
			} else {
				netSync.config.TxMemPool.MarkFowardedTransaction(*msg.Transaction.Hash())
			}
		}
	}
}
// QueueBlock enqueues a block message for the handler loop. During shutdown
// the message is dropped and done is signalled immediately.
func (netSync *NetSync) QueueBlock(_ *peer.Peer, msg wire.Message, done chan struct{}) {
	if atomic.LoadInt32(&netSync.shutdown) == 0 {
		netSync.cMessage <- msg
		return
	}
	done <- struct{}{}
}

// QueueGetBlockShard enqueues a get-shard-block request for the handler loop.
// During shutdown the message is dropped and done is signalled immediately.
func (netSync *NetSync) QueueGetBlockShard(peer *peer.Peer, msg *wire.MessageGetBlockShard, done chan struct{}) {
	if atomic.LoadInt32(&netSync.shutdown) == 0 {
		netSync.cMessage <- msg
		return
	}
	done <- struct{}{}
}

// QueueGetBlockBeacon enqueues a get-beacon-block request for the handler
// loop. During shutdown the message is dropped and done is signalled
// immediately.
func (netSync *NetSync) QueueGetBlockBeacon(peer *peer.Peer, msg *wire.MessageGetBlockBeacon, done chan struct{}) {
	if atomic.LoadInt32(&netSync.shutdown) == 0 {
		netSync.cMessage <- msg
		return
	}
	done <- struct{}{}
}

// QueueMessage enqueues any other wire message for the handler loop. During
// shutdown the message is dropped and done is signalled immediately.
func (netSync *NetSync) QueueMessage(peer *peer.Peer, msg wire.Message, done chan struct{}) {
	if atomic.LoadInt32(&netSync.shutdown) == 0 {
		netSync.cMessage <- msg
		return
	}
	done <- struct{}{}
}
// HandleMessageBeaconBlock forwards a freshly seen beacon block to the chain;
// blocks already in the dedup cache are dropped.
func (netSync *NetSync) HandleMessageBeaconBlock(msg *wire.MessageBlockBeacon) {
	Logger.log.Info("Handling new message BlockBeacon")
	if netSync.HandleCacheBlock(*msg.Block.Hash()) {
		return
	}
	netSync.config.BlockChain.OnBlockBeaconReceived(&msg.Block)
}

// HandleMessageShardBlock forwards a freshly seen shard block to the chain;
// blocks already in the dedup cache are dropped.
func (netSync *NetSync) HandleMessageShardBlock(msg *wire.MessageBlockShard) {
	Logger.log.Info("Handling new message BlockShard")
	if netSync.HandleCacheBlock(*msg.Block.Hash()) {
		return
	}
	netSync.config.BlockChain.OnBlockShardReceived(&msg.Block)
}

// HandleMessageCrossShard forwards a freshly seen cross-shard block to the
// chain; blocks already in the dedup cache are dropped.
func (netSync *NetSync) HandleMessageCrossShard(msg *wire.MessageCrossShard) {
	Logger.log.Info("Handling new message CrossShard")
	if netSync.HandleCacheBlock(*msg.Block.Hash()) {
		return
	}
	netSync.config.BlockChain.OnCrossShardBlockReceived(msg.Block)
}

// HandleMessageShardToBeacon forwards a freshly seen shard-to-beacon block to
// the chain; blocks already in the dedup cache are dropped.
func (netSync *NetSync) HandleMessageShardToBeacon(msg *wire.MessageShardToBeacon) {
	Logger.log.Info("Handling new message ShardToBeacon")
	if netSync.HandleCacheBlock(*msg.Block.Hash()) {
		return
	}
	netSync.config.BlockChain.OnShardToBeaconBlockReceived(msg.Block)
}
// HandleMessageBFTMsg sanity-checks a BFT consensus message and hands it to
// the consensus engine; malformed messages are logged and dropped.
func (netSync *NetSync) HandleMessageBFTMsg(msg wire.Message) {
	Logger.log.Info("Handling new message BFTMsg")
	err := msg.VerifyMsgSanity()
	if err != nil {
		Logger.log.Error(err)
		return
	}
	netSync.config.Consensus.OnBFTMsg(msg)
}
// func (netSync *NetSync) HandleMessageInvalidBlock(msg *wire.MessageInvalidBlock) {
// Logger.log.Info("Handling new message invalidblock")
// netSync.config.Consensus.OnInvalidBlockReceived(msg.BlockHash, msg.shardID, msg.Reason)
// }
// HandleMessagePeerState decodes the sender's peer ID and forwards the
// reported chain state to the blockchain layer.
func (netSync *NetSync) HandleMessagePeerState(msg *wire.MessagePeerState) {
	Logger.log.Info("Handling new message peerstate", msg.SenderID)
	senderID, err := libp2p.IDB58Decode(msg.SenderID)
	if err != nil {
		Logger.log.Error(err)
		return
	}
	netSync.config.BlockChain.OnPeerStateReceived(&msg.Beacon, &msg.Shards, &msg.ShardToBeaconPool, &msg.CrossShardPool, senderID)
}
// HandleMessageGetBlockShard serves a peer's request for shard blocks, either
// by hash or by height.
func (netSync *NetSync) HandleMessageGetBlockShard(msg *wire.MessageGetBlockShard) {
	Logger.log.Info("Handling new message - " + wire.CmdGetBlockShard)
	senderID, err := libp2p.IDB58Decode(msg.SenderID)
	if err != nil {
		Logger.log.Error(err)
		return
	}
	if !msg.ByHash {
		netSync.GetBlkShardByHeightAndSend(senderID, msg.FromPool, 0, msg.BySpecificHeight, msg.ShardID, msg.BlkHeights, 0)
		return
	}
	netSync.GetBlkShardByHashAndSend(senderID, 0, msg.BlksHash, 0)
}

// HandleMessageGetBlockBeacon serves a peer's request for beacon blocks,
// either by hash or by height.
func (netSync *NetSync) HandleMessageGetBlockBeacon(msg *wire.MessageGetBlockBeacon) {
	Logger.log.Info("Handling new message - " + wire.CmdGetBlockBeacon)
	senderID, err := libp2p.IDB58Decode(msg.SenderID)
	if err != nil {
		Logger.log.Error(err)
		return
	}
	if !msg.ByHash {
		netSync.GetBlkBeaconByHeightAndSend(senderID, msg.FromPool, msg.BySpecificHeight, msg.BlkHeights)
		return
	}
	netSync.GetBlkBeaconByHashAndSend(senderID, msg.BlkHashes)
}

// HandleMessageGetShardToBeacon serves a peer's request for shard-to-beacon
// blocks, either by hash or by height.
func (netSync *NetSync) HandleMessageGetShardToBeacon(msg *wire.MessageGetShardToBeacon) {
	Logger.log.Info("Handling new message getshardtobeacon")
	senderID, err := libp2p.IDB58Decode(msg.SenderID)
	if err != nil {
		Logger.log.Error(err)
		return
	}
	if !msg.ByHash {
		netSync.GetBlkShardByHeightAndSend(senderID, msg.FromPool, 2, msg.BySpecificHeight, msg.ShardID, msg.BlkHeights, 0)
		return
	}
	netSync.GetBlkShardByHashAndSend(senderID, 2, msg.BlkHashes, 0)
}

// HandleMessageGetCrossShard serves a peer's request for cross-shard blocks,
// either by hash or by height.
func (netSync *NetSync) HandleMessageGetCrossShard(msg *wire.MessageGetCrossShard) {
	Logger.log.Info("Handling new message getcrossshard")
	senderID, err := libp2p.IDB58Decode(msg.SenderID)
	if err != nil {
		Logger.log.Error(err)
		return
	}
	if !msg.ByHash {
		netSync.GetBlkShardByHeightAndSend(senderID, msg.FromPool, 1, msg.BySpecificHeight, msg.FromShardID, msg.BlkHeights, msg.ToShardID)
		return
	}
	netSync.GetBlkShardByHashAndSend(senderID, 1, msg.BlkHashes, msg.ToShardID)
}
// HandleCacheBlock reports whether blockHash was seen recently; an unseen
// hash is recorded with a MsgLiveTime expiry and false is returned.
func (netSync *NetSync) HandleCacheBlock(blockHash common.Hash) bool {
	key := blockHash.String()
	if _, seen := netSync.Cache.blockCache.Get(key); seen {
		return true
	}
	netSync.Cache.blockCache.Add(key, 1, MsgLiveTime)
	return false
}

// HandleCacheTx reports whether the transaction was already seen; an unseen
// tx hash is recorded in the LRU cache and false is returned.
func (netSync *NetSync) HandleCacheTx(transaction metadata.Transaction) bool {
	txHash := *transaction.Hash()
	if _, seen := netSync.Cache.txCache.Get(txHash); seen {
		return true
	}
	netSync.Cache.txCache.Add(txHash, true)
	return false
}

// HandleCacheTxHash records txHash as seen without checking first.
func (netSync *NetSync) HandleCacheTxHash(txHash common.Hash) {
	netSync.Cache.txCache.Add(txHash, true)
}
// HandleTxWithRole reports whether this node should process tx: true when the
// sender's shard is in RelayShard, or when the node is currently in that
// shard's committee.
func (netSync *NetSync) HandleTxWithRole(tx metadata.Transaction) bool {
	senderShardID := common.GetShardIDFromLastByte(tx.GetSenderAddrLastByte())
	for _, shardID := range netSync.ShardIDConfig.RelayShard {
		if senderShardID == shardID {
			return true
		}
	}
	// Idiom fix: defer the unlock instead of duplicating RUnlock in both
	// branches — equivalent, panic-safe, and removes the else-after-return.
	netSync.ShardIDConfig.roleInCommitteesMtx.RLock()
	defer netSync.ShardIDConfig.roleInCommitteesMtx.RUnlock()
	return netSync.ShardIDConfig.RoleInCommittees > -1 &&
		byte(netSync.ShardIDConfig.RoleInCommittees) == senderShardID
}
// cacheLoop starts the tx-hash caching workers and then applies committee
// role updates received on CRoleInCommittees. It never returns.
func (netSync *NetSync) cacheLoop() {
	for w := 0; w < workers; w++ {
		go netSync.HandleCacheTxHashWoker(netSync.Cache.CTxCache)
	}
	for {
		select {
		//case txHash := <-netSync.Cache.CTxCache: {
		//	go netSync.HandleCacheTxHash(txHash)
		//}
		case shardID := <-netSync.ShardIDConfig.CRoleInCommittees:
			{
				// Each update is applied under the write lock on its own
				// goroutine. NOTE(review): two rapid updates may be applied
				// out of order — confirm ordering does not matter here.
				go func() {
					netSync.ShardIDConfig.roleInCommitteesMtx.Lock()
					defer netSync.ShardIDConfig.roleInCommitteesMtx.Unlock()
					netSync.ShardIDConfig.RoleInCommittees = shardID
				}()
			}
		}
	}
}
// HandleCacheTxHashWoker drains cTxCache, recording each hash as seen.
// (The "Woker" typo is kept: the method is exported and may have callers.)
func (netSync *NetSync) HandleCacheTxHashWoker(cTxCache <-chan common.Hash) {
	for job := range cTxCache {
		go netSync.HandleCacheTxHash(job)
		// Minimal sleep between jobs, presumably to yield the scheduler —
		// TODO confirm it is needed at all.
		time.Sleep(time.Nanosecond)
	}
}
|
// Copyright 2021 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package streamingtest
import (
"bytes"
"context"
"net/url"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase"
"github.com/cockroachdb/cockroach/pkg/ccl/streamingccl"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
// FeedPredicate allows tests to search a ReplicationFeed.
type FeedPredicate func(message streamingccl.Event) bool

// KeyMatches makes a FeedPredicate that matches a given key.
func KeyMatches(key roachpb.Key) FeedPredicate {
	return func(msg streamingccl.Event) bool {
		return msg.Type() == streamingccl.KVEvent &&
			bytes.Equal(key, msg.GetKV().Key)
	}
}

// ResolvedAtLeast makes a FeedPredicate that matches once a checkpoint's
// resolved timestamp is at or above lo.
func ResolvedAtLeast(lo hlc.Timestamp) FeedPredicate {
	return func(msg streamingccl.Event) bool {
		return msg.Type() == streamingccl.CheckpointEvent &&
			lo.LessEq(*msg.GetResolved())
	}
}
// FeedSource is a source of events for a ReplicationFeed.
type FeedSource interface {
	// Next returns the next event, and a flag indicating if there are more events
	// to consume.
	Next() (streamingccl.Event, bool)
	// Close shuts down the source.
	Close()
}

// ReplicationFeed allows tests to search for events on a feed.
type ReplicationFeed struct {
	t   *testing.T
	f   FeedSource
	msg streamingccl.Event // last event matched by consumeUntil
}

// MakeReplicationFeed creates a ReplicationFeed based on a given FeedSource.
func MakeReplicationFeed(t *testing.T, f FeedSource) *ReplicationFeed {
	return &ReplicationFeed{
		t: t,
		f: f,
	}
}
// ObserveKey consumes the feed until requested key has been seen (or deadline expired).
// Note: we don't do any buffering here. Therefore, it is required that the key
// we want to observe will arrive at some point in the future.
func (rf *ReplicationFeed) ObserveKey(key roachpb.Key) roachpb.KeyValue {
	require.NoError(rf.t, rf.consumeUntil(KeyMatches(key)))
	// consumeUntil stored the matching event in rf.msg.
	return *rf.msg.GetKV()
}

// ObserveResolved consumes the feed until we received resolved timestamp that's at least
// as high as the specified low watermark. Returns observed resolved timestamp.
func (rf *ReplicationFeed) ObserveResolved(lo hlc.Timestamp) hlc.Timestamp {
	require.NoError(rf.t, rf.consumeUntil(ResolvedAtLeast(lo)))
	return *rf.msg.GetResolved()
}

// Close cleans up any resources.
func (rf *ReplicationFeed) Close() {
	rf.f.Close()
}
// consumeUntil reads events from the feed until pred matches one, storing the
// match in rf.msg. A watchdog goroutine closes the feed source after maxWait,
// which unblocks Next and surfaces a timeout; if the feed runs dry before a
// match, the test is failed via Fatal.
func (rf *ReplicationFeed) consumeUntil(pred FeedPredicate) error {
	const maxWait = 10 * time.Second
	doneCh := make(chan struct{})
	// mu guards err, written by the watchdog goroutine and read below.
	mu := struct {
		syncutil.Mutex
		err error
	}{}
	// Closing doneCh on return retires the watchdog.
	defer close(doneCh)
	go func() {
		select {
		case <-time.After(maxWait):
			mu.Lock()
			mu.err = errors.New("test timed out")
			mu.Unlock()
			// Closing the source forces Next to report no more rows, which
			// breaks the loop below.
			rf.f.Close()
		case <-doneCh:
		}
	}()
	rowCount := 0
	for {
		msg, haveMoreRows := rf.f.Next()
		if !haveMoreRows {
			// We have unexpectedly run out of rows, let's try and make a nice error
			// message.
			mu.Lock()
			err := mu.err
			mu.Unlock()
			if err != nil {
				rf.t.Fatal(err)
			} else {
				rf.t.Fatalf("ran out of rows after processing %d rows", rowCount)
			}
		}
		rowCount++
		require.NotNil(rf.t, msg)
		if pred(msg) {
			rf.msg = msg
			return nil
		}
	}
}
// TenantState maintains test state related to tenant.
type TenantState struct {
	// ID is the ID of the tenant.
	ID roachpb.TenantID
	// Codec is the Codec of the tenant.
	Codec keys.SQLCodec
	// SQL is a sql connection to the tenant.
	SQL *sqlutils.SQLRunner
}

// ReplicationHelper wraps a test server configured to be run in streaming
// replication tests. It exposes easy access to a tenant in the server, as well
// as a PGUrl to the underlying server.
type ReplicationHelper struct {
	// SysServer is the backing server.
	SysServer serverutils.TestServerInterface
	// SysDB is a sql connection to the system tenant.
	SysDB *sqlutils.SQLRunner
	// PGUrl is the pgurl of this server.
	PGUrl url.URL
	// Tenant is a tenant running on this server.
	Tenant TenantState
}
// NewReplicationHelper starts test server and configures it to have active
// tenant. The returned cleanup function must be called to tear down the sink,
// the flush-frequency override, the tenant connection and the server.
func NewReplicationHelper(t *testing.T) (*ReplicationHelper, func()) {
	ctx := context.Background()
	// Start server
	s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
	// Make changefeeds run faster.
	resetFreq := changefeedbase.TestingSetDefaultFlushFrequency(50 * time.Millisecond)
	// Set required cluster settings.
	_, err := db.Exec(`
SET CLUSTER SETTING kv.rangefeed.enabled = true;
SET CLUSTER SETTING kv.closed_timestamp.target_duration = '1s';
SET CLUSTER SETTING changefeed.experimental_poll_interval = '10ms';
SET CLUSTER SETTING sql.defaults.experimental_stream_replication.enabled = 'on';
`)
	require.NoError(t, err)
	// Start tenant server
	tenantID := roachpb.MakeTenantID(10)
	_, tenantConn := serverutils.StartTenant(t, s, base.TestTenantArgs{TenantID: tenantID})
	// Sink to read data from.
	sink, cleanupSink := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser))
	h := &ReplicationHelper{
		SysServer: s,
		SysDB:     sqlutils.MakeSQLRunner(db),
		PGUrl:     sink,
		Tenant: TenantState{
			ID:    tenantID,
			Codec: keys.MakeSQLCodec(tenantID),
			SQL:   sqlutils.MakeSQLRunner(tenantConn),
		},
	}
	return h, func() {
		cleanupSink()
		resetFreq()
		require.NoError(t, tenantConn.Close())
		s.Stopper().Stop(ctx)
	}
}
|
package linqo
import (
"bytes"
)
// SelectTermWhere adds a WHERE clause to the statement being built.
type SelectTermWhere interface {
	Where(term SearchTerm) SelectWhere
}

// SelectTermGroupBy adds a GROUP BY clause.
type SelectTermGroupBy interface {
	GroupBy(columns ...string) SelectGroupBy
}

// SelectTermHaving adds a HAVING clause.
type SelectTermHaving interface {
	Having(term SearchTerm) SelectHaving
}

// SelectTermOrderBy adds an ORDER BY clause, completing the statement.
type SelectTermOrderBy interface {
	OrderBy(sortSpecs ...SortSpec) Action
}

// SelectOptions is the builder state right after FROM: any optional clause
// (or the finished Action) may follow.
type SelectOptions interface {
	Action
	SelectTermWhere
	SelectTermGroupBy
	SelectTermHaving
	SelectTermOrderBy
}

// SelectWhere is the state after WHERE; WHERE may not be repeated.
type SelectWhere interface {
	Action
	SelectTermGroupBy
	SelectTermHaving
	SelectTermOrderBy
}

// SelectGroupBy is the state after GROUP BY.
type SelectGroupBy interface {
	Action
	SelectTermHaving
	SelectTermOrderBy
}

// SelectHaving is the state after HAVING; only ORDER BY may follow.
type SelectHaving interface {
	Action
	SelectTermOrderBy
}

// SelectPrelude is the state right after SELECT: a FROM clause is required.
type SelectPrelude interface {
	From(tables ...string) SelectOptions
}

// selectBuilder accumulates the statement text. It is passed by value; the
// embedded *bytes.Buffer makes all copies share one buffer.
type selectBuilder struct {
	*bytes.Buffer
}
// Select starts building a SELECT statement over the given columns; with no
// arguments all columns ("*") are selected.
func Select(what ...string) SelectPrelude {
	builder := selectBuilder{bytes.NewBuffer(nil)}
	builder.WriteString("SELECT ")
	if len(what) > 0 {
		writeCommaSepList(builder, what...)
	} else {
		builder.WriteByte('*')
	}
	return builder
}
// SelectDistinct starts building a SELECT DISTINCT statement over the given
// columns; with no arguments all columns ("*") are selected.
func SelectDistinct(what ...string) SelectPrelude {
	builder := selectBuilder{bytes.NewBuffer(nil)}
	builder.WriteString("SELECT DISTINCT ")
	if len(what) > 0 {
		writeCommaSepList(builder, what...)
	} else {
		builder.WriteByte('*')
	}
	return builder
}
// From appends the FROM clause listing the given tables.
func (b selectBuilder) From(tables ...string) SelectOptions {
	b.WriteString(" FROM ")
	writeCommaSepList(b, tables...)
	return b
}

// Where appends a WHERE clause with the given search term.
func (b selectBuilder) Where(term SearchTerm) SelectWhere {
	b.WriteString(" WHERE " + string(term))
	return b
}

// GroupBy appends a GROUP BY clause over the given columns.
func (b selectBuilder) GroupBy(columns ...string) SelectGroupBy {
	b.WriteString(" GROUP BY ")
	writeCommaSepList(b, columns...)
	return b
}

// Having appends a HAVING clause with the given search term.
func (b selectBuilder) Having(term SearchTerm) SelectHaving {
	b.WriteString(" HAVING " + string(term))
	return b
}

// OrderBy appends an ORDER BY clause built from the given sort specs.
func (b selectBuilder) OrderBy(sortingSpecs ...SortSpec) Action {
	b.WriteString(" ORDER BY ")
	rendered := make([]string, 0, len(sortingSpecs))
	for _, spec := range sortingSpecs {
		rendered = append(rendered, spec.String())
	}
	writeCommaSepList(b, rendered...)
	return b
}

// String renders the accumulated statement terminated with a semicolon.
func (b selectBuilder) String() string {
	return b.Buffer.String() + ";"
}
|
package chapters
import "fmt"
// structs demonstrates struct basics: declaring a local struct type,
// zero-value initialization with per-field assignment, and a literal with
// named fields.
func structs() {
	type user struct {
		ID        int
		FirstName string
		LastName  string
	}

	var first user
	first.ID = 1
	first.FirstName = "Arthur"
	first.LastName = "Fleck"
	fmt.Println(first)

	second := user{ID: 1, FirstName: "Arthur", LastName: "Morgan"}
	fmt.Println(second)
}
|
package slacklogger
import (
"fmt"
)
// SlackLogger posts log messages to a Slack incoming-webhook URL.
type SlackLogger struct {
	webhookURL  string // Slack incoming-webhook endpoint
	environment string // optional environment tag prepended to each message
	isDebug     bool   // when true, print messages locally instead of sending
}
// NewSlackLogger returns a new instance of SlackLogger that posts to the
// given webhook, tagging messages with environment; when isDebug is true
// messages are printed locally instead of sent.
func NewSlackLogger(webhookURL, environment string, isDebug bool) *SlackLogger {
	logger := SlackLogger{}
	logger.webhookURL = webhookURL
	logger.environment = environment
	logger.isDebug = isDebug
	return &logger
}
// Log sends message to the logger's webhook with the logger's environment
// tag and debug setting; see LogWithURL for the delivery rules.
func (logger *SlackLogger) Log(message string) {
	LogWithURL(message, logger.webhookURL, logger.environment, logger.isDebug)
}
// LogWithURL delivers message to the Slack webhook at url. A non-empty env is
// prepended as "env=<env>, ". In debug mode the message is printed to stdout
// instead of sent; a send failure is reported on stdout rather than returned.
func LogWithURL(message, url, env string, isDebug bool) {
	if env != "" {
		message = "env=" + env + ", " + message
	}
	if isDebug {
		fmt.Println("Debug: Logging to Slack: " + message)
		return
	}
	if err := Send(url, Payload{Text: message}); err != nil {
		fmt.Printf("Error while logging to Slack: %s\nOriginal message was: %s\n", err, message)
	}
}
|
package main
import _ "alpha.test/database/migration"
|
package builder
import (
"net/http"
"encoding/json"
"bytes"
"net/http/httputil"
"log"
)
// Request describes an HTTP request to be built and executed: method, path,
// headers, query parameters, and a body that is JSON-marshalled on Execute.
type Request struct {
	Method      string
	Path        string
	Headers     map[string]string
	QueryParams map[string]string
	Body        interface{} // serialized to JSON when the request is executed
}
// NewRequest returns a Request for the given method and path with empty,
// ready-to-fill header and query-parameter maps.
func NewRequest(method string, path string) *Request {
	req := Request{Method: method, Path: path}
	req.Headers = map[string]string{}
	req.QueryParams = map[string]string{}
	return &req
}
// Execute marshals the request body to JSON, builds an *http.Request with the
// configured query parameters and headers, logs the outgoing request, and
// dispatches it through the given client. It returns any error from JSON
// marshalling, request construction, or the client.
func (simpleRequest *Request) Execute(client HttpClient) (*http.Response, error) {
	byteSlice, marshallErr := json.Marshal(simpleRequest.Body)
	if marshallErr != nil {
		return nil, marshallErr
	}
	// Bug fix: the error from http.NewRequest was previously discarded; a
	// malformed method or URL left request nil and dereferencing request.URL
	// below would panic.
	request, err := http.NewRequest(simpleRequest.Method, simpleRequest.Path, bytes.NewBuffer(byteSlice))
	if err != nil {
		return nil, err
	}
	query := request.URL.Query()
	for key, value := range simpleRequest.QueryParams {
		query.Add(key, value)
	}
	request.URL.RawQuery = query.Encode()
	for key, value := range simpleRequest.Headers {
		request.Header.Set(key, value)
	}
	// Best-effort debug logging: a dump failure must not abort the request.
	if rawRequest, dumpErr := httputil.DumpRequestOut(request, true); dumpErr == nil {
		log.Println(string(rawRequest))
	}
	return client.Do(request)
}
|
package handlers
import (
"bytes"
"database/sql"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
"github.com/go-redis/redis"
"wayneli.me/m/servers/gateway/models/users"
"wayneli.me/m/servers/gateway/sessions"
)
// TestServeHTTP tests if ServeHTTP in cors.go correctly returns required CORS
// headers. It exercises the CORS middleware across the users, specific-user,
// sessions and specific-session handlers, then an OPTIONS preflight. Requires
// a reachable redis (REDISADDR or 127.0.0.1:6379) and a local MySQL with
// MYSQL_ROOT_PASSWORD set.
func TestServeHTTP(t *testing.T) {
	// initial setup
	redisaddr := os.Getenv("REDISADDR")
	if len(redisaddr) == 0 {
		redisaddr = "127.0.0.1:6379"
	}
	client := redis.NewClient(&redis.Options{
		Addr: redisaddr,
	})
	sessionsStore := sessions.NewRedisStore(client, time.Hour)
	user1 := &users.NewUser{
		Email:        "John.Smith@testing.com",
		Password:     "moreThanAdaquate",
		PasswordConf: "moreThanAdaquate",
		UserName:     "Adaquate",
		FirstName:    "John",
		LastName:     "Smith",
	}
	dsn := fmt.Sprintf("root:%s@tcp(127.0.0.1:3306)/test", os.Getenv("MYSQL_ROOT_PASSWORD"))
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	// Start from a clean Users table so the signup below cannot collide.
	_, err = db.Exec("TRUNCATE TABLE Users")
	if err != nil {
		t.Fatal(err)
	}
	usersStore := users.NewMysqlStore(db)
	ctx := HandlerContext{"someRanDomKey3120", sessionsStore, usersStore}
	// 1. UsersHandler call
	// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
	rr := httptest.NewRecorder()
	b := new(bytes.Buffer)
	if err = json.NewEncoder(b).Encode(user1); err != nil {
		t.Fatal(err)
	}
	req, err := http.NewRequest(http.MethodPost, "/v1/users", b)
	if err != nil {
		t.Fatal(err)
	}
	// add content type
	req.Header.Add("Content-Type", "application/json")
	NewCORS(http.HandlerFunc(ctx.UsersHandler)).ServeHTTP(rr, req)
	// The session token returned on signup authorizes the following calls.
	authHeader := rr.Header().Get("Authorization")
	err = helperTestFunc(t, rr)
	if err != nil {
		t.Fatal(err)
	}
	// 2. SpecificUserHandler call
	rr = httptest.NewRecorder()
	newUpdate := &users.Updates{
		FirstName: "Johnny",
		LastName:  "Depp",
	}
	b = new(bytes.Buffer)
	if err = json.NewEncoder(b).Encode(newUpdate); err != nil {
		t.Fatal(err)
	}
	req, err = http.NewRequest(http.MethodPatch, "/v1/users/me", b)
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("Authorization", authHeader)
	NewCORS(http.HandlerFunc(ctx.SpecificUsersHandler)).ServeHTTP(rr, req)
	err = helperTestFunc(t, rr)
	if err != nil {
		t.Fatal(err)
	}
	// 3. SessionsHandler call
	rr = httptest.NewRecorder()
	cred := users.Credentials{
		Email:    "John.Smith@testing.com",
		Password: "moreThanAdaquate",
	}
	b = new(bytes.Buffer)
	if err = json.NewEncoder(b).Encode(cred); err != nil {
		t.Fatal(err)
	}
	req, err = http.NewRequest(http.MethodPost, "/v1/sessions", b)
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("Authorization", authHeader)
	NewCORS(http.HandlerFunc(ctx.SessionsHandler)).ServeHTTP(rr, req)
	err = helperTestFunc(t, rr)
	if err != nil {
		t.Fatal(err)
	}
	// 4. SessionsHandler call
	// NOTE(review): POST to /v1/sessions/mine looks odd — DELETE is the usual
	// verb for ending a session; confirm against SpecificSessionHandler.
	rr = httptest.NewRecorder()
	req, err = http.NewRequest(http.MethodPost, "/v1/sessions/mine", nil)
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Add("Authorization", authHeader)
	NewCORS(http.HandlerFunc(ctx.SpecificSessionHandler)).ServeHTTP(rr, req)
	err = helperTestFunc(t, rr)
	if err != nil {
		t.Fatal(err)
	}
	// 5. OPTIONS call
	// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
	rr = httptest.NewRecorder()
	req, err = http.NewRequest(http.MethodOptions, "/v1/users", b)
	if err != nil {
		t.Fatal(err)
	}
	// add content type
	req.Header.Add("Content-Type", "application/json")
	NewCORS(http.HandlerFunc(ctx.UsersHandler)).ServeHTTP(rr, req)
	authHeader = rr.Header().Get("Authorization")
	err = helperTestFunc(t, rr)
	if err != nil {
		t.Fatal(err)
	}
	// Check the status code is what we expect.
	if status := rr.Code; status != http.StatusOK {
		t.Errorf("handler returned wrong status code: got %v want %v",
			status, http.StatusOK)
	}
}
// This is a helper function to help check if the CORS headers are correct
func helperTestFunc(t *testing.T, rr *httptest.ResponseRecorder) error {
corsACAO := rr.Header().Get("Access-Control-Allow-Origin")
if corsACAO != "*" {
t.Errorf("handler returned CORS: got %v want %v",
corsACAO, "*")
return errors.New("Did not find CORS header")
}
corsACAM := rr.Header().Get("Access-Control-Allow-Methods")
if corsACAM != "GET, PUT, POST, PATCH, DELETE" {
t.Errorf("handler returned CORS: got %v want %v",
corsACAM, "GET, PUT, POST, PATCH, DELETE")
return errors.New("Did not find CORS header")
}
corsACAH := rr.Header().Get("Access-Control-Allow-Headers")
if corsACAH != "Content-Type, Authorization" {
t.Errorf("handler returned CORS: got %v want %v",
corsACAH, "Content-Type, Authorization")
return errors.New("Did not find CORS header")
}
corsACEH := rr.Header().Get("Access-Control-Expose-Headers")
if corsACEH != "Authorization" {
t.Errorf("handler returned CORS: got %v want %v",
corsACEH, "Authorization")
return errors.New("Did not find CORS header")
}
corsACMA := rr.Header().Get("Access-Control-Max-Age")
if corsACMA != "600" {
t.Errorf("handler returned CORS: got %v want %v",
corsACMA, "600")
return errors.New("Did not find CORS header")
}
return nil
}
|
package chat
import (
"fmt"
)
// Agent is a bidirectional chat endpoint: it has an identity, can be read
// from (blocking until a message arrives) and written to.
type Agent interface {
	Id() (string)
	Read(msg *string) (error)
	Write(msg string) (error)
}

// chatAgent binds an Agent to the Room it participates in.
type chatAgent struct {
	a Agent
	r *Room
}
// NewRoomAgent wraps a into a chatAgent bound to room r. Registration with
// the room happens later, when Serve sends the agent on r.ChaterAdd.
func NewRoomAgent(a Agent, r *Room) (ra *chatAgent) {
	ra = &chatAgent{a: a, r: r}
	return
}
// Id returns the identity of the wrapped agent.
func (ra *chatAgent) Id() (string) {
	return ra.a.Id()
}

// Write forwards msg to the wrapped agent.
func (ra *chatAgent) Write(msg string) (error) {
	return ra.a.Write(msg)
}
// handle formats msg as "<id>:<msg>" and broadcasts it to the room.
// It always returns nil (named err is never set).
func (ra *chatAgent) handle(msg string) (err error) {
	// reply := ra.Id() + ":<br> " + msg
	reply := ra.Id() + ":" + msg
	// ra.r.Receive(reply)
	ra.r.Broadcast <- reply
	return
}
// Serve registers the agent with its room, then pumps messages: each
// successful Read is broadcast via handle. On a read error the agent is
// deregistered from the room and the loop ends.
func (ra *chatAgent) Serve() {
	ra.r.ChaterAdd <- ra
	for {
		var buf string
		fmt.Println("Serve Read")
		err := ra.a.Read(&buf)
		if err != nil {
			fmt.Println("chatagent:Serve:readerror:")
			fmt.Println(err)
			// Deregister by id so the room stops writing to this agent.
			ra.r.ChaterRM <- ra.Id()
			break
		}
		fmt.Println("id=" + ra.Id() + ", read=" + buf)
		err = ra.handle(buf)
		if err != nil {
			// NOTE(review): unlike the read-error path, this exit does not
			// send ChaterRM — confirm the agent should stay registered here.
			fmt.Println("chatagent:Serve:handleerror:")
			fmt.Println(err)
			break
		}
	}
}
|
package core
import (
db "github.com/I-Reven/Hexagonal/src/infrastructure/repository/mongo"
"github.com/go-bongo/bongo"
"os"
"sync"
)
var (
	// config is built at package init; MONGO_URL must therefore be set in the
	// environment before the process starts.
	config = bongo.Config{
		ConnectionString: os.Getenv("MONGO_URL"),
		Database:         "core",
	}
	// once guards the lazy, process-wide creation of connection.
	once       sync.Once
	connection *Connection
)
type (
	// Connection wraps the underlying bongo connection to the core database.
	Connection struct {
		*bongo.Connection
	}
	// Core exposes the mongo repository used to open the shared connection.
	Core struct {
		DB db.Mongo
	}
)
// Connection returns the process-wide mongo connection, creating it on the
// first call. All Core values share the single connection: only the first
// caller's c.DB is ever used to open it.
func (c *Core) Connection() *Connection {
	once.Do(func() {
		connection = &Connection{c.DB.Connection(config)}
	})
	return connection
}
|
package add_two_numbers
import (
"testing"
)
// Test_addTwoNumbers checks addTwoNumbers against three cases: a simple sum,
// zeros, and different-length inputs with a carry that extends the result.
func Test_addTwoNumbers(t *testing.T) {
	tests := []struct {
		name string
		l1   *ListNode
		l2   *ListNode
		want *ListNode
	}{
		{
			// 342 + 465 = 807, digits stored least-significant first.
			l1:   newList(2, 4, 3),
			l2:   newList(5, 6, 4),
			want: newList(7, 0, 8),
		},
		{
			l1:   newList(0),
			l2:   newList(0),
			want: newList(0),
		},
		{
			// Carry propagates past the end of both inputs.
			l1:   newList(9, 9, 9, 9, 9, 9, 9),
			l2:   newList(9, 9, 9, 9),
			want: newList(8, 9, 9, 9, 0, 0, 0, 1),
		},
	}
	// NOTE(review): name is left empty in every case, so the subtests run
	// unnamed — consider naming them.
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := addTwoNumbers(tt.l1, tt.l2); !equalList(got, tt.want) {
				t.Errorf("got:")
				printList(got)
				t.Errorf("want:")
				printList(tt.want)
			}
		})
	}
}
// newList builds a linked list whose nodes hold values in the given order;
// with no values it returns nil.
func newList(values ...int) *ListNode {
	var head *ListNode
	// Build back-to-front so each node can point at the one built before it.
	for i := len(values) - 1; i >= 0; i-- {
		head = &ListNode{Val: values[i], Next: head}
	}
	return head
}
// equalList reports whether two lists have the same length and the same
// values in the same order; two nil lists are equal.
func equalList(l1, l2 *ListNode) bool {
	for l1 != nil && l2 != nil {
		if l1.Val != l2.Val {
			return false
		}
		l1, l2 = l1.Next, l2.Next
	}
	// Equal only if both lists ended together.
	return l1 == nil && l2 == nil
}
|
/* ######################################################################
# Author: (__AUTHOR__)
# Created Time: __CREATE_DATETIME__
# File Name: default_handler.go
# Description:
####################################################################### */
package handlers
import (
"__PROJECT_NAME__/libs"
"github.com/ant-libs-go/util/logs"
)
// DefaultServiceImpl is the default (stateless) service handler implementation.
type DefaultServiceImpl struct{}

// NewDefaultServiceImpl returns a ready-to-use DefaultServiceImpl.
func NewDefaultServiceImpl() *DefaultServiceImpl {
	return &DefaultServiceImpl{}
}
// GetByIds answers a batch lookup request. It currently returns an empty OK
// response; the commented-out section shows the intended query logic.
// FIX: receiver renamed from `this` to `s` — `this`/`self` receivers are
// non-idiomatic Go (Go Code Review Comments: receiver names).
func (s *DefaultServiceImpl) GetByIds(req *libs.GetByIdsRequest, log *logs.SessLog) (r *libs.GetByIdsResponse) {
	r = &libs.GetByIdsResponse{
		Header: &libs.Header{Code: libs.ResponseCode_OK},
		Body:   map[int32]string{}}
	/*
		_, ms, err := query.New().GetByIds(req.Body)
		if err != nil {
			log.Warnf("server exception, %v", err)
			r.Header.Code = libs.ResponseCode_SERVER_ERROR
			return
		}
		for _, m := range ms {
			r.Body[m.Id] = m.Format()
		}
	*/
	return
}
|
package main
import (
"fmt"
)
// main prints the 5th term of the count-and-say sequence.
func main() {
	result := countAndSay(5)
	fmt.Println(result)
}
// countAndSay returns the n-th term of the count-and-say sequence
// ("1", "11", "21", "1211", "111221", ...): each term reads the previous
// term aloud as runs of identical digits, e.g. "1211" -> one 1, one 2,
// two 1s -> "111221". Returns "" for n <= 0.
//
// FIX: the original fell through to the recursive case for negative n,
// recursing forever; n <= 0 is now an explicit base case. Also removed the
// redundant n == 2 special case (the general scan derives "11" from "1")
// and the redundant index k, which always equaled i-1.
func countAndSay(n int) string {
	if n <= 0 {
		return ""
	}
	if n == 1 {
		return "1"
	}
	prev := countAndSay(n - 1)
	result := ""
	count := 1
	// Scan runs of equal digits; emit "<count><digit>" at each run boundary.
	for i := 1; i < len(prev); i++ {
		if prev[i] == prev[i-1] {
			count++
		} else {
			result = fmt.Sprintf("%s%d%s", result, count, string(prev[i-1]))
			count = 1
		}
	}
	// Flush the final run.
	result = fmt.Sprintf("%s%d%s", result, count, string(prev[len(prev)-1]))
	return result
}
|
package main
import (
"fmt"
"math/rand"
"time"
)
/*
Задание 2. Нахождение первого вхождения числа в упорядоченном массиве (числа могут повторяться)
Что нужно сделать
Заполните упорядоченный массив из 12 элементов и введите число. Необходимо реализовать поиск первого вхождения заданного числа в массив. Сложность алгоритма должна быть минимальная.
Что оценивается
Верность индекса.
При вводе массива 1 2 2 2 3 4 5 6 7 8 9 10 и вводе числа 2 программа должна вывести индекс 1.
*/
const n = 10
// main demonstrates find twice: once on a strictly increasing random array,
// once on an array where every element equals the search value (so the
// returned index must be 0).
func main() {
	rand.Seed(time.Now().UnixNano())

	var arr [n]int
	// Strictly increasing: element i lies in [10*i, 10*i+9].
	for i := range arr {
		arr[i] = 10*i + rand.Intn(10)
	}
	value := arr[rand.Intn(n)]
	fmt.Printf("Ищем %v в массиве %v. \n", value, arr)
	fmt.Println(find(value, arr))

	// All-equal array: first occurrence is always index 0.
	for i := range arr {
		arr[i] = 1
	}
	value = arr[rand.Intn(n)]
	fmt.Printf("Ищем %v в массиве %v. \n", value, arr)
	fmt.Println(find(value, arr))
}
// find binary-searches the sorted array for number and returns the index of
// its FIRST occurrence, or -1 when the value is absent or out of range.
func find(number int, arr [n]int) int {
	lo, hi := 0, len(arr)-1
	// Fast reject: value outside the array's min/max bounds.
	if arr[lo] > number || arr[hi] < number {
		return -1
	}
	for lo <= hi {
		mid := (lo + hi) / 2
		switch {
		case arr[mid] == number:
			// Walk back to the leftmost duplicate.
			return isFirstElement(mid, arr)
		case arr[mid] < number:
			lo = mid + 1
		default:
			hi = mid - 1
		}
	}
	return -1
}
// isFirstElement slides index left over any equal neighbors and returns the
// position of the first occurrence of arr[index].
func isFirstElement(index int, arr [n]int) int {
	for index > 0 && arr[index] == arr[index-1] {
		index--
	}
	return index
}
|
package main
import (
"fmt"
xir "github.com/ceftb/xir/lang/go"
"github.com/ceftb/xir/tools/viz"
)
// main builds a small dual-homed topology — five nodes, each linked to both
// switches s0 and s1 — and renders it as an SVG named "muffin".
func main() {
	topo := xir.NewNet()

	sw0 := topo.Node().Set(xir.Props{"name": "s0"})
	sw1 := topo.Node().Set(xir.Props{"name": "s1"})

	for idx := 0; idx < 5; idx++ {
		node := topo.Node().Set(xir.Props{"name": fmt.Sprintf("n%d", idx)})
		topo.Link(sw0.Endpoint(), node.Endpoint())
		topo.Link(sw1.Endpoint(), node.Endpoint())
	}

	viz.NetSvg("muffin", topo)
}
|
/* bogomilter is a milter service for postfix */
package main
import (
"flag"
"fmt"
"github.com/phalaaxx/milter"
"io"
"io/ioutil"
"log"
"net"
"net/textproto"
"os"
"os/exec"
"strings"
"syscall"
)
/* global variables */
// BogoBin is the path to the bogofilter binary (set by the -bin flag).
var BogoBin string

// BogoDir is the bogofilter database directory (set by the -db flag).
var BogoDir string

// LocalHold quarantines locally originating spam when true (-localhold flag).
var LocalHold bool

/* BogoMilter object */
// BogoMilter pipes each message through a bogofilter subprocess; one instance
// exists per milter session. cmd/stdin/stdout are set up in Headers and
// consumed in BodyChunk/Body.
type BogoMilter struct {
	milter.Milter
	from   string
	cmd    *exec.Cmd
	stdin  io.WriteCloser
	stdout io.ReadCloser
}
// Header inspects one message header at a time. A message that already
// carries an X-Bogosity header has been classified before, so it is
// accepted immediately; everything else continues through the filter.
func (b BogoMilter) Header(name, value string, m *milter.Modifier) (milter.Response, error) {
	switch name {
	case "X-Bogosity":
		return milter.RespAccept, nil
	default:
		return milter.RespContinue, nil
	}
}
/* MailFrom is called on envelope from address */
// Records the envelope sender so Body can log it alongside the spam verdict.
func (b *BogoMilter) MailFrom(from string, m *milter.Modifier) (milter.Response, error) {
	// save from address for later reference
	b.from = from
	return milter.RespContinue, nil
}
/* Headers is called after the last of message headers */
// Spawns the bogofilter subprocess and streams the complete header block to
// its stdin, terminated by the blank line that separates headers from body.
// The setup order is load-bearing: both pipes must be requested before
// Start, and stdin stays open for BodyChunk/Body to finish the message.
func (b *BogoMilter) Headers(headers textproto.MIMEHeader, m *milter.Modifier) (milter.Response, error) {
	var err error
	// end of headers, start bogofilter pipe
	b.cmd = exec.Command(BogoBin, "-v", "-d", BogoDir)
	// get bogofilter stdin
	b.stdin, err = b.cmd.StdinPipe()
	if err != nil {
		return nil, err
	}
	// get bogofilter stdout
	b.stdout, err = b.cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	// start command
	if err = b.cmd.Start(); err != nil {
		return nil, err
	}
	// print headers to stdin
	for k, vl := range headers {
		for _, v := range vl {
			if _, err := fmt.Fprintf(b.stdin, "%s: %s\n", k, v); err != nil {
				return nil, err
			}
		}
	}
	// Blank line terminating the header section (RFC 5322 framing).
	if _, err := fmt.Fprintf(b.stdin, "\n"); err != nil {
		return nil, err
	}
	return milter.RespContinue, nil
}
// BodyChunk forwards one chunk of the message body to the running
// bogofilter process's stdin.
func (b BogoMilter) BodyChunk(chunk []byte, m *milter.Modifier) (milter.Response, error) {
	_, err := b.stdin.Write(chunk)
	if err != nil {
		return nil, err
	}
	return milter.RespContinue, nil
}
/* Body is called when email message body has been sent */
// Closes bogofilter's stdin, collects its verdict from stdout, attaches it
// to the message as an X-Bogosity header, and logs/quarantines spam.
//
// FIX: the original wrote `if m.AddHeader(...); err != nil` — a call
// statement in the if-initializer — so AddHeader's return value was
// discarded and the condition re-tested a stale err. The error is now
// captured and checked properly.
func (b *BogoMilter) Body(m *milter.Modifier) (milter.Response, error) {
	// close process stdin and read its output
	if err := b.stdin.Close(); err != nil {
		return nil, err
	}
	// get bogofilter output
	output, err := ioutil.ReadAll(b.stdout)
	if err != nil {
		return nil, err
	}
	// wait for process to terminate
	if err := b.cmd.Wait(); err != nil {
		// no easy way to get exit code
		if exiterr, ok := err.(*exec.ExitError); ok {
			if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
				// exit code 3 indicates error condition
				if status.ExitStatus() == 3 {
					return nil, err
				}
			}
		}
	}
	// add X-Bogosity header
	header := string(output)
	if strings.HasPrefix(header, "X-Bogosity") {
		// NOTE(review): assumes output is shaped "X-Bogosity: <verdict>\n";
		// the slice strips the "X-Bogosity: " prefix (12 chars) and the
		// trailing newline. Shorter malformed output would panic — confirm
		// bogofilter -v always emits this form.
		if err := m.AddHeader("X-Bogosity", header[12:len(header)-1]); err != nil {
			return nil, err
		}
		// log spam senders
		if strings.HasPrefix(header, "X-Bogosity: Spam") {
			fmt.Printf("detected spam from %s\n", b.from)
		}
		// put locally originating spam into quarantine
		if LocalHold && len(m.Headers.Get("Received")) == 0 {
			if strings.HasPrefix(header, "X-Bogosity: Spam") {
				fmt.Printf("quarantine mail from %s\n", b.from)
				m.Quarantine("local spam")
				// TODO: notify administrator
			}
		}
	}
	return milter.RespAccept, nil
}
// RunServer serves milter sessions on the given listener, creating a fresh
// BogoMilter per connection. (The original comment claimed this was
// "NewObject"; it actually runs the server.) Fatal-exits on server error.
func RunServer(socket net.Listener) {
	// Session factory: a new milter plus the action/protocol option masks.
	newSession := func() (milter.Milter, uint32, uint32) {
		actions := milter.OptAddHeader | milter.OptChangeHeader
		protocol := milter.OptNoConnect | milter.OptNoHelo | milter.OptNoRcptTo
		return &BogoMilter{}, actions, protocol
	}
	if err := milter.RunServer(socket, newSession); err != nil {
		log.Fatal(err)
	}
}
/* main program */
// Parses flags, binds the listening socket (unix or tcp), fixes socket
// permissions, and serves milter sessions until killed. The unix-socket
// remove/chmod ordering around net.Listen is intentional: stale sockets
// must be removed before binding, and the mode can only be set after.
func main() {
	// parse commandline arguments
	var protocol, address string
	flag.StringVar(&protocol,
		"proto",
		"unix",
		"Protocol family (unix or tcp)")
	flag.StringVar(&address,
		"addr",
		"/var/spool/postfix/milter/bogo.sock",
		"Bind to address or unix domain socket")
	flag.StringVar(&BogoBin,
		"bin",
		"/usr/bin/bogofilter",
		"Full path to bogofilter binary")
	flag.StringVar(&BogoDir,
		"db",
		"/var/cache/filter",
		"Path to bogofilter database")
	flag.BoolVar(&LocalHold,
		"localhold",
		false,
		"Put outgoing spam into quarantine")
	flag.Parse()
	// make sure the specified protocol is either unix or tcp
	if protocol != "unix" && protocol != "tcp" {
		log.Fatal("invalid protocol name")
	}
	// make sure socket does not exist
	if protocol == "unix" {
		// ignore os.Remove errors
		os.Remove(address)
	}
	// bind to listening address
	socket, err := net.Listen(protocol, address)
	if err != nil {
		log.Fatal(err)
	}
	defer socket.Close()
	if protocol == "unix" {
		// set mode 0660 for unix domain sockets
		if err := os.Chmod(address, 0660); err != nil {
			log.Fatal(err)
		}
		// remove socket on exit
		defer os.Remove(address)
	}
	// run server
	go RunServer(socket)
	// sleep forever
	select {}
}
|
package plug
import (
"mqtts/core"
"mqtts/utils"
"strings"
)
// iterStartDigits/iterEndDigits bound the candidate clientId lengths tried
// by FuzzAvailableClientId.
const iterStartDigits = 5
const iterEndDigits = 10

// clientIdTypes are the character-set flavors passed to utils.GetRandomString.
var clientIdTypes = []string{"string", "int", "effectiveNumber"}
// FuzzAvailableClientId probes the broker for clientIds it will accept: for
// every candidate length and every id flavor it generates a random clientId,
// attempts an MQTT connect concurrently, and collects each candidate that
// was not rejected with "identifier rejected". On success the first accepted
// id is written back into opts.ClientId.
//
// NOTE(review): the receive loop assumes utils.IterRange(start, end, 1)
// yields exactly end-start values, so that goroutine count matches
// (iterEndDigits-iterStartDigits)*len(clientIdTypes) — confirm against utils,
// otherwise this blocks forever or leaks goroutines.
func FuzzAvailableClientId(opts *core.TargetOptions) []string {
	utils.OutputInfoMessage(opts.Host, opts.Port, "Start detecting available clientId...")
	var availableClientIds []string
	availableClientIdChan := make(chan string)
	for k := range utils.IterRange(iterStartDigits, iterEndDigits, 1) {
		for _, clientIdType := range clientIdTypes {
			// Fresh copy per goroutine so concurrent connects don't share opts.
			tmpOpts := new(core.TargetOptions)
			utils.DeepCopy(tmpOpts, opts)
			tmpOpts.ClientId = utils.GetRandomString(k, clientIdType)
			go func() {
				client := core.GetMQTTClient(tmpOpts)
				err := client.Connect()
				// Anything other than an explicit "identifier rejected"
				// (including a clean connect) counts as accepted.
				if err == nil || !strings.EqualFold(err.Error(), "identifier rejected") {
					availableClientIdChan <- tmpOpts.ClientId
				} else {
					// Send a sentinel so the receiver can count every goroutine.
					availableClientIdChan <- ""
				}
			}()
		}
	}
	// Drain exactly one result per goroutine started above.
	for range utils.Iter((iterEndDigits - iterStartDigits) * len(clientIdTypes)) {
		clientId := <-availableClientIdChan
		if clientId != "" {
			availableClientIds = append(availableClientIds, clientId)
		}
	}
	if len(availableClientIds) == 0 {
		utils.OutputErrorMessage(opts.Host, opts.Port, "No available clientId found, you can try use -clientid **** in command to set clientid")
	} else {
		utils.OutputInfoMessage(opts.Host, opts.Port, "Available clientId: "+strings.Join(availableClientIds, ","))
		opts.ClientId = availableClientIds[0]
	}
	return availableClientIds
}
|
package fileutils
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
)
func CopyFile(srcPath string, dstPath string) error {
srcFile, err := os.Open(srcPath)
if err != nil {
return err
}
defer srcFile.Close()
dstFile, err := os.Create(dstPath)
_, err = io.Copy(dstFile, srcFile)
if err != nil {
fileInfo, err := os.Stat(srcPath)
if err != nil {
err = os.Chmod(dstPath, fileInfo.Mode())
}
}
return nil
}
// CopyDirectoryRecursive copies the directory tree rooted at src to dst.
// If dst already exists it is removed first when overwrite is true,
// otherwise an error is returned.
//
// FIX: the original probed dst with os.Open and never closed the handle
// (leaking a file descriptor per call), and ignored the errors from
// os.MkdirAll and ioutil.ReadDir. The probe now uses os.Stat and every
// error is checked.
func CopyDirectoryRecursive(src string, dst string, overwrite bool) error {
	fsrc, err := os.Stat(src)
	if err != nil {
		return err
	}
	if !fsrc.IsDir() {
		return fmt.Errorf("Source is not a directory")
	}
	// Stat (not Open) — no handle to leak.
	if _, err := os.Stat(dst); !os.IsNotExist(err) {
		if !overwrite {
			return fmt.Errorf("Destination already exists")
		}
		if err := os.RemoveAll(dst); err != nil {
			return err
		}
	}
	if err := os.MkdirAll(dst, fsrc.Mode()); err != nil {
		return err
	}
	entries, err := ioutil.ReadDir(src)
	if err != nil {
		return err
	}
	for _, f := range entries {
		srcPath := filepath.Join(src, f.Name())
		dstPath := filepath.Join(dst, f.Name())
		if f.IsDir() {
			if err := CopyDirectoryRecursive(srcPath, dstPath, overwrite); err != nil {
				return err
			}
		} else {
			if err := CopyFile(srcPath, dstPath); err != nil {
				return err
			}
		}
	}
	return nil
}
func EnsureDirectoryExists(dir string) error {
fileStat, err := os.Stat(dir)
if err != nil {
if os.IsNotExist(err) {
return os.Mkdir(dir, 0700)
}
}
if !fileStat.IsDir() {
return fmt.Errorf("Not a dir: %s", dir)
}
return nil
}
func IsDirectory(dir string) bool {
fileStat, err := os.Stat(dir)
if err != nil {
return false
}
if !fileStat.IsDir() {
return false
}
return true
}
// StripFileExtension returns file with everything from the final "." onward
// removed; a name with no "." is returned unchanged. Note that a leading-dot
// name like ".gitignore" therefore becomes "".
func StripFileExtension(file string) string {
	idx := strings.LastIndex(file, ".")
	if idx < 0 {
		return file
	}
	return file[:idx]
}
func MakeDirectoryIfNotExists(dir string) error {
fs, err := os.Stat(dir)
if err != nil {
if os.IsNotExist(err) {
return os.Mkdir(dir, 0750)
}
}
if !fs.IsDir() {
return fmt.Errorf("%v esxists, but is not a directory.", dir)
}
return nil
}
// EmptyDirectory is a placeholder that always returns a "Not implemented"
// error; callers must not rely on it until it is written.
func EmptyDirectory(dir string) error {
	return fmt.Errorf("Not implemented")
}
|
package api
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/thetogi/YReserve2/model"
)
// TestUpdateUserDetail exercises the PUT /api/v1/user/{id}/userDetail
// endpoint end-to-end: create an authenticated user, submit a detail update,
// and verify the echoed detail fields.
//
// FIX: the original called req.Header.Set BEFORE checking the error from
// http.NewRequest — on failure req is nil and the Set call panics. The error
// check now precedes any use of req.
func TestUpdateUserDetail(t *testing.T) {
	t.Log("Starting update user detail test case")
	apiTest := GetApiTest()
	user := GetTestUser()
	userAuth := apiTest.CreateUserAuthFromTestAPI(t, apiTest.API, user)
	apiTest.CheckValidTestUser(t, user, userAuth.User)
	if userAuth.Token == "" {
		t.Errorf("handler returned wrong token: got %v",
			userAuth.Token)
	}
	expectedUser := userAuth.User
	userDetail := &model.UserDetail{
		UserID:    expectedUser.UserID,
		UtmSource: "Dummay",
	}
	t.Log(expectedUser)
	jsonUserDetail, _ := json.Marshal(userDetail)
	req, err := http.NewRequest("PUT", fmt.Sprintf("/api/v1/user/%d/userDetail", expectedUser.UserID), bytes.NewBuffer(jsonUserDetail))
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set(model.AUTHENTICATION, userAuth.Token)
	res := httptest.NewRecorder()
	handler := apiTest.API.requestWithAuthHandler(apiTest.API.updateUserDetail)
	handler.ServeHTTP(res, req)
	CheckOkStatus(t, res.Code)
	t.Log(res.Body.String())
	receivedUserDetail := model.UserDetailFromString(res.Body.String())
	if receivedUserDetail.UserID != userDetail.UserID {
		t.Errorf("handler returned wrong name: got %d expected %d",
			receivedUserDetail.UserID, userDetail.UserID)
	}
	if receivedUserDetail.UtmSource != userDetail.UtmSource {
		t.Errorf("handler returned wrong utm source: got %s expected %s",
			receivedUserDetail.UtmSource, userDetail.UtmSource)
	}
}
|
package kademlia
import (
"fmt"
"io"
)
// Ping is an empty ping message.
type Ping struct{}

// Marshal implements Serializable interface and returns a nil byte slice.
func (r *Ping) Marshal() []byte {
	return nil
}

// Unmarshal implements decode interface and never returns an error; a Ping
// carries no payload, so there is nothing to decode.
// FIX: removed the `r = &Ping{}` line — it only reassigned the local
// receiver pointer (invisible to the caller) and was dead code.
func (r *Ping) Unmarshal([]byte) error {
	return nil
}
// Pong is an empty pong message.
type Pong struct{}

// Marshal implements Serializable interface and returns a nil byte slice.
func (r *Pong) Marshal() []byte {
	return nil
}

// Unmarshal implements decode interface and never returns an error; a Pong
// carries no payload, so there is nothing to decode.
// FIX: removed the `r = &Pong{}` line — it only reassigned the local
// receiver pointer (invisible to the caller) and was dead code.
func (r *Pong) Unmarshal([]byte) error {
	return nil
}
// FindNodeRequest represents a FIND_NODE RPC call in the Kademlia specification.
type FindNodeRequest struct {
	Target PublicKey
}

// Marshal implements Serializable interface and returns the public key of the target for this search
// request as a byte slice.
func (r *FindNodeRequest) Marshal() []byte {
	return r.Target[:]
}

// Unmarshal implements decode interface: buf must be exactly SizePublicKey
// bytes, which are copied into Target.
// FIX: removed the `r = &FindNodeRequest{}` in the error path — reassigning
// the local receiver pointer never resets the caller's value; it was dead
// (and misleading) code.
func (r *FindNodeRequest) Unmarshal(buf []byte) error {
	if len(buf) != SizePublicKey {
		return fmt.Errorf("expected buf to be %d bytes, but got %d bytes: %w", SizePublicKey, len(buf), io.ErrUnexpectedEOF)
	}
	copy(r.Target[:], buf)
	return nil
}
// FindNodeResponse represents the results of a FIND_NODE RPC call
type FindNodeResponse struct {
	// NOTE(review): declared as ID but assigned a []ID in Unmarshal below —
	// ID is presumably a slice-like type; confirm against its declaration.
	Results ID
}

// Marshal implements Serializable interface and encodes the list of closest peer ID results into list.
// Encoding: one count byte followed by each result's own marshaled bytes.
func (r *FindNodeResponse) Marshal() []byte {
	buf := []byte{byte(len(r.Results))}
	for _, result := range r.Results {
		buf = append(buf, result.Marshal()...)
	}
	return buf
}

// Unmarshal implements decode interface.
//
// NOTE(review): this method looks broken in two ways — verify before use:
//  1. `r = &FindNodeResponse{}` rebinds the LOCAL receiver pointer, so the
//     final `r.Results = results` writes to the discarded copy and the
//     caller never sees the decoded results.
//  2. `id.Unmarshal(buf).(ID)` type-asserts an error value to ID and then
//     compares it to nil; this was very likely meant to be
//     `err := id.Unmarshal(buf)`.
func (r *FindNodeResponse) Unmarshal(buf []byte) error {
	r = &FindNodeResponse{}
	if len(buf) < 1 {
		return io.ErrUnexpectedEOF
	}
	// First byte is the element count; the rest is the packed payload.
	size := buf[0]
	buf = buf[1:]
	results := make([]ID, 0, size)
	for i := 0; i < cap(results); i++ {
		var id ID
		err := id.Unmarshal(buf).(ID)
		if err != nil {
			return io.ErrUnexpectedEOF
		}
		results = append(results, id)
		// Advance past the bytes this ID consumed.
		buf = buf[id.Size():]
	}
	r.Results = results
	return nil
}
|
package state
import "time"
// SystemState - All kinds of system-related information and metrics
type SystemState struct {
	Info           SystemInfo
	Scheduler      Scheduler
	Memory         Memory
	CPUInfo        CPUInformation
	CPUStats       CPUStatisticMap
	NetworkStats   NetworkStatsMap
	Disks          DiskMap
	DiskStats      DiskStatsMap
	DiskPartitions DiskPartitionMap

	DataDirectoryPartition string // Partition that the data directory lives on (identified by the partition's mountpoint)
	XlogPartition          string // Partition that the WAL directory lives on
	XlogUsedBytes          uint64 // Bytes currently used in the WAL directory
}
// SystemType - Enum that describes which kind of system we're monitoring
type SystemType int

// Treat this list as append-only and never change the order
// (the numeric values are persisted/transmitted, so reordering would
// silently reinterpret existing data).
const (
	SelfHostedSystem SystemType = iota
	AmazonRdsSystem
	HerokuSystem
	GoogleCloudSQLSystem
	AzureDatabaseSystem
	CrunchyBridgeSystem
	AivenSystem
)

// SystemInfo - Identity and environment of the monitored system. Exactly one
// of the provider-specific pointers (SelfHosted, AmazonRds) is expected to be
// set, matching Type.
type SystemInfo struct {
	Type        SystemType
	SystemScope string
	SystemID    string

	SelfHosted *SystemInfoSelfHosted
	AmazonRds  *SystemInfoAmazonRds

	ResourceTags map[string]string
	BootTime     time.Time
}
// SystemInfoSelfHosted - System information for self-hosted systems (both physical and virtual)
type SystemInfoSelfHosted struct {
	Hostname                 string
	Architecture             string
	OperatingSystem          string
	Platform                 string
	PlatformFamily           string
	PlatformVersion          string
	VirtualizationSystem     string // Name of the virtualization system (only if we're a guest)
	KernelVersion            string
	DatabaseSystemIdentifier string
}

// SystemInfoAmazonRds - System information for Amazon RDS systems,
// mirroring the fields reported by the RDS DescribeDBInstances API.
type SystemInfoAmazonRds struct {
	Region                      string
	InstanceClass               string
	InstanceID                  string
	Status                      string
	AvailabilityZone            string
	PubliclyAccessible          bool
	MultiAz                     bool
	SecondaryAvailabilityZone   string
	CaCertificate               string
	AutoMinorVersionUpgrade     bool
	PreferredMaintenanceWindow  string
	PreferredBackupWindow       string
	LatestRestorableTime        time.Time
	BackupRetentionPeriodDays   int32
	MasterUsername              string
	InitialDbName               string
	CreatedAt                   time.Time
	StorageProvisionedIOPS      int32
	StorageAllocatedGigabytes   int32
	StorageEncrypted            bool
	StorageType                 string
	EnhancedMonitoring          bool
	PerformanceInsights         bool
	PostgresLogExport           bool
	IAMAuthentication           bool
	DeletionProtection          bool
	ParameterApplyStatus        string
	ParameterPgssEnabled        bool
	ParameterAutoExplainEnabled bool
	IsAuroraPostgres            bool
}
// Scheduler - Information about the OS scheduler
type Scheduler struct {
	Loadavg1min  float64
	Loadavg5min  float64
	Loadavg15min float64
}

// Memory - Metrics related to system memory (all byte counts unless the
// field name says otherwise; HugePages* counters are page counts).
type Memory struct {
	TotalBytes         uint64
	CachedBytes        uint64
	BuffersBytes       uint64
	FreeBytes          uint64
	WritebackBytes     uint64
	DirtyBytes         uint64
	SlabBytes          uint64
	MappedBytes        uint64
	PageTablesBytes    uint64
	ActiveBytes        uint64
	InactiveBytes      uint64
	AvailableBytes     uint64
	SwapUsedBytes      uint64
	SwapTotalBytes     uint64
	HugePagesSizeBytes uint64
	HugePagesFree      uint64
	HugePagesTotal     uint64
	HugePagesReserved  uint64
	HugePagesSurplus   uint64
	ApplicationBytes   uint64
}

// CPUInformation - Static description of the CPU hardware.
type CPUInformation struct {
	Model             string
	CacheSizeBytes    int32
	SpeedMhz          float64
	SocketCount       int32
	PhysicalCoreCount int32
	LogicalCoreCount  int32
}
// CPUStatisticMap - Map of all CPU statistics (Key = CPU ID)
type CPUStatisticMap map[string]CPUStatistic

// CPUStatistic - Statistics for a single CPU core
type CPUStatistic struct {
	DiffedOnInput bool // True if has already been diffed on input (and we can simply copy the diff)
	DiffedValues  *DiffedSystemCPUStats

	// Seconds (counter values that need to be diff-ed between runs)
	UserSeconds      float64
	SystemSeconds    float64
	IdleSeconds      float64
	NiceSeconds      float64
	IowaitSeconds    float64
	IrqSeconds       float64
	SoftIrqSeconds   float64
	StealSeconds     float64
	GuestSeconds     float64
	GuestNiceSeconds float64
}

// DiffedSystemCPUStatsMap - Map of all CPU statistics (Key = CPU ID)
type DiffedSystemCPUStatsMap map[string]DiffedSystemCPUStats

// DiffedSystemCPUStats - CPU statistics as percentages (each field is the
// share of total CPU time spent in that state between two runs).
type DiffedSystemCPUStats struct {
	UserPercent      float64
	SystemPercent    float64
	IdlePercent      float64
	NicePercent      float64
	IowaitPercent    float64
	IrqPercent       float64
	SoftIrqPercent   float64
	StealPercent     float64
	GuestPercent     float64
	GuestNicePercent float64
}
// NetworkStatsMap - Map of all network statistics (Key = Interface Name)
type NetworkStatsMap map[string]NetworkStats

// NetworkStats - Information about the network activity on a single interface
type NetworkStats struct {
	DiffedOnInput bool // True if has already been diffed on input (and we can simply copy the diff)
	DiffedValues  *DiffedNetworkStats

	// Cumulative counters; diffed between runs to get per-second rates.
	ReceiveThroughputBytes  uint64
	TransmitThroughputBytes uint64
}

// DiffedNetworkStats - Network statistics for a single interface as a diff
type DiffedNetworkStats struct {
	ReceiveThroughputBytesPerSecond  uint64
	TransmitThroughputBytesPerSecond uint64
}

// DiffedNetworkStatsMap - Map of network statistics as a diff (Key = Interface Name)
type DiffedNetworkStatsMap map[string]DiffedNetworkStats

// Disk - Information about an individual disk device in the system
type Disk struct {
	DiskType        string   // Disk type (hdd/sdd/io1/gp2)
	Scheduler       string   // Linux Scheduler (noop/anticipatory/deadline/cfq)
	ProvisionedIOPS uint32   // If applicable, how many IOPS are provisioned for this device
	Encrypted       bool     // If applicable, is this device encrypted? (default false)
	ComponentDisks  []string // Identifiers for component disks (e.g. for a software RAID)
}

// DiskStats - Statistics about an individual disk device in the system
type DiskStats struct {
	DiffedOnInput bool // True if has already been diffed on input (and we can simply copy the diff)
	DiffedValues  *DiffedDiskStats

	// Counter values
	ReadsCompleted  uint64 // /proc/diskstats 4 - reads completed successfully
	ReadsMerged     uint64 // /proc/diskstats 5 - reads merged
	BytesRead       uint64 // /proc/diskstat 6 - sectors read, multiplied by sector size
	ReadTimeMs      uint64 // /proc/diskstat 7 - time spent reading (ms)
	WritesCompleted uint64 // /proc/diskstats 8 - writes completed
	WritesMerged    uint64 // /proc/diskstats 9 - writes merged
	BytesWritten    uint64 // /proc/diskstat 10 - sectors written, multiplied by sector size
	WriteTimeMs     uint64 // /proc/diskstat 11 - time spent writing (ms)
	AvgQueueSize    int32  // /proc/diskstat 12 - I/Os currently in progress
	IoTime          uint64 // /proc/diskstat 13 - time spent doing I/Os (ms)
}

// DiffedDiskStats - Per-second/averaged disk statistics derived from two runs.
type DiffedDiskStats struct {
	ReadOperationsPerSecond  float64 // The average number of read requests that were issued to the device per second
	ReadsMergedPerSecond     float64 // The average number of read requests merged per second that were queued to the device
	BytesReadPerSecond       float64 // The average number of bytes read from the device per second
	AvgReadLatency           float64 // The average time (in milliseconds) for read requests issued to the device to be served
	WriteOperationsPerSecond float64 // The average number of write requests that were issued to the device per second
	WritesMergedPerSecond    float64 // The average number of write requests merged per second that were queued to the device
	BytesWrittenPerSecond    float64 // The average number of bytes written to the device per second
	AvgWriteLatency          float64 // The average time (in milliseconds) for write requests issued to the device to be served
	AvgQueueSize             int32   // Average I/O operations in flight at the same time (waiting or worked on by the device)
	UtilizationPercent       float64 // Percentage of CPU time during which I/O requests were issued to the device (bandwidth utilization for the device)
}

// DiskMap - Map of all disks (key = device name)
type DiskMap map[string]Disk

// DiskStatsMap - Map of all disk statistics (key = device name)
type DiskStatsMap map[string]DiskStats

// DiffedDiskStatsMap - Map of all diffed disk statistics (key = device name)
type DiffedDiskStatsMap map[string]DiffedDiskStats

// DiskPartition - Information and statistics about one of the disk partitions in the system
type DiskPartition struct {
	DiskName       string // Name of the base device disk that this partition resides on (e.g. /dev/sda)
	PartitionName  string // Platform-specific name of the partition (e.g. /dev/sda1)
	FilesystemType string
	FilesystemOpts string
	UsedBytes      uint64
	TotalBytes     uint64
}

// DiskPartitionMap - Map of all disk partitions (key = mountpoint)
type DiskPartitionMap map[string]DiskPartition
// ---
// DiffSince - Calculate the diff between two CPU stats runs, expressing each
// state's delta as a percentage of the total CPU time elapsed between them.
// Returns the zero value when no time elapsed (avoids division by zero).
func (curr CPUStatistic) DiffSince(prev CPUStatistic) DiffedSystemCPUStats {
	// Per-state deltas, in the same order they are summed and reported.
	deltas := [10]float64{
		curr.UserSeconds - prev.UserSeconds,
		curr.SystemSeconds - prev.SystemSeconds,
		curr.IdleSeconds - prev.IdleSeconds,
		curr.NiceSeconds - prev.NiceSeconds,
		curr.IowaitSeconds - prev.IowaitSeconds,
		curr.IrqSeconds - prev.IrqSeconds,
		curr.SoftIrqSeconds - prev.SoftIrqSeconds,
		curr.StealSeconds - prev.StealSeconds,
		curr.GuestSeconds - prev.GuestSeconds,
		curr.GuestNiceSeconds - prev.GuestNiceSeconds,
	}

	var totalSecs float64
	for _, d := range deltas {
		totalSecs += d
	}
	if totalSecs == 0 {
		return DiffedSystemCPUStats{}
	}

	pct := func(d float64) float64 { return d / totalSecs * 100 }
	return DiffedSystemCPUStats{
		UserPercent:      pct(deltas[0]),
		SystemPercent:    pct(deltas[1]),
		IdlePercent:      pct(deltas[2]),
		NicePercent:      pct(deltas[3]),
		IowaitPercent:    pct(deltas[4]),
		IrqPercent:       pct(deltas[5]),
		SoftIrqPercent:   pct(deltas[6]),
		StealPercent:     pct(deltas[7]),
		GuestPercent:     pct(deltas[8]),
		GuestNicePercent: pct(deltas[9]),
	}
}
// DiffSince - Calculate the diff between two network stats runs
//
// NOTE(review): the uint64 subtraction underflows if the kernel counters
// wrapped or reset between runs (curr < prev), producing a huge bogus rate —
// confirm the caller guarantees monotonic counters here.
func (curr NetworkStats) DiffSince(prev NetworkStats, collectedIntervalSecs uint32) DiffedNetworkStats {
	return DiffedNetworkStats{
		ReceiveThroughputBytesPerSecond:  (curr.ReceiveThroughputBytes - prev.ReceiveThroughputBytes) / uint64(collectedIntervalSecs),
		TransmitThroughputBytesPerSecond: (curr.TransmitThroughputBytes - prev.TransmitThroughputBytes) / uint64(collectedIntervalSecs),
	}
}
// DiffSince - Calculate the diff between two disk stats runs. Counter deltas
// are converted to per-second rates over the collection interval; average
// latencies are only computed when at least one read/write happened, to
// avoid dividing by zero.
func (curr DiskStats) DiffSince(prev DiskStats, collectedIntervalSecs uint32) DiffedDiskStats {
	interval := float64(collectedIntervalSecs)
	reads := float64(curr.ReadsCompleted - prev.ReadsCompleted)
	writes := float64(curr.WritesCompleted - prev.WritesCompleted)

	out := DiffedDiskStats{
		ReadOperationsPerSecond:  reads / interval,
		ReadsMergedPerSecond:     float64(curr.ReadsMerged-prev.ReadsMerged) / interval,
		BytesReadPerSecond:       float64(curr.BytesRead-prev.BytesRead) / interval,
		WriteOperationsPerSecond: writes / interval,
		WritesMergedPerSecond:    float64(curr.WritesMerged-prev.WritesMerged) / interval,
		BytesWrittenPerSecond:    float64(curr.BytesWritten-prev.BytesWritten) / interval,
		AvgQueueSize:             curr.AvgQueueSize,
		// IoTime is in milliseconds, hence the 1000x in the denominator.
		UtilizationPercent: 100 * float64(curr.IoTime-prev.IoTime) / float64(1000*collectedIntervalSecs),
	}
	if reads > 0 {
		out.AvgReadLatency = float64(curr.ReadTimeMs-prev.ReadTimeMs) / reads
	}
	if writes > 0 {
		out.AvgWriteLatency = float64(curr.WriteTimeMs-prev.WriteTimeMs) / writes
	}
	return out
}
|
package main
import (
"flag"
"net/http"
"github.com/mgalela/akses"
"github.com/mgalela/akses/utils"
)
// main wires up the API router, loads configuration from the -config flag,
// and serves HTTP on :9015 until the server stops.
//
// FIX: the error from srv.ListenAndServe() was silently discarded; it is now
// logged (matching the intent of the commented-out code this replaced).
// NOTE(review): BootstrapAPI runs before flags are parsed and Initialize is
// called — confirm the router does not depend on configuration.
func main() {
	r := hard.BootstrapAPI()
	filename := flag.String("config", "config.json", "Path to configuration file")
	flag.Parse()
	hard.Initialize(*filename)
	utils.Log.Info("Starting app server at 9015")
	srv := &http.Server{Addr: ":9015", Handler: r}
	if err := srv.ListenAndServe(); err != nil {
		utils.Log.Error("Starting app server failed, reason = ", err)
	}
}
|
package parser
import (
"errors"
)
// ParseFunction returns a string of the build instruction function.
//
// It collects the declaration line and every following line indented deeper
// than the declaration, consuming scanner lines as it goes, and appends a
// trailing blank line after the body. The scanner call order
// (Peak → PeakLine → NextLine → GetIndentLevel) is load-bearing; do not
// reorder.
func (p *Parser) ParseFunction() (result string, err error) {
	// Peak (sic) inspects the current token without consuming it.
	token := p.scnr.Peak()
	if !token.IsFunction() {
		return result, errors.New("called ParseFunction without the beginning token being a function declaration")
	}
	// The function body is everything indented deeper than its declaration.
	indentLevel := p.scnr.GetIndentLevel()
	for {
		result += p.scnr.PeakLine() + "\n"
		err = p.scnr.NextLine()
		if err != nil {
			// Scanner exhausted (e.g. EOF) mid-function: return what we have
			// along with the error.
			return result, err
		}
		// Dedent back to (or above) the declaration level ends the function.
		if p.scnr.GetIndentLevel() <= indentLevel {
			result += "\n"
			break
		}
	}
	return result, nil
}
|
// 대기오염정보 조회 서비스
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
)
// API_Key is the AirKorea open-API service key (placeholder; supply a real key).
const API_Key = "api key"

// Result1 - one station measurement from getMsrstnAcctoRltmMesureDnsty.
type Result1 struct {
	MangName  string `json:"mangName"`
	DataTime  string `json:"dataTime"`
	KhaiGrade string `json:"khaiGrade"` // composite air-quality index (KHAI)
	KhaiValue string `json:"khaiValue"`
	No2Grade  string `json:"no2Grade"` // nitrogen dioxide
	No2Value  string `json:"no2Value"`
	O3Grade   string `json:"o3Grade"` // ozone
	O3Value   string `json:"o3Value"`
	Pm10Grade string `json:"pm10Grade"` // particulate matter (PM10)
	Pm10Value string `json:"pm10Value"`
	Pm25Grade string `json:"pm25Grade"` // particulate matter (PM2.5)
	Pm25Value string `json:"pm25Value"`
	So2Grade  string `json:"so2Grade"` // sulfur dioxide
	So2Value  string `json:"so2Value"`
	CoGrade   string `json:"coGrade"` // carbon monoxide
	CoValue   string `json:"coValue"`
}

// Response1 wraps the "list" array of Result1 items.
type Response1 struct {
	Results []Result1 `json:"list"`
}

// Result2 - one station entry from the "bad or worse" station list.
type Result2 struct {
	Addr        string `json:"addr"`
	StationName string `json:"stationName"`
}

// Response2 wraps the "list" array of Result2 items.
type Response2 struct {
	Results []Result2 `json:"list"`
}

// Result3 - like Result1 but includes the station name (per-province query).
type Result3 struct {
	MangName    string `json:"mangName"`
	DataTime    string `json:"dataTime"`
	StationName string `json:"stationName"`
	KhaiGrade   string `json:"khaiGrade"` // composite air-quality index (KHAI)
	KhaiValue   string `json:"khaiValue"`
	No2Grade    string `json:"no2Grade"` // nitrogen dioxide
	No2Value    string `json:"no2Value"`
	O3Grade     string `json:"o3Grade"` // ozone
	O3Value     string `json:"o3Value"`
	Pm10Grade   string `json:"pm10Grade"` // particulate matter (PM10)
	Pm10Value   string `json:"pm10Value"`
	Pm25Grade   string `json:"pm25Grade"` // particulate matter (PM2.5)
	Pm25Value   string `json:"pm25Value"`
	So2Grade    string `json:"so2Grade"` // sulfur dioxide
	So2Value    string `json:"so2Value"`
	CoGrade     string `json:"coGrade"` // carbon monoxide
	CoValue     string `json:"coValue"`
}

// Response3 wraps the "list" array of Result3 items.
type Response3 struct {
	Results []Result3 `json:"list"`
}

// Result4 - one forecast dispatch entry (text grades plus chart image URLs).
type Result4 struct {
	DataTime      string `json:"dataTime"`
	ImageUrl1     string `json:"imageUrl1"`
	ImageUrl2     string `json:"imageUrl2"`
	ImageUrl3     string `json:"imageUrl3"`
	ImageUrl4     string `json:"imageUrl4"`
	ImageUrl5     string `json:"imageUrl5"`
	ImageUrl6     string `json:"imageUrl6"`
	ImageUrl7     string `json:"imageUrl7"`
	ImageUrl8     string `json:"imageUrl8"`
	ImageUrl9     string `json:"imageUrl9"`
	InformGrade   string `json:"informGrade"`
	InformOverall string `json:"informOverall"`
}

// Response4 wraps the "list" array of Result4 items.
type Response4 struct {
	Results []Result4 `json:"list"`
}
// getMsrstnAcctoRltmMesureDnsty fetches the latest real-time measurement for
// a single station and prints it. Returns false on any HTTP/JSON error
// (logged), true otherwise.
func getMsrstnAcctoRltmMesureDnsty(sName string) bool {
	sUrl := "http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getMsrstnAcctoRltmMesureDnsty?" +
		"dataTerm=daily&pageNo=1&numOfRows=1&ver=1.3&_returnType=json" +
		"&stationName=" + sName +
		"&ServiceKey=" + API_Key

	resp, err := http.Get(sUrl)
	if err != nil {
		log.Println(err)
		return false
	}
	defer resp.Body.Close()

	payload, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println(err)
		return false
	}

	var parsed Response1
	if err = json.Unmarshal(payload, &parsed); err != nil {
		log.Println(err)
		return false
	}

	for _, val := range parsed.Results {
		fmt.Println("MangName:"+val.MangName, "DataTime:"+val.DataTime)
		fmt.Println("통합지수:" + val.KhaiValue)
		fmt.Println("이산화질소:" + val.No2Value)
		fmt.Println("오존:" + val.O3Value)
		fmt.Println("아황산가스:" + val.So2Value)
		fmt.Println("일산화탄소:" + val.CoValue)
		fmt.Println("미세먼지(pm10):" + val.Pm10Value)
		fmt.Println("미세먼지(pm25):" + val.Pm25Value)
	}
	return true
}
//통합대기환경지수 나쁨 이상 측정소 목록조회
func getUnityAirEnvrnIdexSnstiveAboveMsrstnList() bool {
defaulturl := "http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getUnityAirEnvrnIdexSnstiveAboveMsrstnList?"
params := "ver=1.3&_returnType=json"
ServiceKey := fmt.Sprintf("&ServiceKey=%s", API_Key)
sUrl := fmt.Sprintf("%s%s%s", defaulturl, params, ServiceKey)
res, err := http.Get(sUrl)
if err != nil {
log.Println(err)
return false
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
log.Println(err)
return false
}
var result Response2
err = json.Unmarshal(body, &result)
if err != nil {
log.Println(err)
return false
}
for _, val := range result.Results {
//fmt.Println("StationName:"+val.StationName, "Addr:"+val.Addr)
getMsrstnAcctoRltmMesureDnsty(val.StationName)
}
return true
}
// getCtprvnRltmMesureDnsty fetches real-time measurements for every station
// in the given province/city (sido). Returns (false, zero-value Response3)
// on any HTTP/JSON error (logged).
func getCtprvnRltmMesureDnsty(sName string) (bool, Response3) {
	var parsed Response3
	sUrl := "http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getCtprvnRltmMesureDnsty?" +
		"pageNo=1&numOfRows=10&ver=1.3&_returnType=json" +
		"&sidoName=" + sName +
		"&ServiceKey=" + API_Key

	resp, err := http.Get(sUrl)
	if err != nil {
		log.Println(err)
		return false, parsed
	}
	defer resp.Body.Close()

	payload, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println(err)
		return false, parsed
	}

	if err = json.Unmarshal(payload, &parsed); err != nil {
		log.Println(err)
		return false, parsed
	}
	return true, parsed
}
// getMinuDustFrcstDspth fetches the dust forecast dispatch for the given
// search date (sData, "YYYY-MM-DD"). The boolean reports success; on failure
// the zero-value Response4 accompanies false.
func getMinuDustFrcstDspth(sData string) (bool, Response4) {
	var parsed Response4

	const endpoint = "http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getMinuDustFrcstDspth?"
	requestURL := fmt.Sprintf("%sver=1.3&_returnType=json&searchDate=%s&ServiceKey=%s", endpoint, sData, API_Key)

	resp, err := http.Get(requestURL)
	if err != nil {
		log.Println(err)
		return false, parsed
	}
	defer resp.Body.Close()

	payload, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println(err)
		return false, parsed
	}

	if err = json.Unmarshal(payload, &parsed); err != nil {
		log.Println(err)
		return false, parsed
	}
	return true, parsed
}
// main exercises the AirKorea API helpers: the dust forecast for a fixed
// date, the real-time readings for Seoul, then the readings for every station
// whose air-quality index is "bad" or worse.
func main() {
	if ok, forecast := getMinuDustFrcstDspth("2017-06-26"); ok {
		for _, item := range forecast.Results {
			fmt.Println("DataTime:" + item.DataTime)
			fmt.Println("InformGrade:" + item.InformGrade)
			fmt.Println("InformOverall:" + item.InformOverall)
			fmt.Println("ImageUrl1:" + item.ImageUrl1)
			fmt.Println("ImageUrl2:" + item.ImageUrl2)
			fmt.Println("ImageUrl3:" + item.ImageUrl3)
			fmt.Println("ImageUrl4:" + item.ImageUrl4)
			fmt.Println("ImageUrl5:" + item.ImageUrl5)
			fmt.Println("ImageUrl6:" + item.ImageUrl6)
			fmt.Println("ImageUrl7:" + item.ImageUrl7)
			fmt.Println("ImageUrl8:" + item.ImageUrl8)
			fmt.Println("ImageUrl9:" + item.ImageUrl9)
			fmt.Println()
		}
	}

	if ok, readings := getCtprvnRltmMesureDnsty("서울"); ok {
		for _, row := range readings.Results {
			fmt.Println("StationName:" + row.StationName)
			fmt.Println("MangName:"+row.MangName, "DataTime:"+row.DataTime)
			fmt.Println("통합지수:" + row.KhaiValue)
			fmt.Println("이산화질소:" + row.No2Value)
			fmt.Println("오존:" + row.O3Value)
			fmt.Println("아황산가스:" + row.So2Value)
			fmt.Println("일산화탄소:" + row.CoValue)
			fmt.Println("미세먼지(pm10):" + row.Pm10Value)
			fmt.Println("미세먼지(pm25):" + row.Pm25Value)
			fmt.Println()
		}
	}

	getUnityAirEnvrnIdexSnstiveAboveMsrstnList()
}
|
package hard
import "sort"
// PickWords returns every distinct word of length wordLen that can be formed
// from the characters in charList, each input character used at most once:
// unique character combinations first, then every unique ordering of each.
func PickWords(charList string, wordLen int) []string {
	words := []string{}
	for _, combo := range Subset(charList, wordLen) {
		for _, word := range Permutation(combo) {
			words = append(words, word)
		}
	}
	return words
}
// Subset returns every unique combination of subsetSize characters drawn from
// list. The characters are sorted first so duplicate input characters never
// yield duplicate combinations. When subsetSize is at least the length of
// list, the input string itself is the only result.
func Subset(list string, subsetSize int) []string {
	sorted := []rune(list)
	sort.Slice(sorted, func(a, b int) bool {
		return sorted[a] < sorted[b]
	})

	// Requesting the whole list (or more) yields exactly the input.
	if subsetSize >= len(list) {
		return []string{list}
	}

	out := []string{}
	subsetRecur(sorted, 0, subsetSize, []rune{}, &out)
	return out
}
// subsetRecur appends to *result every combination of targetLen runes that can
// be completed from the prefix tmp by choosing further runes at positions
// >= currIndex. list must be sorted so equal runes are adjacent; skipping a
// rune equal to its left neighbour (except for the first choice at this
// depth) suppresses duplicate combinations.
func subsetRecur(list []rune, currIndex, targetLen int, tmp []rune, result *[]string) {
	// A full-length prefix is one finished combination.
	if targetLen == len(tmp) {
		*result = append(*result, string(tmp))
		return
	}
	for pos := currIndex; pos < len(list); pos++ {
		// Skip duplicate sibling choices at the same tree depth.
		if pos > currIndex && list[pos] == list[pos-1] {
			continue
		}
		subsetRecur(list, pos+1, targetLen, append(tmp, list[pos]), result)
	}
}
// Permutation returns every unique ordering of the characters in str. The
// input is sorted first so that the duplicate skipping in permutationRecur
// starts from adjacent equal runes.
func Permutation(str string) []string {
	letters := []rune(str)
	sort.Slice(letters, func(a, b int) bool {
		return letters[a] < letters[b]
	})

	perms := []string{}
	permutationRecur(letters, 0, len(letters)-1, &perms)
	return perms
}
// permutationRecur emits into *result every permutation of list[start..end],
// fixing one rune at position start per iteration via swap / recurse /
// swap-back. The adjacent-equality check suppresses permutations already
// produced by an identical earlier sibling choice (list arrives sorted).
func permutationRecur(list []rune, start, end int, result *[]string) {
	// Every position fixed: record the current arrangement.
	if end == start {
		*result = append(*result, string(list))
		return
	}
	for idx := start; idx <= end; idx++ {
		if idx > start && list[idx] == list[idx-1] {
			continue
		}
		list[start], list[idx] = list[idx], list[start]
		permutationRecur(list, start+1, end, result)
		// Undo the swap so the caller sees list unchanged.
		list[start], list[idx] = list[idx], list[start]
	}
}
|
package template
// TemplateUsecase is the use-case (business logic) interface for the template
// package. It currently declares no methods.
type TemplateUsecase interface {
}
|
package main
import (
_ "github.com/mattn/go-sqlite3"
)
// main loads the CSV data, renders it as a chart, and writes the chart to
// disk. csvToArray, makeChart and saveFile are defined elsewhere in this
// package.
func main() {
	// Read the raw rows from the CSV source.
	data := csvToArray()
	// Turn the rows into a renderable chart object.
	chart := makeChart(data)
	// Persist the rendered chart.
	saveFile(chart)
}
|
package requests
// EditGuestList is the request payload for editing a guest-list entry.
type EditGuestList struct {
	Table              int64 `json:"table"`               // table number assigned to the guest
	AccompanyingGuests int64 `json:"accompanying_guests"` // number of additional guests arriving with the invitee
}
|
package container
import (
"fmt"
"math/rand"
"os"
"time"
gormProm "gorm.io/plugin/prometheus"
"go.uber.org/zap"
"github.com/lenvendo/ig-absolut-fake-sms/service"
"github.com/lenvendo/ig-absolut-fake-sms/lib/config"
"github.com/lenvendo/ig-absolut-fake-sms/lib/db"
"github.com/lenvendo/ig-absolut-fake-sms/lib/log"
learn "github.com/lenvendo/ig-absolut-fake-sms/web"
)
// Init wires up every dependency of the container — config, logger, GORM
// database, worker service and HTTP codes handler — and returns the container
// itself for chaining.
func (c *Container) Init() *Container {
	println("Container building ...")

	// Seed the global PRNG once at startup.
	rand.Seed(time.Now().Unix())

	c.Config = config.InitConfig()

	lg := c.initLogger(c.Config)
	c.initGorm(c.Config)

	c.WorkerService = service.NewWorkerService(c.Gorm, c.Logger)
	c.CodesHandler = learn.NewCodesHandler(c.Gorm, c.Logger)

	lg.Info("Container is ready")
	return c
}
// initLogger builds the zap logger from configuration, stores it on the
// container, and returns it. A logger initialization failure is fatal to the
// whole process.
func (c *Container) initLogger(cfg config.Config) *zap.Logger {
	l, err := log.NewLoggerFromConfig(cfg.Logger, cfg.IsDebug)
	if err != nil {
		fmt.Printf("unable to initialize logger: %v\n", err)
		os.Exit(1)
	}
	c.Logger = l
	return c.Logger
}
// initGorm opens the GORM database connection, registers the Prometheus
// metrics plugin on it, and stores the handle on the container. Any failure
// is fatal (logged via the container's logger).
func (c *Container) initGorm(cfg config.Config) {
	gormDB, err := db.NewGorm(*cfg.Database, cfg.IsDebug)
	if err != nil {
		c.Logger.Fatal("gorm init failed", zap.Error(err))
	}
	// Expose DB metrics. NOTE(review): RefreshInterval is presumably in
	// seconds — confirm against the gorm prometheus plugin documentation.
	err = gormDB.Use(gormProm.New(gormProm.Config{
		DBName:          cfg.Database.DatabaseName,
		RefreshInterval: 5,
	}))
	if err != nil {
		c.Logger.Fatal("gorm init failed", zap.Error(err))
	}
	c.Gorm = gormDB
}
|
// Constant declaration practice.
package main
import "fmt"
// Four explicitly typed int constants; main prints only a and b.
const (
	a int = 2017
	b int = 2018
	c int = 2019 // declared but not printed by main
	d int = 2020 // declared but not printed by main
)
// main prints the first two practice constants, one per line.
func main() {
	for _, year := range []int{a, b} {
		fmt.Println(year)
	}
}
|
// Copyright 2017 The go-interpreter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package wasm
import (
"bytes"
"debug/dwarf"
"errors"
"fmt"
"io"
"reflect"
"strings"
"github.com/pgavlin/warp/wasm/internal/readpos"
)
// ErrInvalidMagic is returned when a module does not begin with the WASM
// magic number.
var ErrInvalidMagic = errors.New("magic header not detected")

const (
	// Magic is the WASM binary magic number "\0asm" read as a
	// little-endian uint32 (0x00 'a' 's' 'm').
	Magic uint32 = 0x6d736100
	// Version is the only binary format version this decoder accepts.
	Version uint32 = 0x1
)
// Function represents an entry in the function index space of a module.
type Function struct {
	Sig  *FunctionSig  // type signature of the function
	Body *FunctionBody // decoded bytecode body
	Host reflect.Value // Go value backing a host function; zero Value otherwise (see IsHost)
	Name string        // optional debug name
}
// IsHost indicates whether this function is a host function as defined in:
// https://webassembly.github.io/spec/core/exec/modules.html#host-functions
// A function is host-backed exactly when Host holds a non-zero reflect.Value.
func (fct *Function) IsHost() bool {
	var noHost reflect.Value
	return fct.Host != noHost
}
// Module represents a parsed WebAssembly module:
// http://webassembly.org/docs/modules/
type Module struct {
	Version  uint32    // binary format version read from the module header
	Sections []Section // all sections in file order

	// Convenience pointers to the well-known sections.
	Types    *SectionTypes
	Import   *SectionImports
	Function *SectionFunctions
	Table    *SectionTables
	Memory   *SectionMemories
	Global   *SectionGlobals
	Export   *SectionExports
	Start    *SectionStartFunction
	Elements *SectionElements
	Code     *SectionCode
	Data     *SectionData
	Customs  []*SectionCustom // custom sections (names, DWARF, ...)
}
// TableEntry represents a table index and tracks its initialized state.
type TableEntry struct {
	Index       uint32 // index stored in this table slot
	Initialized bool   // whether the slot has been initialized
}
// Names returns the names section. If no names section exists, this function
// returns a MissingSectionError.
func (m *Module) Names() (*NameSection, error) {
	sec := m.Custom(CustomSectionName)
	if sec == nil {
		return nil, MissingSectionError(0)
	}
	var parsed NameSection
	err := parsed.UnmarshalWASM(bytes.NewReader(sec.Data))
	if err != nil {
		return nil, err
	}
	return &parsed, nil
}
// DWARF returns the DWARF debugging info for the module, if any. Debug info
// lives in custom sections named ".debug_*": the core sections are fed to
// debug/dwarf first, then optional DWARF4 type sections and DWARF5 sections
// are attached.
func (m *Module) DWARF() (*dwarf.Data, error) {
	// dwarfSuffix maps a custom section to its short DWARF name
	// (".debug_line" -> "line"); non-DWARF sections map to "".
	dwarfSuffix := func(s *SectionCustom) string {
		switch {
		case strings.HasPrefix(s.Name, ".debug_"):
			return s.Name[7:]
		default:
			return ""
		}
	}
	// There are many DWARF sections, but these are the ones
	// the debug/dwarf package started with.
	var dat = map[string][]byte{"abbrev": nil, "info": nil, "str": nil, "line": nil, "ranges": nil}
	for _, s := range m.Customs {
		suffix := dwarfSuffix(s)
		if suffix == "" {
			continue
		}
		if _, ok := dat[suffix]; !ok {
			continue
		}
		dat[suffix] = s.Data
	}
	d, err := dwarf.New(dat["abbrev"], nil, nil, dat["info"], dat["line"], nil, dat["ranges"], dat["str"])
	if err != nil {
		return nil, err
	}
	// Look for DWARF4 .debug_types sections and DWARF5 sections.
	for i, s := range m.Customs {
		suffix := dwarfSuffix(s)
		if suffix == "" {
			continue
		}
		if _, ok := dat[suffix]; ok {
			// Already handled.
			continue
		}
		if suffix == "types" {
			// DWARF4 type units get a synthetic, unique section name.
			if err := d.AddTypes(fmt.Sprintf("types-%d", i), s.Data); err != nil {
				return nil, err
			}
		} else {
			if err := d.AddSection(".debug_"+suffix, s.Data); err != nil {
				return nil, err
			}
		}
	}
	return d, nil
}
// Custom returns a custom section with a specific name, if it exists;
// otherwise it returns nil.
func (m *Module) Custom(name string) *SectionCustom {
	for i := range m.Customs {
		if candidate := m.Customs[i]; candidate.Name == name {
			return candidate
		}
	}
	return nil
}
// NewModule creates a new empty module with the fixed section pointers
// pre-allocated. Note that Function, Code, Customs, Sections and Version are
// left at their zero values.
func NewModule() *Module {
	return &Module{
		Types:    &SectionTypes{},
		Import:   &SectionImports{},
		Table:    &SectionTables{},
		Memory:   &SectionMemories{},
		Global:   &SectionGlobals{},
		Export:   &SectionExports{},
		Start:    &SectionStartFunction{},
		Elements: &SectionElements{},
		Data:     &SectionData{},
	}
}
// ResolveFunc is a function that takes a module name and
// returns a valid resolved module.
type ResolveFunc func(name string) (*Module, error)
// DecodeModule decodes a WASM module from r: it validates the magic number
// and the binary format version, then reads every section into the returned
// Module.
func DecodeModule(r io.Reader) (*Module, error) {
	src := &readpos.ReadPos{
		R:      r,
		CurPos: 0,
	}

	magic, err := readU32(src)
	switch {
	case err != nil:
		return nil, err
	case magic != Magic:
		return nil, ErrInvalidMagic
	}

	m := &Module{}
	if m.Version, err = readU32(src); err != nil {
		return nil, err
	}
	if m.Version != Version {
		return nil, errors.New("unknown binary version")
	}

	if err = newSectionsReader(m).readSections(src); err != nil {
		return nil, err
	}
	return m, nil
}
// MustDecode decodes a WASM module and panics on failure.
func MustDecode(r io.Reader) *Module {
	mod, err := DecodeModule(r)
	if err == nil {
		return mod
	}
	panic(fmt.Errorf("decoding module: %w", err))
}
|
package agent
import (
"os"
"path"
"strconv"
"strings"
"github.com/sirupsen/logrus"
"github.com/rancher/fleet/internal/config"
"github.com/rancher/wrangler/pkg/name"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkv1 "k8s.io/api/networking/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
var (
	// DebugLevel is the verbosity passed to the agent via --debug-level
	// when debug propagation is enabled (see agentDeployment).
	DebugLevel = 0
)

const (
	// DefaultName is the name used for the fleet-agent deployment and its
	// service account.
	DefaultName = "fleet-agent"
)
// ManifestOptions tunes the generated fleet-agent deployment manifest.
type ManifestOptions struct {
	AgentEnvVars          []corev1.EnvVar              // extra env vars appended to the agent container
	AgentImage            string                       // DefaultAgentImage = "rancher/fleet-agent" + ":" + version.Version
	AgentImagePullPolicy  string                       // kubernetes image pull policy for the agent container
	AgentTolerations      []corev1.Toleration          // extra tolerations appended to the pod spec
	CheckinInterval       string                       // value for the CHECKIN_INTERVAL env var
	PrivateRepoURL        string                       // PrivateRepoURL = registry.yourdomain.com:5000
	SystemDefaultRegistry string                       // registry prefix stripped before applying PrivateRepoURL (see resolve)
	AgentAffinity         *corev1.Affinity             // overrides the default node affinity when set
	AgentResources        *corev1.ResourceRequirements // container resource requirements when set
}
// Manifest builds and returns a deployment manifest for the fleet-agent with a
// cluster role, two service accounts and a network policy
//
// It allows the downstream agent to create any resource on its cluster.
//
// This is called by both, import and manageagent.
func Manifest(namespace string, agentScope string, opts ManifestOptions) []runtime.Object {
	if opts.AgentImage == "" {
		opts.AgentImage = config.DefaultAgentImage
	}

	// Service account the agent runs as; bound below to a wildcard role.
	admin := serviceAccount(namespace, DefaultName)

	logrus.Debugf("Building manifest for fleet-agent in namespace %s (sa: %s)", namespace, admin.Name)

	// Lock down the namespace's default service account: AutomountServiceAccountToken
	// points at false (new(bool)), so no API token is mounted into pods using it.
	defaultSa := serviceAccount(namespace, "default")
	defaultSa.AutomountServiceAccountToken = new(bool)

	// Wildcard (*/*/*) cluster role plus its binding to the agent's service account.
	clusterRole := []runtime.Object{
		&rbacv1.ClusterRole{
			ObjectMeta: metav1.ObjectMeta{
				Name: name.SafeConcatName(admin.Namespace, admin.Name, "role"),
			},
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbacv1.VerbAll},
					APIGroups: []string{rbacv1.APIGroupAll},
					Resources: []string{rbacv1.ResourceAll},
				},
			},
		},
		&rbacv1.ClusterRoleBinding{
			ObjectMeta: metav1.ObjectMeta{
				Name: name.SafeConcatName(admin.Namespace, admin.Name, "role", "binding"),
			},
			Subjects: []rbacv1.Subject{
				{
					Kind:      "ServiceAccount",
					Name:      admin.Name,
					Namespace: admin.Namespace,
				},
			},
			RoleRef: rbacv1.RoleRef{
				APIGroup: rbacv1.GroupName,
				Kind:     "ClusterRole",
				Name:     name.SafeConcatName(admin.Namespace, admin.Name, "role"),
			},
		},
	}

	// if debug is enabled in controller, enable in agents too (unless otherwise specified)
	propagateDebug, _ := strconv.ParseBool(os.Getenv("FLEET_PROPAGATE_DEBUG_SETTINGS_TO_AGENTS"))
	debug := logrus.IsLevelEnabled(logrus.DebugLevel) && propagateDebug
	deployment := agentDeployment(namespace, agentScope, opts, debug)

	// Allow all ingress and egress for every pod in the agent namespace
	// (empty rules + empty pod selector).
	networkPolicy := &networkv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "default-allow-all",
			Namespace: namespace,
		},
		Spec: networkv1.NetworkPolicySpec{
			PolicyTypes: []networkv1.PolicyType{
				networkv1.PolicyTypeIngress,
				networkv1.PolicyTypeEgress,
			},
			Ingress: []networkv1.NetworkPolicyIngressRule{
				{},
			},
			Egress: []networkv1.NetworkPolicyEgressRule{
				{},
			},
			PodSelector: metav1.LabelSelector{},
		},
	}

	var objs []runtime.Object
	objs = append(objs, clusterRole...)
	objs = append(objs, admin, defaultSa, deployment, networkPolicy)

	return objs
}
// resolve rewrites image for a private registry: when a private registry
// prefix is configured, the system default registry prefix (global) is
// stripped first, then prefix is prepended unless the image already carries
// it. With no prefix configured the image is returned unchanged.
func resolve(global, prefix, image string) string {
	if prefix == "" {
		return image
	}
	if global != "" {
		image = strings.TrimPrefix(image, global)
	}
	if strings.HasPrefix(image, prefix) {
		return image
	}
	return path.Join(prefix, image)
}
// agentDeployment builds the fleet-agent Deployment for the given namespace.
// The single container runs opts.AgentImage (resolved against the registry
// settings), prefers nodes labeled "fleet.cattle.io/agent=true" and tolerates
// the standard bootstrap taints. When debug is false the pod and container
// are hardened (non-root, read-only rootfs, no capabilities); when debug is
// true the agent binary is invoked with --debug/--debug-level instead.
func agentDeployment(namespace string, agentScope string, opts ManifestOptions, debug bool) *appsv1.Deployment {
	name := DefaultName
	serviceAccount := DefaultName

	image := resolve(opts.SystemDefaultRegistry, opts.PrivateRepoURL, opts.AgentImage)

	dep := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      name,
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": name,
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": name,
					},
				},
				Spec: corev1.PodSpec{
					ServiceAccountName: serviceAccount,
					Containers: []corev1.Container{
						{
							Name:            name,
							Image:           image,
							ImagePullPolicy: corev1.PullPolicy(opts.AgentImagePullPolicy),
							Env: []corev1.EnvVar{
								{
									// The agent discovers its own namespace via the downward API.
									Name: "NAMESPACE",
									ValueFrom: &corev1.EnvVarSource{
										FieldRef: &corev1.ObjectFieldSelector{
											FieldPath: "metadata.namespace",
										},
									},
								},
								{Name: "AGENT_SCOPE", Value: agentScope},
								{Name: "CHECKIN_INTERVAL", Value: opts.CheckinInterval},
							},
						},
					},
					NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
					Affinity: &corev1.Affinity{
						NodeAffinity: &corev1.NodeAffinity{
							PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{
								{
									Weight: 1,
									Preference: corev1.NodeSelectorTerm{
										MatchExpressions: []corev1.NodeSelectorRequirement{
											{
												Key:      "fleet.cattle.io/agent",
												Operator: corev1.NodeSelectorOpIn,
												Values:   []string{"true"},
											},
										},
									},
								},
							},
						},
					},
					Tolerations: []corev1.Toleration{
						{
							Key:      "node.cloudprovider.kubernetes.io/uninitialized",
							Operator: corev1.TolerationOpEqual,
							Value:    "true",
							Effect:   corev1.TaintEffectNoSchedule,
						},
						{
							Key:      "cattle.io/os",
							Operator: corev1.TolerationOpEqual,
							Value:    "linux",
							Effect:   corev1.TaintEffectNoSchedule,
						},
					},
				},
			},
		},
	}
	if !debug {
		// BUG FIX: the original ranged over the containers by value, so the
		// SecurityContext was assigned to a copy and silently discarded.
		// Index into the slice so the hardened context lands on the real
		// container spec.
		for i := range dep.Spec.Template.Spec.Containers {
			dep.Spec.Template.Spec.Containers[i].SecurityContext = &corev1.SecurityContext{
				AllowPrivilegeEscalation: &[]bool{false}[0],
				ReadOnlyRootFilesystem:   &[]bool{true}[0],
				Privileged:               &[]bool{false}[0],
				Capabilities:             &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}},
			}
		}
		dep.Spec.Template.Spec.SecurityContext = &corev1.PodSecurityContext{
			RunAsNonRoot: &[]bool{true}[0],
			RunAsUser:    &[]int64{1000}[0],
			RunAsGroup:   &[]int64{1000}[0],
		}
	}

	// additional tolerations from cluster
	dep.Spec.Template.Spec.Tolerations = append(dep.Spec.Template.Spec.Tolerations, opts.AgentTolerations...)

	// overwrite affinity if present on cluster
	if opts.AgentAffinity != nil {
		dep.Spec.Template.Spec.Affinity = opts.AgentAffinity
	}

	// set resources if present on cluster
	if opts.AgentResources != nil {
		dep.Spec.Template.Spec.Containers[0].Resources = *opts.AgentResources
	}

	// additional env vars from cluster
	if opts.AgentEnvVars != nil {
		dep.Spec.Template.Spec.Containers[0].Env = append(dep.Spec.Template.Spec.Containers[0].Env, opts.AgentEnvVars...)
	}

	if debug {
		dep.Spec.Template.Spec.Containers[0].Command = []string{
			"fleetagent",
			"--debug",
			"--debug-level",
			strconv.Itoa(DebugLevel),
		}
	}
	return dep
}
// serviceAccount returns a minimal ServiceAccount object carrying only the
// given namespace and name.
func serviceAccount(namespace, name string) *corev1.ServiceAccount {
	sa := corev1.ServiceAccount{}
	sa.Name = name
	sa.Namespace = namespace
	return &sa
}
|
package main
import (
"github.com/vicanso/elton"
staticServe "github.com/vicanso/elton-static-serve"
)
// main starts an elton HTTP server on :3000 serving static files from /tmp
// with aggressive cache headers.
func main() {
	e := elton.New()

	sf := new(staticServe.FS)
	// static file route
	e.GET("/*file", staticServe.New(sf, staticServe.Config{
		Path: "/tmp",
		// Browser (client) cache: one year.
		MaxAge: 365 * 24 * 3600,
		// Shared/proxy cache: one hour.
		SMaxAge:             60 * 60,
		DenyQueryString:     true,
		DisableLastModified: true,
		// packr does not support Stat, so a strong ETag is required.
		EnableStrongETag: true,
	}))

	err := e.ListenAndServe(":3000")
	if err != nil {
		panic(err)
	}
}
|
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package arc
import (
"github.com/shirou/gopsutil/v3/process"
"chromiumos/tast/errors"
"chromiumos/tast/local/sysutil"
)
// errInitNotFound is the sentinel returned by InitPID when no ARC init process is running.
var errInitNotFound = errors.New("didn't find init process")
// getUserPath returns the user and the path to the entry point of ARC:
// the crosvm binary for ARCVM, or the android-root init binary for
// container-based ARC.
func getUserPath() (user, path string, err error) {
	vm, err := VMEnabled()
	switch {
	case err != nil:
		return "", "", errors.Wrap(err, "failed to determine if ARCVM is enabled")
	case vm:
		return "crosvm", "/usr/bin/crosvm", nil
	default:
		return "android-root", "/init", nil
	}
}
// InitPID returns the PID (outside the guest) of the ARC init process.
// It returns an error in case process is not found.
func InitPID() (int32, error) {
	u, initPath, err := getUserPath()
	if err != nil {
		return -1, err
	}
	uid, err := sysutil.GetUID(u)
	if err != nil {
		return -1, err
	}
	procs, err := process.Processes()
	if err != nil {
		return -1, errors.Wrap(err, "failed to list processes")
	}
	// Scan all processes for one owned by the expected user whose executable
	// is the ARC entry point. NOTE(review): uids[0] is presumably the real
	// UID per gopsutil's Uids ordering — confirm. Per-process errors are
	// ignored deliberately: processes may exit during the scan.
	for _, p := range procs {
		if uids, err := p.Uids(); err == nil && uint32(uids[0]) == uid {
			if exe, err := p.Exe(); err == nil && exe == initPath {
				return p.Pid, nil
			}
		}
	}
	return -1, errInitNotFound
}
// InitExists returns true in case ARC init process exists. The "not found"
// sentinel is translated into (false, nil); other lookup failures are
// returned as errors.
func InitExists() (bool, error) {
	if _, err := InitPID(); err != nil {
		if errors.Is(err, errInitNotFound) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
|
package com
import (
"JsGo/JsHttp"
. "JsGo/JsLogger"
"JsGo/JsStore/JsRedis"
"JunSie/constant"
)
// InitShow registers the HTTP routes served by this package.
func InitShow() {
	JsHttp.WhiteHttps("/getshowartone", GetShowArtOne) // fetch the home-page product board
}
// Showone is a single article/product card shown on the home page.
type Showone struct {
	Title     string   // title
	UserHead  string   // user avatar
	SubTitle  string   // subtitle
	Brief     string   // short description
	Question  string   // question text
	Showpic   string   // showcase image
	Timestamp int64    // time
	Type      string   // category/type
	IDs       []string // related IDs
}
// GetShowArtOne returns the cached home-page product-board entries from
// Redis. Response code "0" signals success, "2" a redis/decode failure.
func GetShowArtOne(s *JsHttp.Session) {
	data := make([]Showone, 0)
	e := JsRedis.Redis_get(constant.SHOWARTONE, &data)
	if e != nil {
		Error(e.Error())
		s.Forward("2", e.Error(), "")
		return
	}
	// NOTE(review): "sucess" is misspelled, but clients may match this exact
	// string — confirm before correcting.
	s.Forward("0", "sucess", data)
}
|
package docker
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/volume"
"github.com/docker/docker/client"
"github.com/moby/moby/pkg/jsonmessage"
"github.com/moby/term"
)
// Agent is a Docker client for performing operations that interact
// with the Docker engine over REST
type Agent struct {
	authGetter *AuthGetter     // resolves registry credentials; nil means anonymous/public use
	client     *client.Client  // underlying Docker engine API client
	ctx        context.Context // context used for engine calls
	label      string          // label marking resources managed by this tool
}
// CreateLocalVolumeIfNotExist creates a volume using driver type "local" with the
// given name if it does not exist. If the volume does exist but does not contain
// the required label (a.label), an error is thrown.
func (a *Agent) CreateLocalVolumeIfNotExist(name string) (*types.Volume, error) {
	listing, err := a.client.VolumeList(a.ctx, filters.Args{})
	if err != nil {
		return nil, a.handleDockerClientErr(err, "Could not list volumes")
	}

	for _, existing := range listing.Volumes {
		labelVal, labeled := existing.Labels[a.label]
		// Managed volume with a matching name: reuse it.
		if labeled && labelVal == "true" && existing.Name == name {
			return existing, nil
		}
		// Unlabeled volume squatting on the name: refuse to touch it.
		if !labeled && existing.Name == name {
			return nil, fmt.Errorf("volume conflict for %s: please remove existing volume and try again", name)
		}
	}

	return a.CreateLocalVolume(name)
}
// CreateLocalVolume creates a volume using driver type "local" with no
// configured options. The equivalent of:
//
// docker volume create --driver local [name]
func (a *Agent) CreateLocalVolume(name string) (*types.Volume, error) {
	created, err := a.client.VolumeCreate(a.ctx, volume.VolumeCreateBody{
		Name:   name,
		Driver: "local",
		Labels: map[string]string{a.label: "true"},
	})
	if err != nil {
		return nil, a.handleDockerClientErr(err, "Could not create volume "+name)
	}
	return &created, nil
}
// RemoveLocalVolume removes a volume by name. The final argument forces
// removal (the equivalent of `docker volume rm -f`).
func (a *Agent) RemoveLocalVolume(name string) error {
	return a.client.VolumeRemove(a.ctx, name, true)
}
// CreateBridgeNetworkIfNotExist creates a network using the default (bridge)
// driver with the given name if it does not exist. If the network does exist
// but does not contain the required label (a.label), an error is returned.
// (Doc and error text previously said "volume" — copy-paste from the volume
// helpers.)
func (a *Agent) CreateBridgeNetworkIfNotExist(name string) (id string, err error) {
	networks, err := a.client.NetworkList(a.ctx, types.NetworkListOptions{})
	if err != nil {
		// Fix: this call lists networks, not volumes.
		return "", a.handleDockerClientErr(err, "Could not list networks")
	}

	for _, net := range networks {
		if contains, ok := net.Labels[a.label]; ok && contains == "true" && net.Name == name {
			// Managed network with a matching name: reuse it.
			return net.ID, nil
		} else if !ok && net.Name == name {
			// Unlabeled network squatting on the name: refuse to touch it.
			return "", fmt.Errorf("network conflict for %s: please remove existing network and try again", name)
		}
	}

	return a.CreateBridgeNetwork(name)
}
// CreateBridgeNetwork creates a network using the default driver type
// (bridge) with the CLI management label attached.
func (a *Agent) CreateBridgeNetwork(name string) (id string, err error) {
	labels := make(map[string]string)
	labels[a.label] = "true"

	opts := types.NetworkCreate{
		Labels:     labels,
		Attachable: true,
	}

	net, err := a.client.NetworkCreate(a.ctx, name, opts)
	if err != nil {
		return "", a.handleDockerClientErr(err, "Could not create network "+name)
	}

	return net.ID, nil
}
// ConnectContainerToNetwork attaches a container to a specified network.
// If the container is already connected, the call is a no-op.
func (a *Agent) ConnectContainerToNetwork(networkID, containerID, containerName string) error {
	// check if the container is connected already
	net, err := a.client.NetworkInspect(a.ctx, networkID, types.NetworkInspectOptions{})

	if err != nil {
		// Fix: add the missing space so the message does not run straight
		// into the network ID.
		return a.handleDockerClientErr(err, "Could not inspect network "+networkID)
	}

	for _, cont := range net.Containers {
		// if container is connected, just return
		if cont.Name == containerName {
			return nil
		}
	}

	return a.client.NetworkConnect(a.ctx, networkID, containerID, &network.EndpointSettings{})
}
// TagImage applies the tag new to the image referenced by old
// (the equivalent of `docker tag old new`).
func (a *Agent) TagImage(old, new string) error {
	return a.client.ImageTag(a.ctx, old, new)
}
// PullImageEvent represents a response from the Docker API with an image pull event
type PullImageEvent struct {
	Status   string `json:"status"`   // human-readable pull status line
	Error    string `json:"error"`    // error message, if the event reports one
	Progress string `json:"progress"` // rendered progress bar text
	ProgressDetail struct {
		Current int `json:"current"` // bytes transferred so far
		Total   int `json:"total"`   // total bytes for this layer
	} `json:"progressDetail"`
}

// PullImageErrNotFound is returned by PullImage when the requested image does
// not exist in the registry.
var PullImageErrNotFound = fmt.Errorf("Requested image not found")

// PullImageErrUnauthorized is returned by PullImage when the registry rejects
// the pull for lack of valid credentials.
var PullImageErrUnauthorized = fmt.Errorf("Could not pull image: unauthorized")
// PullImage pulls an image specified by the image string, streaming the
// engine's progress output to stderr. Not-found and unauthorized failures
// are mapped to the PullImageErr* sentinels.
func (a *Agent) PullImage(image string) error {
	opts, err := a.getPullOptions(image)

	if err != nil {
		return err
	}

	// pull the specified image
	out, err := a.client.ImagePull(a.ctx, image, opts)

	if err != nil {
		if client.IsErrNotFound(err) {
			return PullImageErrNotFound
		} else if client.IsErrUnauthorized(err) {
			return PullImageErrUnauthorized
		} else {
			return a.handleDockerClientErr(err, "Could not pull image "+image)
		}
	}

	defer out.Close()

	// Render the engine's JSON progress messages on the terminal.
	termFd, isTerm := term.GetFdInfo(os.Stderr)

	return jsonmessage.DisplayJSONMessagesStream(out, os.Stderr, termFd, isTerm, nil)
}
// PushImage pushes an image specified by the image string, streaming the
// engine's progress output to stderr.
func (a *Agent) PushImage(image string) error {
	opts, err := a.getPushOptions(image)

	if err != nil {
		return err
	}

	out, err := a.client.ImagePush(
		context.Background(),
		image,
		opts,
	)

	// The stream may be non-nil even when err is set; close it either way.
	if out != nil {
		defer out.Close()
	}

	if err != nil {
		return err
	}

	// Render the engine's JSON progress messages on the terminal.
	termFd, isTerm := term.GetFdInfo(os.Stderr)

	return jsonmessage.DisplayJSONMessagesStream(out, os.Stderr, termFd, isTerm, nil)
}
// getPullOptions builds the registry-auth options for pulling image. Without
// a configured authGetter the pull is anonymous (public registries).
func (a *Agent) getPullOptions(image string) (types.ImagePullOptions, error) {
	// check if agent has an auth getter; otherwise, assume public usage
	if a.authGetter == nil {
		return types.ImagePullOptions{}, nil
	}

	// get using server url
	serverURL, err := GetServerURLFromTag(image)

	if err != nil {
		return types.ImagePullOptions{}, err
	}

	user, secret, err := a.authGetter.GetCredentials(serverURL)

	if err != nil {
		return types.ImagePullOptions{}, err
	}

	var authConfig = types.AuthConfig{
		Username:      user,
		Password:      secret,
		ServerAddress: "https://" + serverURL,
	}

	// The engine API expects the auth config as base64-url-encoded JSON.
	authConfigBytes, _ := json.Marshal(authConfig)
	authConfigEncoded := base64.URLEncoding.EncodeToString(authConfigBytes)

	return types.ImagePullOptions{
		RegistryAuth: authConfigEncoded,
	}, nil
}
// getPushOptions builds the registry-auth options for pushing image; push and
// pull options share the same shape, so this defers to getPullOptions.
func (a *Agent) getPushOptions(image string) (types.ImagePushOptions, error) {
	pullOpts, err := a.getPullOptions(image)

	return types.ImagePushOptions(pullOpts), err
}
// GetServerURLFromTag derives the registry server URL from a fully qualified
// image reference. A reference with no explicit domain resolves to Docker
// Hub ("index.docker.io/v1").
func GetServerURLFromTag(image string) (string, error) {
	named, err := reference.ParseNamed(image)
	if err != nil {
		return "", err
	}

	if domain := reference.Domain(named); domain != "" {
		return domain, nil
	}

	// if domain name is empty, use index.docker.io/v1
	return "index.docker.io/v1", nil
}
// func imagePush(dockerClient *client.Client) error {
// ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
// defer cancel()
// authConfigBytes, _ := json.Marshal(authConfig)
// authConfigEncoded := base64.URLEncoding.EncodeToString(authConfigBytes)
// tag := dockerRegistryUserID + "/node-hello"
// opts := types.ImagePushOptions{RegistryAuth: authConfigEncoded}
// rd, err := dockerClient.ImagePush(ctx, tag, opts)
// if err != nil {
// return err
// }
// defer rd.Close()
// err = print(rd)
// if err != nil {
// return err
// }
// return nil
// }
// WaitForContainerStop waits until a container has stopped to exit
func (a *Agent) WaitForContainerStop(id string) error {
	// wait for container to stop before exit
	statusCh, errCh := a.client.ContainerWait(a.ctx, id, container.WaitConditionNotRunning)

	// Whichever channel fires first resolves the wait.
	select {
	case err := <-errCh:
		if err != nil {
			return a.handleDockerClientErr(err, "Error waiting for stopped container")
		}
	case <-statusCh:
	}

	return nil
}
// WaitForContainerHealthy waits until a container is returning a healthy status. Streak
// is the maximum number of failures in a row, while timeout is the length of time between
// checks.
// Containers without a configured healthcheck (State.Health == nil) count as
// healthy immediately; the poll interval is one second.
func (a *Agent) WaitForContainerHealthy(id string, streak int) error {
	for {
		cont, err := a.client.ContainerInspect(a.ctx, id)

		if err != nil {
			// NOTE(review): message says "stopped container" — looks copied
			// from WaitForContainerStop; confirm before changing.
			return a.handleDockerClientErr(err, "Error waiting for stopped container")
		}

		health := cont.State.Health

		if health == nil || health.Status == "healthy" {
			return nil
		} else if health.FailingStreak >= streak {
			// Too many consecutive failures: give up.
			break
		}

		time.Sleep(time.Second)
	}

	return errors.New("container not healthy")
}
// ------------------------- AGENT HELPER FUNCTIONS ------------------------- //

// handleDockerClientErr normalizes Docker client errors: a daemon-connection
// failure becomes a friendly actionable message; anything else is prefixed
// with errPrefix.
func (a *Agent) handleDockerClientErr(err error, errPrefix string) error {
	if strings.Contains(err.Error(), "Cannot connect to the Docker daemon") {
		return fmt.Errorf("The Docker daemon must be running in order to start Porter: connection to %s failed", a.client.DaemonHost())
	}

	// Fix: the original "%s:%s" ran the prefix straight into the cause; use
	// ": " and %w so the cause is both readable and unwrappable.
	return fmt.Errorf("%s: %w", errPrefix, err)
}
|
package sdf
import (
"fmt"
"image"
"github.com/macroblock/sdf/pkg/gfx"
)
type (
	// TileBuilder - sequentially creates named tiles ("<prefix>/NNN"),
	// advancing counter for each tile produced (see genName).
	TileBuilder struct {
		prefix  string // tile name prefix; defaults to "/" (see BuildTileSet)
		counter uint   // next tile number
	}
	// TileTemplateBuilder - records tile parameters so the same layout can
	// be instantiated later with a different prefix/offset/flip (see Build).
	TileTemplateBuilder struct {
		params []tileTemplateType
	}
	// tileTemplateType - the parameters of one recorded Tile call.
	tileTemplateType struct {
		offs   int
		extend *image.Rectangle
		flip   gfx.FlipMode
	}
)
// BuildTileSet - returns a TileBuilder that names tiles "<prefix>/NNN",
// numbering from counter. An empty prefix defaults to "/".
func BuildTileSet(prefix string, counter uint) *TileBuilder {
	if len(prefix) == 0 {
		prefix = "/"
	}
	return &TileBuilder{prefix: prefix, counter: counter}
}
// Tile - creates the next tile of the set from the given non-negative offset,
// passing extend and flip through to CreateTile. A negative offset records a
// package-level error and leaves the set unchanged.
func (o *TileBuilder) Tile(offs int, extend *image.Rectangle, flip gfx.FlipMode) *TileBuilder {
	if offs < 0 {
		setError(fmt.Errorf("negative tile offset"))
		return o
	}
	name := o.genName()
	// NOTE(review): the -(offs+1) encoding is opaque here — presumably a
	// sentinel understood by CreateTile; confirm before changing.
	CreateTile(name, -(offs + 1), 0, extend, flip)
	return o
}
// BuildTileTemplate - returns an empty template; record tiles with Tile and
// instantiate them later with Build.
func BuildTileTemplate() *TileTemplateBuilder {
	return &TileTemplateBuilder{}
}
// Tile - records one tile's parameters for later instantiation via Build.
func (o *TileTemplateBuilder) Tile(offs int, extend *image.Rectangle, flip gfx.FlipMode) *TileTemplateBuilder {
	// Copy the rectangle so later mutation of the caller's value cannot
	// change the stored template.
	if extend != nil {
		ext := *extend
		extend = &ext
	}
	o.params = append(o.params, tileTemplateType{offs: offs, extend: extend, flip: flip})
	return o
}
// Build - instantiates the recorded tiles as a new tile set named by prefix,
// shifting each recorded offset by baseOffs and XOR-combining each recorded
// flip with the given flip.
func (o *TileTemplateBuilder) Build(prefix string, baseOffs int, flip gfx.FlipMode) *TileBuilder {
	builder := BuildTileSet(prefix, 0)
	for i := range o.params {
		params := &o.params[i]
		builder.Tile(params.offs+baseOffs, params.extend, params.flip^flip)
	}
	return builder
}
// genName - returns the absolute tile path for the current counter value
// ("<prefix>/NNN", zero-padded to three digits) and advances the counter.
func (o *TileBuilder) genName() string {
	ret := joinPaths(o.prefix, fmt.Sprintf("%03d", o.counter))
	ret = AbsTilePath(ret)
	o.counter++
	return ret
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.