text
stringlengths 11
4.05M
|
|---|
// Copyright 2018 Aleksandr Demakin. All rights reserved.
package printer
import (
"fmt"
"io"
"sync"
"github.com/avdva/unravel/card"
)
// cardData accumulates per-session state from card form events: the page
// and session identity, the most recent window resize, per-form copy/paste
// flags, and the form completion time.
type cardData struct {
	WebsiteUrl string
	SessionId string
	ResizeFrom card.Dimension
	ResizeTo card.Dimension
	CopyAndPaste map[string]bool // keyed by form field id; true if content was pasted
	FormCompletionTime int
}
// Logger writes logs. A nil Logger disables logging in PrintHandler.
type Logger interface {
	Printf(format string, args ...interface{})
}
// StringHasher returns the hash of a string; used to hash website URLs on submit.
type StringHasher func(s string) []byte
// PrintHandler processes card events and prints them to 'out'.
// The mutex m guards the data map, which is keyed by session id.
type PrintHandler struct {
	out io.Writer
	hasher StringHasher
	l Logger
	m sync.RWMutex
	data map[string]cardData
}
// New returns a PrintHandler that writes event dumps to out, hashes
// website URLs with hasher, and reports write failures through l
// (which may be nil).
func New(out io.Writer, hasher StringHasher, l Logger) *PrintHandler {
	ph := &PrintHandler{
		out:    out,
		hasher: hasher,
		l:      l,
	}
	ph.data = make(map[string]cardData)
	return ph
}
// OnResize is a window resize callback; it records the resize and prints
// the updated session state.
func (ph *PrintHandler) OnResize(h card.EventHeader, from, to card.Dimension) {
	updated := ph.doResize(h, from, to)
	ph.doPrint(fmt.Sprintf("%+v\n", updated))
}
// doResize stores the resize under the write lock and returns a detached
// copy of the session data, safe to use after the lock is released.
func (ph *PrintHandler) doResize(h card.EventHeader, from, to card.Dimension) cardData {
	ph.m.Lock()
	defer ph.m.Unlock()
	d := ph.ensureData(h)
	d.ResizeFrom, d.ResizeTo = from, to
	ph.writeData(d)
	return copyData(&d)
}
// OnCopyPaste is a copy/paste event callback.
func (ph *PrintHandler) OnCopyPaste(h card.EventHeader, form string, pasted bool) {
	data := ph.doCopyPaste(h, form, pasted)
	ph.doPrint(fmt.Sprintf("%+v\n", data))
}
// doCopyPaste records the paste flag for a form field under the write lock
// and returns a detached copy of the session data.
func (ph *PrintHandler) doCopyPaste(h card.EventHeader, form string, pasted bool) cardData {
	ph.m.Lock()
	defer ph.m.Unlock()
	d := ph.ensureData(h)
	d.CopyAndPaste[form] = pasted
	ph.writeData(d)
	return copyData(&d)
}
// OnSubmit is a submit button callback; it records the completion time and
// prints the session state together with the hashed website URL.
func (ph *PrintHandler) OnSubmit(h card.EventHeader, time int) {
	updated := ph.doSubmit(h, time)
	line := fmt.Sprintf("%+v, hash = %X\n", updated, ph.hasher(updated.WebsiteUrl))
	ph.doPrint(line)
}
// doSubmit stores the form completion time under the write lock and
// returns a detached copy of the session data.
func (ph *PrintHandler) doSubmit(h card.EventHeader, time int) cardData {
	ph.m.Lock()
	defer ph.m.Unlock()
	d := ph.ensureData(h)
	d.FormCompletionTime = time
	ph.writeData(d)
	return copyData(&d)
}
// doPrint writes s to the configured writer, logging failures when a
// logger was provided.
func (ph *PrintHandler) doPrint(s string) {
	_, err := ph.out.Write([]byte(s))
	if err != nil && ph.l != nil {
		ph.l.Printf("ph: failed to write: %v", err)
	}
}
// ensureData returns existing, or creates new cardData.
// needs ph.m to be Locked.
// Note: a newly created entry is returned but NOT stored here; callers
// persist it via writeData after mutating it.
func (ph *PrintHandler) ensureData(h card.EventHeader) cardData {
	data, found := ph.data[h.SessionID]
	if !found {
		data.SessionId = h.SessionID
		data.WebsiteUrl = h.WebsiteURL
		data.CopyAndPaste = make(map[string]bool)
	}
	return data
}
// writeData stores cardData under its session id.
// needs ph.m to be Locked.
func (ph *PrintHandler) writeData(data cardData) {
	key := data.SessionId
	ph.data[key] = data
}
// copyData returns a deep copy of *data, including a fresh CopyAndPaste
// map, so the result can be used after the handler's lock is released.
func copyData(data *cardData) cardData {
	result := *data
	// Pre-size the map to avoid rehashing while copying.
	result.CopyAndPaste = make(map[string]bool, len(data.CopyAndPaste))
	for formID, pasted := range data.CopyAndPaste {
		result.CopyAndPaste[formID] = pasted
	}
	return result
}
|
package model
import (
"time"
"github.com/caos/zitadel/internal/model"
)
// OrgMemberView is the read-model projection of an organization member.
type OrgMemberView struct {
	UserID string
	OrgID string
	UserName string
	Email string
	FirstName string
	LastName string
	DisplayName string
	Roles []string
	CreationDate time.Time
	ChangeDate time.Time
	Sequence uint64
}
// OrgMemberSearchRequest describes paging, sorting and filtering for an
// org-member search.
type OrgMemberSearchRequest struct {
	Offset uint64
	Limit uint64
	SortingColumn OrgMemberSearchKey
	Asc bool
	Queries []*OrgMemberSearchQuery
}
// OrgMemberSearchKey identifies the column a search query or sort order applies to.
type OrgMemberSearchKey int32
const (
	// OrgMemberSearchKeyUnspecified selects no particular column.
	OrgMemberSearchKeyUnspecified OrgMemberSearchKey = iota
	OrgMemberSearchKeyUserName
	OrgMemberSearchKeyEmail
	OrgMemberSearchKeyFirstName
	OrgMemberSearchKeyLastName
	OrgMemberSearchKeyOrgID
	OrgMemberSearchKeyUserID
)
// OrgMemberSearchQuery is a single filter: column Key compared to Value
// using Method.
type OrgMemberSearchQuery struct {
	Key OrgMemberSearchKey
	Method model.SearchMethod
	Value interface{}
}
// OrgMemberSearchResponse is one page of search results plus totals and
// the projection state (Sequence/Timestamp) the page was read at.
type OrgMemberSearchResponse struct {
	Offset uint64
	Limit uint64
	TotalResult uint64
	Result []*OrgMemberView
	Sequence uint64
	Timestamp time.Time
}
// EnsureLimit clamps the request's Limit to at most limit and applies
// limit when none was set.
func (r *OrgMemberSearchRequest) EnsureLimit(limit uint64) {
	switch {
	case r.Limit == 0, r.Limit > limit:
		r.Limit = limit
	}
}
|
package main
import (
"fmt"
"net/http"
"os"
"os/exec"
"strings"
)
// main starts a small debug HTTP server.
//
//	"/"     echoes the request host and path.
//	"/exec" runs a command taken verbatim from the "q" query parameter.
//
// SECURITY: /exec executes untrusted input (command injection by design);
// never expose this server publicly.
func main() {
	p := os.Getenv("PORT")
	if len(p) == 0 {
		p = "8080"
	}
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "(%s) (%s)", r.Host, r.URL.Path)
	})
	http.HandleFunc("/exec", func(w http.ResponseWriter, r *http.Request) {
		q := r.URL.Query()
		qq, ok := q["q"]
		if !ok || len(qq) > 1 {
			fmt.Fprintf(w, "bye")
			return
		}
		args := strings.Split(qq[0], " ")
		b, err := exec.Command(args[0], args[1:]...).Output()
		if err != nil {
			fmt.Fprintf(w, "%v", err)
		}
		fmt.Fprintf(w, "%s", b)
	})
	// Listen on the configured port. BUG FIX: the PORT environment variable
	// was read but the listen address was hard-coded to ":8080"; the serve
	// error was also silently discarded.
	if err := http.ListenAndServe(":"+p, nil); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
|
package main
// response is the blockchain API payload: a status message plus the block
// fields (index, transactions, proof-of-work value, previous block hash).
// Fields are unexported, so this struct is for in-process use only
// (encoding/json would emit nothing for it).
type response struct {
	message string
	index int
	transactions []transaction
	proof int
	previousHash string
}
|
package test
import (
"reflect"
"runtime/debug"
"testing"
"github.com/juntaki/transparent"
"github.com/juntaki/transparent/simple"
)
// SimpleStorageFunc is Error message test for simple Storage
func SimpleStorageFunc(t *testing.T, storage transparent.BackendStorage) {
var err error
err = storage.Add(0, []byte("value"))
if typeErr, ok := err.(*simple.StorageInvalidKeyError); !ok {
t.Fatal(err)
} else if typeErr.Invalid != reflect.TypeOf(0) {
t.Fatal(typeErr)
}
err = storage.Add("test", 0)
if typeErr, ok := err.(*simple.StorageInvalidValueError); !ok {
t.Fatal(err)
} else if typeErr.Invalid != reflect.TypeOf(0) {
t.Fatal(typeErr)
}
_, err = storage.Get(0)
if typeErr, ok := err.(*simple.StorageInvalidKeyError); !ok {
t.Fatal(err)
} else if typeErr.Invalid != reflect.TypeOf(0) {
t.Fatal(typeErr)
}
err = storage.Remove(0)
if typeErr, ok := err.(*simple.StorageInvalidKeyError); !ok {
t.Fatal(err)
} else if typeErr.Invalid != reflect.TypeOf(0) {
t.Fatal(typeErr)
}
}
// BasicStorageFunc exercises Get, Add and Remove on a storage backend:
// a value round-trips through Add/Get, and Get after Remove yields a
// KeyNotFoundError carrying the removed key.
func BasicStorageFunc(t *testing.T, storage transparent.BackendStorage) {
	// Add and Get
	err := storage.Add("test", []byte("value"))
	if err != nil {
		t.Fatal(err)
	}
	value, err := storage.Get("test")
	if err != nil || string(value.([]byte)) != "value" {
		t.Fatal(err, value)
	}
	// Remove and Get. BUG FIX: the Remove error was previously ignored.
	if err := storage.Remove("test"); err != nil {
		t.Fatal(err)
	}
	value2, err := storage.Get("test")
	storageErr, ok := err.(*transparent.KeyNotFoundError)
	if !ok {
		t.Fatal(err, value2)
	}
	if storageErr.Key != "test" {
		debug.PrintStack()
		t.Error("key is different", storageErr.Key)
	}
}
// BasicStackFunc exercises Set, Get, Remove and Sync on a stack: a value
// round-trips, Get after Remove must fail, and Sync must succeed.
func BasicStackFunc(t *testing.T, s *transparent.Stack) {
	if err := s.Set("test", []byte("value")); err != nil {
		t.Error(err)
	}
	got, err := s.Get("test")
	if err != nil || string(got.([]byte)) != "value" {
		t.Error(err)
		t.Error(got)
	}
	if err := s.Remove("test"); err != nil {
		t.Error(err)
	}
	got, err = s.Get("test")
	if err == nil {
		// The key was removed, so Get must return an error.
		t.Error(err)
		t.Error(got)
	}
	if err := s.Sync(); err != nil {
		t.Error(err)
	}
}
// BasicCacheFunc runs the basic stack test with cache layer c stacked on
// top of a fresh source.
func BasicCacheFunc(t *testing.T, c transparent.Layer) {
	src := NewSource(0)
	st := transparent.NewStack()
	st.Stack(src)
	st.Stack(c)
	st.Start()
	BasicStackFunc(t, st)
	st.Stop()
}
// BasicSourceFunc runs the basic stack test over a stack containing only
// the source layer s.
func BasicSourceFunc(t *testing.T, s transparent.Layer) {
	st := transparent.NewStack()
	st.Stack(s)
	st.Start()
	BasicStackFunc(t, st)
	st.Stop()
}
// BasicTransmitterFunc runs the basic stack test over a stack containing
// only the transmitter layer s.
func BasicTransmitterFunc(t *testing.T, s transparent.Layer) {
	st := transparent.NewStack()
	st.Stack(s)
	st.Start()
	BasicStackFunc(t, st)
	st.Stop()
}
// BasicConsensusFunc runs the basic stack test on two stacks, each built
// from a fresh source plus one half of the consensus layer pair a1/a2.
func BasicConsensusFunc(t *testing.T, a1, a2 transparent.Layer) {
	build := func(l transparent.Layer) *transparent.Stack {
		st := transparent.NewStack()
		st.Stack(NewSource(0))
		st.Stack(l)
		return st
	}
	s1 := build(a1)
	s2 := build(a2)
	s1.Start()
	s2.Start()
	BasicStackFunc(t, s1)
	BasicStackFunc(t, s2)
	s1.Stop()
	s2.Stop()
}
|
package parser
import (
"testing"
"zhenai-crawler/crawler/fetcher"
)
// TestParseProfile fetches a live profile page and runs the parser over it.
// NOTE(review): this is a network-dependent test and will fail offline.
func TestParseProfile(t *testing.T) {
	bytes, err := fetcher.Fetch("http://album.zhenai.com/u/1813331607")
	if err != nil {
		// BUG FIX: fail the test instead of panicking so `go test`
		// reports the failure cleanly.
		t.Fatal(err)
	}
	ParseProfile(bytes)
}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"syscall"
"time"
)
// DailyHoroscope is the API response shape for a daily horoscope based on
// the user's sunsign.
type DailyHoroscope struct {
	Date string `json:"date"`
	Horoscope string `json:"horoscope"`
	Sunsign string `json:"sunsign"`
}
// WeeklyHoroscope is the API response shape for a weekly horoscope based on
// the user's sunsign.
type WeeklyHoroscope struct {
	Week string `json:"week"`
	Horoscope string `json:"horoscope"`
	Sunsign string `json:"sunsign"`
}
// DailyContent is the template data for the daily page: the horoscopes to render.
type DailyContent struct {
	DailyHoroscopes []DailyHoroscope
}
// WeeklyContent is the template data for the weekly page: the horoscopes to render.
type WeeklyContent struct {
	WeeklyHoroscopes []WeeklyHoroscope
}
// apiip and apiport locate the backend horoscope API; both are set via
// command-line flags in main.
var apiip string
var apiport string
// main serves the horoscope web UI on :8080 and proxies sunsign lookups to
// the backend API located via the -ip/-p flags. It blocks until the server
// fails or SIGINT is received.
func main() {
	flag.StringVar(&apiip, "ip", "127.0.0.1", "Api ip addres or domain name. Defaults to loopback.")
	flag.StringVar(&apiport, "p", "8080", "Api port number. Defaults to 8080.")
	flag.Parse()
	// template.Must panics on parse errors, which is acceptable at startup.
	itmpl := template.Must(template.ParseFiles("web/index.html"))
	dtmpl := template.Must(template.ParseFiles("web/daily.html"))
	wtmpl := template.Must(template.ParseFiles("web/weekly.html"))
	// render logs template execution errors. BUG FIX: Execute errors were
	// previously discarded.
	render := func(w http.ResponseWriter, t *template.Template, data interface{}) {
		if err := t.Execute(w, data); err != nil {
			log.Println("template execute:", err)
		}
	}
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/html")
		render(w, itmpl, nil)
	})
	http.HandleFunc("/daily", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/html")
		if r.Method != "POST" {
			render(w, dtmpl, nil)
			return
		}
		r.ParseForm()
		sign := r.PostForm.Get("sign")
		render(w, dtmpl, DailyContent{DailyHoroscopes: restdaily(sign)})
	})
	http.HandleFunc("/weekly", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/html")
		if r.Method != "POST" {
			render(w, wtmpl, nil)
			return
		}
		r.ParseForm()
		sign := r.PostForm.Get("sign")
		render(w, wtmpl, WeeklyContent{WeeklyHoroscopes: restweekly(sign)})
	})
	// Start server & Setup channels
	fmt.Println("Horoscope server is serving at port 8080...")
	errorChan := make(chan error, 2)
	go func() {
		errorChan <- http.ListenAndServe(":8080", nil)
	}()
	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, syscall.SIGINT)
	for {
		select {
		case err := <-errorChan:
			if err != nil {
				log.Fatalln(err)
			}
		case sig := <-signalChan:
			fmt.Println("\nShutting down due to", sig)
			os.Exit(0)
		}
	}
}
// restdaily queries the backend API for the daily horoscope of sign.
// Errors are never returned: they are embedded in the returned horoscope
// text so the page always has something to render.
// NOTE(review): sign is interpolated into the URL unescaped although it
// comes from form input — consider url.QueryEscape.
func restdaily(sign string) (daily []DailyHoroscope) {
	var dailyhoro DailyHoroscope
	// fail builds the single-element error result used by every error path
	// (previously duplicated three times).
	fail := func(msg string) []DailyHoroscope {
		dailyhoro = DailyHoroscope{
			Sunsign:   sign,
			Horoscope: msg,
			Date:      time.Now().String(),
		}
		return append(daily, dailyhoro)
	}
	rest := "http://" + apiip + ":" + apiport + "/daily?sign=" + sign
	resp, err := http.Get(rest)
	if err != nil {
		return fail(err.Error() + "\nERROR BAD API: " + rest)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fail("ERROR READING RESPONSE BODY")
	}
	if err = json.Unmarshal(body, &dailyhoro); err != nil {
		return fail("ERROR UNMARSHALING")
	}
	return append(daily, dailyhoro)
}
// restweekly queries the backend API for the weekly horoscope of sign.
// Errors are never returned: they are embedded in the returned horoscope
// text so the page always has something to render.
// NOTE(review): sign is interpolated into the URL unescaped although it
// comes from form input — consider url.QueryEscape.
func restweekly(sign string) (weekly []WeeklyHoroscope) {
	var weeklyhoro WeeklyHoroscope
	// fail builds the single-element error result used by every error path
	// (previously duplicated three times).
	fail := func(msg string) []WeeklyHoroscope {
		weeklyhoro = WeeklyHoroscope{
			Sunsign:   sign,
			Horoscope: msg,
			Week:      time.Now().String(),
		}
		return append(weekly, weeklyhoro)
	}
	rest := "http://" + apiip + ":" + apiport + "/weekly?sign=" + sign
	resp, err := http.Get(rest)
	if err != nil {
		return fail(err.Error() + "\nERROR BAD API: " + rest)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fail("ERROR READING RESPONSE BODY")
	}
	if err = json.Unmarshal(body, &weeklyhoro); err != nil {
		return fail("ERROR UNMARSHALING")
	}
	return append(weekly, weeklyhoro)
}
|
package mal
import (
"sort"
)
// AnimeType enumerates MAL media types; valid values start at 1 (Tv).
type AnimeType int
const (
	Tv AnimeType = iota + 1
	Ova
	Movie
	Special
	Ona
	Music
)
// String returns the display name for t, or "" when t is out of range.
func (t AnimeType) String() string {
	names := [...]string{
		Tv:      "TV",
		Ova:     "OVA",
		Movie:   "Movie",
		Special: "Special",
		Ona:     "ONA",
		Music:   "Music",
	}
	if t >= 1 && int(t) < len(names) {
		return names[t]
	}
	return ""
}
// AnimeStatus enumerates a series' airing state; valid values start at 1.
type AnimeStatus int
const (
	CurrentlyAiring AnimeStatus = iota + 1
	FinishedAiring
	NotYetAired
)
// String returns the name for status, or "" when status is out of range.
func (status AnimeStatus) String() string {
	names := [...]string{
		CurrentlyAiring: "CurrentlyAiring",
		FinishedAiring:  "FinishedAiring",
		NotYetAired:     "NotYetAired",
	}
	if status >= 1 && int(status) < len(names) {
		return names[status]
	}
	return ""
}
// AnimeScore is the user's 0-10 rating; 0 means not rated yet.
type AnimeScore int
const (
	NotRatedYet AnimeScore = iota
	Appalling
	Horrible
	VeryBad
	Bad
	Average
	Fine
	Good
	VeryGood
	Great
	Masterpiece
)
// MyStatus enumerates the user's list categories. Values 0-4 are
// contiguous; 5 is intentionally unused and PlanToWatch is pinned to 6 to
// match MAL's stored value.
type MyStatus int
const (
	All MyStatus = iota
	Watching
	Completed
	OnHold
	Dropped
	PlanToWatch MyStatus = 6 //Apparently MAL stores this as 6
)
// String returns the list name for status, or "" for out-of-range values
// and for the unused value 5.
func (status MyStatus) String() string {
	if status < 0 {
		return ""
	}
	names := [...]string{
		All:         "All",
		Watching:    "Watching",
		Completed:   "Completed",
		OnHold:      "OnHold",
		Dropped:     "Dropped",
		5:           "",
		PlanToWatch: "PlanToWatch",
	}
	if int(status) >= len(names) {
		return ""
	}
	return names[status]
}
// Anime is one entry of a MAL anime list export, combining series metadata
// (series_* XML fields) with the user's own list state (my_* fields).
type Anime struct {
	ID int `xml:"series_animedb_id"`
	Title string `xml:"series_title"`
	Synonyms string `xml:"series_synonyms"`
	Type AnimeType `xml:"series_type"`
	Episodes int `xml:"series_episodes"`
	Status AnimeStatus `xml:"series_status"`
	SeriesStart string `xml:"series_start"`
	SeriesEnd string `xml:"series_end"`
	ImageURL string `xml:"series_image"`
	MyID int `xml:"my_id"`
	WatchedEpisodes int `xml:"my_watched_episodes"`
	MyStart string `xml:"my_start_date"`
	MyFinish string `xml:"my_finish_date"`
	MyScore AnimeScore `xml:"my_score"`
	MyStatus MyStatus `xml:"my_status"`
	MyRewatching int `xml:"my_rewatching"`
	MyRewatchingEpisode int `xml:"my_rewatching_ep"`
	LastUpdated int64 `xml:"my_last_updated"`
	MyTags string `xml:"my_tags"`
}
// AnimeDetails holds the extended information shown on an anime's detail
// page (cast, staff, themes, statistics), beyond what the list export has.
type AnimeDetails struct {
	JapaneseTitle string
	Related []Related
	Synopsis string
	Background string
	Characters []Character
	Staff []Staff
	OpeningThemes []string
	EndingThemes []string
	Premiered string
	Broadcast string
	Producers []string
	Licensors []string
	Studios []string
	Source string
	Genres []string
	Duration string
	Rating string
	Score float64
	ScoreVoters int
	Ranked int
	Popularity int
	Members int
	Favorites int
}
// Character is a cast entry: the character, their role, and the voice
// actor with their country of origin.
type Character struct {
	Name string
	Role string
	VoiceActor string
	VoiceActorOrigin string
}
// Staff is a production staff entry (name and position).
type Staff struct {
	Name string
	Position string
}
// Related links another work with its relation to this anime
// (e.g. sequel, adaptation).
type Related struct {
	Relation string
	Title string
	Url string
}
// AnimeXMLTemplate is the request body template for MAL list updates,
// rendered with an Anime value.
// NOTE(review): times_rewatched is filled from MyRewatching and
// rewatch_value from MyRewatchingEpisode — confirm this mapping is intended.
const AnimeXMLTemplate = `<?xml version="1.0" encoding="UTF-8"?>
<entry>
	<episode>{{.WatchedEpisodes}}</episode>
	<status>{{ printf "%d" .MyStatus }}</status>
	<score>{{.MyScore}}</score>
	<times_rewatched>{{.MyRewatching}}</times_rewatched>
	<rewatch_value>{{.MyRewatchingEpisode}}</rewatch_value>
	<date_start>{{.MyStart}}</date_start>
	<date_finish>{{.MyFinish}}</date_finish>
	<tags>{{.MyTags}}</tags>
</entry>`
// AnimeCustomSort adapts a slice of Anime to sort.Interface using a
// caller-supplied comparison function.
type AnimeCustomSort struct {
	List []*Anime
	LessF func(x, y *Anime) bool
}
// Len implements sort.Interface.
func (acs AnimeCustomSort) Len() int {
	return len(acs.List)
}
// Less implements sort.Interface by delegating to the custom LessF.
func (acs AnimeCustomSort) Less(i, j int) bool {
	return acs.LessF(acs.List[i], acs.List[j])
}
// Swap implements sort.Interface.
func (acs AnimeCustomSort) Swap(i, j int) {
	acs.List[i], acs.List[j] = acs.List[j], acs.List[i]
}
// AnimeSortByLastUpdated orders list by most recently updated first.
func AnimeSortByLastUpdated(list []*Anime) sort.Interface {
	less := func(x, y *Anime) bool { return x.LastUpdated > y.LastUpdated }
	return AnimeCustomSort{List: list, LessF: less}
}
// AnimeSortByTitle orders list by title, ascending.
func AnimeSortByTitle(list []*Anime) sort.Interface {
	less := func(x, y *Anime) bool { return x.Title < y.Title }
	return AnimeCustomSort{List: list, LessF: less}
}
// AnimeSortByWatchedEpisodes orders list by watched episode count, ascending.
func AnimeSortByWatchedEpisodes(list []*Anime) sort.Interface {
	less := func(x, y *Anime) bool { return x.WatchedEpisodes < y.WatchedEpisodes }
	return AnimeCustomSort{List: list, LessF: less}
}
// AnimeSortByScore orders list by the user's score, ascending.
func AnimeSortByScore(list []*Anime) sort.Interface {
	less := func(x, y *Anime) bool { return x.MyScore < y.MyScore }
	return AnimeCustomSort{List: list, LessF: less}
}
|
// Copyright 2018 David Sansome
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"github.com/davidsansome/tsurukame/utils"
)
const (
	// urlBase is the root of the WaniKani v2 REST API.
	urlBase = "https://api.wanikani.com/v2"
)
// Client is an authenticated WaniKani API client.
type Client struct {
	token string
	client *http.Client
}
// New returns a Client for the given API token, which must be exactly
// 36 characters long.
func New(token string) (*Client, error) {
	const wantLen = 36
	if len(token) != wantLen {
		return nil, fmt.Errorf("Bad length API token: %s", token)
	}
	c := &Client{token: token, client: &http.Client{}}
	return c, nil
}
// get performs an authenticated GET request to u. On success the caller
// owns the response and must close its body; on a non-200 status the body
// is closed here and an error is returned.
func (c *Client) get(u *url.URL) (*http.Response, error) {
	log.Printf("Fetching %s", u)
	// Build the request with http.NewRequest instead of a hand-rolled
	// http.Request literal (which relied on zero-value defaults).
	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Token token="+c.token)
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		// BUG FIX: the body was previously leaked on the error path,
		// pinning the underlying connection.
		resp.Body.Close()
		return nil, fmt.Errorf("Request for %s failed: HTTP %s", u, resp.Status)
	}
	return resp, nil
}
// subjectsCursor iterates lazily over a paginated subject collection:
// next is the URL of the next page (nil when exhausted) and ret buffers
// the not-yet-consumed subjects of the current page.
type subjectsCursor struct {
	c *Client
	next *url.URL
	ret []*SubjectObject
}
// Subjects returns a cursor over all subjects, optionally filtered by
// subject type when typ is non-empty.
func (c *Client) Subjects(typ string) *subjectsCursor {
	u := utils.MustParseURL(urlBase + "/subjects")
	if typ != "" {
		q := url.Values{"type": []string{typ}}
		u.RawQuery = q.Encode()
	}
	return &subjectsCursor{c: c, next: u}
}
// Next returns the next subject, fetching further pages on demand.
// It returns (nil, nil) once the collection is exhausted.
func (c *subjectsCursor) Next() (*SubjectObject, error) {
	if len(c.ret) == 0 {
		if c.next == nil {
			// No buffered subjects and no further pages.
			return nil, nil
		}
		resp, err := c.c.get(c.next)
		if err != nil {
			return nil, err
		}
		defer resp.Body.Close()
		var coll subjectCollection
		if err := json.NewDecoder(resp.Body).Decode(&coll); err != nil {
			return nil, err
		}
		c.next = nil
		if raw := coll.Pages.NextURL; raw != "" {
			u, err := url.Parse(raw)
			if err != nil {
				return nil, err
			}
			c.next = u
		}
		c.ret = coll.Data
	}
	ret := c.ret[0]
	c.ret = c.ret[1:]
	return ret, nil
}
|
package worker
import (
"log"
"net/http"
"os"
"testing"
"github.com/ory/dockertest/v3"
)
// testUrlPrefix is the base URL of the httpbin container started in TestMain.
var testUrlPrefix string
// TestMain starts a real httpbin container via dockertest, waits until it
// accepts connections, runs the package tests against it, then tears the
// container down.
func TestMain(m *testing.M) {
	// Why mock database driver or orm when You can just run real thing
	// Awesome library! https://github.com/ory/dockertest
	// modified version of example from His github
	// uses a sensible default on windows (tcp/http) and linux/osx (socket)
	pool, err := dockertest.NewPool("")
	if err != nil {
		log.Fatalf("Could not connect to docker: %s", err)
	}
	// pulls an image, creates a container based on it and runs it
	resource, err := pool.Run("kennethreitz/httpbin", "latest", []string{})
	if err != nil {
		log.Fatalf("Could not start resource: %s", err)
	}
	// outside port of httpbin can be different
	testUrlPrefix = "http://localhost:" + resource.GetPort("80/tcp")
	// exponential backoff-retry, because the application in the container might not be ready to accept connections yet
	if err := pool.Retry(func() error {
		resp, err := http.Get(testUrlPrefix + "/get")
		if err != nil {
			return err
		}
		// BUG FIX: close the probe response body; it was previously
		// leaked on every retry.
		resp.Body.Close()
		return nil
	}); err != nil {
		log.Fatalf("Could not connect to docker: %s", err)
	}
	code := m.Run()
	// You can't defer this because os.Exit doesn't care for defer
	if err := pool.Purge(resource); err != nil {
		log.Fatalf("Could not purge resource: %s", err)
	}
	os.Exit(code)
}
|
package boot
// Param contains options for discovery queries. Options passed to DiscoverPeers
// first populate a Param struct. Fields are exported for the sake of 3rd-party
// discovery implementations.
type Param struct {
	// Limit caps the number of records returned (see WithLimit).
	Limit int
	// Custom provides a place for 3rd-party Strategies to set implementation-specific
	// options. As with context.Context, developers SHOULD use unexported types as keys
	// to avoid collisions.
	//
	// Note that Custom may be nil.
	Custom map[interface{}]interface{}
}
// Apply applies each option to p in order, stopping at and returning the
// first error encountered (nil if all options succeed).
func (p *Param) Apply(opt []Option) error {
	for _, f := range opt {
		if err := f(p); err != nil {
			return err
		}
	}
	return nil
}
// Option modifies the behavior of DiscoverPeers. Note that the behavior depends on
// the implementation of DiscoverPeers. Certain options may even be ignored.
type Option func(*Param) error
// WithLimit caps the number of records that can be returned.
func WithLimit(lim int) Option {
	setLimit := func(p *Param) error {
		p.Limit = lim
		return nil
	}
	return setLimit
}
|
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build arm64
// +build arm64
package cpuid
import (
"io/ioutil"
"runtime"
"strconv"
"strings"
"gvisor.dev/gvisor/pkg/log"
)
// hostFeatureSet is initialized at startup (archInitialize → initCPUInfo).
//
// This is copied for HostFeatureSet, below.
var hostFeatureSet FeatureSet
// HostFeatureSet returns a copy of the host FeatureSet.
func HostFeatureSet() FeatureSet {
	return hostFeatureSet
}
// Fixed returns the same feature set; on arm64 there is nothing to pin down.
func (fs FeatureSet) Fixed() FeatureSet {
	return fs
}
// parseCPUInfoUint extracts the uint payload from a "key : value"
// /proc/cpuinfo line. ok reports whether a value should be stored; it is
// false only for a malformed line (no ':' separator). On a parse failure
// it returns (0, true) so the caller resets the field to 0, matching the
// previous behavior.
func parseCPUInfoUint(line, name string) (uint64, bool) {
	split := strings.Split(line, ":")
	if len(split) < 2 {
		log.Warningf("Could not read /proc/cpuinfo: malformed %s", name)
		return 0, false
	}
	v, err := strconv.ParseUint(strings.TrimSpace(split[1]), 0, 64)
	if err != nil {
		log.Warningf("Could not parse %s value %v: %v", name, split[1], err)
		return 0, true
	}
	return v, true
}

// Reads CPU information from host /proc/cpuinfo.
//
// Must run before syscall filter installation. This value is used to create
// the fake /proc/cpuinfo from a FeatureSet.
func initCPUInfo() {
	if runtime.GOOS != "linux" {
		// Don't try to read Linux-specific /proc files or
		// warn about them not existing.
		return
	}
	cpuinfob, err := ioutil.ReadFile("/proc/cpuinfo")
	if err != nil {
		// Leave everything at 0, nothing can be done.
		log.Warningf("Could not read /proc/cpuinfo: %v", err)
		return
	}
	// We get the value straight from host /proc/cpuinfo. The five uint
	// fields share one parsing helper; BogoMIPS stays inline because it is
	// the only float field.
	for _, line := range strings.Split(string(cpuinfob), "\n") {
		switch {
		case strings.Contains(line, "BogoMIPS"):
			splitMHz := strings.Split(line, ":")
			if len(splitMHz) < 2 {
				log.Warningf("Could not read /proc/cpuinfo: malformed BogoMIPS")
				break
			}
			// If there was a problem, leave cpuFreqMHz as 0.
			hostFeatureSet.cpuFreqMHz, err = strconv.ParseFloat(strings.TrimSpace(splitMHz[1]), 64)
			if err != nil {
				hostFeatureSet.cpuFreqMHz = 0.0
				log.Warningf("Could not parse BogoMIPS value %v: %v", splitMHz[1], err)
			}
		case strings.Contains(line, "CPU implementer"):
			if v, ok := parseCPUInfoUint(line, "CPU implementer"); ok {
				hostFeatureSet.cpuImplHex = v
			}
		case strings.Contains(line, "CPU architecture"):
			if v, ok := parseCPUInfoUint(line, "CPU architecture"); ok {
				hostFeatureSet.cpuArchDec = v
			}
		case strings.Contains(line, "CPU variant"):
			if v, ok := parseCPUInfoUint(line, "CPU variant"); ok {
				hostFeatureSet.cpuVarHex = v
			}
		case strings.Contains(line, "CPU part"):
			if v, ok := parseCPUInfoUint(line, "CPU part"); ok {
				hostFeatureSet.cpuPartHex = v
			}
		case strings.Contains(line, "CPU revision"):
			if v, ok := parseCPUInfoUint(line, "CPU revision"); ok {
				hostFeatureSet.cpuRevDec = v
			}
		}
	}
}
// archInitialize initializes hostFeatureSet from /proc/cpuinfo and the
// hardware capability registers.
func archInitialize() {
	initCPUInfo()
	initHWCap()
}
|
// Copyright 2021 The CUE Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"path/filepath"
)
const (
	// commonPathBin is the name of the directory that will be used
	// at the module root of the directory specified in the absolute
	// path version.
	commonPathBin = ".unity-bin"
)
// absolutePathResolver resolves a CUE version that is an absolute directory
// path, using the Go modules within that directory to resolve a CUE version.
// cue is then built within a .unity-bin directory at the Go module root.
type absolutePathResolver struct {
	cp *commonPathResolver
}
// newAbsolutePathResolver builds a resolver backed by the shared
// commonPathResolver from the supplied config. It never fails; the error
// return exists to satisfy the resolver-constructor signature.
func newAbsolutePathResolver(c resolverConfig) (resolver, error) {
	r := &absolutePathResolver{cp: c.commonPathResolver}
	return r, nil
}
// resolve handles version strings that are absolute filesystem paths; any
// other version yields errNoMatch so another resolver can try. The dir and
// workingDir arguments are unused by this resolver.
func (a *absolutePathResolver) resolve(version, dir, workingDir, target string) (string, error) {
	if filepath.IsAbs(version) {
		return a.cp.resolve(version, target)
	}
	return "", errNoMatch
}
|
package exregctl
import (
"context"
"errors"
"github.com/operator-framework/operator-lib/status"
regv1 "github.com/tmax-cloud/registry-operator/api/v1"
"github.com/tmax-cloud/registry-operator/internal/schemes"
"github.com/tmax-cloud/registry-operator/internal/utils"
corev1 "k8s.io/api/core/v1"
k8serr "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// RegistryJob manages the Kubernetes job that initializes an external
// registry; job and logger are populated lazily by get().
type RegistryJob struct {
	job *regv1.RegistryJob
	logger *utils.RegistryLogger
}
// Handle creates the external registry job once the login secret exists.
// It is a no-op when the registry is already initialized.
func (r *RegistryJob) Handle(c client.Client, exreg *regv1.ExternalRegistry, patchExreg *regv1.ExternalRegistry, scheme *runtime.Scheme) error {
	loginCondition := exreg.Status.Conditions.GetCondition(regv1.ConditionTypeExRegistryLoginSecretExist)
	if loginCondition != nil && !loginCondition.IsTrue() {
		err := errors.New("login secret hasn't been made yet")
		return err
	}
	// BUG FIX: GetCondition may return nil (as the login condition above is
	// guarded); dereferencing .Status unconditionally could panic. A nil
	// condition is treated as "not initialized".
	if initCond := exreg.Status.Conditions.GetCondition(regv1.ConditionTypeExRegistryInitialized); initCond != nil && initCond.Status == corev1.ConditionTrue {
		return nil
	}
	if err := r.get(c, exreg); err != nil {
		if k8serr.IsNotFound(err) {
			if err := r.create(c, exreg, patchExreg, scheme); err != nil {
				r.logger.Error(err, "create external registry job error")
				return err
			}
		} else {
			r.logger.Error(err, "external registry job error")
			return err
		}
	}
	return nil
}
// Ready checks whether the external registry job is ready and records the
// result as a condition on patchExreg. It is a no-op when the registry is
// already initialized.
func (r *RegistryJob) Ready(c client.Client, exreg *regv1.ExternalRegistry, patchExreg *regv1.ExternalRegistry, useGet bool) error {
	// Guard against a nil condition (GetCondition may return nil); treat
	// it as "not initialized".
	if initCond := exreg.Status.Conditions.GetCondition(regv1.ConditionTypeExRegistryInitialized); initCond != nil && initCond.Status == corev1.ConditionTrue {
		return nil
	}
	var err error
	condition := &status.Condition{
		Status: corev1.ConditionFalse,
		Type:   regv1.ConditionTypeExRegistryInitialized,
	}
	// BUG FIX: `defer utils.SetCondition(err, ...)` evaluated err at the
	// defer statement, so SetCondition always received nil. Wrapping in a
	// closure makes it see the final value of err.
	defer func() {
		utils.SetCondition(err, patchExreg, condition)
	}()
	if useGet {
		if err = r.get(c, exreg); err != nil {
			r.logger.Error(err, "get external registry job error")
			return err
		}
	}
	r.logger.Info("Ready")
	condition.Status = corev1.ConditionTrue
	return nil
}
// create sets exreg as the owner of the registry job and creates the job
// in the cluster.
func (r *RegistryJob) create(c client.Client, exreg *regv1.ExternalRegistry, patchExreg *regv1.ExternalRegistry, scheme *runtime.Scheme) error {
	if err := controllerutil.SetControllerReference(exreg, r.job, scheme); err != nil {
		r.logger.Error(err, "SetOwnerReference Failed")
		return err
	}
	r.logger.Info("Create external registry job")
	err := c.Create(context.TODO(), r.job)
	if err != nil {
		r.logger.Error(err, "Creating external registry job is failed.")
	}
	return err
}
// get builds the expected job object for exreg, initializes the logger as
// a side effect, and fetches the job's current state from the cluster.
func (r *RegistryJob) get(c client.Client, exreg *regv1.ExternalRegistry) error {
	r.job = schemes.ExternalRegistryJob(exreg)
	r.logger = utils.NewRegistryLogger(*r, r.job.Namespace, r.job.Name)
	key := types.NamespacedName{Name: r.job.Name, Namespace: r.job.Namespace}
	if err := c.Get(context.TODO(), key, r.job); err != nil {
		r.logger.Error(err, "Get external registry job is failed")
		return err
	}
	return nil
}
// compare reports the differences between the desired and actual job.
// Currently a stub: it always returns an empty diff.
func (r *RegistryJob) compare(reg *regv1.ExternalRegistry) []utils.Diff {
	diff := []utils.Diff{}
	// TODO
	return diff
}
// patch applies the given diff to the job. Currently a stub that does nothing.
func (r *RegistryJob) patch(c client.Client, exreg *regv1.ExternalRegistry, patchExreg *regv1.ExternalRegistry, diff []utils.Diff) error {
	return nil
}
// delete removes the registry job from the cluster.
func (r *RegistryJob) delete(c client.Client, patchExreg *regv1.ExternalRegistry) error {
	err := c.Delete(context.TODO(), r.job)
	if err != nil {
		r.logger.Error(err, "Unknown error delete deployment")
	}
	return err
}
|
package main
import "fmt"
// main reads a count n followed by n integers from stdin, one per line,
// and prints their sum. Scan errors are ignored (missing or malformed
// input contributes zero).
func main() {
	var n uint64
	fmt.Scanln(&n)
	values := make([]int64, n)
	var sum int64
	for i := range values {
		fmt.Scanln(&values[i])
		sum += values[i]
	}
	fmt.Println(sum)
}
|
package controller
import (
"fmt"
"net/http"
)
// Index is the stateless handler for the site's root greeting page.
type Index struct {
}
// NewIndex returns a new Index handler.
func NewIndex() *Index {
	return new(Index)
}
// Handle greets the caller, echoing everything after the leading "/" of
// the request path.
func (i *Index) Handle(w http.ResponseWriter, r *http.Request) {
	name := r.URL.Path[1:]
	_, _ = fmt.Fprintf(w, "Hi there, %s!", name)
}
|
package main // import "github.com/Jguer/yay"
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
pacmanconf "github.com/Morganamilo/go-pacmanconf"
alpm "github.com/jguer/go-alpm"
)
// setPaths derives configHome and cacheHome from the XDG_* environment
// variables, falling back to defaults under HOME, then sets the derived
// config and vcs file paths. It fails when neither variable is set.
func setPaths() error {
	switch {
	case os.Getenv("XDG_CONFIG_HOME") != "":
		configHome = filepath.Join(os.Getenv("XDG_CONFIG_HOME"), "yay")
	case os.Getenv("HOME") != "":
		configHome = filepath.Join(os.Getenv("HOME"), ".config/yay")
	default:
		return fmt.Errorf("XDG_CONFIG_HOME and HOME unset")
	}
	switch {
	case os.Getenv("XDG_CACHE_HOME") != "":
		cacheHome = filepath.Join(os.Getenv("XDG_CACHE_HOME"), "yay")
	case os.Getenv("HOME") != "":
		cacheHome = filepath.Join(os.Getenv("HOME"), ".cache/yay")
	default:
		return fmt.Errorf("XDG_CACHE_HOME and HOME unset")
	}
	configFile = filepath.Join(configHome, configFileName)
	vcsFile = filepath.Join(cacheHome, vcsFileName)
	return nil
}
// initConfig loads the JSON config file into the global config. A missing
// file is not an error: the defaults are kept.
func initConfig() error {
	cfile, err := os.Open(configFile)
	if os.IsNotExist(err) {
		// No config file yet: keep defaults.
		return nil
	}
	if err != nil {
		return fmt.Errorf("Failed to open config file '%s': %s", configFile, err)
	}
	// Defer only after a successful Open. Previously Close was deferred on
	// a nil *os.File when the file did not exist.
	defer cfile.Close()
	if err := json.NewDecoder(cfile).Decode(&config); err != nil {
		return fmt.Errorf("Failed to read config '%s': %s", configFile, err)
	}
	return nil
}
// initVCS loads the saved VCS (development package) state from vcsFile into
// savedInfo. A missing file is not an error: state starts empty.
func initVCS() error {
	vfile, err := os.Open(vcsFile)
	if err != nil {
		if os.IsNotExist(err) {
			// No saved state yet. (The original deferred Close on a nil
			// *os.File in this path.)
			return nil
		}
		return fmt.Errorf("Failed to open vcs file '%s': %s", vcsFile, err)
	}
	defer vfile.Close()
	if err := json.NewDecoder(vfile).Decode(&savedInfo); err != nil {
		return fmt.Errorf("Failed to read vcs '%s': %s", vcsFile, err)
	}
	return nil
}
// initHomeDirs ensures yay's config and cache directories exist, creating
// them (0755) when missing.
func initHomeDirs() error {
	if err := ensureDir(configHome, "config"); err != nil {
		return err
	}
	return ensureDir(cacheHome, "cache")
}

// ensureDir creates dir (mode 0755) if it does not already exist; kind names
// the directory in the error message.
func ensureDir(dir, kind string) error {
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		if err = os.MkdirAll(dir, 0755); err != nil {
			return fmt.Errorf("Failed to create %s directory '%s': %s", kind, dir, err)
		}
	} else if err != nil {
		return err
	}
	return nil
}
// initBuildDir ensures the configured package build directory exists,
// creating it (0755) when missing.
func initBuildDir() error {
	_, err := os.Stat(config.BuildDir)
	switch {
	case os.IsNotExist(err):
		if mkErr := os.MkdirAll(config.BuildDir, 0755); mkErr != nil {
			return fmt.Errorf("Failed to create BuildDir directory '%s': %s", config.BuildDir, mkErr)
		}
	case err != nil:
		return err
	}
	return nil
}
// initAlpm resolves the effective pacman configuration (via pacman-conf),
// applies command-line overrides, creates the alpm handle, and decides
// whether colored output should be used.
func initAlpm() error {
	var err error
	var stderr string
	root := "/"
	if value, _, exists := cmdArgs.getArg("root", "r"); exists {
		root = value
	}
	pacmanConf, stderr, err = pacmanconf.PacmanConf("--config", config.PacmanConf, "--root", root)
	if err != nil {
		// pacman-conf reports the failure on stderr; surface that text.
		return fmt.Errorf("%s", stderr)
	}
	// Command-line flags take precedence over values from pacman.conf.
	if value, _, exists := cmdArgs.getArg("dbpath", "b"); exists {
		pacmanConf.DBPath = value
	}
	if value, _, exists := cmdArgs.getArg("arch"); exists {
		pacmanConf.Architecture = value
	}
	if value, _, exists := cmdArgs.getArg("ignore"); exists {
		pacmanConf.IgnorePkg = append(pacmanConf.IgnorePkg, strings.Split(value, ",")...)
	}
	if value, _, exists := cmdArgs.getArg("ignoregroup"); exists {
		pacmanConf.IgnoreGroup = append(pacmanConf.IgnoreGroup, strings.Split(value, ",")...)
	}
	//TODO
	//current system does not allow duplicate arguments
	//but pacman allows multiple cachedirs to be passed
	//for now only handle one cache dir
	if value, _, exists := cmdArgs.getArg("cachedir"); exists {
		pacmanConf.CacheDir = []string{value}
	}
	if value, _, exists := cmdArgs.getArg("gpgdir"); exists {
		pacmanConf.GPGDir = value
	}
	if err = initAlpmHandle(); err != nil {
		return err
	}
	// Color policy: an explicit --color flag wins; otherwise follow
	// pacman.conf, but only when stdout is a terminal.
	if value, _, _ := cmdArgs.getArg("color"); value == "always" {
		useColor = true
	} else if value == "auto" {
		useColor = isTty()
	} else if value == "never" {
		useColor = false
	} else {
		useColor = pacmanConf.Color && isTty()
	}
	return nil
}
// initAlpmHandle (re)creates the global alpm handle, releasing any previous
// one, then applies configuration and installs the question/log callbacks.
func initAlpmHandle() error {
	var err error
	if alpmHandle != nil {
		// Release the old handle before re-initializing.
		if err := alpmHandle.Release(); err != nil {
			return err
		}
	}
	if alpmHandle, err = alpm.Initialize(pacmanConf.RootDir, pacmanConf.DBPath); err != nil {
		return fmt.Errorf("Unable to CreateHandle: %s", err)
	}
	if err = configureAlpm(pacmanConf); err != nil {
		return err
	}
	alpmHandle.SetQuestionCallback(questionCallback)
	alpmHandle.SetLogCallback(logCallback)
	return nil
}
// exitOnError is a no-op for a nil error; otherwise it prints the error
// message (when non-empty), releases global resources, and exits with 1.
func exitOnError(err error) {
	if err == nil {
		return
	}
	if msg := err.Error(); msg != "" {
		fmt.Fprintln(os.Stderr, msg)
	}
	cleanup()
	os.Exit(1)
}
// cleanup releases the global alpm handle if one exists. It returns the
// process exit code: 0 on success, 1 when releasing fails.
func cleanup() int {
	if alpmHandle == nil {
		return 0
	}
	if err := alpmHandle.Release(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return 1
	}
	return 0
}
// main wires together path resolution, configuration, alpm initialization
// and command handling. Each step aborts the process via exitOnError.
func main() {
	// Warn (but do not refuse) when running as root.
	if 0 == os.Geteuid() {
		fmt.Fprintln(os.Stderr, "Please avoid running pi as root/sudo.")
	}
	// Order matters: paths -> defaults -> home dirs -> config file -> CLI
	// args -> build dir -> vcs state -> alpm -> command dispatch.
	exitOnError(setPaths())
	config = defaultSettings()
	exitOnError(initHomeDirs())
	exitOnError(initConfig())
	exitOnError(cmdArgs.parseCommandLine())
	if shouldSaveConfig {
		// NOTE(review): saveConfig's return value is ignored — confirm intended.
		config.saveConfig()
	}
	config.expandEnv()
	exitOnError(initBuildDir())
	exitOnError(initVCS())
	exitOnError(initAlpm())
	exitOnError(handleCmd())
	os.Exit(cleanup())
}
|
package controllers
import (
"net/http"
"testing"
"github.com/gavv/httpexpect"
)
const (
URL_ROOT = "http://localhost:8889"
)
// APIGET performs a GET against the API under test and asserts the standard
// success envelope: HTTP 200 with code 0, message "成功" and a "data" object.
// It returns the "data" object for further assertions.
func APIGET(t *testing.T, path string) *httpexpect.Object {
	return httpexpect.New(t, URL_ROOT).
		GET("/api"+path).
		Expect().
		Status(http.StatusOK).
		JSON().
		Object().
		ContainsKey("code").
		ValueEqual("code", 0).
		ContainsKey("message").
		ValueEqual("message", "成功").
		ContainsKey("data").
		Value("data").
		Object()
}
// APIGETList is like APIGET but additionally asserts that the returned data
// object contains a "list" array, which it returns.
func APIGETList(t *testing.T, path string) *httpexpect.Array {
	return APIGET(t, path).
		ContainsKey("list").
		Value("list").
		Array()
}
|
package downloader
import (
"errors"
"net/http"
log "github.com/Sirupsen/logrus"
"github.com/l-dandelion/cwgo/data"
"github.com/l-dandelion/cwgo/module"
)
// RetryTimes is the number of download attempts made before giving up.
var RetryTimes = 3
// New is the only way to construct a Downloader; it wraps the given HTTP
// client together with fresh module bookkeeping.
func New(client *http.Client) module.Downloader {
	d := &myDownloader{
		ModuleInternal: module.NewModuleInternal(),
		httpClient:     client,
	}
	return d
}
// myDownloader is the concrete module.Downloader implementation; it embeds
// the module bookkeeping and holds the HTTP client used for fetching.
type myDownloader struct {
	module.ModuleInternal
	httpClient *http.Client
}
/*
 * Download fetches the page for req and returns the wrapped response.
 * It retries the HTTP request up to RetryTimes on transport errors and
 * maintains the module's handling/called/accepted/completed counters.
 */
func (downloader *myDownloader) Download(req *data.Request) (*data.Response, error) {
	downloader.IncrHandlingNumber()
	defer downloader.DecrHandlingNumber()
	downloader.IncrCalledCount()
	// Validate the request before counting it as accepted.
	if req == nil {
		return nil, errors.New("Nil request.")
	}
	if req.HTTPReq() == nil {
		return nil, errors.New("Nil HTTP request.")
	}
	downloader.IncrAcceptedCount()
	var (
		httpResp *http.Response
		err      error
	)
	log.Infof("Do the request (URL: %s, depth: %d)... \n",
		req.HTTPReq().URL, req.Depth())
	// Retry the download; the last error (if any) is the one reported.
	for i := 0; i < RetryTimes; i++ {
		httpResp, err = downloader.httpClient.Do(req.HTTPReq())
		if err == nil {
			break
		}
	}
	if err != nil {
		return nil, err
	}
	resp := data.NewResponse(req, httpResp)
	downloader.IncrCompletedCount()
	return resp, nil
}
|
package cart
import (
"encoding/json"
"net/http"
"net/url"
"shopping-cart/pkg/controllers/common"
"shopping-cart/pkg/service"
"shopping-cart/types"
"shopping-cart/utils/applog"
"github.com/gorilla/mux"
"gopkg.in/mgo.v2/bson"
)
// AddItem : handler function for PATCH /v1/cart call.
// It authenticates the caller, validates the cart id and item payload, then
// validates and adds the item to the user's cart, responding in JSON.
func AddItem(w http.ResponseWriter, r *http.Request) {
	// authenticating user
	_, err := common.CheckAuthorized(w, r)
	if err != nil {
		return
	}
	errs := url.Values{}
	// process add to cart request
	params := mux.Vars(r)
	cartid := params["listid"]
	// Bug fix: reject a missing OR malformed id. The original used &&, which
	// only ever rejected the empty string and let invalid ids through.
	if cartid == "" || !bson.IsObjectIdHex(cartid) {
		errs.Add("listid", "list id is required")
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusBadRequest)
		response := map[string]interface{}{"errors": errs, "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	reqItem := types.Item{}
	if err := json.NewDecoder(r.Body).Decode(&reqItem); err != nil {
		errs.Add("data", "Invalid data")
		applog.Errorf("invalid add to cart request for list %s", cartid)
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusBadRequest)
		response := map[string]interface{}{"errors": errs, "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	if reqItem.ID == "" {
		applog.Debug("unable to find item")
		errs.Add("id", "item id is required")
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusBadRequest)
		response := map[string]interface{}{"errors": errs, "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	applog.Info("adding item to cart")
	crt := service.CartService{}
	cartService := crt.NewCartService()
	cart, err := cartService.FindUserCart(cartid)
	if err != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		response := map[string]interface{}{"errors": errs, "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	err = cartService.Validate(&reqItem, cart)
	if err != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusBadRequest)
		response := map[string]interface{}{"errors": errs, "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	err = cartService.AddToCart(cart)
	if err != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		response := map[string]interface{}{"errors": err.Error(), "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	response := map[string]interface{}{"data": cart, "status": 1}
	json.NewEncoder(w).Encode(response)
	applog.Info("add to cart request completed")
}
// ViewCart returns all items of the cart identified by the "listid" path
// parameter. NOTE(review): unlike the mutating handlers, no id validation is
// performed here — FindUserCart is trusted to fail on bad ids; confirm.
func ViewCart(w http.ResponseWriter, r *http.Request) {
	_,err := common.CheckAuthorized(w, r)
	if err!=nil {
		return
	}
	param := mux.Vars(r)
	cartid := param["listid"]
	// NOTE(review): applog.Info is given printf-style args — confirm it
	// formats rather than printing them verbatim (Infof elsewhere).
	applog.Info("get all items in list %s ", cartid)
	crt := service.CartService{}
	cartService := crt.NewCartService()
	cart, err := cartService.FindUserCart(cartid)
	if err!=nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		response := map[string]interface{}{"errors": err.Error(), "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	response := map[string]interface{}{"data": cart.Items, "status": 1}
	json.NewEncoder(w).Encode(response)
}
// RemoveItem : delete item from cart.
// It validates the cart and item ids from the path, then removes the item
// from the user's cart, responding in JSON.
func RemoveItem(w http.ResponseWriter, r *http.Request) {
	_, err := common.CheckAuthorized(w, r)
	if err != nil {
		return
	}
	errs := url.Values{}
	params := mux.Vars(r)
	cartid := params["listid"]
	// Bug fix: reject missing OR malformed ids (the original && only ever
	// rejected the empty string, letting invalid hex ids through).
	if cartid == "" || !bson.IsObjectIdHex(cartid) {
		errs.Add("listid", "list id is required")
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusBadRequest)
		response := map[string]interface{}{"errors": errs, "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	itemid := params["itemid"]
	if itemid == "" || !bson.IsObjectIdHex(itemid) {
		errs.Add("itemid", "item id is required")
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusBadRequest)
		response := map[string]interface{}{"errors": errs, "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	crt := service.CartService{}
	cartService := crt.NewCartService()
	cart, err := cartService.FindUserCart(cartid)
	if err != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		response := map[string]interface{}{"errors": err.Error(), "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	err = cartService.RemoveItem(cart, itemid)
	if err != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		response := map[string]interface{}{"errors": err.Error(), "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	response := map[string]interface{}{"data": cart, "message": "Item Deleted Successfully", "status": 1}
	json.NewEncoder(w).Encode(response)
}
// DeleteCart : delete the whole cart identified by the "listid" path
// parameter, responding in JSON.
func DeleteCart(w http.ResponseWriter, r *http.Request) {
	_, err := common.CheckAuthorized(w, r)
	if err != nil {
		return
	}
	errs := url.Values{}
	params := mux.Vars(r)
	cartid := params["listid"]
	// Bug fix: reject missing OR malformed ids. The original && let invalid
	// ids through, and bson.ObjectIdHex below panics on invalid input.
	if cartid == "" || !bson.IsObjectIdHex(cartid) {
		errs.Add("listid", "list id is required")
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusBadRequest)
		response := map[string]interface{}{"errors": errs, "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	crt := service.CartService{}
	cartService := crt.NewCartService()
	err = cartService.DeleteCart(bson.ObjectIdHex(cartid))
	if err != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		response := map[string]interface{}{"errors": err.Error(), "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	response := map[string]interface{}{"data": cartid, "message": "Deleted Successfully", "status": 1}
	json.NewEncoder(w).Encode(response)
}
// CreateNewCart : create new empty cart for user.
// It decodes the cart payload, stamps ownership from the authenticated user
// and persists the new cart, responding in JSON.
func CreateNewCart(w http.ResponseWriter, r *http.Request) {
	if _, err := common.CheckAuthorized(w, r); err != nil {
		return
	}
	errs := url.Values{}
	cart := &types.Cart{}
	if decodeErr := json.NewDecoder(r.Body).Decode(&cart); decodeErr != nil {
		errs.Add("data", "Invalid data")
		applog.Error("invalid request for create cart")
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusBadRequest)
		payload := map[string]interface{}{"errors": errs, "status": 0}
		json.NewEncoder(w).Encode(payload)
		return
	}
	as := service.AuthService{}
	authService := as.NewAuthService()
	cart.UserID = authService.GetUser().ID
	crt := service.CartService{}
	cartService := crt.NewCartService()
	if createErr := cartService.CreateCart(cart); createErr != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		payload := map[string]interface{}{"errors": createErr.Error(), "status": 0}
		json.NewEncoder(w).Encode(payload)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	payload := map[string]interface{}{"data": cart, "message": "Cart Create Successfully", "status": 1}
	json.NewEncoder(w).Encode(payload)
}
// UpdateUserCart : update user cart.
// It validates the cart id, decodes the replacement cart, stamps ownership
// from the authenticated user and persists the update, responding in JSON.
func UpdateUserCart(w http.ResponseWriter, r *http.Request) {
	_, err := common.CheckAuthorized(w, r)
	if err != nil {
		return
	}
	errs := url.Values{}
	params := mux.Vars(r)
	cartid := params["listid"]
	// Bug fix: reject missing OR malformed ids. The original && let invalid
	// ids through, and bson.ObjectIdHex below panics on invalid input.
	if cartid == "" || !bson.IsObjectIdHex(cartid) {
		errs.Add("listid", "list id is required")
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusBadRequest)
		response := map[string]interface{}{"errors": errs, "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	cart := &types.Cart{}
	if err := json.NewDecoder(r.Body).Decode(&cart); err != nil {
		errs.Add("data", "Invalid cart details")
		applog.Error("invalid request for update cart")
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusBadRequest)
		response := map[string]interface{}{"errors": errs, "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	cart.ID = bson.ObjectIdHex(cartid)
	as := service.AuthService{}
	authService := as.NewAuthService()
	cart.UserID = authService.GetUser().ID
	crt := service.CartService{}
	cartService := crt.NewCartService()
	err = cartService.UpdateCart(cart)
	if err != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		response := map[string]interface{}{"errors": err.Error(), "status": 0}
		json.NewEncoder(w).Encode(response)
		// Bug fix: without this return the handler fell through and wrote a
		// second (success) response on top of the error response.
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	response := map[string]interface{}{"data": cart, "message": "Cart Updated Successfully", "status": 1}
	json.NewEncoder(w).Encode(response)
}
// GetAllUserCarts : return every cart owned by the authenticated user,
// responding in JSON.
func GetAllUserCarts(w http.ResponseWriter, r *http.Request) {
	if _, err := common.CheckAuthorized(w, r); err != nil {
		return
	}
	as := service.AuthService{}
	authService := as.NewAuthService()
	userid := authService.GetUser().ID
	crt := service.CartService{}
	cartService := crt.NewCartService()
	carts, err := cartService.ViewAllCarts(userid)
	if err != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		payload := map[string]interface{}{"errors": err.Error(), "status": 0}
		json.NewEncoder(w).Encode(payload)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	payload := map[string]interface{}{"data": carts, "status": 1}
	json.NewEncoder(w).Encode(payload)
}
|
package main
import "math/rand"
// Leetcode 528. (medium) — Random Pick with Weight.
// Solution stores the prefix sums of the input weights; PickIndex binary
// searches them to pick an index with probability proportional to its weight.
type Solution struct {
	sum []int
}
// Constructor builds a Solution whose sum field holds the running prefix
// sums of the weights w.
func Constructor(w []int) Solution {
	prefix := make([]int, len(w))
	running := 0
	for i, weight := range w {
		running += weight
		prefix[i] = running
	}
	return Solution{sum: prefix}
}
// PickIndex returns a random index i with probability w[i]/sum(w): it draws
// r uniformly from [1, total] and binary-searches the prefix-sum array for
// the first entry >= r.
func (this *Solution) PickIndex() int {
	left, right := 0, len(this.sum)-1
	r := rand.Intn(this.sum[right])
	r++ // shift from [0, total) to [1, total]
	for left <= right {
		mid := left + (right-left)/2
		if r == this.sum[mid] {
			// Exact hit: this bucket's upper bound equals r.
			left = mid
			break
		} else if r < this.sum[mid] {
			right = mid - 1
		} else {
			left = mid + 1
		}
	}
	return left
}
|
package model
import (
"errors"
"lhc.go.game.center/libs/mysql"
"time"
)
// Menu is the gorm model for a row of the yxyy_menu table; Parent refers to
// the Id of the parent menu (0 for top level).
type Menu struct {
	Id int `json:"id" form:"id"`
	Mark string `json:"mark" form:"mark"`
	Type string `json:"type" form:"type"`
	Name string `json:"name" form:"name"`
	Url string `json:"url" form:"url"`
	Parent int `json:"parent" gorm:"default:0" form:"parent"`
	Icon string `json:"icon" form:"icon"`
	Sort int `json:"sort" form:"sort"`
	Status int `json:"status" form:"status"`
	Author string `json:"author"`
	CreateTime int64 `json:"create_time" `
	UpdateTime int64 `json:"update_time" `
}
// MenuTree is the JSON view of a menu node including its recursive children,
// as produced by the GetMeunTree functions.
type MenuTree struct {
	Id int `json:"id"`
	Mark string `json:"mark"`
	Type string `json:"type"`
	Name string `json:"name"`
	Url string `json:"url"`
	Parent int `json:"parent" gorm:"default:0"`
	Icon string `json:"icon"`
	Sort int `json:"sort"`
	Status int `json:"status"`
	Children []*MenuTree `json:"children"`
}
// NewMenu returns an empty Menu ready to be populated.
func NewMenu() *Menu {
	return new(Menu)
}
// Get loads the menu row whose primary key matches this.Id into the receiver.
func (this *Menu) Get() error {
	if err := mysql.MysqlConnet.Model(&this).Where("id = ?",this.Id).First(&this).Error;err!=nil{
		return err
	}
	return nil
}
// GetParentMenu returns the direct children of the given parent menu id,
// ordered by sort then create_time. Query errors are silently ignored.
func GetParentMenu(parent int) (menu []*Menu) {
	mysql.MysqlConnet.Table("yxyy_menu").Where("parent = ?",parent).Order("sort").Order("create_time").Find(&menu)
	return
}
// GetMeunTree recursively builds the menu tree rooted at parent, ordered by
// sort then create_time. (The "Meun" spelling is kept for compatibility with
// existing callers.)
func GetMeunTree(parent int) (menuList []*MenuTree) {
	var menu []Menu
	mysql.MysqlConnet.Table("yxyy_menu").Where("parent = ?",parent).Order("sort").Order("create_time").Find(&menu)
	for _,v := range menu {
		Children := GetMeunTree(v.Id) // fetch each parent menu's children
		node := &MenuTree{
			Id: v.Id,
			Name: v.Name,
			Url: v.Url,
			Icon: v.Icon,
			Mark: v.Mark,
			Sort: v.Sort,
			Parent: v.Parent,
			Status: v.Status,
		}
		node.Children = Children
		menuList = append(menuList,node)
	}
	return
}
// GetMeunTree recursively builds the menu tree rooted at parent. At the top
// level (this.Parent == 0) the result can be filtered by a name substring.
func (this *Menu) GetMeunTree(parent int) (menuList []*MenuTree) {
	var menu []Menu
	Db := mysql.MysqlConnet.Table("yxyy_menu").Where("parent = ?", parent)
	if this.Name != "" && this.Parent == 0 {
		// Bug fix: the LIKE clause was missing its "?" placeholder, so the
		// bound pattern was never applied and the generated SQL was invalid.
		Db = Db.Where("name like ?", "%"+this.Name+"%")
	}
	Db.Order("sort").Order("create_time").Find(&menu)
	for _, v := range menu {
		children := this.GetMeunTree(v.Id) // fetch each parent menu's children
		node := &MenuTree{
			Id:       v.Id,
			Name:     v.Name,
			Url:      v.Url,
			Icon:     v.Icon,
			Mark:     v.Mark,
			Sort:     v.Sort,
			Parent:   v.Parent,
			Status:   v.Status,
			Children: children,
		}
		menuList = append(menuList, node)
	}
	return
}
// GetMen builds a filtered query but never executes it and returns nothing.
// NOTE(review): this looks like abandoned work-in-progress — the LIKE clause
// lacks both a placeholder and an argument; confirm before relying on it.
func (this *Menu) GetMen() {
	Db := mysql.MysqlConnet.Table("yxyy_menu")
	if this.Name != "" {
		Db = Db.Where("name like ")
	}
}
// UpdateData validates the menu (name and url required) and persists it:
// updates the existing row when Id is set, otherwise creates a new row.
// Create/update timestamps are maintained here.
func (m *Menu) UpdateData () error {
	if m.Name=="" {
		return errors.New("名称不能为空")
	}
	if m.Url=="" {
		return errors.New("url不能为空")
	}
	m.UpdateTime=time.Now().Unix()
	// NOTE(review): m is already *Menu, so &m passes **Menu to gorm's
	// Model/Update/Create — confirm gorm dereferences this as intended.
	if m.Id !=0 {
		if err:=mysql.MysqlConnet.Model(&m).Where("id = ?",m.Id).Update(&m).Error;err!=nil{
			return err
		}
	}else {
		m.CreateTime=time.Now().Unix()
		if err:=mysql.MysqlConnet.Model(&m).Create(&m).Error;err!=nil{
			return err
		}
	}
	return nil
}
|
/*
Copyright 2019 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tag
import (
"context"
"crypto/md5"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/docker"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/graph"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/output/log"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/latest"
)
// inputDigestTagger tags images with a digest computed from the content of
// their transitive source dependencies (see GenerateTag).
type inputDigestTagger struct {
	cfg   docker.Config
	cache graph.SourceDependenciesCache
}
// NewInputDigestTagger returns an input-digest Tagger backed by a freshly
// created source-dependencies cache for the given artifact graph.
func NewInputDigestTagger(cfg docker.Config, ag graph.ArtifactGraph) (Tagger, error) {
	cache := graph.NewSourceDependenciesCache(cfg, nil, ag)
	return NewInputDigestTaggerWithSourceCache(cfg, cache)
}
// NewInputDigestTaggerWithSourceCache returns an input-digest Tagger that
// reuses the provided source-dependencies cache.
func NewInputDigestTaggerWithSourceCache(cfg docker.Config, cache graph.SourceDependenciesCache) (Tagger, error) {
	t := &inputDigestTagger{cfg: cfg, cache: cache}
	return t, nil
}
// GenerateTag derives a tag for image from the content of all its transitive
// source dependencies: each file is hashed individually and the sorted list
// of file hashes is hashed again into the final tag.
func (t *inputDigestTagger) GenerateTag(ctx context.Context, image latest.Artifact) (string, error) {
	var inputs []string
	srcFiles, err := t.cache.TransitiveArtifactDependencies(ctx, &image)
	if err != nil {
		return "", err
	}
	// must sort as hashing is sensitive to the order in which files are processed
	sort.Strings(srcFiles)
	for _, d := range srcFiles {
		h, err := fileHasher(d, image.Workspace)
		if err != nil {
			if os.IsNotExist(err) {
				log.Entry(ctx).Tracef("skipping dependency %q for artifact cache calculation: %v", d, err)
				continue // Ignore files that don't exist
			}
			return "", fmt.Errorf("getting hash for %q: %w", d, err)
		}
		inputs = append(inputs, h)
	}
	return encode(inputs)
}
// encode returns the hex-encoded sha256 digest of the JSON encoding of
// inputs, producing one stable key for a list of file hashes.
func encode(inputs []string) (string, error) {
	digest := sha256.New()
	if err := json.NewEncoder(digest).Encode(inputs); err != nil {
		return "", err
	}
	sum := digest.Sum(nil)
	return hex.EncodeToString(sum), nil
}
// fileHasher hashes the contents and name of a file.
// The name is taken relative to workspacePath when possible, so the digest
// is stable regardless of workspace location; non-regular files (symlinks,
// directories) contribute only their relative path.
func fileHasher(path string, workspacePath string) (string, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return "", err
	}
	h := md5.New()
	rel, relErr := filepath.Rel(workspacePath, path)
	if relErr != nil {
		rel = path
	}
	h.Write([]byte(rel))
	if fi.Mode().IsRegular() {
		f, err := os.Open(path)
		if err != nil {
			return "", err
		}
		defer f.Close()
		if _, err := io.Copy(h, f); err != nil {
			return "", err
		}
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}
|
package cards
import "errors"
// Suit enumerates the four card suits; none_suit is an internal sentinel
// used for jokers and invalid cards.
type Suit uint8
const (
	Hearts Suit = iota
	Diamonds
	Clubs
	Spades
	none_suit
)
// Rank enumerates card ranks from Ace down to Two; none_rank is an internal
// sentinel marking an invalid card.
type Rank uint8
const (
	Ace Rank = iota
	King
	Queen
	Jack
	Ten
	Nine
	Eight
	Seven
	Six
	Five
	Four
	Three
	Two
	none_rank
)
// Joker is a special rank outside the normal range (see NewJoker).
const Joker Rank = 255
// Color is a card's color: Red, Black, or None (for jokers).
type Color uint8
const (
	Red Color = iota
	Black
	None
)
// Card pairs a suit with a rank. Construct via NewCard or NewJoker.
type Card struct {
	suit Suit
	rank Rank
}
// noneCard is the invalid-card sentinel returned alongside errors.
var noneCard = Card{ none_suit, none_rank}
// NewJoker returns a joker card: no suit, rank Joker.
func NewJoker() Card {
	return Card{suit: none_suit, rank: Joker}
}
// NewCard returns a Card with the given suit and rank. Jokers cannot be
// created here: use NewJoker instead.
func NewCard(suit Suit, rank Rank) (Card, error) {
	if suit >= none_suit {
		return noneCard, errors.New("Invalid suit!")
	}
	// Bug fix: check for Joker BEFORE the general rank bound. Joker (255)
	// is >= none_rank, so testing it afterwards made this branch unreachable.
	if rank == Joker {
		return noneCard, errors.New("Use NewJoker() to create a joker!")
	}
	if rank >= none_rank {
		return noneCard, errors.New("Invalid rank!")
	}
	return Card{suit, rank}, nil
}
// Suit returns the card's suit. Jokers (and the invalid card) carry no suit,
// so an error is returned for them.
func (card Card) Suit() (Suit, error) {
	if card.suit == none_suit {
		// Fixed typo in the error message ("Jockers" -> "Jokers").
		return none_suit, errors.New("Jokers do not have suit!")
	}
	return card.suit, nil
}
// Rank returns the card's rank (Joker for jokers).
func (card Card) Rank() Rank {
	return card.rank
}
// Color reports the card's color: Red for hearts and diamonds, Black for
// clubs and spades, None for jokers.
func (card Card) Color() Color {
	switch {
	case card.rank == Joker:
		return None
	case card.suit == Hearts || card.suit == Diamonds:
		return Red
	default:
		return Black
	}
}
|
package v2
import (
"errors"
"log"
"net/http"
"github.com/labstack/echo/v4"
"github.com/traPtitech/trap-collection-server/src/domain/values"
"github.com/traPtitech/trap-collection-server/src/handler/v2/openapi"
"github.com/traPtitech/trap-collection-server/src/service"
)
// Seat is the v2 API handler group for seat endpoints, delegating to the
// seat service.
type Seat struct {
	seatService service.Seat
}
// NewSeat constructs the seat handler group around the given service.
func NewSeat(seatService service.Seat) *Seat {
	s := &Seat{seatService: seatService}
	return s
}
// GetSeats returns the list of all seats.
// (GET /seats)
func (seat *Seat) GetSeats(c echo.Context) error {
	seats, err := seat.seatService.GetSeats(c.Request().Context())
	if err != nil {
		log.Printf("error: failed to get seats: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to get seats")
	}
	// Convert domain seats to the API representation; seats with an unknown
	// status are logged and skipped rather than failing the whole request.
	res := make([]*openapi.Seat, 0, len(seats))
	for _, seat := range seats {
		var status openapi.SeatStatus
		switch seat.Status() {
		case values.SeatStatusEmpty:
			status = openapi.Empty
		case values.SeatStatusInUse:
			status = openapi.InUse
		default:
			log.Printf("error: invalid seat status: %v\n", seat.Status())
			continue
		}
		res = append(res, &openapi.Seat{
			Id:     openapi.SeatID(seat.ID()),
			Status: status,
		})
	}
	return c.JSON(http.StatusOK, res)
}
// PostSeat changes the total number of seats and returns the updated list.
// (POST /seats)
func (seat *Seat) PostSeat(c echo.Context) error {
	var req openapi.PostSeatRequest
	err := c.Bind(&req)
	if err != nil {
		log.Printf("error: failed to bind request: %v\n", err)
		return echo.NewHTTPError(http.StatusBadRequest, "failed to bind request")
	}
	seats, err := seat.seatService.UpdateSeatNum(c.Request().Context(), uint(req.Num))
	if err != nil {
		log.Printf("error: failed to post seat: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to post seat")
	}
	// Convert domain seats to the API representation; seats with an unknown
	// status are logged and skipped.
	res := make([]*openapi.Seat, 0, len(seats))
	for _, seat := range seats {
		var status openapi.SeatStatus
		switch seat.Status() {
		case values.SeatStatusEmpty:
			status = openapi.Empty
		case values.SeatStatusInUse:
			status = openapi.InUse
		default:
			log.Printf("error: invalid seat status: %v\n", seat.Status())
			continue
		}
		res = append(res, &openapi.Seat{
			Id:     openapi.SeatID(seat.ID()),
			Status: status,
		})
	}
	return c.JSON(http.StatusOK, res)
}
// PatchSeatStatus updates the status of a single seat.
// (PATCH /seats/{seatID})
func (seat *Seat) PatchSeatStatus(c echo.Context, seatID openapi.SeatIDInPath) error {
	var req openapi.PatchSeatStatusRequest
	err := c.Bind(&req)
	if err != nil {
		log.Printf("error: failed to bind request: %v\n", err)
		return echo.NewHTTPError(http.StatusBadRequest, "failed to bind request")
	}
	// Map the API status to the domain status, rejecting unknown values.
	var status values.SeatStatus
	switch req.Status {
	case openapi.Empty:
		status = values.SeatStatusEmpty
	case openapi.InUse:
		status = values.SeatStatusInUse
	default:
		log.Printf("error: invalid seat status: %v\n", req.Status)
		return echo.NewHTTPError(http.StatusBadRequest, "invalid seat status")
	}
	domainSeat, err := seat.seatService.UpdateSeatStatus(c.Request().Context(), values.SeatID(seatID), status)
	// Both "no such seat" and "invalid status transition" surface as 404.
	if errors.Is(err, service.ErrNoSeat) || errors.Is(err, service.ErrInvalidSeatStatus) {
		return echo.NewHTTPError(http.StatusNotFound, "no seat")
	}
	if err != nil {
		log.Printf("error: failed to patch seat status: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to patch seat status")
	}
	// Map the resulting domain status back to the API representation.
	var resStatus openapi.SeatStatus
	switch domainSeat.Status() {
	case values.SeatStatusEmpty:
		resStatus = openapi.Empty
	case values.SeatStatusInUse:
		resStatus = openapi.InUse
	default:
		log.Printf("error: invalid seat status: %v\n", domainSeat.Status())
		return echo.NewHTTPError(http.StatusInternalServerError, "invalid seat status")
	}
	return c.JSON(http.StatusOK, openapi.Seat{
		Id:     openapi.SeatID(domainSeat.ID()),
		Status: resStatus,
	})
}
|
package solutions
// longestPalindrome returns the length of the longest palindrome that can be
// built from the bytes of s: every even count is fully usable, and when any
// byte has an odd count one extra character can sit in the middle.
func longestPalindrome(s string) int {
	counts := make(map[byte]int)
	for i := 0; i < len(s); i++ {
		counts[s[i]]++
	}
	odd := 0
	for _, c := range counts {
		odd += c % 2
	}
	if odd == 0 {
		return len(s)
	}
	return len(s) - odd + 1
}
|
package handlers
import (
"net/http"
"github.com/gorilla/mux"
khttp "github.com/kiali/k-charted/http"
"k8s.io/apimachinery/pkg/api/errors"
"github.com/kiali/kiali/business"
"github.com/kiali/kiali/prometheus"
)
// AppList is the API handler to fetch all the apps to be displayed, related to a single namespace
func AppList(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	// Get business layer
	business, err := getBusiness(r)
	if err != nil {
		RespondWithError(w, http.StatusInternalServerError, "Apps initialization error: "+err.Error())
		return
	}
	namespace := params["namespace"]
	// Fetch and build apps
	appList, err := business.App.GetAppList(namespace)
	if err != nil {
		RespondWithError(w, http.StatusInternalServerError, err.Error())
		return
	}
	RespondWithJSON(w, http.StatusOK, appList)
}
// AppDetails is the API handler to fetch all details to be displayed, related to a single app
func AppDetails(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	// Get business layer
	business, err := getBusiness(r)
	if err != nil {
		RespondWithError(w, http.StatusInternalServerError, "Services initialization error: "+err.Error())
		return
	}
	namespace := params["namespace"]
	app := params["app"]
	// Fetch and build app
	appDetails, err := business.App.GetApp(namespace, app)
	if err != nil {
		// A missing app maps to 404; everything else is a server error.
		if errors.IsNotFound(err) {
			RespondWithError(w, http.StatusNotFound, err.Error())
		} else {
			RespondWithError(w, http.StatusInternalServerError, err.Error())
		}
		return
	}
	RespondWithJSON(w, http.StatusOK, appDetails)
}
// AppMetrics is the API handler to fetch metrics to be displayed, related to an app-label grouping
func AppMetrics(w http.ResponseWriter, r *http.Request) {
	// Delegate to the mock-friendly variant with the default Prometheus client.
	getAppMetrics(w, r, defaultPromClientSupplier)
}
// getAppMetrics (mock-friendly version)
func getAppMetrics(w http.ResponseWriter, r *http.Request, promSupplier promClientSupplier) {
vars := mux.Vars(r)
namespace := vars["namespace"]
app := vars["app"]
prom, namespaceInfo := initClientsForMetrics(w, r, promSupplier, namespace)
if prom == nil {
// any returned value nil means error & response already written
return
}
params := prometheus.IstioMetricsQuery{Namespace: namespace, App: app}
err := extractIstioMetricsQueryParams(r, ¶ms, namespaceInfo)
if err != nil {
RespondWithError(w, http.StatusBadRequest, err.Error())
return
}
metrics := prom.GetMetrics(¶ms)
RespondWithJSON(w, http.StatusOK, metrics)
}
// CustomDashboard is the API handler to fetch runtime metrics to be displayed, related to a single app
func CustomDashboard(w http.ResponseWriter, r *http.Request) {
	// Delegate rendering to the shared k-charted dashboard handler.
	cfg := business.DashboardsConfig()
	khttp.DashboardHandler(r.URL.Query(), mux.Vars(r), w, cfg)
}
// AppDashboard is the API handler to fetch Istio dashboard, related to a single app
func AppDashboard(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
namespace := vars["namespace"]
app := vars["app"]
prom, namespaceInfo := initClientsForMetrics(w, r, defaultPromClientSupplier, namespace)
if prom == nil {
// any returned value nil means error & response already written
return
}
params := prometheus.IstioMetricsQuery{Namespace: namespace, App: app}
err := extractIstioMetricsQueryParams(r, ¶ms, namespaceInfo)
if err != nil {
RespondWithError(w, http.StatusBadRequest, err.Error())
return
}
svc := business.NewDashboardsService(prom)
dashboard, err := svc.GetIstioDashboard(params)
if err != nil {
RespondWithError(w, http.StatusInternalServerError, err.Error())
return
}
RespondWithJSON(w, http.StatusOK, dashboard)
}
|
package main
import "purelight/kafka2/consumer"
// main runs the Kafka consumer-group worker.
func main() {
	// Single-consumer variant, kept for reference:
	//consumer.Exec()
	consumer.GroupExec()
}
|
//+build !js
package html
// Event is an empty stand-in for the DOM Event type on non-js builds.
type Event struct{}
// MouseEvent is an empty stand-in for the DOM MouseEvent type on non-js builds.
type MouseEvent struct{}
|
package main
import (
"fmt"
"math"
"math/cmplx"
)
// ## TIPOS
// bool
// string
// int int8 int16 int32 int64
// uint uint8 uint16 uint32 uint64 uintptr
// byte // pseudônimo para uint8
// rune // pseudônimo para int32 representa um ponto de código Unicode
// float32 float64
// complex64 complex128
// Package-level variable declarations.
var c, python, java bool
var ii, jj int = 1, 2
// Pi is a package-level constant.
const Pi = 3.14
// Grouped declarations with explicit types and initializers.
var (
	ToBe bool = false
	MaxInt uint64 = 1<<64 - 1
	z complex128 = cmplx.Sqrt(-5 + 12i)
)
// main demonstrates basic variable declarations, short assignment, type
// inspection with Printf verbs, and explicit type conversions.
func main() {
	// Variable declaration inside a function.
	var i int
	fmt.Println(i, c, python, java)
	k := 3 // short assignment, only valid inside a function
	var c1, python1, java1 = true, false, "no!"
	fmt.Println(ii, jj, k, c1, python1, java1)
	fmt.Printf("Type: %T Value: %v\n", ToBe, ToBe)
	fmt.Printf("Type: %T Value: %v\n", MaxInt, MaxInt)
	fmt.Printf("Type: %T Value: %v\n", z, z)
	// Converting types. NOTE(review): the local z below shadows the
	// package-level complex128 z printed just above.
	var f float64 = math.Sqrt(float64(jj*jj + k*k))
	var z uint = uint(f)
	fmt.Println(f, z)
	// Constant
	fmt.Println(Pi)
}
|
package main
import (
"github.com/barchart/common-go/pkg/logger"
"github.com/barchart/common-go/pkg/parameters"
)
// Usage examples:
//
// go run main.go --STAGE=DEV
//
// go run main.go --STAGE=DEV --HOST="some host" --PORT=1234 --DATABASE=database_name --LOCAL=true
//
// STAGE=DEV go run main.go --HOST="some host" --PORT=1234 --DATABASE=database_name --LOCAL=true
//
// log is the shared barchart logger used throughout this example.
var log = logger.Log
// main demonstrates defining, parsing and reading barchart parameters.
func main() {
	// Define the parameters this program accepts.
	// parameters.Add is an alias for parameters.AddString.
	parameters.Add("STAGE", "DEV", "A stage parameter.", true)
	parameters.AddString("HOST", "", "A host of database", false)
	parameters.AddInt("PORT", 5432, "A port of database", false)
	parameters.Add("DATABASE", "", "A name of database", false)
	parameters.AddBool("LOCAL", false, "Run application locally", false)
	// Resolve all parameters from flags, environment variables and AWS
	// Secrets Manager. parameters.Parse calls flag.Parse internally, so
	// never call flag.Parse yourself. The result is map[string]interface{}.
	myParams := parameters.Parse()
	// Values are interface{}; type-assert before use.
	local := myParams["LOCAL"].(bool)
	log.Println("Assigns a parameter to a variable")
	log.Printf("LOCAL: %v \n", local)
	log.Println("Reads all parameter using for ... range:")
	log.Println("_______________")
	for key, value := range myParams {
		log.Printf("%v: %+v", key, value)
	}
	log.Println("_______________")
}
|
package go_wasm_exec
import (
"errors"
"io"
"os"
"github.com/pgavlin/warp/wasi"
)
// fsObject backs the JS-style "fs" object exposed to a wasm guest with a
// wasi.FS. files maps open file descriptors to wasi files; descriptors
// 0/1/2 are pre-populated with stdin/stdout/stderr by NewFS.
type fsObject struct {
	Object
	fs wasi.FS
	files map[int]wasi.File
}
// read reads up to len(b) bytes from descriptor fd into b. An offset of -1
// reads from the current position (Readv); any other offset is a positional
// read (Pread). It reports how many bytes were read, or an error for an
// unknown descriptor.
func (fs *fsObject) read(fd int, b []byte, offset int64) (uint32, error) {
	file, found := fs.files[fd]
	if !found {
		return 0, errors.New("bad file descriptor")
	}
	iov := [][]byte{b}
	if offset != -1 {
		return file.Pread(iov, offset)
	}
	return file.Readv(iov)
}
// write writes b to descriptor fd. An offset of -1 writes at the current
// position (Writev); any other offset is a positional write (Pwrite). It
// reports how many bytes were written, or an error for an unknown descriptor.
func (fs *fsObject) write(fd int, b []byte, offset int64) (uint32, error) {
	file, found := fs.files[fd]
	if !found {
		return 0, errors.New("bad file descriptor")
	}
	iov := [][]byte{b}
	if offset != -1 {
		return file.Pwrite(iov, offset)
	}
	return file.Writev(iov)
}
// NewFS builds the JS-like "fs" object exposed to the wasm guest. The
// returned Value carries open-flag constants plus "read"/"write" callbacks
// whose argument layout mirrors Node's fs.read/fs.write:
// args[0]=fd, args[1]=buffer, args[2]=start, args[3]=end (slice bounds into
// the buffer), args[4]=file position (number, or non-number/-1 for "current
// position"), args[5]=completion callback(err, n, buffer).
func NewFS(stdin, stdout, stderr wasi.File, fs wasi.FS) Value {
	o := &fsObject{
		fs: fs,
		// Standard descriptors are pre-opened at their conventional numbers.
		files: map[int]wasi.File{
			0: stdin,
			1: stdout,
			2: stderr,
		},
	}
	o.Object = NewObject(ObjectClass, map[string]Value{
		"constants": ValueOf(map[string]Value{
			"O_WRONLY": ValueOf(os.O_WRONLY),
			"O_RDWR": ValueOf(os.O_RDWR),
			"O_CREAT": ValueOf(os.O_CREATE),
			"O_TRUNC": ValueOf(os.O_TRUNC),
			"O_APPEND": ValueOf(os.O_APPEND),
			"O_EXCL": ValueOf(os.O_EXCL),
		}),
		"read": ValueOf(func(args []Value) (Value, error) {
			fd, start, end := args[0].Int(), args[2].Int(), args[3].Int()
			b, _ := args[1].Uint8Array()
			cb, _ := args[5].Function()
			// -1 means "read from the current file position".
			offset := int64(-1)
			if args[4].Type() == TypeNumber && args[4].Int() != -1 {
				offset = int64(args[4].Int())
			}
			n, err := o.read(fd, b[start:end], offset)
			// EOF is reported to JS as a successful zero/short read, not an error.
			if err == io.EOF {
				err = nil
			}
			return cb.Invoke([]Value{Undefined(), ValueOf(err), ValueOf(n), args[1]})
		}),
		"write": ValueOf(func(args []Value) (Value, error) {
			fd, start, end := args[0].Int(), args[2].Int(), args[3].Int()
			b, _ := args[1].Uint8Array()
			cb, _ := args[5].Function()
			// -1 means "write at the current file position".
			offset := int64(-1)
			if args[4].Type() == TypeNumber && args[4].Int() != -1 {
				offset = int64(args[4].Int())
			}
			n, err := o.write(fd, b[start:end], offset)
			return cb.Invoke([]Value{Undefined(), ValueOf(err), ValueOf(n), args[1]})
		}),
	})
	return ValueOf(o)
}
|
// Copyright 2016 Zhang Peihao <zhangpeihao@gmail.com>
package util
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestBufferPool verifies that a buffer taken from the pool round-trips
// written data and that PutBuffer resets it before returning it to the pool.
func TestBufferPool(t *testing.T) {
	b := GetBuffer()
	b.WriteString("do be do be do")
	assert.Equal(t, "do be do be do", b.String())
	PutBuffer(b)
	assert.Equal(t, 0, b.Len())
}
|
package main
import (
"log"
"net/http"
"github.com/hzy/web/framework"
"github.com/hzy/web/framework/middleware"
)
// main wires the middleware chain and routes, then serves HTTP on :8080.
func main() {
	app := framework.NewCore()
	// Recovery first so panics from later middleware/handlers are caught.
	app.Use(middleware.Recovery())
	app.Use(middleware.Cost())
	registerRouter(app)
	srv := &http.Server{Addr: ":8080", Handler: app}
	err := srv.ListenAndServe()
	if err != nil {
		log.Println(err)
	}
}
|
/*******************************************************************************
* Copyright 2017 Samsung Electronics All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*******************************************************************************/
// Package messenger provides abstracted interfaces for HTTP messages,
// including requests and responses.
package messenger
import (
"bytes"
"commons/logger"
"commons/url"
"net/http"
"sort"
"sync"
)
// init installs the production implementations behind the two package seams:
// sendHttpRequest (concurrent fan-out requester) and httpInterface (real
// http.DefaultClient wrapper). Tests can overwrite both.
func init() {
	sendHttpRequest = httpRequester
	httpInterface = useHttp
}
// sendHttpRequest is an injectable function used by all SdamMsgrImpl methods
// to fan a request out to multiple urls; init points it at httpRequester.
var sendHttpRequest func(method string, urls []string, dataOptional ...string) []httpResponse
// A httpResponse represents an HTTP response received from remote device.
type httpResponse struct {
	index int // position of the target url in the original request list
	resp *http.Response // nil when the request could not be sent
	err string // error text; empty on success
}
// sortRespSlice sorts httpResponses back into original request order.
type sortRespSlice []httpResponse
// SdamMsgrImpl implements the agent-messaging API (deploy/info/start/...).
type SdamMsgrImpl struct{}
// DeployApp POSTs data to /api/v1/deploy on every member.
func (SdamMsgrImpl) DeployApp(members []map[string]interface{}, data string) (respCode []int, respBody []string) {
	logger.Logging(logger.DEBUG, "IN")
	defer logger.Logging(logger.DEBUG, "OUT")
	return changeToReturnValue(sendHttpRequest("POST", setUrlList(members, url.Deploy()), data))
}
// InfoApp GETs /api/v1/apps/{appId} from every member.
func (SdamMsgrImpl) InfoApp(members []map[string]interface{}, appId string) (respCode []int, respBody []string) {
	logger.Logging(logger.DEBUG, "IN")
	defer logger.Logging(logger.DEBUG, "OUT")
	return changeToReturnValue(sendHttpRequest("GET", setUrlList(members, url.Apps(), "/", appId)))
}
// DeleteApp DELETEs /api/v1/apps/{appId} on every member.
func (SdamMsgrImpl) DeleteApp(members []map[string]interface{}, appId string) (respCode []int, respBody []string) {
	logger.Logging(logger.DEBUG, "IN")
	defer logger.Logging(logger.DEBUG, "OUT")
	return changeToReturnValue(sendHttpRequest("DELETE", setUrlList(members, url.Apps(), "/", appId)))
}
// StartApp POSTs to /api/v1/apps/{appId}/start on every member.
func (SdamMsgrImpl) StartApp(members []map[string]interface{}, appId string) (respCode []int, respBody []string) {
	logger.Logging(logger.DEBUG, "IN")
	defer logger.Logging(logger.DEBUG, "OUT")
	return changeToReturnValue(sendHttpRequest("POST", setUrlList(members, url.Apps(), "/", appId, url.Start())))
}
// StopApp POSTs to /api/v1/apps/{appId}/stop on every member.
func (SdamMsgrImpl) StopApp(members []map[string]interface{}, appId string) (respCode []int, respBody []string) {
	logger.Logging(logger.DEBUG, "IN")
	defer logger.Logging(logger.DEBUG, "OUT")
	return changeToReturnValue(sendHttpRequest("POST", setUrlList(members, url.Apps(), "/", appId, url.Stop())))
}
// UpdateApp POSTs to /api/v1/apps/{appId}/update on every member.
func (SdamMsgrImpl) UpdateApp(members []map[string]interface{}, appId string) (respCode []int, respBody []string) {
	logger.Logging(logger.DEBUG, "IN")
	defer logger.Logging(logger.DEBUG, "OUT")
	return changeToReturnValue(sendHttpRequest("POST", setUrlList(members, url.Apps(), "/", appId, url.Update())))
}
// InfoApps GETs /api/v1/apps from every member.
func (SdamMsgrImpl) InfoApps(member []map[string]interface{}) (respCode []int, respBody []string) {
	logger.Logging(logger.DEBUG, "IN")
	defer logger.Logging(logger.DEBUG, "OUT")
	return changeToReturnValue(sendHttpRequest("GET", setUrlList(member, url.Apps())))
}
// UpdateAppInfo POSTs data to /api/v1/apps/{appId} on every member.
func (SdamMsgrImpl) UpdateAppInfo(member []map[string]interface{}, appId string, data string) (respCode []int, respBody []string) {
	logger.Logging(logger.DEBUG, "IN")
	defer logger.Logging(logger.DEBUG, "OUT")
	return changeToReturnValue(sendHttpRequest("POST", setUrlList(member, url.Apps(), "/", appId), data))
}
// Unregister POSTs to /api/v1/unregister on every member.
func (SdamMsgrImpl) Unregister(member []map[string]interface{}) (respCode []int, respBody []string) {
	logger.Logging(logger.DEBUG, "IN")
	defer logger.Logging(logger.DEBUG, "OUT")
	return changeToReturnValue(sendHttpRequest("POST", setUrlList(member, url.Unregister())))
}
// Len reports the number of responses in the slice.
func (s sortRespSlice) Len() int {
	return len(s)
}
// Less orders responses by the index of their original request url.
func (s sortRespSlice) Less(i, j int) bool {
	return s[i].index < s[j].index
}
// Swap exchanges the responses at positions i and j.
func (s sortRespSlice) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
// _HTTPInterface is an interface including the function to execute a single
// HTTP transaction; tests replace it to avoid real network traffic.
type _HTTPInterface interface {
	DoWrapper(req *http.Request) (*http.Response, error)
}
// _UseHttp is the production implementation of _HTTPInterface.
type _UseHttp struct{}
// httpInterface is the seam used by httpRequester; set by init.
var httpInterface _HTTPInterface
// useHttp is the default _UseHttp instance installed into httpInterface.
var useHttp _UseHttp
// DoWrapper forwards the request to http.DefaultClient.Do. (The receiver is
// named u to avoid shadowing the package-level useHttp variable.)
func (u _UseHttp) DoWrapper(req *http.Request) (*http.Response, error) {
	return http.DefaultClient.Do(req)
}
// httpRequester builds one request per url (with the optional body) and
// sends them all concurrently, returning the responses sorted back into the
// original url order. Each httpResponse has either a non-nil resp or a
// non-empty err string.
func httpRequester(method string, urls []string, dataOptional ...string) []httpResponse {
	var wg sync.WaitGroup
	wg.Add(len(urls))
	respChannel := make(chan httpResponse, len(urls))
	for i := range urls {
		go func(idx int) {
			// BUGFIX: register Done *before* doing any work. The original
			// deferred Done at the end of the goroutine body, so a panic in
			// the request path would leave wg.Wait() blocked forever.
			defer wg.Done()
			logger.Logging(logger.DEBUG, "sending http request:", urls[idx])
			// BUGFIX: the original left req nil (and err nil) when more than
			// one data argument was passed, then called DoWrapper(nil).
			// Treat anything other than exactly one body as an empty body.
			body := bytes.NewBuffer(nil)
			if len(dataOptional) == 1 {
				body = bytes.NewBuffer([]byte(dataOptional[0]))
			}
			resp := httpResponse{index: idx}
			req, err := http.NewRequest(method, urls[idx], body)
			if err != nil {
				resp.err = err.Error()
				respChannel <- resp
				return
			}
			resp.resp, err = httpInterface.DoWrapper(req)
			if err != nil {
				resp.err = err.Error()
			}
			respChannel <- resp
		}(i)
	}
	wg.Wait()
	respList := make([]httpResponse, 0, len(urls))
	for range urls {
		respList = append(respList, <-respChannel)
	}
	// Responses arrive in completion order; restore request order.
	sort.Sort(sortRespSlice(respList))
	return respList
}
// changeToReturnValue extracts a status code and body string from each
// httpResponse. Requests that never produced a response (resp == nil) are
// reported as code 500 with the error text wrapped in a JSON message.
func changeToReturnValue(respList []httpResponse) (respCode []int, respBody []string) {
	var buf bytes.Buffer
	for i := 0; i < len(respList); i++ {
		buf.Reset()
		if respList[i].resp == nil {
			message := `{"message":"` + respList[i].err + `"}`
			respBody = append(respBody, message)
			respCode = append(respCode, 500)
		} else {
			buf.ReadFrom(respList[i].resp.Body)
			// BUGFIX: close the response body so the underlying connection
			// can be reused; the original leaked every body it read.
			respList[i].resp.Body.Close()
			respBody = append(respBody, buf.String())
			respCode = append(respCode, respList[i].resp.StatusCode)
		}
	}
	return respCode, respBody
}
// setUrlList builds one full url per member by concatenating the scheme,
// the member's host:port, the API base path and the given api_parts.
// NOTE(review): the "host"/"port" type assertions panic on malformed member
// maps, as in the original — callers must supply string values.
func setUrlList(members []map[string]interface{}, api_parts ...string) (urls []string) {
	const httpTag = "http://"
	for _, member := range members {
		var b bytes.Buffer
		b.WriteString(httpTag + member["host"].(string) +
			":" + member["port"].(string) +
			url.Base())
		for _, part := range api_parts {
			b.WriteString(part)
		}
		urls = append(urls, b.String())
	}
	return urls
}
|
package main
import "fmt"
/*
6.29日给我通知, 说coding有较为严重的bug,不给通过。。。 fail了,准备了两个月的g就这样解释了。。。
*/
/*
2018.6.28 3:30pm-4:15pm
Please use this Google doc to code during your interview. To free your hands for coding, we recommend that you use a headset or a phone with speaker option.
https://docs.google.com/document/d/1LlYtpwjase1rM4ycE0uVV3NGRUdSyLyRvsALTpS3ewc/edit?dac=1
flxdns.com.
A Weighted Item is a pair of int "id" and float "weight".
Weighted Item Array: An array with no repeated id, and the weights are in descending order.
Input: N Weighted Item array.
Output: A new Weighted Item array.
A: (id: 1, w: 2) (id: 5, w: 1)
B: (id: 3, w: 3) (id: 6, w: 1.5)
output: (id 3, w: 3), (id: 1, w: 2) (id: 6, w: 1.5) (id 5, w: 1)
A: (id: 1, w: 5) (id: 5, w: 4)
B: (id: 5, w: 5) (id: 1, w: 1.5)
output: (id 1, w: 3) (id: 6, w: 1.5) (id 5, w: 1)
O(len(A)+len(B))
Merge sort O(MlogN) N is arrary num, M array length sum
*/
// item is a weighted entry: a unique id paired with a weight.
type item struct {
	id int
	w float32
}
// main exercises mergeN on a small fixture and on nil input.
func main() {
	merged := mergeN([][]*item{
		{{1, 5}, {5, 4}},
		{{5, 5}, {1, 1.5}},
		{{2, 5}, {3, 1.5}},
	})
	print(merged)
	print(mergeN(nil))
}
// mergeN merges N weighted-item arrays into one array with unique ids,
// keeping for each id its highest weight, in descending weight order.
func mergeN(items [][]*item) []*item {
	return mergeNR(items)[0]
}
// mergeNR pairwise-merges the arrays and recurses until a single array
// remains; it always returns a slice with at least one (possibly nil) entry.
func mergeNR(items [][]*item) [][]*item {
	switch {
	case len(items) == 0:
		return [][]*item{nil}
	case len(items) == 1:
		return items
	}
	// Pad with a nil array so every array has a merge partner.
	if len(items)%2 != 0 {
		items = append(items, nil)
	}
	merged := make([][]*item, 0, len(items)/2)
	for j := 0; j+1 < len(items); j += 2 {
		merged = append(merged, mergeTwo(items[j], items[j+1]))
	}
	return mergeNR(merged)
}
// print writes the items as "id,weight;" pairs on one line.
// (Shadows the predeclared print builtin, as in the original.)
func print(it []*item) {
	var line string
	for idx := range it {
		line += fmt.Sprintf("%d,%f;", it[idx].id, it[idx].w)
	}
	fmt.Println(line)
}
// mergeTwo merges two descending-weight arrays into one descending-weight
// array with unique ids. When an id appears in both inputs, the occurrence
// with the larger weight wins. The ids set dedups across both inputs; the
// advance of i1/i2 must happen on every path or the loop never terminates
// (see the preserved buggy variant below).
func mergeTwo(items1, items2 []*item) []*item {
	// items1,items2 could be nil.
	ret := make([]*item, 0)
	ids := make(map[int]bool)
	i1, i2 := 0, 0
	for i1 < len(items1) && i2 < len(items2) {
		if items1[i1].id == items2[i2].id {
			// Same id at both heads: keep the heavier one, advance both.
			if ids[items1[i1].id] == false {
				if items1[i1].w > items2[i2].w {
					ret = append(ret, items1[i1])
				} else {
					ret = append(ret, items2[i2])
				}
				ids[items1[i1].id] = true
			}
			i1, i2 = i1+1, i2+1
		} else {
			// Different ids: take the heavier head (descending output order).
			if items1[i1].w > items2[i2].w {
				if ids[items1[i1].id] == false {
					ret = append(ret, items1[i1])
					ids[items1[i1].id] = true
				}
				i1 += 1
			} else {
				if ids[items2[i2].id] == false {
					ret = append(ret, items2[i2])
					ids[items2[i2].id] = true
				}
				i2 += 1
			}
		}
	}
	// Drain whichever input still has items (at most one of these runs).
	for i1 < len(items1) {
		if ids[items1[i1].id] == false {
			ret = append(ret, items1[i1])
			ids[items1[i1].id] = true
		}
		i1 += 1
	}
	/*
		// Buggy earlier version, kept for reference: the loop only advanced
		// i2 inside the if, so a duplicate id never advanced and the loop
		// spun forever; it also marked ids *after* incrementing i2.
		// i2++ must happen unconditionally.
		for i2 < len(items2) {
			if ids[items2[i2].id] == false {
				ret = append(ret,items2[i2])
				i2 += 1
				ids[items2[i2].id]=true
			}
		}
	*/
	for i2 < len(items2) {
		if ids[items2[i2].id] == false {
			ret = append(ret, items2[i2])
			ids[items2[i2].id] = true
		}
		i2 += 1
	}
	return ret
}
|
package profiles
import (
"errors"
"fmt"
"github.com/classmethod/aurl/utils"
ini "github.com/rakyll/goini"
)
// Name and Version identify this tool in the default User-Agent string;
// Version is expected to be overridden at build time.
var (
	Name = "aurl"
	Version = "dev"
)
// Profile is one named OAuth2 client configuration loaded from the ini file
// at DEFAULT_CONFIG_FILE; unset keys are filled with package defaults by
// LoadProfile.
type Profile struct {
	Name string
	ClientId string
	ClientSecret string
	AuthorizationEndpoint string
	TokenEndpoint string
	RedirectURI string
	GrantType string
	Scope string
	Username string
	Password string
	DefaultContentType string
	UserAgent string
}
// Ini key names used in the profiles file, plus the fallback values applied
// when a key is absent.
const (
	DEFAULT_CONFIG_FILE = "~/.aurl/profiles"
	CLIENT_ID = "client_id"
	CLIENT_SECRET = "client_secret"
	AUTH_SERVER_AUTH_ENDPOINT = "auth_server_auth_endpoint"
	AUTH_SERVER_TOKEN_ENDPOINT = "auth_server_token_endpoint"
	REDIRECT = "redirect"
	GRANT_TYPE = "grant_type"
	SCOPES = "scopes"
	USERNAME = "username"
	PASSWORD = "password"
	DEFAULT_CONTENT_TYPE = "default_content_type"
	DEFAULT_USER_AGENT = "default_user_agent"
	//SOURCE_PROFILE = "source_profile"
	DEFAULT_CLIENT_ID = "aurl"
	DEFAULT_CLIENT_SECRET = "aurl"
	DEFAULT_GRANT_TYPE = "authorization_code"
	DEFAULT_SCOPES = "root"
)
// LoadProfile reads the config file and returns the profile registered
// under profileName, substituting package defaults for missing keys. It
// returns an error when the config cannot be read or the profile is absent.
func LoadProfile(profileName string) (Profile, error) {
	dict, err := loadConfig()
	if err != nil {
		return Profile{}, err
	}
	p, ok := dict[profileName]
	if !ok {
		return Profile{}, errors.New("Unknown profile: " + profileName)
	}
	return Profile{
		Name: profileName,
		ClientId: getOrDefault(p, CLIENT_ID, DEFAULT_CLIENT_ID),
		ClientSecret: getOrDefault(p, CLIENT_SECRET, DEFAULT_CLIENT_SECRET),
		AuthorizationEndpoint: getOrDefault(p, AUTH_SERVER_AUTH_ENDPOINT, ""),
		TokenEndpoint: getOrDefault(p, AUTH_SERVER_TOKEN_ENDPOINT, ""),
		RedirectURI: getOrDefault(p, REDIRECT, ""),
		GrantType: getOrDefault(p, GRANT_TYPE, DEFAULT_GRANT_TYPE),
		Scope: getOrDefault(p, SCOPES, DEFAULT_SCOPES),
		Username: getOrDefault(p, USERNAME, ""),
		Password: getOrDefault(p, PASSWORD, ""),
		DefaultContentType: getOrDefault(p, DEFAULT_CONTENT_TYPE, ""),
		UserAgent: getOrDefault(p, DEFAULT_USER_AGENT, fmt.Sprintf("%s-%s", Name, Version)),
	}, nil
}
// loadConfig parses the ini file at DEFAULT_CONFIG_FILE (with "~" expanded)
// into a map of profile-section name -> key/value pairs.
func loadConfig() (map[string]map[string]string, error) {
	return ini.Load(utils.ExpandPath(DEFAULT_CONFIG_FILE))
}
// String renders a debug summary of the profile. ClientSecret, Username and
// Password are intentionally omitted.
func (p Profile) String() string {
	// BUGFIX: the original format string misspelled two labels
	// ("tokendEndpoint" -> "tokenEndpoint", "scooe" -> "scope").
	return fmt.Sprintf("{name:%s, clientId:%s, authEndpoint:%s, tokenEndpoint:%s, redirect:%s, grantType:%s, scope:%s}",
		p.Name, p.ClientId, p.AuthorizationEndpoint, p.TokenEndpoint, p.RedirectURI, p.GrantType, p.Scope)
}
// getOrDefault returns dict[key] when the key is present (even if its value
// is empty), and defaultValue otherwise.
func getOrDefault(dict map[string]string, key string, defaultValue string) string {
	value, ok := dict[key]
	if !ok {
		return defaultValue
	}
	return value
}
|
package password
import (
passwordResetModel "go_simpleweibo/app/models/password_reset"
userModel "go_simpleweibo/app/models/user"
"go_simpleweibo/app/requests"
)
// PassWordResetForm carries the fields of a password-reset request. Email is
// not taken from the request: it is filled in from the stored reset record
// when the token validates (see tokenExistValidator).
type PassWordResetForm struct {
	Email string
	Token string
	Password string
	PasswordConfirmation string
}
// tokenExistValidator returns a validator that checks the reset token exists
// in storage; as a side effect of success it copies the record's email into
// the form. A non-empty return value is the error message.
func (p *PassWordResetForm) tokenExistValidator() requests.ValidatorFunc {
	return func() string {
		m, err := passwordResetModel.GetByToken(p.Token)
		if err != nil {
			return "该 token 不存在"
		}
		p.Email = m.Email
		return ""
	}
}
// Validate runs the password and token validators and returns all error
// messages (empty slice when the form is valid). Messages are paired with
// validators positionally via ValidatorMsgArr; the user-facing strings are
// intentionally in Chinese.
func (p *PassWordResetForm) Validate() (errors []string) {
	errors = requests.RunValidators(
		requests.ValidatorMap{
			"password": {
				requests.RequiredValidator(p.Password),
				requests.MixLengthValidator(p.Password, 6),
				requests.EqualValidator(p.Password, p.PasswordConfirmation),
			},
			"token": {
				requests.RequiredValidator(p.Token),
				p.tokenExistValidator(),
			},
		},
		requests.ValidatorMsgArr{
			"password": {
				"密码不能为空",
				"密码长度不能小于 6 个字符",
				"两次输入的密码不一致",
			},
			"token": {
				"token 不能为空",
				"该 token 不存在",
			},
		},
	)
	return errors
}
// ValidateAndUpdateUser validates the form, consumes the reset token and
// updates the user's password. It returns the updated user, or nil plus the
// accumulated error messages on any failure. Note: the token is deleted
// before the password update, so a later failure still invalidates it.
func (p *PassWordResetForm) ValidateAndUpdateUser() (user *userModel.User, errors []string) {
	errors = p.Validate()
	if len(errors) != 0 {
		return nil, errors
	}
	// Validation passed: delete (consume) the token.
	if err := passwordResetModel.DeleteByToken(p.Token); err != nil {
		errors = append(errors, "重置密码失败: "+err.Error())
		return nil, errors
	}
	// Update the user's password. p.Email was filled in by the token validator.
	user, err := userModel.GetByEmail(p.Email)
	if err != nil {
		errors = append(errors, "重置密码失败: "+err.Error())
		return nil, errors
	}
	user.Password = p.Password
	if err = user.Update(true); err != nil {
		errors = append(errors, "重置密码失败: "+err.Error())
		return nil, errors
	}
	return user, []string{}
}
|
package discovery
import (
"encoding/json"
"fmt"
"log"
"sync"
"time"
"github.com/coreos/etcd/client"
"golang.org/x/net/context"
)
// Master tracks worker/peer membership discovered through etcd.
// NOTE(review): members is mutated from watch goroutines without a lock —
// confirm single-goroutine access with the owners.
type Master struct {
	members map[string]*Member
	KeysAPI client.KeysAPI
}
// Member is a client machine
type Member struct {
	InGroup bool // false once the member's etcd key expires
	IP string
	Name string
}
// NewMaster connects to the given etcd endpoints and returns a Master with
// an empty member table. The process exits if etcd is unreachable.
func NewMaster(endpoints []string) *Master {
	cfg := client.Config{
		Endpoints: endpoints,
		Transport: client.DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
	}
	etcdClient, err := client.New(cfg)
	if err != nil {
		// BUGFIX: typo in the fatal message ("connec" -> "connect").
		log.Fatal("Error: cannot connect to etcd:", err)
	}
	master := &Master{
		members: make(map[string]*Member),
		KeysAPI: client.NewKeysAPI(etcdClient),
	}
	//go master.WatchWorkers()
	return master
}
// AddWorker registers info as an in-group member, replacing any existing
// entry with the same name.
func (m *Master) AddWorker(info *WorkerInfo) {
	m.members[info.Name] = &Member{
		InGroup: true,
		IP: info.IP,
		Name: info.Name,
	}
}
// UpdateWorker marks the named member as back in the group. Unknown names
// are ignored.
//
// BUGFIX: the original dereferenced the map entry unconditionally, which
// panics with a nil pointer if the worker was never added.
func (m *Master) UpdateWorker(info *WorkerInfo) {
	if member, ok := m.members[info.Name]; ok {
		member.InGroup = true
	}
}
// NodeToWorkerInfo decodes an etcd node's JSON value into a WorkerInfo.
// Decoding errors are logged and leave the returned struct zero-valued.
func NodeToWorkerInfo(node *client.Node) *WorkerInfo {
	info := new(WorkerInfo)
	if err := json.Unmarshal([]byte(node.Value), info); err != nil {
		log.Print(err)
	}
	fmt.Println(node) // debug trace of the raw node, kept from the original
	return info
}
// NodeToKV wraps an etcd node's key/value pair in a WorkerInfo, or returns
// nil for a nil node (e.g. a missing PrevNode on some watch events).
func NodeToKV(node *client.Node) *WorkerInfo {
	if node == nil {
		return nil
	}
	return &WorkerInfo{node.Key, node.Value}
}
// WatchWorkers watches the "peers/" etcd prefix recursively and mirrors
// membership changes into m.members: "expire" clears the InGroup flag,
// "set" adds or refreshes a worker, "delete" removes it. The loop exits on
// the first watch error.
func (m *Master) WatchWorkers() {
	api := m.KeysAPI
	watcher := api.Watcher("peers/", &client.WatcherOptions{
		Recursive: true,
	})
	for {
		res, err := watcher.Next(context.Background())
		if err != nil {
			log.Println("Error watch workers:", err)
			break
		}
		if res.Action == "expire" {
			// PrevNode carries the last value of the expired key.
			info := NodeToWorkerInfo(res.PrevNode)
			log.Println("Expire worker ", info.Name)
			member, ok := m.members[info.Name]
			if ok {
				member.InGroup = false
			}
			// The map entry is kept; only the InGroup flag is cleared.
		} else if res.Action == "set" {
			log.Println(res)
			info := NodeToWorkerInfo(res.Node)
			if _, ok := m.members[info.Name]; ok {
				log.Println("Update worker ", info.Name)
				m.UpdateWorker(info)
				//update
			} else {
				log.Println("Add worker ", info.Name)
				m.AddWorker(info)
				//add
			}
		} else if res.Action == "delete" {
			info := NodeToWorkerInfo(res.Node)
			log.Println("Delete worker ", info.Name)
			delete(m.members, info.Name)
		}
	}
}
// WatchPeers seeds mp with the current children of key, then watches the key
// recursively and mirrors every change into both m.members and mp. mp keys
// are "<name>@<ip>" with the peer address as the value.
//
// Fixes over the original:
//   - "set" branch: a nil node was logged ("no key and value") but then
//     dereferenced anyway, causing a nil-pointer panic; it now skips the
//     event like the other branches.
//   - "expire" branch: res.PrevNode was dereferenced before the nil check;
//     the check now comes first.
//   - removed a dead debug goroutine whose `for false` loop never ran.
func (m *Master) WatchPeers(key string, mp *sync.Map) {
	api := m.KeysAPI
	// Seed the map with the peers that already exist under key.
	res, err := api.Get(context.Background(), key, nil)
	if res == nil || err != nil {
		log.Println("watch peers err", err)
	} else {
		for _, val := range res.Node.Nodes {
			mp.Store(val.Key, val.Value)
			log.Print(val.Value, " ")
		}
		log.Println()
	}
	watcher := api.Watcher(key, &client.WatcherOptions{
		Recursive: true,
	})
	// Watch loop: exits on the first watcher error.
	for {
		res, err := watcher.Next(context.Background())
		if err != nil {
			log.Println("Error watch peers:", err)
			break
		}
		switch res.Action {
		case "expire":
			info := NodeToKV(res.PrevNode)
			if info == nil {
				log.Println("no key and value")
				continue
			}
			mp.Delete(res.PrevNode.Key)
			log.Println("Expired peer:", info.Name, info.IP)
			if member, ok := m.members[info.Name]; ok {
				member.InGroup = false
			}
			mp.Delete(info.Name + "@" + info.IP)
		case "set":
			info := NodeToKV(res.Node)
			if info == nil {
				log.Println("no key and value")
				continue
			}
			if _, ok := m.members[info.Name]; ok {
				m.UpdateWorker(info)
			} else {
				log.Println("Discover peer:", info.Name, info.IP)
				m.AddWorker(info)
			}
			mp.Store(info.Name+"@"+info.IP, info.IP)
		case "delete":
			info := NodeToKV(res.Node)
			if info == nil {
				log.Println("no key and value")
				continue
			}
			log.Println("Delete peer:", info.Name, info.IP)
			delete(m.members, info.Name)
			mp.Delete(info.Name + "@" + info.IP)
		default:
			log.Println("other info", res.Action, res)
		}
	}
}
|
package main
import (
"github.com/DuongVu089x/golang-heroku/action"
"github.com/DuongVu089x/golang-heroku/config"
"github.com/labstack/echo"
"gopkg.in/telegram-bot-api.v4"
"log"
"net/http"
"os"
)
// bot is the shared Telegram client; nil until initTelegram succeeds.
var bot *tgbotapi.BotAPI
// main boots config, sets up the Telegram webhook bot, and serves the
// webhook endpoint over echo on $PORT.
func main() {
	port := os.Getenv("PORT")
	config.Init()
	chanels := make(map[int64]*chan struct{})
	config.UserChanel = &chanels
	if port == "" {
		log.Fatal("$PORT must be set")
	}
	// Telegram: bail out of main when the bot could not be created.
	initTelegram()
	if bot == nil {
		return
	}
	router := echo.New()
	router.GET("/", func(c echo.Context) error {
		return c.String(http.StatusOK, "Hello, World!")
	})
	// The webhook path contains the bot token, as Telegram recommends.
	router.POST("/"+bot.Token, action.WebhookHandler)
	router.Logger.Fatal(router.Start(":" + port))
}
// initTelegram creates the bot client from the configured token, registers
// the webhook URL, and publishes the client via config.Bot. On creation
// failure the package-level bot stays nil and the error is only logged.
func initTelegram() {
	var err error
	bot, err = tgbotapi.NewBotAPI(config.Config.Key["bot-token"])
	if err != nil {
		log.Println(err)
		return
	}
	webhookURL := config.Config.OutboundURL["base-url"] + bot.Token
	if _, err = bot.SetWebhook(tgbotapi.NewWebhook(webhookURL)); err != nil {
		log.Println(err)
	}
	config.Bot = bot
}
|
package main
import "fmt"
import "math"
// const declares a constant value.
const s string = "constant"
// main demonstrates typed and untyped constants.
func main() {
	fmt.Println(s)
	// A const statement can appear anywhere a var statement can.
	const n = 50000000
	// Constant expressions perform arithmetic with arbitrary precision.
	const d = 3e20 / n
	fmt.Println(d)
	// A numeric constant has no type until it is given one, for example by
	// an explicit conversion.
	fmt.Println(int64(d))
	// A number can be given a type by using it in a context that requires
	// one, such as a variable assignment or a function call. For example,
	// math.Sin here expects a float64.
	fmt.Println(math.Sin(n))
}
|
package logfmt_test
import (
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/go-logfmt/logfmt"
)
// ExampleEncoder shows two logfmt records being written to stdout; the
// trailing "// Output:" comment is the expected output checked by go test.
func ExampleEncoder() {
	check := func(err error) {
		if err != nil {
			panic(err)
		}
	}
	e := logfmt.NewEncoder(os.Stdout)
	check(e.EncodeKeyval("id", 1))
	check(e.EncodeKeyval("dur", time.Second+time.Millisecond))
	check(e.EndRecord())
	check(e.EncodeKeyval("id", 1))
	check(e.EncodeKeyval("path", "/path/to/file"))
	check(e.EncodeKeyval("err", errors.New("file not found")))
	check(e.EndRecord())
	// Output:
	// id=1 dur=1.001s
	// id=1 path=/path/to/file err="file not found"
}
// ExampleDecoder scans two logfmt records and prints each key/value pair;
// the trailing "// Output:" comment is the expected output checked by go test.
func ExampleDecoder() {
	in := `
id=1 dur=1.001s
id=1 path=/path/to/file err="file not found"
`
	d := logfmt.NewDecoder(strings.NewReader(in))
	for d.ScanRecord() {
		for d.ScanKeyval() {
			fmt.Printf("k: %s v: %s\n", d.Key(), d.Value())
		}
		fmt.Println()
	}
	// Err reports scanning errors only after the loops finish.
	if d.Err() != nil {
		panic(d.Err())
	}
	// Output:
	// k: id v: 1
	// k: dur v: 1.001s
	//
	// k: id v: 1
	// k: path v: /path/to/file
	// k: err v: file not found
}
|
package apiControllers
import (
"github.com/gin-gonic/gin"
"github.com/PROJECTS/user_application_go/dbConfig"
"github.com/PROJECTS/user_application_go/libs"
"github.com/PROJECTS/user_application_go/models"
"log"
"net/http"
"time"
)
// LoginUser authenticates an email/password pair and responds with a fresh
// JWT plus its expiry time on success. All response messages are unchanged
// (user-facing, Turkish).
//
// Improvement: the else-after-return ladder is flattened into guard clauses
// and the unreachable trailing return removed; behavior is identical.
func LoginUser(c *gin.Context) {
	var loginUser models.Users
	var form = struct {
		Email string `json:"email"`
		Password string `json:"password"`
		Hash string `json:"-"` // never read from JSON; derived from Password below
	}{}
	if err := c.BindJSON(&form); err != nil {
		c.JSON(http.StatusBadRequest, "İşlem yaparken bir hata oluştu. Hata: "+err.Error())
		return
	}
	form.Hash = libs.GetMD5Hash(form.Password)
	if form.Email == "" {
		c.JSON(http.StatusUnauthorized, "Kullanıcı adı girilmemiş")
		return
	}
	if form.Password == "" {
		c.JSON(http.StatusUnauthorized, "Şifre girilmemiş")
		return
	}
	// ID stays zero when no row matches email+hash.
	dbConfig.DB.Where("email = ? and hash = ?", form.Email, form.Hash).Find(&loginUser)
	if loginUser.ID == 0 {
		c.JSON(http.StatusUnauthorized, "Kullanıcı adı veya şifre hatalı")
		return
	}
	strToken, err := libs.CreateJWT(loginUser)
	if err != nil {
		log.Println(err)
		c.JSON(http.StatusBadRequest, "Şifre oluşturma servisinde bir sorun var.")
		return
	}
	// Parse the token's "expd" claim back into a time for the response.
	timeVal, _ := time.Parse("2006-01-02T15:04:05.000Z", libs.ParseToken(strToken, "expd").(string))
	c.JSON(http.StatusOK,
		models.LoginResp{
			TokenVal: strToken,
			Expire: timeVal,
			Email: loginUser.Email,
		},
	)
}
|
package upgrade
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
gversion "github.com/mcuadros/go-version"
"github.com/sirupsen/logrus"
"github.com/harvester/harvester/pkg/settings"
)
const (
	// syncInterval is how often the upgrade checker polls for new versions.
	syncInterval = time.Hour
)
// CheckUpgradeRequest is the JSON payload POSTed to the upgrade-checker
// service.
type CheckUpgradeRequest struct {
	HarvesterVersion string `json:"harvesterVersion"`
}
// CheckUpgradeResponse lists the versions known to the upgrade-checker.
type CheckUpgradeResponse struct {
	Versions []Version `json:"versions"`
}
// Version describes one published release.
type Version struct {
	Name string `json:"name"` // must be in semantic versioning
	ReleaseDate string `json:"releaseDate"`
	MinUpgradableVersion string `json:"minUpgradableVersion,omitempty"`
	Tags []string `json:"tags"`
}
// versionSyncer periodically reports the running version to the checker and
// records which newer versions are reachable from it.
type versionSyncer struct {
	ctx context.Context
	httpClient *http.Client
}
// newVersionSyncer builds a versionSyncer bound to ctx, using an HTTP
// client with a 30-second request timeout.
func newVersionSyncer(ctx context.Context) *versionSyncer {
	httpClient := &http.Client{Timeout: 30 * time.Second}
	return &versionSyncer{ctx: ctx, httpClient: httpClient}
}
// start runs the periodic sync loop until the syncer's context is
// cancelled. Sync failures are logged and do not stop the loop.
func (s *versionSyncer) start() {
	ticker := time.NewTicker(syncInterval)
	// Robustness: stop the ticker via defer so it is released on every exit
	// path, not only the ctx.Done branch.
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := s.sync(); err != nil {
				logrus.Warnf("failed syncing upgrade versions: %v", err)
			}
		case <-s.ctx.Done():
			return
		}
	}
}
// sync reports the current server version to the upgrade-checker service
// and stores the comma-separated list of upgradable versions in settings.
// It is a no-op unless the checker is enabled, a URL is configured, and
// this build is a release.
func (s *versionSyncer) sync() error {
	upgradeCheckerEnabled := settings.UpgradeCheckerEnabled.Get()
	upgradeCheckerURL := settings.UpgradeCheckerURL.Get()
	if upgradeCheckerEnabled != "true" || upgradeCheckerURL == "" || !settings.IsRelease() {
		return nil
	}
	req := &CheckUpgradeRequest{
		HarvesterVersion: settings.ServerVersion.Get(),
	}
	var requestBody bytes.Buffer
	if err := json.NewEncoder(&requestBody).Encode(req); err != nil {
		return err
	}
	resp, err := s.httpClient.Post(upgradeCheckerURL, "application/json", &requestBody)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("expected 200 response but got %d checking upgrades", resp.StatusCode)
	}
	var checkResp CheckUpgradeResponse
	if err := json.NewDecoder(resp.Body).Decode(&checkResp); err != nil {
		return err
	}
	current := settings.ServerVersion.Get()
	versions, err := getUpgradableVersions(checkResp, current)
	if err != nil {
		return err
	}
	return settings.UpgradableVersions.Set(versions)
}
// getUpgradableVersions returns a comma-joined list of the version names in
// resp that are newer than currentVersion and whose MinUpgradableVersion is
// already satisfied by currentVersion. The error result is always nil and
// kept only for interface stability.
func getUpgradableVersions(resp CheckUpgradeResponse, currentVersion string) (string, error) {
	names := make([]string, 0, len(resp.Versions))
	for _, version := range resp.Versions {
		if gversion.Compare(currentVersion, version.Name, "<") &&
			gversion.Compare(currentVersion, version.MinUpgradableVersion, ">=") {
			names = append(names, version.Name)
		}
	}
	return strings.Join(names, ","), nil
}
|
package ante_test
import (
"math/big"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/tharsis/ethermint/app/ante"
"github.com/tharsis/ethermint/tests"
evmtypes "github.com/tharsis/ethermint/x/evm/types"
"github.com/ethereum/go-ethereum/common"
ethtypes "github.com/ethereum/go-ethereum/core/types"
)
// nextFn is a no-op AnteHandler continuation used as the terminal handler
// in the decorator tests below; it passes the context through unchanged.
func nextFn(ctx sdk.Context, _ sdk.Tx, _ bool) (sdk.Context, error) {
	return ctx, nil
}
// TestEthSigVerificationDecorator table-tests the signature-verification
// decorator: a correctly signed contract-creation tx passes, while nil,
// non-EVM and unsigned transactions are rejected.
func (suite AnteTestSuite) TestEthSigVerificationDecorator() {
	dec := ante.NewEthSigVerificationDecorator(suite.app.EvmKeeper)
	addr, privKey := tests.NewAddrKey()
	signedTx := evmtypes.NewTxContract(suite.app.EvmKeeper.ChainID(), 1, big.NewInt(10), 1000, big.NewInt(1), nil, nil, nil, nil)
	signedTx.From = addr.Hex()
	err := signedTx.Sign(suite.ethSigner, tests.NewSigner(privKey))
	suite.Require().NoError(err)
	testCases := []struct {
		name string
		tx sdk.Tx
		reCheckTx bool
		expPass bool
	}{
		{"ReCheckTx", nil, true, false},
		{"invalid transaction type", &invalidTx{}, false, false},
		{
			// a tx that was never signed
			"invalid sender",
			evmtypes.NewTx(suite.app.EvmKeeper.ChainID(), 1, &addr, big.NewInt(10), 1000, big.NewInt(1), nil, nil, nil, nil),
			false,
			false,
		},
		{"successful signature verification", signedTx, false, true},
	}
	for _, tc := range testCases {
		suite.Run(tc.name, func() {
			_, err := dec.AnteHandle(suite.ctx.WithIsReCheckTx(tc.reCheckTx), tc.tx, false, nextFn)
			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestNewEthAccountVerificationDecorator table-tests the account
// verification decorator. Each case mutates keeper state via malleate()
// before running the handler; cases build on the state left by earlier
// cases (the same tx/addr is reused throughout).
func (suite AnteTestSuite) TestNewEthAccountVerificationDecorator() {
	dec := ante.NewEthAccountVerificationDecorator(
		suite.app.AccountKeeper, suite.app.BankKeeper, suite.app.EvmKeeper,
	)
	addr := tests.GenerateAddress()
	tx := evmtypes.NewTxContract(suite.app.EvmKeeper.ChainID(), 1, big.NewInt(10), 1000, big.NewInt(1), nil, nil, nil, nil)
	tx.From = addr.Hex()
	testCases := []struct {
		name string
		tx sdk.Tx
		malleate func()
		checkTx bool
		expPass bool
	}{
		{"not CheckTx", nil, func() {}, false, true},
		{"invalid transaction type", &invalidTx{}, func() {}, true, false},
		{
			"sender not set to msg",
			evmtypes.NewTxContract(suite.app.EvmKeeper.ChainID(), 1, big.NewInt(10), 1000, big.NewInt(1), nil, nil, nil, nil),
			func() {},
			true,
			false,
		},
		{
			"sender not EOA",
			tx,
			func() {
				// set not as an EOA
				suite.app.EvmKeeper.SetCode(addr, []byte("1"))
			},
			true,
			false,
		},
		{
			"not enough balance to cover tx cost",
			tx,
			func() {
				// reset back to EOA
				suite.app.EvmKeeper.SetCode(addr, nil)
			},
			true,
			false,
		},
		{
			"success new account",
			tx,
			func() {
				suite.app.EvmKeeper.AddBalance(addr, big.NewInt(1000000))
			},
			true,
			true,
		},
		{
			"success existing account",
			tx,
			func() {
				acc := suite.app.AccountKeeper.NewAccountWithAddress(suite.ctx, addr.Bytes())
				suite.app.AccountKeeper.SetAccount(suite.ctx, acc)
				suite.app.EvmKeeper.AddBalance(addr, big.NewInt(1000000))
			},
			true,
			true,
		},
	}
	for _, tc := range testCases {
		suite.Run(tc.name, func() {
			tc.malleate()
			_, err := dec.AnteHandle(suite.ctx.WithIsCheckTx(tc.checkTx), tc.tx, false, nextFn)
			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestEthNonceVerificationDecorator exercises the nonce-verification ante
// decorator: it must be skipped on ReCheckTx, reject non-EVM txs, reject
// unknown senders, reject a nonce that does not match the account sequence,
// and accept a matching one.
//
// Fix: the third-to-last case name read "missmatch"; corrected to "mismatch".
func (suite AnteTestSuite) TestEthNonceVerificationDecorator() {
	dec := ante.NewEthNonceVerificationDecorator(suite.app.AccountKeeper)

	addr := tests.GenerateAddress()

	tx := evmtypes.NewTxContract(suite.app.EvmKeeper.ChainID(), 1, big.NewInt(10), 1000, big.NewInt(1), nil, nil, nil, nil)
	tx.From = addr.Hex()

	testCases := []struct {
		name      string
		tx        sdk.Tx
		malleate  func()
		reCheckTx bool
		expPass   bool
	}{
		{"ReCheckTx", nil, func() {}, true, true},
		{"invalid transaction type", &invalidTx{}, func() {}, false, false},
		{"sender account not found", tx, func() {}, false, false},
		{
			"sender nonce mismatch",
			tx,
			func() {
				// fresh account has sequence 0, while the tx uses nonce 1
				acc := suite.app.AccountKeeper.NewAccountWithAddress(suite.ctx, addr.Bytes())
				suite.app.AccountKeeper.SetAccount(suite.ctx, acc)
			},
			false,
			false,
		},
		{
			"success",
			tx,
			func() {
				// align the account sequence with the tx nonce (1)
				acc := suite.app.AccountKeeper.NewAccountWithAddress(suite.ctx, addr.Bytes())
				suite.Require().NoError(acc.SetSequence(1))
				suite.app.AccountKeeper.SetAccount(suite.ctx, acc)
			},
			false,
			true,
		},
	}

	for _, tc := range testCases {
		suite.Run(tc.name, func() {
			tc.malleate()

			_, err := dec.AnteHandle(suite.ctx.WithIsReCheckTx(tc.reCheckTx), tc.tx, false, nextFn)
			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestEthGasConsumeDecorator exercises the gas-consumption ante decorator:
// tx type checks, sender lookup, intrinsic-gas floor, fee balance checks,
// and panics when the tx or block gas meter runs out.
//
// NOTE(review): `&ethtypes` below restores text that was mangled to `ð`
// by an HTML-entity encoding pass; no token was otherwise changed.
func (suite AnteTestSuite) TestEthGasConsumeDecorator() {
	dec := ante.NewEthGasConsumeDecorator(suite.app.EvmKeeper)

	addr := tests.GenerateAddress()

	// tx has a 1000 gas limit (below the intrinsic cost); tx2 carries an
	// access list and a 1000000 gas limit.
	tx := evmtypes.NewTxContract(suite.app.EvmKeeper.ChainID(), 1, big.NewInt(10), 1000, big.NewInt(1), nil, nil, nil, nil)
	tx.From = addr.Hex()

	tx2 := evmtypes.NewTxContract(suite.app.EvmKeeper.ChainID(), 1, big.NewInt(10), 1000000, big.NewInt(1), nil, nil, nil, &ethtypes.AccessList{{Address: addr, StorageKeys: nil}})
	tx2.From = addr.Hex()

	testCases := []struct {
		name     string
		tx       sdk.Tx
		malleate func()
		expPass  bool
		expPanic bool
	}{
		{"invalid transaction type", &invalidTx{}, func() {}, false, false},
		{
			"sender not found",
			evmtypes.NewTxContract(suite.app.EvmKeeper.ChainID(), 1, big.NewInt(10), 1000, big.NewInt(1), nil, nil, nil, nil),
			func() {},
			false, false,
		},
		{
			"gas limit too low",
			tx,
			func() {
				acc := suite.app.AccountKeeper.NewAccountWithAddress(suite.ctx, addr.Bytes())
				suite.app.AccountKeeper.SetAccount(suite.ctx, acc)
			},
			false, false,
		},
		{
			"not enough balance for fees",
			tx2,
			func() {
				acc := suite.app.AccountKeeper.NewAccountWithAddress(suite.ctx, addr.Bytes())
				suite.app.AccountKeeper.SetAccount(suite.ctx, acc)
			},
			false, false,
		},
		{
			"not enough tx gas",
			tx2,
			func() {
				acc := suite.app.AccountKeeper.NewAccountWithAddress(suite.ctx, addr.Bytes())
				suite.app.AccountKeeper.SetAccount(suite.ctx, acc)
				suite.app.EvmKeeper.AddBalance(addr, big.NewInt(1000000))
			},
			false, true,
		},
		{
			"not enough block gas",
			tx2,
			func() {
				acc := suite.app.AccountKeeper.NewAccountWithAddress(suite.ctx, addr.Bytes())
				suite.app.AccountKeeper.SetAccount(suite.ctx, acc)
				suite.app.EvmKeeper.AddBalance(addr, big.NewInt(1000000))

				suite.ctx = suite.ctx.WithBlockGasMeter(sdk.NewGasMeter(1))
			},
			false, true,
		},
		{
			"success",
			tx2,
			func() {
				acc := suite.app.AccountKeeper.NewAccountWithAddress(suite.ctx, addr.Bytes())
				suite.app.AccountKeeper.SetAccount(suite.ctx, acc)
				suite.app.EvmKeeper.AddBalance(addr, big.NewInt(1000000))

				suite.ctx = suite.ctx.WithBlockGasMeter(sdk.NewGasMeter(10000000000000000000))
			},
			true, false,
		},
	}

	for _, tc := range testCases {
		suite.Run(tc.name, func() {
			tc.malleate()

			if tc.expPanic {
				// A 1-gas tx meter forces the out-of-gas panic path.
				suite.Require().Panics(func() {
					_, _ = dec.AnteHandle(suite.ctx.WithIsCheckTx(true).WithGasMeter(sdk.NewGasMeter(1)), tc.tx, false, nextFn)
				})
				return
			}

			_, err := dec.AnteHandle(suite.ctx.WithIsCheckTx(true), tc.tx, false, nextFn)
			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestCanTransferDecorator exercises the CanTransfer ante decorator:
// tx type check, AsMessage failure for an unsigned tx, the EVM
// CanTransfer balance check, and the funded success path.
//
// NOTE(review): `&ethtypes` below restores text that was mangled to `ð`
// by an HTML-entity encoding pass; no token was otherwise changed.
func (suite AnteTestSuite) TestCanTransferDecorator() {
	dec := ante.NewCanTransferDecorator(suite.app.EvmKeeper, suite.app.FeeMarketKeeper)

	addr, privKey := tests.NewAddrKey()

	// tx is signed below; tx2 is left unsigned so AsMessage fails.
	tx := evmtypes.NewTxContract(suite.app.EvmKeeper.ChainID(), 1, big.NewInt(10), 1000, big.NewInt(1), nil, nil, nil, &ethtypes.AccessList{})
	tx2 := evmtypes.NewTxContract(suite.app.EvmKeeper.ChainID(), 1, big.NewInt(10), 1000, big.NewInt(1), nil, nil, nil, &ethtypes.AccessList{})

	tx.From = addr.Hex()

	err := tx.Sign(suite.ethSigner, tests.NewSigner(privKey))
	suite.Require().NoError(err)

	testCases := []struct {
		name     string
		tx       sdk.Tx
		malleate func()
		expPass  bool
	}{
		{"invalid transaction type", &invalidTx{}, func() {}, false},
		{"AsMessage failed", tx2, func() {}, false},
		{
			"evm CanTransfer failed",
			tx,
			func() {
				// account exists but holds no balance
				acc := suite.app.AccountKeeper.NewAccountWithAddress(suite.ctx, addr.Bytes())
				suite.app.AccountKeeper.SetAccount(suite.ctx, acc)
			},
			false,
		},
		{
			"success",
			tx,
			func() {
				acc := suite.app.AccountKeeper.NewAccountWithAddress(suite.ctx, addr.Bytes())
				suite.app.AccountKeeper.SetAccount(suite.ctx, acc)

				suite.app.EvmKeeper.AddBalance(addr, big.NewInt(1000000))
			},
			true,
		},
	}

	for _, tc := range testCases {
		suite.Run(tc.name, func() {
			tc.malleate()

			_, err := dec.AnteHandle(suite.ctx.WithIsCheckTx(true), tc.tx, false, nextFn)
			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestAccessListDecorator exercises the access-list ante decorator with a
// funded sender, both without (tx) and with (tx2) an explicit access list.
//
// NOTE(review): `&ethtypes` below restores text that was mangled to `ð`
// by an HTML-entity encoding pass; no token was otherwise changed.
func (suite AnteTestSuite) TestAccessListDecorator() {
	dec := ante.NewAccessListDecorator(suite.app.EvmKeeper)

	addr := tests.GenerateAddress()
	al := &ethtypes.AccessList{
		{Address: addr, StorageKeys: []common.Hash{{}}},
	}

	tx := evmtypes.NewTxContract(suite.app.EvmKeeper.ChainID(), 1, big.NewInt(10), 1000, big.NewInt(1), nil, nil, nil, nil)
	tx2 := evmtypes.NewTxContract(suite.app.EvmKeeper.ChainID(), 1, big.NewInt(10), 1000, big.NewInt(1), nil, nil, nil, al)

	tx.From = addr.Hex()
	tx2.From = addr.Hex()

	testCases := []struct {
		name     string
		tx       sdk.Tx
		malleate func()
		expPass  bool
	}{
		{"invalid transaction type", &invalidTx{}, func() {}, false},
		{
			"success - no access list",
			tx,
			func() {
				acc := suite.app.AccountKeeper.NewAccountWithAddress(suite.ctx, addr.Bytes())
				suite.app.AccountKeeper.SetAccount(suite.ctx, acc)

				suite.app.EvmKeeper.AddBalance(addr, big.NewInt(1000000))
			},
			true,
		},
		{
			"success - with access list",
			tx2,
			func() {
				acc := suite.app.AccountKeeper.NewAccountWithAddress(suite.ctx, addr.Bytes())
				suite.app.AccountKeeper.SetAccount(suite.ctx, acc)

				suite.app.EvmKeeper.AddBalance(addr, big.NewInt(1000000))
			},
			true,
		},
	}

	for _, tc := range testCases {
		suite.Run(tc.name, func() {
			tc.malleate()

			_, err := dec.AnteHandle(suite.ctx.WithIsCheckTx(true), tc.tx, false, nextFn)
			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestEthIncrementSenderSequenceDecorator exercises the sequence-increment
// ante decorator. On success it also verifies the resulting nonce: for a
// contract creation (GetTo() == nil) the account nonce ends up equal to the
// tx nonce, while for a call it ends up at tx nonce + 1.
func (suite AnteTestSuite) TestEthIncrementSenderSequenceDecorator() {
	dec := ante.NewEthIncrementSenderSequenceDecorator(suite.app.AccountKeeper)
	addr, privKey := tests.NewAddrKey()

	contract := evmtypes.NewTxContract(suite.app.EvmKeeper.ChainID(), 0, big.NewInt(10), 1000, big.NewInt(1), nil, nil, nil, nil)
	contract.From = addr.Hex()

	to := tests.GenerateAddress()
	tx := evmtypes.NewTx(suite.app.EvmKeeper.ChainID(), 0, &to, big.NewInt(10), 1000, big.NewInt(1), nil, nil, nil, nil)
	tx.From = addr.Hex()

	err := contract.Sign(suite.ethSigner, tests.NewSigner(privKey))
	suite.Require().NoError(err)

	err = tx.Sign(suite.ethSigner, tests.NewSigner(privKey))
	suite.Require().NoError(err)

	testCases := []struct {
		name     string
		tx       sdk.Tx
		malleate func()
		expPass  bool
		expPanic bool
	}{
		{
			"invalid transaction type",
			&invalidTx{},
			func() {},
			false, false,
		},
		{
			// an unsigned tx has no signers, which panics inside the decorator
			"no signers",
			evmtypes.NewTx(suite.app.EvmKeeper.ChainID(), 1, &to, big.NewInt(10), 1000, big.NewInt(1), nil, nil, nil, nil),
			func() {},
			false, true,
		},
		{
			"account not set to store",
			tx,
			func() {},
			false, false,
		},
		{
			"success - create contract",
			contract,
			func() {
				acc := suite.app.AccountKeeper.NewAccountWithAddress(suite.ctx, addr.Bytes())
				suite.app.AccountKeeper.SetAccount(suite.ctx, acc)
			},
			true, false,
		},
		{
			// reuses the account created by the previous case
			"success - call",
			tx,
			func() {},
			true, false,
		},
	}

	for _, tc := range testCases {
		suite.Run(tc.name, func() {
			tc.malleate()

			if tc.expPanic {
				suite.Require().Panics(func() {
					_, _ = dec.AnteHandle(suite.ctx, tc.tx, false, nextFn)
				})
				return
			}

			_, err := dec.AnteHandle(suite.ctx, tc.tx, false, nextFn)

			if tc.expPass {
				suite.Require().NoError(err)
				msg := tc.tx.(*evmtypes.MsgEthereumTx)

				txData, err := evmtypes.UnpackTxData(msg.Data)
				suite.Require().NoError(err)

				nonce := suite.app.EvmKeeper.GetNonce(addr)
				if txData.GetTo() == nil {
					suite.Require().Equal(txData.GetNonce(), nonce)
				} else {
					suite.Require().Equal(txData.GetNonce()+1, nonce)
				}
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestEthSetupContextDecorator checks that the setup-context decorator
// rejects txs that do not implement GasTx and accepts EVM txs that do.
func (suite AnteTestSuite) TestEthSetupContextDecorator() {
	dec := ante.NewEthSetUpContextDecorator()
	tx := evmtypes.NewTxContract(suite.app.EvmKeeper.ChainID(), 1, big.NewInt(10), 1000, big.NewInt(1), nil, nil, nil, nil)

	testCases := []struct {
		name    string
		tx      sdk.Tx
		expPass bool
	}{
		{"invalid transaction type - does not implement GasTx", &invalidTx{}, false},
		{"success - transaction implement GasTx", tx, true},
	}

	for _, tc := range testCases {
		tc := tc
		suite.Run(tc.name, func() {
			_, err := dec.AnteHandle(suite.ctx, tc.tx, false, nextFn)
			if !tc.expPass {
				suite.Require().Error(err)
				return
			}
			suite.Require().NoError(err)
		})
	}
}
|
//go:generate go run generator.go
package box
import (
"sort"
)
// Box is a read/write store of embedded file contents keyed by file path.
type Box interface {
	// Add stores content in the box under the given file path.
	Add(file string, content []byte)
	// Get returns the content stored under file, or nil if absent.
	Get(file string) []byte
	// Has reports whether file exists in the box.
	Has(file string) bool
	// List returns the names of all files in the box.
	List() []string
}
// embedBox is the in-memory Box implementation: a map from file path to
// raw content.
//
// NOTE(review): access is not synchronized; this assumes Add is only called
// from generated init code before concurrent reads — confirm.
type embedBox struct {
	storage map[string][]byte
}
// Boxed is the package-level Box that generated code populates via Add.
var Boxed Box = newEmbedBox()
// newEmbedBox creates an empty embedBox ready to accept embedded files.
func newEmbedBox() *embedBox {
	return &embedBox{storage: make(map[string][]byte)}
}
// Add stores content under the file path, overwriting any previous entry.
func (e *embedBox) Add(file string, content []byte) {
	e.storage[file] = content
}
// Get returns the content stored under file, or nil when the box has no
// such entry.
//
// Always use / for looking up.
// For example: /init/README.md is actually configs/init/README.md
func (e *embedBox) Get(file string) []byte {
	// Indexing a map with a missing key yields the zero value, which for
	// []byte is nil — exactly what the explicit comma-ok branch returned.
	return e.storage[file]
}
// Has reports whether the box contains file.
func (e *embedBox) Has(file string) bool {
	// Return the comma-ok result directly instead of the
	// if-true-return-true / return-false ladder.
	_, ok := e.storage[file]
	return ok
}
// List returns the names of all files in the box, sorted lexicographically.
func (e *embedBox) List() []string {
	// Pre-size with capacity and append, replacing the manual index
	// counter and the redundant `for key, _ :=` range form.
	filenames := make([]string, 0, len(e.storage))
	for key := range e.storage {
		filenames = append(filenames, key)
	}
	sort.Strings(filenames)
	return filenames
}
|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvserver
import (
"math"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)
const (
	// replStatsRotateInterval is how often the circular window buffer in
	// replicaStats advances to a fresh window.
	replStatsRotateInterval = 5 * time.Minute
	// decayFactor is the per-window exponential decay applied to older
	// windows when computing decayed QPS.
	decayFactor = 0.8

	// MinStatsDuration defines a lower bound on how long users of replica stats
	// should wait before using those stats for anything. If the duration of a
	// measurement has been less than MinStatsDuration, these methods could easily
	// return outlier/anomalous data.
	MinStatsDuration = 5 * time.Second
)
// localityOracle returns the locality string for a given node ID.
type localityOracle func(roachpb.NodeID) string

// perLocalityCounts maps from the string representation of a locality to count.
type perLocalityCounts map[string]float64
// replicaStats maintains statistics about the work done by a replica. Its
// initial use is tracking the number of requests received from each
// cluster locality in order to inform lease transfer decisions.
type replicaStats struct {
	clock           *hlc.Clock
	getNodeLocality localityOracle

	// We use a set of time windows in order to age out old stats without having
	// to do hard resets. The `requests` array is a circular buffer of the last
	// N windows of stats. We rotate through the circular buffer every so often
	// as determined by `replStatsRotateInterval`.
	//
	// We could alternatively use a forward decay approach here, but it would
	// require more memory than this slightly less precise windowing method:
	// http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf
	mu struct {
		syncutil.Mutex
		// idx points at the currently-written window in requests.
		idx int
		// requests holds the circular buffer of per-locality counts;
		// a nil entry means the window holds no data (e.g. after a reset).
		requests [6]perLocalityCounts
		// lastRotate is when the buffer last advanced; lastReset is when
		// counts were last cleared entirely.
		lastRotate time.Time
		lastReset  time.Time
	}
}
// newReplicaStats returns a replicaStats with one fresh counts window and
// both the rotation and reset timestamps anchored to the current clock time.
func newReplicaStats(clock *hlc.Clock, getNodeLocality localityOracle) *replicaStats {
	stats := &replicaStats{
		clock:           clock,
		getNodeLocality: getNodeLocality,
	}
	stats.mu.requests[stats.mu.idx] = make(perLocalityCounts)
	now := timeutil.Unix(0, stats.clock.PhysicalNow())
	stats.mu.lastRotate = now
	stats.mu.lastReset = now
	return stats
}
// splitRequestCounts divides the current replicaStats object in two for the
// purposes of splitting a range. It modifies itself to have half its requests
// and the provided other to have the other half.
//
// Note that assuming a 50/50 split is optimistic, but it's much better than
// resetting both sides upon a split.
// TODO(a-robinson): Write test for this.
func (rs *replicaStats) splitRequestCounts(other *replicaStats) {
	// NOTE(review): `other` is locked before `rs`; concurrent calls with the
	// receivers swapped would acquire the two locks in opposite order —
	// presumably callers guarantee that cannot happen. Confirm.
	other.mu.Lock()
	defer other.mu.Unlock()
	rs.mu.Lock()
	defer rs.mu.Unlock()

	// Mirror the window position and timestamps so both halves decay in sync.
	other.mu.idx = rs.mu.idx
	other.mu.lastRotate = rs.mu.lastRotate
	other.mu.lastReset = rs.mu.lastReset

	for i := range rs.mu.requests {
		if rs.mu.requests[i] == nil {
			other.mu.requests[i] = nil
			continue
		}
		other.mu.requests[i] = make(perLocalityCounts)
		for k := range rs.mu.requests[i] {
			// Halve in place and give the other replica the same half.
			newVal := rs.mu.requests[i][k] / 2.0
			rs.mu.requests[i][k] = newVal
			other.mu.requests[i][k] = newVal
		}
	}
}
// record adds a single request from nodeID to the current stats window.
func (rs *replicaStats) record(nodeID roachpb.NodeID) {
	rs.recordCount(1, nodeID)
}
// recordCount adds count requests from nodeID to the current window,
// attributing them to the node's locality (empty when no oracle is set).
func (rs *replicaStats) recordCount(count float64, nodeID roachpb.NodeID) {
	loc := ""
	if rs.getNodeLocality != nil {
		loc = rs.getNodeLocality(nodeID)
	}
	now := timeutil.Unix(0, rs.clock.PhysicalNow())

	rs.mu.Lock()
	defer rs.mu.Unlock()

	// Advance the window first so the count lands in the right bucket.
	rs.maybeRotateLocked(now)
	rs.mu.requests[rs.mu.idx][loc] += count
}
// maybeRotateLocked advances the window buffer if at least
// replStatsRotateInterval has elapsed since the last rotation.
// Callers must hold rs.mu.
func (rs *replicaStats) maybeRotateLocked(now time.Time) {
	if now.Sub(rs.mu.lastRotate) < replStatsRotateInterval {
		return
	}
	rs.rotateLocked()
	rs.mu.lastRotate = now
}
// rotateLocked advances idx to the next circular-buffer slot and clears it,
// discarding whatever that slot previously held. Callers must hold rs.mu.
func (rs *replicaStats) rotateLocked() {
	rs.mu.idx = (rs.mu.idx + 1) % len(rs.mu.requests)
	rs.mu.requests[rs.mu.idx] = make(perLocalityCounts)
}
// perLocalityDecayingQPS returns the per-locality QPS and the amount of time
// over which the stats were accumulated.
// Note that the QPS stats are exponentially decayed such that newer requests
// are weighted more heavily than older requests.
func (rs *replicaStats) perLocalityDecayingQPS() (perLocalityCounts, time.Duration) {
	now := timeutil.Unix(0, rs.clock.PhysicalNow())

	rs.mu.Lock()
	defer rs.mu.Unlock()

	rs.maybeRotateLocked(now)

	// Use the fraction of time since the last rotation as a smoothing factor to
	// avoid jarring changes in request count immediately before/after a rotation.
	timeSinceRotate := now.Sub(rs.mu.lastRotate)
	fractionOfRotation := float64(timeSinceRotate) / float64(replStatsRotateInterval)

	counts := make(perLocalityCounts)
	var duration time.Duration
	for i := range rs.mu.requests {
		// We have to add len(rs.mu.requests) to the numerator to avoid getting a
		// negative result from the modulus operation when rs.mu.idx is small.
		requestsIdx := (rs.mu.idx + len(rs.mu.requests) - i) % len(rs.mu.requests)
		if cur := rs.mu.requests[requestsIdx]; cur != nil {
			// Older windows (larger i) get progressively smaller weights.
			decay := math.Pow(decayFactor, float64(i)+fractionOfRotation)
			if i == 0 {
				// The current window has only been open for timeSinceRotate.
				duration += time.Duration(float64(timeSinceRotate) * decay)
			} else {
				duration += time.Duration(float64(replStatsRotateInterval) * decay)
			}
			for k, v := range cur {
				counts[k] += v * decay
			}
		}
	}

	// Convert decayed counts into counts-per-second.
	if duration.Seconds() > 0 {
		for k := range counts {
			counts[k] = counts[k] / duration.Seconds()
		}
	}
	return counts, now.Sub(rs.mu.lastReset)
}
// avgQPS returns the average requests-per-second and the amount of time
// over which the stat was accumulated. Note that these averages are exact,
// not exponentially decayed (there isn't a ton of justification for going
// one way or the other, but not decaying makes the average more stable,
// which is probably better for avoiding rebalance thrashing).
func (rs *replicaStats) avgQPS() (float64, time.Duration) {
	now := timeutil.Unix(0, rs.clock.PhysicalNow())

	rs.mu.Lock()
	defer rs.mu.Unlock()

	rs.maybeRotateLocked(now)

	// First accumulate the counts, then divide by the total number of seconds.
	var sum float64
	var windowsUsed int
	for i := range rs.mu.requests {
		// We have to add len(rs.mu.requests) to the numerator to avoid getting a
		// negative result from the modulus operation when rs.mu.idx is small.
		requestsIdx := (rs.mu.idx + len(rs.mu.requests) - i) % len(rs.mu.requests)
		if cur := rs.mu.requests[requestsIdx]; cur != nil {
			windowsUsed++
			for _, v := range cur {
				sum += v
			}
		}
	}
	if windowsUsed <= 0 {
		return 0, 0
	}

	// The current (partial) window has been open for now-lastRotate; each
	// additional full window contributes replStatsRotateInterval.
	duration := now.Sub(rs.mu.lastRotate) + time.Duration(windowsUsed-1)*replStatsRotateInterval
	if duration == 0 {
		return 0, 0
	}
	return sum / duration.Seconds(), duration
}
// resetRequestCounts drops every accumulated window, re-creates the current
// one empty, and re-anchors both the rotate and reset timestamps to now.
func (rs *replicaStats) resetRequestCounts() {
	rs.mu.Lock()
	defer rs.mu.Unlock()

	for i := 0; i < len(rs.mu.requests); i++ {
		rs.mu.requests[i] = nil
	}
	rs.mu.requests[rs.mu.idx] = make(perLocalityCounts)

	now := timeutil.Unix(0, rs.clock.PhysicalNow())
	rs.mu.lastRotate = now
	rs.mu.lastReset = now
}
|
package cmd
import (
docker "github.com/mdelapenya/lpn/docker"
liferay "github.com/mdelapenya/lpn/liferay"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// init wires rmCmd and its image-specific subcommands into the root command,
// giving each subcommand a --verbose/-V flag and propagating it upward.
func init() {
	rootCmd.AddCommand(rmCmd)

	subcommands := []*cobra.Command{rmCECmd, rmCommerceCmd, rmDXPCmd, rmNightlyCmd, rmReleaseCmd}
	for _, subcommand := range subcommands {
		rmCmd.AddCommand(subcommand)
		subcommand.PersistentFlags().BoolVarP(&verbose, "verbose", "V", false, "Runs commands with Debug log level")
		subcommand.VisitParents(addVerboseFlag)
	}
}
// rmCmd is the parent "rm" command; run without a subcommand it just prints
// the available subcommands via SubCommandInfo.
var rmCmd = &cobra.Command{
	Use:   "rm",
	Short: "Removes the Liferay Portal nook instance",
	Long:  `Removes the Liferay Portal nook instance, identified by [lpn] plus each image type.`,
	PersistentPreRun: func(cmd *cobra.Command, args []string) {
		enableDebugLevel()
	},
	Run: func(cmd *cobra.Command, args []string) {
		SubCommandInfo()
	},
}
// rmCECmd removes the CE container ([lpn-ce]).
var rmCECmd = &cobra.Command{
	Use:   "ce",
	Short: "Removes the Liferay Portal CE instance",
	Long:  `Removes the Liferay Portal CE instance, identified by [lpn-ce].`,
	Run: func(cmd *cobra.Command, args []string) {
		ce := liferay.CE{}
		removeDockerContainer(ce)
	},
}

// rmCommerceCmd removes the Commerce container ([lpn-commerce]).
var rmCommerceCmd = &cobra.Command{
	Use:   "commerce",
	Short: "Removes the Liferay Portal Commerce instance",
	Long:  `Removes the Liferay Portal Commerce instance, identified by [lpn-commerce].`,
	Run: func(cmd *cobra.Command, args []string) {
		commerce := liferay.Commerce{}
		removeDockerContainer(commerce)
	},
}

// rmDXPCmd removes the DXP container ([lpn-dxp]).
var rmDXPCmd = &cobra.Command{
	Use:   "dxp",
	Short: "Removes the Liferay DXP instance",
	Long:  `Removes the Liferay DXP instance, identified by [lpn-dxp].`,
	Run: func(cmd *cobra.Command, args []string) {
		dxp := liferay.DXP{}
		removeDockerContainer(dxp)
	},
}

// rmNightlyCmd removes the Nightly Build container ([lpn-nightly]).
var rmNightlyCmd = &cobra.Command{
	Use:   "nightly",
	Short: "Removes the Liferay Portal Nightly Build instance",
	Long:  `Removes the Liferay Portal Nightly Build instance, identified by [lpn-nightly].`,
	Run: func(cmd *cobra.Command, args []string) {
		nightly := liferay.Nightly{}
		removeDockerContainer(nightly)
	},
}

// rmReleaseCmd removes the Release container ([lpn-release]).
var rmReleaseCmd = &cobra.Command{
	Use:   "release",
	Short: "Removes the Liferay Portal Release instance",
	Long:  `Removes the Liferay Portal Release instance, identified by [lpn-release].`,
	Run: func(cmd *cobra.Command, args []string) {
		release := liferay.Release{}
		removeDockerContainer(release)
	},
}
// removeDockerContainer removes the container for the given image, logging a
// warning (with the container name and the error) when removal fails.
func removeDockerContainer(image liferay.Image) {
	if err := docker.RemoveDockerContainer(image); err != nil {
		log.WithFields(log.Fields{
			"container": image.GetContainerName(),
			"error":     err,
		}).Warn("Impossible to remove the container")
	}
}
|
package main
import (
"fmt"
"strings"
)
// var name string
// var age int

// Declare several variables in one block.
var (
	name string
	age  int
)
const pi = 3.1415926 // constant assignment

// Declare several constants in one block.
const (
	a1 = 100
	a2 // with no value, a constant repeats the previous expression
	a3
)

// iota is Go's constant counter; it can only be used inside constant expressions.
const (
	b1 = iota // iota = 0
	b2        // b2 = iota = 1
	_         // _ = iota = 2 (value discarded)
	b3        // b3 = iota = 3
)

// iota resets to 0 at each const keyword and increments once per new line of
// constant declarations (think of it as the row index within the const block).
const (
	c1, c2 = iota + 1, iota + 2 // iota=0
	c3, c4 = iota + 1, iota + 2 // iota=1
)

// iota keeps definitions short; this is handy for enumerations and sizes.
const (
	_  = iota
	KB = 1 << (10 * iota) // 1 << 10
	MB = 1 << (10 * iota)
	GB = 1 << (10 * iota)
	TB = 1 << (10 * iota)
)
// main walks through Go basics: variables and constants, iota, integer
// literals in different bases, floats, booleans, and string handling.
// Fix: the first Printf format read "nam is %s"; corrected to "name is %s".
func main() {
	name = "jerry"
	age = 10
	gender := "male"
	var grade = 1 // type inference
	fmt.Printf("name is %s \n", name)
	fmt.Printf("age is %d", age)
	fmt.Println()
	fmt.Println(pi)
	fmt.Println(gender)
	fmt.Println(grade)
	fmt.Println(a1, a2, a3)
	fmt.Println(b1, b2, b3)
	fmt.Println(c1, c2, c3, c4)
	fmt.Println(KB, MB, GB, TB)

	i1 := 10                // decimal literal
	fmt.Printf("%b \n", i1) // decimal printed as binary
	fmt.Printf("%o \n", i1) // decimal printed as octal
	fmt.Printf("%x \n", i1) // decimal printed as hexadecimal
	fmt.Printf("%T \n", i1) // print the variable's type
	fmt.Println("--------------")
	i2 := 066 // octal literal
	fmt.Printf("%d \n", i2)
	fmt.Printf("%x \n", i2)
	fmt.Printf("%b \n", i2)
	fmt.Printf("%T \n", i2)
	fmt.Println("--------------")
	i3 := 0xffff // hexadecimal literal
	fmt.Printf("%d \n", i3)
	fmt.Printf("%o \n", i3)
	fmt.Printf("%b \n", i3)
	fmt.Printf("%T \n", i3)
	fmt.Println("--------------")
	i4 := int8(10)
	fmt.Printf("%o \n", i4)
	fmt.Printf("%b \n", i4)
	fmt.Printf("%T \n", i4)
	fmt.Println("--------------")
	f1 := 1.23456
	f2 := float32(1.23456)
	f3 := float64(1.23456)
	// f4 := f2 + f3 // float32 and float64 are distinct types and cannot mix
	fmt.Printf("%T \n", f1)
	fmt.Printf("%T \n", f2)
	fmt.Printf("%T \n", f3)

	b1 := true
	var b2 bool             // zero value is false
	fmt.Printf("%T \n", b2)
	fmt.Printf("%v \n", b1) // %v is the catch-all "value" verb
	fmt.Printf("%v \n", b2)

	s1 := "\"E:\\Projects\\Go\""
	s2 := `E:\Projects\Go\` // raw string literal (no escapes)
	s3 := `
	hello,
	world!
	` // raw literals may span multiple lines
	fmt.Println(len(s1))
	fmt.Println(s2)
	fmt.Println(s3)
	s4 := name + gender                      // string concatenation
	fmt.Println(len(s4))                     // length in bytes
	fmt.Printf("%s %s", name, gender)        // print directly to the terminal
	s5 := fmt.Sprintf("%s %s", name, gender) // format into a string variable
	fmt.Println(s5)
	s6 := strings.Split(s2, "\\")            // split on backslash
	fmt.Println(strings.Contains(s2, "pro")) // substring containment
	fmt.Println(strings.HasPrefix(s2, "E"))  // prefix test
	fmt.Println(strings.Index(s1, "o"))
	fmt.Println(strings.LastIndex(s1, "o")) // index of the last occurrence
	fmt.Println(strings.Join(s6, "\\"))

	// Modifying a string: strings are immutable, so convert first.
	s7 := "星期日"
	s8 := []rune(s7)        // convert to a rune slice: [星 期 日], three runes
	s8[2] = '一'             // single quotes denote a rune literal
	fmt.Println(string(s8)) // convert the rune slice back to a string

	/* Go has two character types:
	   uint8 (alias byte) represents one ASCII character.
	   rune (alias int32) represents one UTF-8 code point.
	*/
	s9 := 'A'  // rune (int32)
	s10 := "A" // string
	fmt.Printf("%T, %T", s9, s10)
}
|
package pool
import (
"fmt"
v1 "github.com/tmax-cloud/registry-operator/api/v1"
"github.com/tmax-cloud/registry-operator/pkg/structs"
"sync"
)
// JobPool stores current status of v1.RegistryJobs, who are in Pending status or Running status.
// All operations for this pool should be done in thread-safe manner, using Lock and Unlock methods.
type JobPool struct {
	// jobMap indexes every tracked job by "<namespace>_<name>".
	jobMap jobMap

	// Pending and Running hold the jobs in the corresponding state.
	Pending *structs.SortedUniqueList
	Running *structs.SortedUniqueList

	// scheduleChan signals the scheduler that the pool changed.
	scheduleChan chan struct{}

	lock sync.Mutex
}
// NewJobPool is a constructor for a JobPool. Pending is ordered by
// compareFunc; Running keeps insertion order (nil comparator).
func NewJobPool(ch chan struct{}, compareFunc structs.CompareFunc) *JobPool {
	pool := &JobPool{
		jobMap:       jobMap{},
		Pending:      structs.NewSortedUniqueQueue(compareFunc),
		Running:      structs.NewSortedUniqueQueue(nil),
		scheduleChan: ch,
	}
	return pool
}
// Lock locks JobPool.
func (j *JobPool) Lock() {
	j.lock.Lock()
}
// Unlock unlocks JobPool.
func (j *JobPool) Unlock() {
	j.lock.Unlock()
}
// SyncJob syncs JobPool with an incoming RegistryJob, considering its status.
// It maintains the jobMap and the Pending/Running lists as jobs are created,
// transition states, or are deleted, and pings the scheduler on changes.
//
// NOTE(review): the method itself does not take j.lock; presumably callers
// hold it via Lock/Unlock — confirm.
func (j *JobPool) SyncJob(job *v1.RegistryJob) {
	// If job state is not set, return
	if job.Status.State == "" {
		return
	}

	nodeID := getNodeID(job)

	oldStatus := v1.RegistryJobState("")
	newStatus := job.Status.State

	// Make / fetch node pointer
	var node *JobNode
	candidate, exist := j.jobMap[nodeID]
	if exist {
		node = candidate
		oldStatus = candidate.Status.State
		candidate.RegistryJob = job.DeepCopy()
	} else {
		node = &JobNode{
			RegistryJob: job.DeepCopy(),
		}
	}
	j.jobMap[nodeID] = node

	// If there's deletion timestamp, dismiss it
	if node.DeletionTimestamp != nil {
		j.Pending.Delete(node)
		j.Running.Delete(node)
		delete(j.jobMap, nodeID)
		j.sendSchedule()
		return
	}

	// If status is not changed, do nothing
	if exist && oldStatus == newStatus {
		return
	}

	// If it is newly created, put it in proper list
	if !exist {
		switch newStatus {
		case v1.RegistryJobStatePending:
			j.Pending.Add(node)
		case v1.RegistryJobStateRunning:
			j.Running.Add(node)
		}
		j.sendSchedule()
		return
	}

	// Pending -> Running / Failed
	if oldStatus == v1.RegistryJobStatePending {
		j.Pending.Delete(node)
		if newStatus == v1.RegistryJobStateRunning {
			j.Running.Add(node)
		}
		return
	}

	// Running -> The others
	// If it WAS running and not now, dismiss it (it is completed for some reason)
	if oldStatus == v1.RegistryJobStateRunning {
		j.Running.Delete(node)
		if newStatus == v1.RegistryJobStatePending {
			j.Pending.Add(node)
		} else {
			// terminal state: forget the job entirely
			delete(j.jobMap, nodeID)
		}
		j.sendSchedule()
		return
	}
}
// sendSchedule signals the scheduler via scheduleChan without ever blocking.
//
// The previous len/cap comparison followed by a plain send was racy: another
// sender could fill the channel between the check and the send, blocking this
// goroutine (and with an unbuffered channel the signal was always dropped).
// A select with a default branch performs the "send only if possible"
// operation atomically; dropping a signal when one is already queued is fine
// because a single signal triggers a full reschedule.
func (j *JobPool) sendSchedule() {
	select {
	case j.scheduleChan <- struct{}{}:
	default:
	}
}
// JobNode is a node to be stored in jobMap and JobPool; it wraps a
// RegistryJob so it can satisfy the structs.Item interface.
type JobNode struct {
	*v1.RegistryJob
}
// Equals implements Item's method: true only when another is a non-nil
// *JobNode for the same namespaced job (matching Name and Namespace).
func (f *JobNode) Equals(another structs.Item) bool {
	other, ok := another.(*JobNode)
	if !ok || f == nil || other == nil {
		return false
	}
	return f.Name == other.Name && f.Namespace == other.Namespace
}
// DeepCopy implements Item's method by deep-copying the wrapped RegistryJob.
func (f *JobNode) DeepCopy() structs.Item {
	return &JobNode{
		RegistryJob: f.RegistryJob.DeepCopy(),
	}
}
// Priority returns Item's priority, taken from the job spec.
func (f *JobNode) Priority() int {
	return f.RegistryJob.Spec.Priority
}
// getNodeID builds the jobMap key for a job as "<namespace>_<name>".
func getNodeID(j *v1.RegistryJob) string {
	return j.Namespace + "_" + j.Name
}
type jobMap map[string]*JobNode
|
package main
import "net"
// User represents one connected chat client.
type User struct {
	// Name and Addr both start as the remote address string.
	Name string
	Addr string
	// C is the user's outbound message queue, drained by ListenMessage.
	C    chan string
	conn net.Conn

	// server is the chat server this user belongs to.
	server *Server
}
// NewUser builds a User for the connection, naming it after the remote
// address, and starts the goroutine that pumps messages from C to the socket.
func NewUser(conn net.Conn, server *Server) *User {
	addr := conn.RemoteAddr().String()

	u := &User{
		Name:   addr,
		Addr:   addr,
		C:      make(chan string),
		conn:   conn,
		server: server,
	}

	go u.ListenMessage()

	return u
}
// Online registers the user in the server's online map (under the map lock)
// and broadcasts an online notice.
func (u *User) Online() {
	u.server.mapLock.Lock()
	u.server.OnlineMap[u.Name] = u
	u.server.mapLock.Unlock()

	u.server.BroadCastMsg(u, "已上线")
}
// Offline removes the user from the server's online map (under the map lock)
// and broadcasts an offline notice.
func (u *User) Offline() {
	u.server.mapLock.Lock()
	delete(u.server.OnlineMap, u.Name)
	u.server.mapLock.Unlock()

	u.server.BroadCastMsg(u, "已下线")
}
// SendMsg writes msg directly to the user's connection.
// NOTE(review): the Write error is ignored; a broken connection is silently
// dropped here — confirm that is intended.
func (this *User) SendMsg(msg string) {
	this.conn.Write([]byte(msg))
}
// DoMessage handles one message from this user: "who" lists all online users
// back to the sender, anything else is broadcast to everyone.
//
// Fix: the original iterated server.OnlineMap without holding mapLock, racing
// with Online/Offline. The listing is now snapshotted under the lock and the
// network writes happen after the lock is released.
func (u *User) DoMessage(msg string) {
	if msg == "who" {
		u.server.mapLock.Lock()
		lines := make([]string, 0, len(u.server.OnlineMap))
		for _, user := range u.server.OnlineMap {
			lines = append(lines, user.Name+":"+user.Addr+"\n")
		}
		u.server.mapLock.Unlock()

		for _, line := range lines {
			u.SendMsg(line)
		}
		return
	}

	u.server.BroadCastMsg(u, msg)
}
// ListenMessage pumps messages from the user's channel C to the connection.
// NOTE(review): the loop never terminates and the Write error is ignored, so
// this goroutine lives for the process lifetime even after the connection
// closes — confirm whether a shutdown path (closing C) is intended.
func (this *User) ListenMessage() {
	for {
		msg := <-this.C
		this.conn.Write([]byte(msg))
	}
}
|
package csvingest
import (
"os"
"testing"
)
// Benchmark_IngestCsv_10Lines measures ingestCsv on a 10-line fixture.
// Fixes: the open error is now checked, the file is closed, and the reader is
// rewound every iteration — previously only the first iteration saw any data
// because the file stayed at EOF for the rest.
func Benchmark_IngestCsv_10Lines(b *testing.B) {
	noopIngester := noopIngester{}
	file, err := os.Open("./test-ingestions/benchmarks/10-lines.csv")
	if err != nil {
		b.Fatal(err)
	}
	defer file.Close()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := file.Seek(0, 0); err != nil {
			b.Fatal(err)
		}
		ingestCsv(file, noopIngester)
	}
}
// Benchmark_IngestCsv_1kLines measures ingestCsv on a 1k-line fixture.
// Fixes: checked open error, closed file, and per-iteration rewind so every
// iteration reads the whole file instead of hitting EOF immediately.
func Benchmark_IngestCsv_1kLines(b *testing.B) {
	noopIngester := noopIngester{}
	file, err := os.Open("./test-ingestions/benchmarks/1k-lines.csv")
	if err != nil {
		b.Fatal(err)
	}
	defer file.Close()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := file.Seek(0, 0); err != nil {
			b.Fatal(err)
		}
		ingestCsv(file, noopIngester)
	}
}
// Benchmark_IngestCsv_20kLines measures ingestCsv on a 20k-line fixture.
// Fixes: checked open error, closed file, and per-iteration rewind so every
// iteration reads the whole file instead of hitting EOF immediately.
func Benchmark_IngestCsv_20kLines(b *testing.B) {
	noopIngester := noopIngester{}
	file, err := os.Open("./test-ingestions/benchmarks/20k-lines.csv")
	if err != nil {
		b.Fatal(err)
	}
	defer file.Close()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := file.Seek(0, 0); err != nil {
			b.Fatal(err)
		}
		ingestCsv(file, noopIngester)
	}
}
// Benchmark_IngestCsv_100kLines measures ingestCsv on a 100k-line fixture.
// Fixes: checked open error, closed file, and per-iteration rewind so every
// iteration reads the whole file instead of hitting EOF immediately.
func Benchmark_IngestCsv_100kLines(b *testing.B) {
	noopIngester := noopIngester{}
	file, err := os.Open("./test-ingestions/benchmarks/100k-lines.csv")
	if err != nil {
		b.Fatal(err)
	}
	defer file.Close()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := file.Seek(0, 0); err != nil {
			b.Fatal(err)
		}
		ingestCsv(file, noopIngester)
	}
}
// noopIngester discards rows and parse errors; it lets the benchmarks
// measure only the CSV ingestion cost.
type noopIngester struct{}

func (ni noopIngester) IngestRow(row csvRow) {}

func (ni noopIngester) HandleParseError(rowErr csvRowParseError) {}
|
package sort
import (
"sort"
"testing"
)
// TestQuickSort verifies that QuickSort actually sorts the slice.
// Fix: the original only logged the result and asserted nothing, so it could
// never fail.
func TestQuickSort(t *testing.T) {
	is := sort.IntSlice{1, 2, 3, 23, 34, 2345, 12, 23}
	QuickSort(is)
	if !sort.IsSorted(is) {
		t.Fatalf("QuickSort left slice unsorted: %v", is)
	}
}
|
package certs
import (
corev1 "k8s.io/api/core/v1"
)
const (
	// RootCACert and RootCAPriv are the Secret data keys holding the root
	// CA certificate and its private key, respectively.
	RootCACert = "ca.crt"
	RootCAPriv = "ca.key"
)
// CAData extracts the root CA certificate and private key bytes from the
// Secret. Both results are nil when secret is nil; either may be nil when
// the corresponding key is missing from the Secret data.
func CAData(secret *corev1.Secret) ([]byte, []byte) {
	if secret == nil {
		return nil, nil
	}
	cert := secret.Data[RootCACert]
	key := secret.Data[RootCAPriv]
	return cert, key
}
|
// SPDX-License-Identifier: MIT
package cmd
import (
"bytes"
"testing"
"github.com/issue9/assert/v3"
"github.com/caixw/apidoc/v7/internal/docs"
)
// TestCmdCheckSyntax runs the "syntax" subcommand against the bundled
// example docs and expects a success message with no error output.
func TestCmdCheckSyntax(t *testing.T) {
	a := assert.New(t, false)
	buf := new(bytes.Buffer)
	cmd := Init(buf)
	erro, _, succ, _ := resetPrinters()

	err := cmd.Exec([]string{"syntax", "-d", docs.Dir().Append("example").String()})
	a.NotError(err)
	// nothing on the command buffer or error printer; success printer fires
	a.Empty(buf.String()).
		Empty(erro.String()).
		NotEmpty(succ.String())
}
|
package chconn
import (
"strconv"
"strings"
"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter"
)
// Setting is a setting for the clickhouse query.
//
// The list of setting is here: https://clickhouse.com/docs/en/operations/settings/settings/
// Some of settings doesn't have effect. for example `http_zlib_compression_level`
// because chconn use TCP connection to send data not HTTP.
type Setting struct {
	// Name and Value are sent as-is on the wire.
	Name, Value string
	// Important, Custom and Obsolete are encoded together as one flags byte.
	Important, Custom, Obsolete bool
}
// Bit positions of the setting flags byte written by Setting.write.
const (
	settingFlagImportant = 0x01
	settingFlagCustom    = 0x02
	settingFlagObsolete  = 0x04
)
// Settings is a list of settings for the clickhouse query.
type Settings []Setting
// write serializes the setting as: name, flags byte, value.
func (st Setting) write(w *readerwriter.Writer) {
	w.String(st.Name)

	flags := uint8(0)
	if st.Important {
		flags |= settingFlagImportant
	}
	if st.Custom {
		flags |= settingFlagCustom
	}
	if st.Obsolete {
		flags |= settingFlagObsolete
	}
	w.Uint8(flags)

	w.String(st.Value)
}
// write serializes every setting in the list, in order.
func (s Settings) write(w *readerwriter.Writer) {
	for i := range s {
		s[i].write(w)
	}
}
// Parameters is a list of params for the clickhouse query.
type Parameters struct {
	params []Setting
}

// Parameter lazily produces one query-parameter Setting.
type Parameter func() Setting
// NewParameters evaluates each Parameter and collects the resulting
// Settings into a Parameters list.
func NewParameters(input ...Parameter) *Parameters {
	result := &Parameters{
		params: make([]Setting, len(input)),
	}
	for i := range input {
		result.params[i] = input[i]()
	}
	return result
}
// IntParameter get int query parameter.
func IntParameter[T ~int | ~int8 | ~int16 | ~int32 | ~int64](name string, v T) Parameter {
	return func() Setting {
		s := Setting{Name: name, Custom: true}
		s.Value = "'" + strconv.FormatInt(int64(v), 10) + "'"
		return s
	}
}
// IntSliceParameter get int slice query parameter, rendered as '[a,b,...]'.
func IntSliceParameter[T ~int | ~int8 | ~int16 | ~int32 | ~int64](name string, v []T) Parameter {
	return func() Setting {
		var sb strings.Builder
		sb.WriteByte('[')
		for idx, item := range v {
			if idx != 0 {
				sb.WriteByte(',')
			}
			sb.WriteString(strconv.FormatInt(int64(item), 10))
		}
		sb.WriteByte(']')

		return Setting{
			Name:   name,
			Value:  "'" + sb.String() + "'",
			Custom: true,
		}
	}
}
// UintParameter get uint query parameter.
func UintParameter[T ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64](name string, v T) Parameter {
	return func() Setting {
		s := Setting{Name: name, Custom: true}
		s.Value = "'" + strconv.FormatUint(uint64(v), 10) + "'"
		return s
	}
}
// UintSliceParameter get uint slice query parameter, rendered as '[a,b,...]'.
func UintSliceParameter[T ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64](name string, v []T) Parameter {
	return func() Setting {
		var sb strings.Builder
		sb.WriteByte('[')
		for idx, item := range v {
			if idx != 0 {
				sb.WriteByte(',')
			}
			sb.WriteString(strconv.FormatUint(uint64(item), 10))
		}
		sb.WriteByte(']')

		return Setting{
			Name:   name,
			Value:  "'" + sb.String() + "'",
			Custom: true,
		}
	}
}
// Float32Parameter get float32 query parameter.
func Float32Parameter[T ~float32](name string, v T) Parameter {
	return func() Setting {
		s := Setting{Name: name, Custom: true}
		s.Value = "'" + strconv.FormatFloat(float64(v), 'f', -1, 32) + "'"
		return s
	}
}
// Float32SliceParameter get float32 slice query parameter, rendered as '[a,b,...]'.
func Float32SliceParameter[T ~float32](name string, v []T) Parameter {
	return func() Setting {
		var sb strings.Builder
		sb.WriteByte('[')
		for idx, item := range v {
			if idx != 0 {
				sb.WriteByte(',')
			}
			sb.WriteString(strconv.FormatFloat(float64(item), 'f', -1, 32))
		}
		sb.WriteByte(']')

		return Setting{
			Name:   name,
			Value:  "'" + sb.String() + "'",
			Custom: true,
		}
	}
}
// Float64Parameter get float64 query parameter.
func Float64Parameter[T ~float64](name string, v T) Parameter {
	return func() Setting {
		s := Setting{Name: name, Custom: true}
		s.Value = "'" + strconv.FormatFloat(float64(v), 'f', -1, 64) + "'"
		return s
	}
}
// Float64SliceParameter builds a Parameter whose Setting value is the slice
// rendered as a quoted, bracketed list of float literals (64-bit rounding).
func Float64SliceParameter[T ~float64](name string, vals []T) Parameter {
	return func() Setting {
		var sb strings.Builder
		sb.WriteByte('[')
		for i, el := range vals {
			if i > 0 {
				sb.WriteByte(',')
			}
			sb.WriteString(strconv.FormatFloat(float64(el), 'f', -1, 64))
		}
		sb.WriteByte(']')
		return Setting{Name: name, Value: "'" + sb.String() + "'", Custom: true}
	}
}
// addSlashes returns str with every backslash and single quote preceded by
// a backslash, making the result safe to embed in a single-quoted literal.
func addSlashes(str string) string {
	// Byte-wise scan is safe: '\\' and '\'' are ASCII, and UTF-8
	// continuation bytes can never equal them.
	out := make([]byte, 0, len(str))
	for i := 0; i < len(str); i++ {
		if str[i] == '\\' || str[i] == '\'' {
			out = append(out, '\\')
		}
		out = append(out, str[i])
	}
	return string(out)
}
// StringParameter builds a Parameter whose Setting value is v as a
// single-quoted, backslash-escaped string literal.
func StringParameter(name, v string) Parameter {
	// Escaped eagerly; both captures are immutable strings.
	quoted := "'" + addSlashes(v) + "'"
	return func() Setting {
		return Setting{Name: name, Value: quoted, Custom: true}
	}
}
// StringSliceParameter get string array query parameter.
// The slice is rendered as a bracketed list of individually quoted,
// escaped elements, and the whole list is then quoted and escaped again.
func StringSliceParameter(name string, v []string) Parameter {
	return func() Setting {
		var b strings.Builder
		b.WriteString("[")
		for i, v := range v {
			if i > 0 {
				b.WriteString(",")
			}
			// Each element becomes 'escaped-element'.
			b.WriteString("'" + addSlashes(v) + "'")
		}
		b.WriteString("]")
		return Setting{
			Name: name,
			// NOTE(review): the element quotes written above are escaped a
			// second time by this outer addSlashes, producing e.g. '[\'a\']'.
			// Presumably intentional (the whole value is itself a quoted
			// literal) — confirm, since the numeric slice parameters above
			// do not double-escape their contents.
			Value:  "'" + addSlashes(b.String()) + "'",
			Custom: true,
		}
	}
}
// Params returns the accumulated settings in insertion order.
// The returned slice is the internal backing store, not a copy.
func (p *Parameters) Params() []Setting {
	return p.params
}
// hasParam reports whether at least one setting is stored.
// Safe to call on a nil receiver.
func (p *Parameters) hasParam() bool {
	return p != nil && len(p.params) > 0
}
// write serializes every stored setting to w, in order.
// A nil receiver writes nothing.
func (p *Parameters) write(w *readerwriter.Writer) {
	if p == nil {
		return
	}
	for _, st := range p.params {
		st.write(w)
	}
}
|
package sim
import (
"fmt"
"image/color"
"gonum.org/v1/gonum/mat"
"gonum.org/v1/plot"
"gonum.org/v1/plot/plotter"
"gonum.org/v1/plot/vg"
"gonum.org/v1/plot/vg/draw"
)
// New2DPlot creates new plot of the simulation from the three data sources:
// model: idealised model values
// measure: measurement values
// filter: filter values
// It returns error if the plot fails to be created. This can be due to either of the following conditions:
// * either of the supplied data matrices is nil
// * either of the supplied data matrices does not have at least 2 columns
// * gonum plot fails to be created
func New2DPlot(model, measure, filter *mat.Dense) (*plot.Plot, error) {
	if model == nil || measure == nil || filter == nil {
		return nil, fmt.Errorf("Invalid data supplied")
	}
	// Every matrix must provide at least (x, y) columns. Previously all
	// three checks read model.Dims(), so invalid measure/filter data
	// slipped through validation.
	_, cmd := model.Dims()
	_, cms := measure.Dims()
	_, cmf := filter.Dims()
	if cmd < 2 || cms < 2 || cmf < 2 {
		return nil, fmt.Errorf("Invalid data dimensions")
	}
	p := plot.New()
	p.Title.Text = "Simulation"
	p.X.Label.Text = "X"
	p.Y.Label.Text = "Y"
	legend := plot.NewLegend()
	legend.Top = true
	p.Legend = legend
	p.X.Max = 55
	// Make a scatter plotter for model data
	modelData := makePoints(model)
	modelScatter, err := plotter.NewScatter(modelData)
	if err != nil {
		return nil, err
	}
	modelScatter.GlyphStyle.Color = color.RGBA{R: 255, B: 128, A: 255}
	modelScatter.Shape = draw.PyramidGlyph{}
	modelScatter.GlyphStyle.Radius = vg.Points(3)
	p.Add(modelScatter)
	p.Legend.Add("model", modelScatter)
	// Make a scatter plotter for measurement data
	measData := makePoints(measure)
	measScatter, err := plotter.NewScatter(measData)
	if err != nil {
		return nil, err
	}
	measScatter.GlyphStyle.Color = color.RGBA{G: 255, A: 128}
	measScatter.GlyphStyle.Radius = vg.Points(3)
	p.Add(measScatter)
	p.Legend.Add("measurement", measScatter)
	// Make a scatter plotter for filter data
	filterPoints := makePoints(filter)
	filterScatter, err := plotter.NewScatter(filterPoints)
	if err != nil {
		return nil, fmt.Errorf("Failed to create scatter: %v", err)
	}
	filterScatter.GlyphStyle.Color = color.RGBA{R: 169, G: 169, B: 169}
	filterScatter.Shape = draw.CrossGlyph{}
	filterScatter.GlyphStyle.Radius = vg.Points(3)
	p.Add(filterScatter)
	p.Legend.Add("filtered", filterScatter)
	return p, nil
}
// makePoints converts the first two columns of m into plotter XY points,
// one point per row.
func makePoints(m *mat.Dense) plotter.XYs {
	rows, _ := m.Dims()
	pts := make(plotter.XYs, rows)
	for i := range pts {
		pts[i].X, pts[i].Y = m.At(i, 0), m.At(i, 1)
	}
	return pts
}
|
package timex_test
import (
"context"
"errors"
"time"
"github.com/socialpoint-labs/bsk/timex"
)
// ExampleParse exercises Parse with each supported input form: empty,
// absolute date-time, relative days, relative hours, and Unix timestamp.
func ExampleParse() {
	inputs := []string{"", "2016-04-23 12:56", "-10 days", "-5 hours", "1464876005"}
	for _, in := range inputs {
		if _, err := timex.Parse(in); err != nil {
			return
		}
	}
	// Output:
}
// ExampleParseFromDate parses an absolute date-time string.
func ExampleParseFromDate() {
	if _, err := timex.ParseFromDate("2016-04-23 12:56"); err != nil {
		return
	}
	// Output:
}
// ExampleParseFromDaysAgo parses a relative "days ago" expression.
func ExampleParseFromDaysAgo() {
	if _, err := timex.ParseFromDaysAgo("-1 day"); err != nil {
		return
	}
	// Output:
}
// ExampleParseFromHoursAgo parses a relative "hours ago" expression.
func ExampleParseFromHoursAgo() {
	if _, err := timex.ParseFromHoursAgo("-1 hour"); err != nil {
		return
	}
	// Output:
}
// ExampleParseFromTimestamp parses a Unix timestamp string.
func ExampleParseFromTimestamp() {
	if _, err := timex.ParseFromTimestamp("1464876005"); err != nil {
		return
	}
	// Output:
}
// ExampleIntervalRunner starts a millisecond-interval runner in the
// background; the deferred cancel stops it when the example returns.
// Nothing is printed, hence the empty Output.
func ExampleIntervalRunner() {
	f := func() {}
	runner := timex.IntervalRunner(time.Millisecond, f)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go runner.Run(ctx)
	// Output:
}
// ExampleIntervalRunner_stop_on_error shows how a callback can stop its
// own runner: f cancels the shared context the first time alwaysFail
// returns an error, which ends runner.Run.
func ExampleIntervalRunner_stop_on_error() {
	alwaysFail := func() error {
		return errors.New("arbitrary error")
	}
	ctx, cancel := context.WithCancel(context.Background())
	// cancel is invoked from inside f (not deferred) — that is the point
	// of the example.
	f := func() {
		err := alwaysFail()
		if err != nil {
			cancel()
		}
	}
	runner := timex.IntervalRunner(time.Millisecond, f)
	go runner.Run(ctx)
	// Output:
}
|
// +build ios macAppStore
package udwOcAppStore
// setAppStoreLoading forwards a loading-state change to the registered
// callback; a nil context or nil callback makes it a no-op.
func setAppStoreLoading(loadType string) {
	if gContext != nil && gContext.loadingCallback != nil {
		gContext.loadingCallback(loadType)
	}
}
// setAppStoreAlert forwards an alert event to the registered callback;
// a nil context or nil callback makes it a no-op.
func setAppStoreAlert(alertType string) {
	if gContext != nil && gContext.alertCallback != nil {
		gContext.alertCallback(alertType)
	}
}
// setAppStorePurchasedSuccess forwards a successful purchase to the
// registered callback; a nil context or nil callback makes it a no-op.
func setAppStorePurchasedSuccess(productId, transactionId string) {
	if gContext != nil && gContext.purchasedSuccessCallback != nil {
		gContext.purchasedSuccessCallback(productId, transactionId)
	}
}
// setAppStoreRestoredSuccess forwards the restore-purchases outcome to
// the registered callback; a nil context or nil callback makes it a no-op.
func setAppStoreRestoredSuccess(success bool) {
	if gContext != nil && gContext.restoredCallback != nil {
		gContext.restoredCallback(success)
	}
}
// callbackContext bundles the Go-side callbacks invoked by the
// setAppStore* bridge functions in this file. Any field may be nil.
type callbackContext struct {
	loadingCallback          func(loadType string)
	alertCallback            func(alertType string)
	purchasedSuccessCallback func(productId, transactionId string)
	restoredCallback         func(success bool)
}
// initContextIfNeed lazily allocates the global callback context.
// NOTE(review): no locking — presumably only called during single-threaded
// setup; confirm before introducing concurrent callers.
func initContextIfNeed() {
	if gContext == nil {
		gContext = &callbackContext{}
	}
}

// gContext holds the process-wide callback registration; nil until
// initContextIfNeed runs.
var gContext *callbackContext
|
package gonsen
import (
"bytes"
"github.com/mitchellh/packer/common/json"
"io/ioutil"
"net/http"
"strconv"
"time"
)
// Program is the user-facing view of one onsen.ag program entry,
// assembled from the raw API payload by newProgram.
type Program struct {
	MediaType    string    // copied from the raw "Type" field
	ThumbnailUrl string    // absolute URL (host prefix added to the raw path)
	MediaUrl     string    // the "pc" entry of the raw MoviePath map
	Title        string
	Slug         string    // raw "Url" field
	Personality  string
	Guest        string
	Updated      time.Time // parsed from the "2006.1.2"-formatted Update field
	Index        int       // numeric value of the raw Count field
}
// rawProgram mirrors the JSON(P) document returned by the getMovieInfo
// endpoint. Only a subset of these fields is surfaced via Program; the
// rest are decoded but currently unused.
type rawProgram struct {
	Type           string
	ThumbnailPath  string
	MoviePath      map[string]string // variant name (e.g. "pc") -> media URL
	Title          string
	Personality    string
	Guest          string
	Update         string // "2006.1.2"-style date string
	Count          string // episode index as a decimal string
	Schedule       string
	OptionText     string
	Mail           string
	Copyright      string
	Url            string
	Link           []map[string]string
	RecommendGoods []map[string]string
	RecommendMovie []map[string]string
	Cm             []map[string]string
	AllowExpand    string
}
// newProgram maps a raw API record onto the exported Program type.
// Unparseable Update/Count values leave the zero time / zero index,
// matching the lenient behavior callers rely on.
func newProgram(raw rawProgram) Program {
	updated, _ := time.Parse("2006.1.2", raw.Update)
	index, _ := strconv.Atoi(raw.Count)
	return Program{
		MediaType:    raw.Type,
		ThumbnailUrl: "http://www.onsen.ag" + raw.ThumbnailPath,
		MediaUrl:     raw.MoviePath["pc"],
		Title:        raw.Title,
		Slug:         raw.Url,
		Personality:  raw.Personality,
		Guest:        raw.Guest,
		Updated:      updated,
		Index:        index,
	}
}
// GetProgram fetches and decodes metadata for the named program from the
// onsen.ag JSONP endpoint. It returns a zero Program together with a
// non-nil error on any transport, read, or decode failure.
func GetProgram(name string) (Program, error) {
	res, err := http.Get("http://www.onsen.ag/data/api/getMovieInfo/" + name)
	if err != nil {
		return Program{}, err
	}
	defer res.Body.Close()
	b, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return Program{}, err
	}
	// The endpoint returns JSONP: strip the "callback(...);" wrapper to
	// recover the JSON object.
	b = bytes.TrimPrefix(b, []byte("callback("))
	b = bytes.TrimSpace(b)
	b = bytes.TrimSuffix(b, []byte(");"))
	raw := rawProgram{}
	// The decode error was previously dropped, silently yielding a zero
	// Program on malformed payloads; surface it to the caller instead.
	if err := json.Unmarshal(b, &raw); err != nil {
		return Program{}, err
	}
	return newProgram(raw), nil
}
|
package main
import (
"fmt"
//"log"
. "math"
//"math/rand"
"bytes"
"strings"
//"time"
"strconv"
//"errors"
)
import (
"github.com/lxn/walk"
)
// Constants for XPCalcOps.flag — the calculator's input-state machine.
const (
	OPS_NONE = 1 // initial state, nothing entered yet
	OPS_OBJ  = 2 // digits are being accumulated (set by appendByte)
	OPS_ERR  = 3 // an error is displayed; opsRun refuses to run until cleared
	OPS_ACT  = 4 // an operation just ran; the next digit starts a new operand
)
// Constants for XPCalcOps.notation — the active numeric base.
const (
	OPS_NOTATION_NONE = 0
	OPS_NOTATION_16   = 16
	OPS_NOTATION_10   = 10
	OPS_NOTATION_08   = 8
	OPS_NOTATION_02   = 2
)
// Constants for XPCalcOps.unit_bytes — the selected operand width.
const (
	OPS_UNIT_BYTES_NONE = 0
	OPS_UNIT_BYTES_08   = 1
	OPS_UNIT_BYTES_04   = 2
	OPS_UNIT_BYTES_02   = 3
	OPS_UNIT_BYTES_01   = 4
)
// XPCalcOps holds the calculator's mutable state: the two operand
// strings, the pending operator, the input-state flag, and the
// previous/current base and operand-width selections.
type XPCalcOps struct {
	val_1st string // left operand; also receives the last result
	val_2nd string // right operand, accumulated by appendByte
	ops     string // pending operator, e.g. "+", "SIN", "POWER"
	flag    uint32 // one of the OPS_* state constants
	notation_old   int // base in effect before the last radio change
	notation_new   int // base currently selected
	unit_bytes_old int // operand width before the last change
	unit_bytes_new int // operand width currently selected
}
// Dialog wires the walk dialog window and its generated widget handles
// (dialogUI) to the calculator state machine.
type Dialog struct {
	*walk.Dialog
	ui  dialogUI  // widget handles referenced by the event handlers
	ops XPCalcOps // calculator state
}
// opsRun evaluates the pending operation over val_1st/val_2nd, writes the
// result to the display, and stores it back into val_1st so it can serve
// as the left operand of the next operation. In the OPS_ERR state it only
// prompts the user to clear the error.
func opsRun(dlg *Dialog) {
	// Parse failures fall back to 0, matching the original lenient behavior.
	val_1st, _ := strconv.ParseFloat(dlg.ops.val_1st, 64)
	val_2nd, _ := strconv.ParseFloat(dlg.ops.val_2nd, 64)
	var result float64
	if dlg.ops.flag == OPS_ERR {
		fmt.Println("Please Clear Error")
		return
	}
	// Unknown/empty operator leaves the right operand unchanged.
	result = val_2nd
	switch dlg.ops.ops {
	case "+":
		result = val_1st + val_2nd
	case "-":
		result = val_1st - val_2nd
	case "*":
		result = val_1st * val_2nd
	case "/":
		result = val_1st / val_2nd
	case "POWER":
		result = Pow(val_1st, val_2nd)
	case "SIN":
		result = Sin(val_1st)
	case "COS":
		result = Cos(val_1st)
	case "TAN":
		result = Tan(val_1st)
	case "EXP":
		result = Exp(val_1st)
	case "LN":
		// Natural logarithm. (Was Log10 — the implementations of LN and
		// LOG were swapped.)
		result = Log(val_1st)
	case "LOG":
		// Base-10 logarithm. (Was Log, the natural log.)
		result = Log10(val_1st)
	case "NFACTORIAL":
		// n! computed via the Gamma function: Gamma(n+1) == n! for
		// non-negative integers. (Frexp returned the float mantissa,
		// which is not a factorial.)
		result = Gamma(val_1st + 1)
	case "XRECIPROCAL":
		result = 1 / val_1st
	}
	dlg.ops.flag = OPS_ACT
	// Precision -1 yields the shortest exact representation; the previous
	// argument 'e' is rune 101 and printed 101 digits.
	val := strconv.FormatFloat(result, 'g', -1, 64)
	dlg.ui.textEdit.SetText(val)
	dlg.ops.val_1st = val
	fmt.Printf("val_1st = %.6f\n", val_1st)
	fmt.Printf("ops = %s\n", dlg.ops.ops)
	fmt.Printf("val_2nd = %.6f\n", val_2nd)
	fmt.Printf("result = %s\n", val)
}
// rbNumberNotation_onCliced_btnInit reconfigures button labels and
// enabled states after a base (notation) radio button is clicked, then
// converts the displayed value from the old base to the new one.
func rbNumberNotation_onCliced_btnInit(dlg *Dialog) {
	// Decimal mode repurposes the "unit bytes" radios as angle-unit labels
	// and enables the scientific-function buttons.
	enable := dlg.ui.rbNumberNotation_10.Checked()
	if enable {
		dlg.ui.rbNumberUnitBytes_08.SetText(`ang`)
		dlg.ui.rbNumberUnitBytes_04.SetText(`rad`)
		dlg.ui.rbNumberUnitBytes_02.SetText(`gar`)
		dlg.ui.rbNumberUnitBytes_01.SetText(``)
		dlg.ui.rbNumberUnitBytes_01.SetVisible(false)
	} else {
		dlg.ui.rbNumberUnitBytes_08.SetText(`8Bytes`)
		dlg.ui.rbNumberUnitBytes_04.SetText(`4Bytes`)
		dlg.ui.rbNumberUnitBytes_02.SetText(`2Bytes`)
		dlg.ui.rbNumberUnitBytes_01.SetText(`1Bytes`)
		dlg.ui.rbNumberUnitBytes_01.SetVisible(true)
	}
	dlg.ui.tbFnFsubE.SetEnabled(enable)
	dlg.ui.tbFnDms.SetEnabled(enable)
	dlg.ui.tbFnSin.SetEnabled(enable)
	dlg.ui.tbFnCos.SetEnabled(enable)
	dlg.ui.tbFnTan.SetEnabled(enable)
	dlg.ui.tbFnExp.SetEnabled(enable)
	dlg.ui.tbFnPI.SetEnabled(enable)
	// Hex digits A-F are only usable in base 16.
	enable = dlg.ui.rbNumberNotation_16.Checked()
	dlg.ui.tbCharA.SetEnabled(enable)
	dlg.ui.tbCharB.SetEnabled(enable)
	dlg.ui.tbCharC.SetEnabled(enable)
	dlg.ui.tbCharD.SetEnabled(enable)
	dlg.ui.tbCharE.SetEnabled(enable)
	dlg.ui.tbCharF.SetEnabled(enable)
	// Octal disables 8 and 9.
	enable = dlg.ui.rbNumberNotation_08.Checked()
	dlg.ui.tbNumber8.SetEnabled(!enable)
	dlg.ui.tbNumber9.SetEnabled(!enable)
	// Binary disables 2-9.
	// NOTE(review): this section rewrites tbNumber8/tbNumber9 as well, so
	// when octal is selected (binary unchecked) the two lines above are
	// overwritten and 8/9 end up enabled again — looks unintended; confirm.
	enable = dlg.ui.rbNumberNotation_02.Checked()
	dlg.ui.tbNumber2.SetEnabled(!enable)
	dlg.ui.tbNumber3.SetEnabled(!enable)
	dlg.ui.tbNumber4.SetEnabled(!enable)
	dlg.ui.tbNumber5.SetEnabled(!enable)
	dlg.ui.tbNumber6.SetEnabled(!enable)
	dlg.ui.tbNumber7.SetEnabled(!enable)
	dlg.ui.tbNumber8.SetEnabled(!enable)
	dlg.ui.tbNumber9.SetEnabled(!enable)
	// Record which base is now active.
	if dlg.ui.rbNumberNotation_16.Checked() {
		dlg.ops.notation_new = OPS_NOTATION_16
	} else if dlg.ui.rbNumberNotation_10.Checked() {
		dlg.ops.notation_new = OPS_NOTATION_10
	} else if dlg.ui.rbNumberNotation_08.Checked() {
		dlg.ops.notation_new = OPS_NOTATION_08
	} else if dlg.ui.rbNumberNotation_02.Checked() {
		dlg.ops.notation_new = OPS_NOTATION_02
	} else {
		dlg.ops.notation_new = OPS_NOTATION_NONE
	}
	// Re-render the displayed value in the new base.
	var result string
	result = dlg.ui.textEdit.Text()
	if dlg.ops.flag != OPS_ERR {
		if dlg.ops.notation_old != dlg.ops.notation_new {
			dlg.ops.flag = OPS_ACT
		}
		i, err := strconv.ParseInt(result, dlg.ops.notation_old, 64)
		if err != nil {
			// NOTE(review): assumes err.Error() contains at least two ':'
			// separators (true for strconv.NumError messages); otherwise
			// the index expressions below panic.
			result = strings.Split(err.Error(), ":")[1] + strings.Split(err.Error(), ":")[2]
			dlg.ops.flag = OPS_ERR
		} else {
			result = strconv.FormatInt(i, dlg.ops.notation_new)
		}
	}
	dlg.ui.textEdit.SetText(result)
	dlg.ops.notation_old = dlg.ops.notation_new
}
// appendByte appends the pressed digit/character c to the display,
// starting a fresh entry after a completed operation or an error, and
// mirrors the new text into val_2nd.
func appendByte(dlg *Dialog, c byte) {
	current := ""
	// After a result (OPS_ACT) or an error (OPS_ERR) the next key starts
	// a brand-new operand; otherwise we extend the current display text.
	if dlg.ops.flag != OPS_ACT && dlg.ops.flag != OPS_ERR {
		current = dlg.ui.textEdit.Text()
	}
	var out string
	if current == "0" {
		// A lone leading zero is replaced rather than extended.
		out = string(c)
	} else {
		var buf bytes.Buffer
		buf.WriteString(current)
		buf.WriteByte(c)
		out = buf.String()
	}
	dlg.ui.textEdit.SetText(out)
	dlg.ops.flag = OPS_OBJ
	dlg.ops.val_2nd = out
}
// runDialog constructs the calculator dialog, initializes the calculator
// state to decimal/8-byte mode, wires every button's Clicked handler to
// the XPCalcOps state machine, and runs the dialog modally. It returns
// walk's dialog result code.
func runDialog(owner walk.Form) (int, error) {
	dlg := new(Dialog)
	if err := dlg.init(owner); err != nil {
		return 0, err
	}
	// Defaults: base 10, 8-byte operands, nothing entered yet.
	dlg.ops.notation_old = OPS_NOTATION_10
	dlg.ops.notation_new = dlg.ops.notation_old
	dlg.ops.unit_bytes_old = OPS_UNIT_BYTES_08
	dlg.ops.unit_bytes_new = dlg.ops.unit_bytes_old
	dlg.ops.flag = OPS_NONE
	dlg.ops.val_1st = "0"
	dlg.ops.val_2nd = ""
	bStatisticalOpen := false
	// TODO: Do further required setup, e.g. for event handling, here.
	dlg.ui.rbNumberNotation_16.Clicked().Attach(func() {
		fmt.Println("Clicked rbNumberNotation_16")
		rbNumberNotation_onCliced_btnInit(dlg)
	})
	dlg.ui.rbNumberNotation_10.Clicked().Attach(func() {
		fmt.Println("Clicked rbNumberNotation_10")
		rbNumberNotation_onCliced_btnInit(dlg)
	})
	dlg.ui.rbNumberNotation_08.Clicked().Attach(func() {
		fmt.Println("Clicked rbNumberNotation_08")
		rbNumberNotation_onCliced_btnInit(dlg)
	})
	dlg.ui.rbNumberNotation_02.Clicked().Attach(func() {
		fmt.Println("Clicked rbNumberNotation_02")
		rbNumberNotation_onCliced_btnInit(dlg)
	})
	dlg.ui.tbBackspace.Clicked().Attach(func() {
		fmt.Println("Clicked tbBackspace")
		// Remove the last character; an emptied display reverts to "0".
		result := dlg.ui.textEdit.Text()
		sz := len(result)
		if sz > 0 {
			result = result[:sz-1]
		}
		if len(result) == 0 {
			result = "0"
		}
		dlg.ui.textEdit.SetText(result)
	})
	dlg.ui.tbClearError.Clicked().Attach(func() {
		fmt.Println("Clicked tbClearError")
		dlg.ui.textEdit.SetText(`0`)
		dlg.ops.val_1st = dlg.ui.textEdit.Text()
		dlg.ops.val_2nd = ""
		dlg.ops.ops = ""
	})
	dlg.ui.tbClear.Clicked().Attach(func() {
		fmt.Println("Clicked tbClear")
		dlg.ui.textEdit.SetText(`0`)
		dlg.ops.val_1st = dlg.ui.textEdit.Text()
		dlg.ops.val_2nd = ""
		dlg.ops.ops = ""
	})
	dlg.ui.tbNumber0.Clicked().Attach(func() {
		fmt.Println("Clicked tbNumber0")
		appendByte(dlg, byte('0'))
	})
	dlg.ui.tbNumber1.Clicked().Attach(func() {
		fmt.Println("Clicked tbNumber1")
		appendByte(dlg, byte('1'))
	})
	dlg.ui.tbNumber2.Clicked().Attach(func() {
		fmt.Println("Clicked tbNumber2")
		appendByte(dlg, byte('2'))
	})
	dlg.ui.tbNumber3.Clicked().Attach(func() {
		fmt.Println("Clicked tbNumber3")
		appendByte(dlg, byte('3'))
	})
	dlg.ui.tbNumber4.Clicked().Attach(func() {
		fmt.Println("Clicked tbNumber4")
		appendByte(dlg, byte('4'))
	})
	dlg.ui.tbNumber5.Clicked().Attach(func() {
		fmt.Println("Clicked tbNumber5")
		appendByte(dlg, byte('5'))
	})
	dlg.ui.tbNumber6.Clicked().Attach(func() {
		// Log message fixed: it previously said "tbNumber5" (copy/paste).
		fmt.Println("Clicked tbNumber6")
		appendByte(dlg, byte('6'))
	})
	dlg.ui.tbNumber7.Clicked().Attach(func() {
		fmt.Println("Clicked tbNumber7")
		appendByte(dlg, byte('7'))
	})
	dlg.ui.tbNumber8.Clicked().Attach(func() {
		fmt.Println("Clicked tbNumber8")
		appendByte(dlg, byte('8'))
	})
	dlg.ui.tbNumber9.Clicked().Attach(func() {
		fmt.Println("Clicked tbNumber9")
		appendByte(dlg, byte('9'))
	})
	dlg.ui.tbCharA.Clicked().Attach(func() {
		fmt.Println("Clicked tbCharA")
		appendByte(dlg, byte('A'))
	})
	dlg.ui.tbCharB.Clicked().Attach(func() {
		fmt.Println("Clicked tbCharB")
		appendByte(dlg, byte('B'))
	})
	dlg.ui.tbCharC.Clicked().Attach(func() {
		fmt.Println("Clicked tbCharC")
		appendByte(dlg, byte('C'))
	})
	dlg.ui.tbCharD.Clicked().Attach(func() {
		fmt.Println("Clicked tbCharD")
		appendByte(dlg, byte('D'))
	})
	dlg.ui.tbCharE.Clicked().Attach(func() {
		fmt.Println("Clicked tbCharE")
		appendByte(dlg, byte('E'))
	})
	dlg.ui.tbCharF.Clicked().Attach(func() {
		fmt.Println("Clicked tbCharF")
		appendByte(dlg, byte('F'))
	})
	dlg.ui.tbStatistical.Clicked().Attach(func() {
		// Toggle the statistics sub-panel buttons on and off.
		bStatisticalOpen = !bStatisticalOpen
		fmt.Println("Clicked tbStatistical")
		dlg.ui.tbAverage.SetEnabled(bStatisticalOpen)
		dlg.ui.tbSum.SetEnabled(bStatisticalOpen)
		dlg.ui.tbs.SetEnabled(bStatisticalOpen)
		dlg.ui.tbDat.SetEnabled(bStatisticalOpen)
	})
	dlg.ui.tbFnSin.Clicked().Attach(func() {
		fmt.Println("Clicked tbFnSin")
		dlg.ops.ops = "SIN"
		dlg.ops.val_1st = dlg.ui.textEdit.Text()
		dlg.ops.val_2nd = ""
		opsRun(dlg)
	})
	dlg.ui.tbFnCos.Clicked().Attach(func() {
		fmt.Println("Clicked tbFnCos")
		dlg.ops.ops = "COS"
		dlg.ops.val_1st = dlg.ui.textEdit.Text()
		dlg.ops.val_2nd = ""
		opsRun(dlg)
	})
	dlg.ui.tbFnTan.Clicked().Attach(func() {
		fmt.Println("Clicked tbFnTan")
		dlg.ops.ops = "TAN"
		dlg.ops.val_1st = dlg.ui.textEdit.Text()
		dlg.ops.val_2nd = ""
		opsRun(dlg)
	})
	dlg.ui.tbFnLeftBracket.Clicked().Attach(func() {
		// Not implemented yet.
		fmt.Println("Clicked tbFnLeftBracket")
	})
	dlg.ui.tbFnRightBracket.Clicked().Attach(func() {
		// Not implemented yet.
		fmt.Println("Clicked tbFnRightBracket")
	})
	dlg.ui.tbFnExp.Clicked().Attach(func() {
		fmt.Println("Clicked tbFnExp")
		dlg.ops.ops = "EXP"
		dlg.ops.val_1st = dlg.ui.textEdit.Text()
		dlg.ops.val_2nd = ""
		opsRun(dlg)
	})
	dlg.ui.tbFnLn.Clicked().Attach(func() {
		fmt.Println("Clicked tbFnLn")
		dlg.ops.ops = "LN"
		dlg.ops.val_1st = dlg.ui.textEdit.Text()
		dlg.ops.val_2nd = ""
		opsRun(dlg)
	})
	dlg.ui.tbFnLog.Clicked().Attach(func() {
		fmt.Println("Clicked tbFnLog")
		dlg.ops.ops = "LOG"
		dlg.ops.val_1st = dlg.ui.textEdit.Text()
		dlg.ops.val_2nd = ""
		opsRun(dlg)
	})
	dlg.ui.tbFnXpowerY.Clicked().Attach(func() {
		fmt.Println("Clicked tbFnXpowerY")
		dlg.ops.ops = "POWER"
		dlg.ops.val_1st = dlg.ui.textEdit.Text()
		// NOTE(review): the exponent is hard-coded to "3", same as the
		// x^3 button — x^y should take y from user input; confirm intent.
		dlg.ops.val_2nd = "3"
		opsRun(dlg)
	})
	dlg.ui.tbFnXpower3.Clicked().Attach(func() {
		fmt.Println("Clicked tbFnXpower3")
		dlg.ops.ops = "POWER"
		dlg.ops.val_1st = dlg.ui.textEdit.Text()
		dlg.ops.val_2nd = "3"
		opsRun(dlg)
	})
	dlg.ui.tbFnXpower2.Clicked().Attach(func() {
		fmt.Println("Clicked tbFnXpower2")
		dlg.ops.ops = "POWER"
		dlg.ops.val_1st = dlg.ui.textEdit.Text()
		dlg.ops.val_2nd = "2"
		opsRun(dlg)
	})
	dlg.ui.tbFnNFactorial.Clicked().Attach(func() {
		fmt.Println("Clicked tbFnNFactorial")
		dlg.ops.ops = "NFACTORIAL"
		dlg.ops.val_1st = dlg.ui.textEdit.Text()
		dlg.ops.val_2nd = ""
		opsRun(dlg)
	})
	dlg.ui.tbFnXReciprocal.Clicked().Attach(func() {
		fmt.Println("Clicked tbFnXReciprocal")
		dlg.ops.ops = "XRECIPROCAL"
		dlg.ops.val_1st = dlg.ui.textEdit.Text()
		dlg.ops.val_2nd = ""
		opsRun(dlg)
	})
	dlg.ui.tbFnPI.Clicked().Attach(func() {
		fmt.Println("Clicked tbFnPI")
		dlg.ui.textEdit.SetText(`3.1415926`)
	})
	dlg.ui.tbOpsAdd.Clicked().Attach(func() {
		fmt.Println("Clicked tbOpsAdd")
		dlg.ops.ops = "+"
	})
	dlg.ui.tbOpsSub.Clicked().Attach(func() {
		fmt.Println("Clicked tbOpsSub")
		dlg.ops.ops = "-"
	})
	dlg.ui.tbOpsMul.Clicked().Attach(func() {
		fmt.Println("Clicked tbOpsMul")
		dlg.ops.ops = "*"
	})
	dlg.ui.tbOpsDiv.Clicked().Attach(func() {
		fmt.Println("Clicked tbOpsDiv")
		dlg.ops.ops = "/"
	})
	dlg.ui.tbOpsMod.Clicked().Attach(func() {
		fmt.Println("Clicked tbOpsMod")
		// NOTE(review): modulo currently maps to "/" — opsRun has no "%"
		// case yet, so this is a placeholder; confirm before changing.
		dlg.ops.ops = "/"
	})
	dlg.ui.tbOpsPolar.Clicked().Attach(func() {
		fmt.Println("Clicked tbOpsPolar")
		// NOTE(review): placeholder mapping, see tbOpsMod above.
		dlg.ops.ops = "/"
	})
	dlg.ui.tbOpsEqual.Clicked().Attach(func() {
		fmt.Println("Clicked tbOpsEqual")
		opsRun(dlg)
	})
	return dlg.Run(), nil
}
|
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metricserver implements a Prometheus metric server for runsc data.
package metricserver
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"os/signal"
"regexp"
"runtime"
"runtime/debug"
"strconv"
"strings"
"syscall"
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/atomicbitops"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/prometheus"
"gvisor.dev/gvisor/pkg/sentry/control"
"gvisor.dev/gvisor/pkg/state"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/metricserver/containermetrics"
"gvisor.dev/gvisor/runsc/sandbox"
)
const (
	// metricsExportTimeout is the maximum amount of time that the metrics export process should take
	// across all sandboxes combined.
	metricsExportTimeout = 30 * time.Second
	// metricsExportPerSandboxTimeout is the maximum amount of time that we wait on any individual
	// sandbox when exporting its metrics.
	metricsExportPerSandboxTimeout = 8 * time.Second
	// exportParallelGoroutines is the maximum number of goroutines spawned during metrics export.
	exportParallelGoroutines = 8
)
// servedSandbox is a sandbox that we serve metrics from.
// A single metrics server will export data about multiple sandboxes.
type servedSandbox struct {
	// rootContainerID identifies the sandbox's root container.
	rootContainerID container.FullID
	// server is the metric server that owns this record.
	server *metricServer
	// extraLabels are labels attached to this sandbox's exported metrics;
	// populated from the root container's state file in load.
	extraLabels map[string]string
	// mu protects the fields below.
	mu sync.Mutex
	// sandbox is the sandbox being monitored.
	// Once set, it is immutable.
	sandbox *sandbox.Sandbox
	// createdAt stores the time the sandbox was created.
	// It is loaded from the container state file.
	// Once set, it is immutable.
	createdAt time.Time
	// capabilities is the union of the capability set of the containers within `sandbox`.
	// It is used to export a per-sandbox metric representing which capabilities are in use.
	// For monitoring purposes, a capability added in a container means it is considered
	// added for the whole sandbox.
	capabilities []linux.Capability
	// specMetadataLabels is the set of label exported as part of the
	// `spec_metadata` metric.
	specMetadataLabels map[string]string
	// verifier allows verifying the data integrity of the metrics we get from this sandbox.
	// It is not always initialized when the sandbox is discovered, but rather upon first metrics
	// access to the sandbox. Metric registration data is loaded from the root container's
	// state file.
	// The server needs to load this registration data before any data from this sandbox is
	// served to HTTP clients. If there is no metric registration data within the Container
	// data, then metrics were not requested for this sandbox, and this servedSandbox should
	// be deleted from the server.
	// Once set, it is immutable.
	verifier *prometheus.Verifier
	// cleanupVerifier holds a reference to the cleanup function of the verifier.
	cleanupVerifier func()
	// extra contains additional per-sandbox data.
	extra sandboxData
}
// load loads the sandbox being monitored and initializes its metric verifier.
// If it returns an error other than container.ErrStateFileLocked, the sandbox is either
// non-existent, or has not requested instrumentation to be enabled, or does not have
// valid metric registration data. In any of these cases, the sandbox should be removed
// from this metrics server.
func (s *servedSandbox) load() (*sandbox.Sandbox, *prometheus.Verifier, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.sandbox == nil {
		allContainers, err := container.LoadSandbox(s.server.rootDir, s.rootContainerID.SandboxID, container.LoadOpts{
			TryLock: container.TryAcquire,
		})
		if err != nil {
			return nil, nil, fmt.Errorf("cannot load sandbox %q: %v", s.rootContainerID.SandboxID, err)
		}
		var rootContainer *container.Container
		for _, cont := range allContainers {
			if cont.IsSandboxRoot() {
				if rootContainer != nil {
					// Error message fixed: previously read "root contains".
					return nil, nil, fmt.Errorf("multiple root containers found for sandbox ID %q: %v and %v", s.rootContainerID.SandboxID, cont, rootContainer)
				}
				rootContainer = cont
			}
		}
		if rootContainer == nil {
			return nil, nil, fmt.Errorf("no root container found for sandbox ID %q", s.rootContainerID.SandboxID)
		}
		sandboxMetricAddr := strings.ReplaceAll(rootContainer.Sandbox.MetricServerAddress, "%RUNTIME_ROOT%", s.server.rootDir)
		if sandboxMetricAddr == "" {
			return nil, nil, errors.New("sandbox did not request instrumentation")
		}
		if sandboxMetricAddr != s.server.address {
			return nil, nil, fmt.Errorf("sandbox requested instrumentation by a metric server running at a different address (sandbox wants %q, this metric server serves %q)", sandboxMetricAddr, s.server.address)
		}
		// Update label data as read from the state file.
		// Do not store empty labels.
		authoritativeLabels, err := containermetrics.SandboxPrometheusLabels(rootContainer)
		if err != nil {
			return nil, nil, fmt.Errorf("cannot compute Prometheus labels of sandbox: %v", err)
		}
		s.extraLabels = make(map[string]string, len(authoritativeLabels))
		for _, label := range []string{
			prometheus.SandboxIDLabel,
			prometheus.IterationIDLabel,
			prometheus.PodNameLabel,
			prometheus.NamespaceLabel,
		} {
			s.extraLabels[label] = authoritativeLabels[label]
			if s.extraLabels[label] == "" {
				delete(s.extraLabels, label)
			}
		}
		// Compute capability set: a capability counts for the sandbox if any
		// of its containers has it in any capability set.
		allCaps := linux.AllCapabilities()
		capSet := make([]linux.Capability, 0, len(allCaps))
		for _, cap := range allCaps {
			for _, cont := range allContainers {
				if cont.HasCapabilityInAnySet(cap) {
					capSet = append(capSet, cap)
					break
				}
			}
		}
		if len(capSet) > 0 {
			// Reallocate a slice with minimum size, since it will be long-lived.
			s.capabilities = make([]linux.Capability, len(capSet))
			for i, capLabels := range capSet {
				s.capabilities[i] = capLabels
			}
		}
		// Compute spec metadata.
		s.specMetadataLabels = containermetrics.ComputeSpecMetadata(allContainers)
		s.sandbox = rootContainer.Sandbox
		s.createdAt = rootContainer.CreatedAt
	}
	if s.verifier == nil {
		registeredMetrics, err := s.sandbox.GetRegisteredMetrics()
		if err != nil {
			return nil, nil, err
		}
		verifier, cleanup, err := prometheus.NewVerifier(registeredMetrics)
		if err != nil {
			return nil, nil, err
		}
		s.verifier = verifier
		s.cleanupVerifier = cleanup
	}
	if err := s.extra.load(s); err != nil {
		return nil, nil, err
	}
	return s.sandbox, s.verifier, nil
}
// cleanup releases the verifier's resources, if load ever created one.
// Safe to call even when load never succeeded.
func (s *servedSandbox) cleanup() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.cleanupVerifier != nil {
		s.cleanupVerifier()
	}
}
// querySandboxMetrics queries the sandbox for metrics data.
// The export runs in its own goroutine so the call can be abandoned when
// ctx is done; in that case ctx.Err() is returned and the goroutine is
// told (via `canceled`) not to deliver its result.
func querySandboxMetrics(ctx context.Context, sand *sandbox.Sandbox, verifier *prometheus.Verifier, metricsFilter string) (*prometheus.Snapshot, error) {
	// Buffered result channel: the worker never blocks on delivery.
	ch := make(chan struct {
		snapshot *prometheus.Snapshot
		err      error
	}, 1)
	// Buffered so the deferred close plus the send in the Done branch
	// cannot block either side of the race.
	canceled := make(chan struct{}, 1)
	defer close(canceled)
	go func() {
		snapshot, err := sand.ExportMetrics(control.MetricsExportOpts{
			OnlyMetrics: metricsFilter,
		})
		// If the parent already gave up, drop the result instead of
		// sending it to a reader that is gone.
		select {
		case <-canceled:
		case ch <- struct {
			snapshot *prometheus.Snapshot
			err      error
		}{snapshot, err}:
			close(ch)
		}
	}()
	select {
	case <-ctx.Done():
		canceled <- struct{}{}
		return nil, ctx.Err()
	case ret := <-ch:
		if ret.err != nil {
			return nil, ret.err
		}
		// Check data integrity before trusting the snapshot.
		if err := verifier.Verify(ret.snapshot); err != nil {
			return nil, err
		}
		return ret.snapshot, nil
	}
}
// metricServer implements the metric server.
type metricServer struct {
	// rootDir is the runsc root directory in which sandboxes are discovered.
	rootDir string
	// pid is this server's process ID; pidFile, if non-empty, is presumably
	// the file that PID is written to — confirm outside this file.
	pid     int
	pidFile string
	// allowUnknownRoot suppresses warnings when rootDir cannot be listed.
	allowUnknownRoot bool
	// exposeProfileEndpoints controls whether profiling endpoints are served.
	exposeProfileEndpoints bool
	// address is the address this server serves on; sandboxes must request
	// instrumentation at exactly this address to be picked up.
	address string
	// exporterPrefix is prepended to exported metric names — TODO confirm.
	exporterPrefix string
	// startTime is when the server started.
	startTime time.Time
	// srv is the underlying HTTP server.
	srv http.Server
	// Size of the map of written metrics during the last /metrics export. Initially zero.
	// Used to efficiently reallocate a map of the right size during the next export.
	lastMetricsWrittenSize atomicbitops.Uint32
	// mu protects the fields below.
	mu sync.Mutex
	// udsPath is a path to a Unix Domain Socket file on which the server is bound and which it owns.
	// This socket file will be deleted on server shutdown.
	// This field is not set if binding to a network port, or when the UDS already existed prior to
	// being bound by us (i.e. its ownership isn't ours), such that it isn't deleted in this case.
	// The field is unset once the file is successfully removed.
	udsPath string
	// sandboxes is the list of sandboxes we serve metrics for.
	sandboxes map[container.FullID]*servedSandbox
	// lastStateFileStat maps container full IDs to the last observed stat() of their state file.
	// This is used to monitor for sandboxes in the background. If a sandbox's state file matches this
	// info, we can assume that the last background scan already looked at it.
	lastStateFileStat map[container.FullID]os.FileInfo
	// lastValidMetricFilter stores the last value of the "runsc-sandbox-metrics-filter" parameter for
	// /metrics requests.
	// It represents the last-known compilable regular expression that was passed to /metrics.
	// It is used to avoid re-verifying this parameter in the common case where a single scraper
	// is consistently passing in the same value for this parameter in each successive request.
	lastValidMetricFilter string
	// lastValidCapabilityFilterStr stores the last value of the "runsc-capability-filter" parameter
	// for /metrics requests.
	// It represents the last-known compilable regular expression that was passed to /metrics.
	// It is used to avoid re-verifying this parameter in the common case where a single scraper
	// is consistently passing in the same value for this parameter in each successive request.
	lastValidCapabilityFilterStr string
	// lastValidCapabilityFilterReg is the compiled regular expression corresponding to
	// lastValidCapabilityFilterStr.
	lastValidCapabilityFilterReg *regexp.Regexp
	// numSandboxes counts the number of sandboxes that have ever been registered on this server.
	// Used to distinguish between the case where this metrics serve has sat there doing nothing
	// because no sandbox ever registered against it (which is unexpected), vs the case where it has
	// done a good job serving sandbox metrics and it's time for it to gracefully die as there are no
	// more sandboxes to serve.
	// Also exported as a metric of total number of sandboxes started.
	numSandboxes int64
	// shuttingDown is flipped to true when the server shutdown process has started.
	// Used to deal with race conditions where a sandbox is trying to register after the server has
	// already started to go to sleep.
	shuttingDown bool
	// shutdownCh is written to when receiving the signal to shut down gracefully.
	shutdownCh chan os.Signal
	// extraData contains additional server-wide data.
	extra serverData
}
// sufficientlyEqualStats returns whether the given FileInfo's are sufficiently
// equal to assume the file they represent has not changed between the time
// each FileInfo was obtained.
func sufficientlyEqualStats(s1, s2 os.FileInfo) bool {
if !s1.ModTime().Equal(s2.ModTime()) {
return false
}
if s1.Size() != s2.Size() {
return false
}
statT1, ok1 := s1.Sys().(*syscall.Stat_t)
statT2, ok2 := s2.Sys().(*syscall.Stat_t)
if ok1 != ok2 {
return false
}
if ok1 && ok2 {
if statT1.Dev != statT2.Dev {
return false
}
if statT1.Ino != statT2.Ino {
return false
}
}
return true
}
// refreshSandboxesLocked removes sandboxes that are no longer running from m.sandboxes, and
// adds sandboxes found in the root directory that do request instrumentation.
// Preconditions: m.mu is locked.
func (m *metricServer) refreshSandboxesLocked() {
	if m.shuttingDown {
		// Do nothing to avoid log spam.
		return
	}
	sandboxIDs, err := container.ListSandboxes(m.rootDir)
	if err != nil {
		if !m.allowUnknownRoot {
			log.Warningf("Cannot list containers in root directory %s, it has likely gone away: %v.", m.rootDir, err)
		}
		return
	}
	// Phase 1: evict sandboxes we are already tracking if they have vanished
	// from the root directory, can no longer be loaded, or stopped running.
	for sandboxID, sandbox := range m.sandboxes {
		found := false
		for _, sid := range sandboxIDs {
			if sid == sandboxID {
				found = true
				break
			}
		}
		if !found {
			log.Warningf("Sandbox %s no longer exists but did not explicitly unregister. Removing it.", sandboxID)
			sandbox.cleanup()
			delete(m.sandboxes, sandboxID)
			continue
		}
		// ErrStateFileLocked is tolerated here: the sandbox may simply be busy
		// (e.g. mid-startup), which is not a reason to evict it.
		if _, _, err := sandbox.load(); err != nil && err != container.ErrStateFileLocked {
			log.Warningf("Sandbox %s cannot be loaded, deleting it: %v", sandboxID, err)
			sandbox.cleanup()
			delete(m.sandboxes, sandboxID)
			continue
		}
		if !sandbox.sandbox.IsRunning() {
			log.Infof("Sandbox %s is no longer running, deleting it.", sandboxID)
			sandbox.cleanup()
			delete(m.sandboxes, sandboxID)
			continue
		}
	}
	// Phase 2: compute the set of on-disk sandbox IDs that we are not yet
	// tracking; only these are candidates for registration below.
	newSandboxIDs := make(map[container.FullID]bool, len(sandboxIDs))
	for _, sid := range sandboxIDs {
		if _, found := m.sandboxes[sid]; found {
			continue
		}
		newSandboxIDs[sid] = true
	}
	// Drop stat-cache entries for IDs that are no longer candidates, so the
	// cache cannot grow without bound.
	for sid := range m.lastStateFileStat {
		if _, found := newSandboxIDs[sid]; !found {
			delete(m.lastStateFileStat, sid)
		}
	}
	// Phase 3: try to pick up each new candidate sandbox. The stat cache is
	// used to avoid repeatedly re-loading state files that have not changed
	// since we last decided not to register them.
	for sid := range newSandboxIDs {
		stateFile := container.StateFile{
			RootDir: m.rootDir,
			ID:      sid,
		}
		stat, err := stateFile.Stat()
		if err != nil {
			log.Warningf("Failed to stat() container state file for sandbox %q: %v", sid, err)
			continue
		}
		if existing, found := m.lastStateFileStat[sid]; found {
			// We already tried to stat this sandbox but decided not to pick it up.
			// Check if the state file changed since. If it didn't, we don't want to
			// try again.
			if sufficientlyEqualStats(existing, stat) {
				continue
			}
			log.Infof("State file for sandbox %q has changed since we last looked at it; will try to reload it.", sid)
			delete(m.lastStateFileStat, sid)
		}
		// If we get here, we either haven't seen this sandbox before, or we saw it
		// and it has disappeared (which means it is new in this iteration), or we
		// saw it before but its state file changed. Either way, we want to try
		// loading it and see if it wants instrumentation.
		cont, err := container.Load(m.rootDir, sid, container.LoadOpts{
			Exact:         true,
			SkipCheck:     true,
			TryLock:       container.TryAcquire,
			RootContainer: true,
		})
		if err != nil {
			if err == container.ErrStateFileLocked {
				// This error is OK and shouldn't generate log spam. The sandbox is probably in the middle
				// of being created.
				continue
			}
			log.Warningf("Cannot load state file for sandbox %q: %v", sid, err)
			continue
		}
		// This is redundant with one of the checks performed below in servedSandbox.load, but this
		// avoids log spam for the non-error case of sandboxes that didn't request instrumentation.
		sandboxMetricAddr := strings.ReplaceAll(cont.Sandbox.MetricServerAddress, "%RUNTIME_ROOT%", m.rootDir)
		if sandboxMetricAddr != m.address {
			// Not aimed at this server; remember the stat so we skip it next time.
			m.lastStateFileStat[sid] = stat
			continue
		}
		// This case can be hit when there is a leftover state file for a sandbox that was `kill -9`'d
		// without an opportunity for it to clean up its state file. This results in a valid state file
		// but the sandbox PID is gone. We don't want to continuously load this sandbox's state file.
		if cont.Status == container.Running && !cont.Sandbox.IsRunning() {
			log.Warningf("Sandbox %q has state file in state Running, yet it isn't actually running. Ignoring it.", sid)
			m.lastStateFileStat[sid] = stat
			continue
		}
		m.numSandboxes++
		served := &servedSandbox{
			rootContainerID: sid,
			server:          m,
			extraLabels: map[string]string{
				prometheus.SandboxIDLabel: sid.SandboxID,
			},
		}
		// Best-effort attempt to load the state file instantly.
		// This may legitimately fail if it is locked, e.g. during sandbox startup.
		// If it fails for any other reason, then the sandbox went away between the time we listed the
		// sandboxes and now, so just delete it.
		if _, _, err := served.load(); err != nil && err != container.ErrStateFileLocked {
			log.Warningf("Sandbox %q cannot be loaded, ignoring it: %v", sid, err)
			m.lastStateFileStat[sid] = stat
			served.cleanup()
			continue
		}
		m.sandboxes[sid] = served
		log.Infof("Registered new sandbox found in root directory: %q", sid)
	}
}
// sandboxLoadResult contains the outcome of calling `load` on a `servedSandbox`.
// It is used as an intermediary type that contains all that we know about a
// sandbox after attempting to load its state file, but does not contain any
// metric data from the sandbox.
type sandboxLoadResult struct {
	// served is the tracked sandbox that `load` was called on.
	served *servedSandbox
	// sandbox and verifier are the results of a successful load; they are
	// only meaningful when err is nil.
	sandbox  *sandbox.Sandbox
	verifier *prometheus.Verifier
	// err is the error returned by `load`, if any.
	err error
}
// loadSandboxesLocked loads the state file data from all known sandboxes.
// It does so in parallel, and avoids reloading sandboxes for which we have
// already loaded data.
// Preconditions: m.mu is locked (required by refreshSandboxesLocked).
func (m *metricServer) loadSandboxesLocked(ctx context.Context) []sandboxLoadResult {
	m.refreshSandboxesLocked()
	// Cap the worker count at the number of sandboxes so we never spawn idle
	// goroutines.
	numGoroutines := exportParallelGoroutines
	numSandboxes := len(m.sandboxes)
	if numSandboxes < numGoroutines {
		numGoroutines = numSandboxes
	}
	// First, load all the sandboxes in parallel. We need to do this while m.mu is held.
	// Both channels are buffered to numSandboxes so neither senders nor the
	// workers can block indefinitely.
	loadSandboxCh := make(chan *servedSandbox, numSandboxes)
	loadedSandboxesCh := make(chan sandboxLoadResult, numSandboxes)
	loadedSandboxes := make([]sandboxLoadResult, 0, numSandboxes)
	for i := 0; i < numGoroutines; i++ {
		go func() {
			// Each worker drains loadSandboxCh until it is closed below.
			for served := range loadSandboxCh {
				sand, verifier, err := served.load()
				loadedSandboxesCh <- sandboxLoadResult{served, sand, verifier, err}
			}
		}()
	}
	for _, sandbox := range m.sandboxes {
		loadSandboxCh <- sandbox
	}
	close(loadSandboxCh)
	// Exactly numSandboxes results will arrive, one per queued sandbox.
	for i := 0; i < numSandboxes; i++ {
		loadedSandboxes = append(loadedSandboxes, <-loadedSandboxesCh)
	}
	close(loadedSandboxesCh)
	return loadedSandboxes
}
// sandboxMetricsResult is the result of calling querySandboxMetrics on a
// single sandbox. It contains all of `sandboxLoadResult` but also has current
// metric data (if querying metrics from the sandbox process succeeded).
type sandboxMetricsResult struct {
	sandboxLoadResult
	// isRunning records whether the sandbox process was alive at query time.
	isRunning bool
	// snapshot holds the queried metric data; only meaningful when err is nil.
	snapshot *prometheus.Snapshot
	// err is the load error or the metric-query error, whichever came first.
	err error
}
// queryMultiSandboxMetrics queries metric data from multiple loaded sandboxes.
// It does so in parallel and with random permutation ordering.
// Only metrics matching the `metricsFilter` regular expression are queried.
// For each sandbox, whether we were successful in querying its metrics or
// not, the `processSandbox` function is called. This may be done in parallel,
// so `processSandbox` should do its own locking so that multiple parallel
// instances of itself behave appropriately.
func queryMultiSandboxMetrics(ctx context.Context, loadedSandboxes []sandboxLoadResult, metricsFilter string, processSandbox func(sandboxMetricsResult)) {
	numSandboxes := len(loadedSandboxes)
	ctxDeadline, ok := ctx.Deadline()
	if !ok {
		panic("context had no deadline, this should never happen as it was created with a timeout")
	}
	// Split the remaining request time evenly across sandboxes, but never go
	// below the per-sandbox floor so that a single slow sandbox still gets a
	// reasonable chance to answer.
	exportStartTime := time.Now()
	requestTimeLeft := ctxDeadline.Sub(exportStartTime)
	perSandboxTime := requestTimeLeft
	if numSandboxes != 0 {
		perSandboxTime = requestTimeLeft / time.Duration(numSandboxes)
	}
	if perSandboxTime < metricsExportPerSandboxTimeout {
		perSandboxTime = metricsExportPerSandboxTimeout
	}
	loadedSandboxCh := make(chan sandboxLoadResult, numSandboxes)
	var wg sync.WaitGroup
	numGoroutines := exportParallelGoroutines
	if numSandboxes < numGoroutines {
		numGoroutines = numSandboxes
	}
	wg.Add(numGoroutines)
	for i := 0; i < numGoroutines; i++ {
		go func() {
			defer wg.Done()
			for s := range loadedSandboxCh {
				isRunning := false
				var snapshot *prometheus.Snapshot
				err := s.err
				if err == nil {
					// Each query gets its own bounded sub-context so one stalled
					// sandbox cannot eat the whole request deadline.
					queryCtx, queryCtxCancel := context.WithTimeout(ctx, perSandboxTime)
					snapshot, err = querySandboxMetrics(queryCtx, s.sandbox, s.verifier, metricsFilter)
					queryCtxCancel()
					isRunning = s.sandbox.IsRunning()
				}
				processSandbox(sandboxMetricsResult{
					sandboxLoadResult: s,
					isRunning:         isRunning,
					snapshot:          snapshot,
					err:               err,
				})
			}
		}()
	}
	// Iterate over all sandboxes.
	// Important: This must be done in random order.
	// A malicious/compromised sandbox may decide to stall when being asked for metrics.
	// If at least `numGoroutines` sandboxes do this, this will starve other sandboxes
	// from having their metrics exported, because all the goroutines will be stuck on
	// the stalled sandboxes.
	// One way to completely avoid this would be to spawn one goroutine per
	// sandbox, but this can amount to ~hundreds of goroutines, which is not desirable
	// for the metrics server.
	// Another way would be to have a very strict timeout on each sandbox's export
	// process, but in some cases a busy sandbox will take more than a decisecond
	// or so to export its data, so this would miss some data from legitimate (but
	// slow) sandboxes.
	// Instead, we take a middle-of-the-road approach: we use a timeout that's not
	// too strict but still ensures we make forward progress away from stalled
	// sandboxes, and we also iterate across sandboxes in a different random order at
	// each export. This ensures that all sandboxes eventually get a fair chance of
	// being part of the "first `numGoroutines` sandboxes in line" to get their
	// metric data loaded, such that a client repeatedly scraping metrics will
	// eventually get data from each sandbox.
	for _, sandboxIndex := range rand.Perm(len(loadedSandboxes)) {
		loadedSandboxCh <- loadedSandboxes[sandboxIndex]
	}
	close(loadedSandboxCh)
	wg.Wait()
}
// serveMetrics serves metrics requests.
// It refreshes and loads all known sandboxes (under m.mu), queries their
// metrics in parallel, aggregates them together with the server's own
// meta-metrics, and writes the result in Prometheus text format.
func (m *metricServer) serveMetrics(w http.ResponseWriter, req *http.Request) httpResult {
	ctx, ctxCancel := context.WithTimeout(req.Context(), metricsExportTimeout)
	defer ctxCancel()
	metricsFilter := req.URL.Query().Get("runsc-sandbox-metrics-filter")
	var capabilityFilterReg *regexp.Regexp
	capabilityFilterStr := req.URL.Query().Get("runsc-capability-filter")
	m.mu.Lock()
	// Validate the metrics filter regexp once per distinct value; the cached
	// last-valid value avoids recompiling on every scrape.
	if metricsFilter != "" && metricsFilter != m.lastValidMetricFilter {
		_, err := regexp.Compile(metricsFilter)
		if err != nil {
			m.mu.Unlock()
			return httpResult{http.StatusBadRequest, errors.New("provided metric filter is not a valid regular expression")}
		}
		m.lastValidMetricFilter = metricsFilter
	}
	// Same caching scheme for the capability filter, except here the compiled
	// regexp itself is reused.
	if capabilityFilterStr != "" {
		if capabilityFilterStr != m.lastValidCapabilityFilterStr {
			reg, err := regexp.Compile(capabilityFilterStr)
			if err != nil {
				m.mu.Unlock()
				return httpResult{http.StatusBadRequest, errors.New("provided capability filter is not a valid regular expression")}
			}
			m.lastValidCapabilityFilterStr = capabilityFilterStr
			m.lastValidCapabilityFilterReg = reg
			capabilityFilterReg = reg
		} else {
			capabilityFilterReg = m.lastValidCapabilityFilterReg
		}
	}
	loadedSandboxes := m.loadSandboxesLocked(ctx)
	numSandboxes := len(loadedSandboxes)
	numSandboxesTotal := m.numSandboxes
	// The metric query below runs without holding m.mu.
	m.mu.Unlock()
	// Used to prevent goroutines from accessing the shared variables below.
	var metricsMu sync.Mutex
	// Meta-metrics keep track of metrics to export about the metrics server itself.
	type metaMetrics struct {
		numRunningSandboxes      int64
		numCannotExportSandboxes int64
	}
	meta := metaMetrics{}                   // Protected by metricsMu.
	selfMetrics := prometheus.NewSnapshot() // Protected by metricsMu.
	type snapshotAndOptions struct {
		snapshot *prometheus.Snapshot
		options  prometheus.SnapshotExportOptions
	}
	// Buffered to numSandboxes so the callback below never blocks on send.
	snapshotCh := make(chan snapshotAndOptions, numSandboxes)
	queryMultiSandboxMetrics(ctx, loadedSandboxes, metricsFilter, func(r sandboxMetricsResult) {
		metricsMu.Lock()
		defer metricsMu.Unlock()
		selfMetrics.Add(prometheus.LabeledIntData(&SandboxPresenceMetric, nil, 1).SetExternalLabels(r.served.extraLabels))
		sandboxRunning := int64(0)
		if r.isRunning {
			sandboxRunning = 1
			meta.numRunningSandboxes++
		}
		selfMetrics.Add(prometheus.LabeledIntData(&SandboxRunningMetric, nil, sandboxRunning).SetExternalLabels(r.served.extraLabels))
		if r.err == nil {
			selfMetrics.Add(prometheus.LabeledIntData(&SandboxMetadataMetric, r.sandbox.MetricMetadata, 1).SetExternalLabels(r.served.extraLabels))
			for _, cap := range r.served.capabilities {
				// A capability is exported if it matches the filter in either its
				// full or trimmed spelling (or if no filter was given).
				if capabilityFilterReg != nil && !capabilityFilterReg.MatchString(cap.String()) && !capabilityFilterReg.MatchString(cap.TrimmedString()) {
					continue
				}
				selfMetrics.Add(prometheus.LabeledIntData(&SandboxCapabilitiesMetric, map[string]string{
					SandboxCapabilitiesMetricLabel: cap.TrimmedString(),
				}, 1).SetExternalLabels(r.served.extraLabels))
			}
			selfMetrics.Add(prometheus.LabeledIntData(&SpecMetadataMetric, r.served.specMetadataLabels, 1).SetExternalLabels(r.served.extraLabels))
			createdAt := float64(r.served.createdAt.Unix()) + (float64(r.served.createdAt.Nanosecond()) / 1e9)
			selfMetrics.Add(prometheus.LabeledFloatData(&SandboxCreationMetric, nil, createdAt).SetExternalLabels(r.served.extraLabels))
		} else {
			// If the sandbox isn't running, it is normal that metrics are not exported for it, so
			// do not report this case as an error.
			if r.isRunning {
				meta.numCannotExportSandboxes++
				log.Warningf("Could not export metrics from sandbox %s: %v", r.served.rootContainerID.SandboxID, r.err)
			}
			return
		}
		snapshotCh <- snapshotAndOptions{
			snapshot: r.snapshot,
			options: prometheus.SnapshotExportOptions{
				ExporterPrefix: m.exporterPrefix,
				ExtraLabels:    r.served.extraLabels,
			},
		}
	})
	// Build the map of all snapshots we will be rendering.
	snapshotsToOptions := make(map[*prometheus.Snapshot]prometheus.SnapshotExportOptions, numSandboxes+2)
	snapshotsToOptions[selfMetrics] = prometheus.SnapshotExportOptions{
		ExporterPrefix: fmt.Sprintf("%s%s", m.exporterPrefix, prometheus.MetaMetricPrefix),
	}
	processMetrics := prometheus.NewSnapshot()
	processMetrics.Add(prometheus.NewFloatData(&prometheus.ProcessStartTimeSeconds, float64(m.startTime.Unix())+(float64(m.startTime.Nanosecond())/1e9)))
	snapshotsToOptions[processMetrics] = prometheus.SnapshotExportOptions{
		// These metrics must be written without any prefix.
	}
	// Aggregate all the snapshots from the sandboxes.
	// Safe to close here: queryMultiSandboxMetrics has returned, so no more
	// sends to snapshotCh can occur.
	close(snapshotCh)
	for snapshotAndOptions := range snapshotCh {
		snapshotsToOptions[snapshotAndOptions.snapshot] = snapshotAndOptions.options
	}
	// Add our own metrics.
	selfMetrics.Add(prometheus.NewIntData(&NumRunningSandboxesMetric, meta.numRunningSandboxes))
	selfMetrics.Add(prometheus.NewIntData(&NumCannotExportSandboxesMetric, meta.numCannotExportSandboxes))
	selfMetrics.Add(prometheus.NewIntData(&NumTotalSandboxesMetric, numSandboxesTotal))
	// Write out all data.
	// lastMetricsWrittenSize is used purely as a map pre-sizing hint for the
	// next scrape, hence the relaxed CompareAndSwap update at the end.
	lastMetricsWrittenSize := int(m.lastMetricsWrittenSize.Load())
	metricsWritten := make(map[string]bool, lastMetricsWrittenSize)
	commentHeader := fmt.Sprintf("Data for runsc metric server exporting data for sandboxes in root directory %s", m.rootDir)
	if metricsFilter != "" {
		commentHeader = fmt.Sprintf("%s (filtered using regular expression: %q)", commentHeader, metricsFilter)
	}
	written, err := prometheus.Write(w, prometheus.ExportOptions{
		CommentHeader:  commentHeader,
		MetricsWritten: metricsWritten,
	}, snapshotsToOptions)
	if err != nil {
		if written == 0 {
			return httpResult{http.StatusServiceUnavailable, err}
		}
		// Note that we cannot return an HTTP error here because we have already started writing a
		// response, which means we've already responded with a 200 OK status code.
		// This probably means the client closed the connection before we could finish writing.
		return httpOK
	}
	if lastMetricsWrittenSize < len(metricsWritten) {
		m.lastMetricsWrittenSize.CompareAndSwap(uint32(lastMetricsWrittenSize), uint32(len(metricsWritten)))
	}
	return httpOK
}
// serveHealthCheck serves the healthcheck endpoint.
// Returns a response prefixed by "runsc-metrics:OK" on success.
// Clients can use this to assert that they are talking to the metrics server, as opposed to some
// other random HTTP server. The "root" form value must match this server's
// configured root directory.
func (m *metricServer) serveHealthCheck(w http.ResponseWriter, req *http.Request) httpResult {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.shuttingDown {
		return httpResult{http.StatusServiceUnavailable, errors.New("server is shutting down")}
	}
	if err := req.ParseForm(); err != nil {
		return httpResult{http.StatusBadRequest, err}
	}
	if requestedRoot := req.Form.Get("root"); requestedRoot != m.rootDir {
		return httpResult{http.StatusBadRequest, fmt.Errorf("this metric server is configured to serve root directory: %s", m.rootDir)}
	}
	// Everything checks out; acknowledge with the well-known prefix.
	w.WriteHeader(http.StatusOK)
	io.WriteString(w, "runsc-metrics:OK")
	return httpOK
}
// servePID serves the PID of the metric server process as a decimal string.
func (m *metricServer) servePID(w http.ResponseWriter, req *http.Request) httpResult {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.shuttingDown {
		return httpResult{http.StatusServiceUnavailable, errors.New("server is shutting down")}
	}
	pidText := strconv.Itoa(m.pid)
	io.WriteString(w, pidText)
	return httpOK
}
// Server is the set of options to run a metric server.
// Initialize this struct and then call Run on it to run the metric server.
// The zero value is not usable: Config must be non-nil.
type Server struct {
	// Config is the main runsc configuration.
	Config *config.Config
	// ExporterPrefix is used as prefix for all metric names following Prometheus exporter convention.
	ExporterPrefix string
	// PIDFile, if set, will cause the metric server to write its own PID to this file after binding
	// to the requested address. The parent directory of this file must already exist.
	PIDFile string
	// ExposeProfileEndpoints, if true, exposes /runsc-metrics/profile-cpu and
	// /runsc-metrics/profile-heap to get profiling data about the metric server.
	ExposeProfileEndpoints bool
	// AllowUnknownRoot causes the metric server to keep running regardless of the existence of the
	// Config's root directory or the metric server's ability to access it.
	AllowUnknownRoot bool
}
// Run runs the metric server.
// It blocks until the server is instructed to exit, e.g. via signal.
func (s *Server) Run(ctx context.Context) error {
	ctx, ctxCancel := context.WithCancel(ctx)
	defer ctxCancel()
	m := &metricServer{
		exporterPrefix:         s.ExporterPrefix,
		pidFile:                s.PIDFile,
		exposeProfileEndpoints: s.ExposeProfileEndpoints,
		allowUnknownRoot:       s.AllowUnknownRoot,
	}
	conf := s.Config
	if conf.MetricServer == "" {
		return errors.New("config does not specify the metric server address (--metric-server)")
	}
	if strings.Contains(conf.MetricServer, "%ID%") {
		return fmt.Errorf("metric server address contains '%%ID%%': %v; this should have been replaced by the parent process", conf.MetricServer)
	}
	if _, err := container.ListSandboxes(conf.RootDir); err != nil {
		if !m.allowUnknownRoot {
			return fmt.Errorf("invalid root directory %q: tried to list sandboxes within it and got: %w", conf.RootDir, err)
		}
		log.Warningf("Invalid root directory %q: tried to list sandboxes within it and got: %v. Continuing anyway, as the server is configured to tolerate this.", conf.RootDir, err)
	}
	// container.ListSandboxes uses a glob pattern, which doesn't error out on
	// permission errors. Double-check by actually listing the directory.
	if _, err := ioutil.ReadDir(conf.RootDir); err != nil {
		if !m.allowUnknownRoot {
			return fmt.Errorf("invalid root directory %q: tried to list all entries within it and got: %w", conf.RootDir, err)
		}
		log.Warningf("Invalid root directory %q: tried to list all entries within it and got: %v. Continuing anyway, as the server is configured to tolerate this.", conf.RootDir, err)
	}
	m.startTime = time.Now()
	m.rootDir = conf.RootDir
	if strings.Contains(conf.MetricServer, "%RUNTIME_ROOT%") {
		newAddr := strings.ReplaceAll(conf.MetricServer, "%RUNTIME_ROOT%", m.rootDir)
		// Fix: the literal placeholder must be escaped as %%RUNTIME_ROOT%% inside
		// the format string; previously it was passed unescaped, so fmt treated
		// "%R", "%U", etc. as (invalid) verbs and garbled the log line.
		log.Infof("Metric server address replaced %%RUNTIME_ROOT%%: %q -> %q", conf.MetricServer, newAddr)
		conf.MetricServer = newAddr
	}
	m.address = conf.MetricServer
	m.sandboxes = make(map[container.FullID]*servedSandbox)
	m.lastStateFileStat = make(map[container.FullID]os.FileInfo)
	m.pid = os.Getpid()
	m.shutdownCh = make(chan os.Signal, 1)
	signal.Notify(m.shutdownCh, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	var listener net.Listener
	var listenErr error
	if strings.HasPrefix(conf.MetricServer, fmt.Sprintf("%c", os.PathSeparator)) {
		// Address looks like a filesystem path: bind a Unix domain socket.
		// Stat the path before and after binding to determine whether we created
		// the socket file (and therefore own it and should delete it on exit).
		beforeBindSt, beforeBindErr := os.Stat(conf.MetricServer)
		if listener, listenErr = (&net.ListenConfig{}).Listen(ctx, "unix", conf.MetricServer); listenErr != nil {
			return fmt.Errorf("cannot listen on unix domain socket %q: %w", conf.MetricServer, listenErr)
		}
		afterBindSt, afterBindErr := os.Stat(conf.MetricServer)
		if afterBindErr != nil {
			return fmt.Errorf("cannot stat our own unix domain socket %q: %w", conf.MetricServer, afterBindErr)
		}
		ownUDS := true
		if beforeBindErr == nil && beforeBindSt.Mode() == afterBindSt.Mode() {
			// Socket file existed and was a socket prior to us binding to it.
			if beforeBindSt.Sys() != nil && afterBindSt.Sys() != nil {
				beforeSt, beforeStOk := beforeBindSt.Sys().(*syscall.Stat_t)
				// Fix: this previously re-asserted beforeBindSt.Sys(), so the
				// dev/inode comparison below compared the pre-bind stat against
				// itself and always concluded the file was pre-existing.
				afterSt, afterStOk := afterBindSt.Sys().(*syscall.Stat_t)
				if beforeStOk && afterStOk && beforeSt.Dev == afterSt.Dev && beforeSt.Ino == afterSt.Ino {
					// Socket file is the same before and after binding, so we should not consider ourselves
					// the owner of it.
					ownUDS = false
				}
			}
		}
		if ownUDS {
			log.Infof("Bound on socket file %s which we own. As such, this socket file will be deleted on server shutdown.", conf.MetricServer)
			m.udsPath = conf.MetricServer
			defer os.Remove(m.udsPath)
			// Best-effort: make the socket world-accessible so unprivileged
			// scrapers can connect.
			os.Chmod(m.udsPath, 0777)
		} else {
			log.Infof("Bound on socket file %s which existed prior to this server's existence. As such, it will not be deleted on server shutdown.", conf.MetricServer)
		}
	} else {
		if strings.HasPrefix(conf.MetricServer, ":") {
			log.Warningf("Binding on all interfaces. This will allow anyone to list all containers on your machine!")
		}
		if listener, listenErr = (&net.ListenConfig{}).Listen(ctx, "tcp", conf.MetricServer); listenErr != nil {
			return fmt.Errorf("cannot listen on TCP address %q: %w", conf.MetricServer, listenErr)
		}
	}
	mux := http.NewServeMux()
	mux.HandleFunc("/runsc-metrics/healthcheck", logRequest(m.serveHealthCheck))
	mux.HandleFunc("/runsc-metrics/pid", logRequest(m.servePID))
	if m.exposeProfileEndpoints {
		log.Warningf("Profiling HTTP endpoints are exposed; this should only be used for development!")
		mux.HandleFunc("/runsc-metrics/profile-cpu", logRequest(m.profileCPU))
		mux.HandleFunc("/runsc-metrics/profile-heap", logRequest(m.profileHeap))
	} else {
		// Disable memory profiling, since we don't expose it.
		runtime.MemProfileRate = 0
	}
	mux.HandleFunc("/metrics", logRequest(m.serveMetrics))
	mux.HandleFunc("/", logRequest(m.serveIndex))
	m.srv.Handler = mux
	m.srv.ReadTimeout = httpTimeout
	m.srv.WriteTimeout = httpTimeout
	if err := m.startVerifyLoop(ctx); err != nil {
		return fmt.Errorf("cannot start background loop: %w", err)
	}
	if m.pidFile != "" {
		if err := ioutil.WriteFile(m.pidFile, []byte(fmt.Sprintf("%d", m.pid)), 0644); err != nil {
			return fmt.Errorf("cannot write PID to file %q: %w", m.pidFile, err)
		}
		defer os.Remove(m.pidFile)
		log.Infof("Wrote PID %d to file %v.", m.pid, m.pidFile)
	}
	// If not modified by the user from the environment, set the Go GC percentage lower than default.
	if _, hasEnv := os.LookupEnv("GOGC"); !hasEnv {
		debug.SetGCPercent(40)
	}
	// Run GC immediately to get rid of all the initialization-related memory bloat and start from
	// a clean slate.
	state.Release()
	runtime.GC()
	// Initialization complete.
	log.Infof("Server serving on %s for root directory %s.", conf.MetricServer, conf.RootDir)
	serveErr := m.srv.Serve(listener)
	log.Infof("Server has stopped accepting requests.")
	m.mu.Lock()
	defer m.mu.Unlock()
	if serveErr != nil {
		if serveErr == http.ErrServerClosed {
			return nil
		}
		return fmt.Errorf("cannot serve on address %s: %w", conf.MetricServer, serveErr)
	}
	// Per documentation, http.Server.Serve can never return a nil error, so this is not a success.
	return fmt.Errorf("HTTP server Serve() did not return expected error")
}
|
/**
Go allows passing pointers to a function: simply declare the parameter with a pointer type.
The example below demonstrates how to pass pointers to a function and have it modify the caller's values:
**/
package main
import "fmt"
// main demonstrates pointer-based swapping: it prints two variables,
// exchanges their values through swap, then prints them again.
func main() {
	a, b := 100, 200
	// Values before the swap.
	fmt.Printf("交换前a的值: %d\n", a)
	fmt.Printf("交换前b的值: %d\n", b)
	// swap receives the addresses of a and b, so it can modify them in place.
	swap(&a, &b)
	// Values after the swap.
	fmt.Printf("交换后a的值:%d\n", a)
	fmt.Printf("交换后b的值:%d\n", b)
}
// swap exchanges the values stored at x and y in place. The caller observes
// the change because the function writes through the pointers rather than
// copying the values.
func swap(x *int, y *int) {
	// Parallel assignment is the idiomatic Go way to exchange two values;
	// it replaces the explicit temporary variable of the original version.
	*x, *y = *y, *x
}
// 输出:
// 交换前a的值: 100
// 交换前b的值: 200
// 交换后a的值:200
// 交换后b的值:100
|
package src
import (
"fmt"
"log"
"sync"
"time"
)
// counter maps a database index (int) to its keyspace progress record.
var counter = sync.Map{}

// addKeyspace registers (or replaces) the progress record for ks.db.
func addKeyspace(ks keyspace) {
	counter.Store(ks.db, ks)
}
// examined adds size to the examined count of the keyspace stored under db.
// It is a no-op if db has not been registered via addKeyspace.
// NOTE(review): the Load/modify/Store sequence is not atomic — concurrent
// calls for the same db can lose updates. Confirm callers update each db from
// a single goroutine, or guard this with a mutex.
func examined(db int, size int) {
	if value, ok := counter.Load(db); ok {
		if ks, ok := value.(keyspace); ok {
			ks.examined += size
			counter.Store(db, ks)
		}
	}
}
// printProgress starts a background goroutine that logs scan progress once
// per second. It returns immediately.
// NOTE(review): the ticker is never stopped and the goroutine never exits, so
// this leaks both for the lifetime of the process; acceptable for a one-shot
// CLI tool, but confirm that is the intended usage.
func printProgress() {
	ticker := time.NewTicker(1 * time.Second)
	go func() {
		for range ticker.C {
			calcProgress()
		}
	}()
}
// calcProgress logs, for every keyspace registered in counter, the number of
// keys examined so far and the completion percentage.
func calcProgress() {
	counter.Range(func(key, value interface{}) bool {
		if ks, ok := value.(keyspace); ok {
			db := ks.db
			keys := ks.keys
			examined := ks.examined
			progress := (float64(examined) / float64(keys)) * 100
			percent := fmt.Sprintf("%.2f%s", progress, "%")
			log.Println("db:", db, "keys:", keys, "examined:", examined, "progress:", percent)
		}
		// Fix: return true to keep iterating. The previous version returned
		// false, which makes sync.Map.Range stop after the first entry, so
		// progress was only ever reported for one database.
		return true
	})
}
|
package main
import "fmt"
// main prints the squares of the natural numbers from 1 through 10,
// one square per line.
func main() {
	for n := 1; n <= 10; n++ {
		square := n * n
		fmt.Println(square)
	}
}
// Напишите программу, которая выводит квадраты натуральных чисел от 1 до 10.
// Квадрат каждого числа должен выводится в новой строке.
|
package leetcode
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// recursion
// isSymmetric reports whether the tree rooted at root is a mirror of itself.
// It compares the tree against itself with left/right mirrored via isSameTree.
// NOTE(review): this file declares isSymmetric twice (see the iterative
// version below); two functions with the same name in one package do not
// compile — one of them should be renamed or moved.
func isSymmetric(root *TreeNode) bool {
	return isSameTree(root, root)
}
func isSameTree(p *TreeNode, q *TreeNode) bool {
if p == nil && q == nil {
return true
} else if p == nil || q == nil {
return false
} else if p.Val != q.Val {
return false
} else {
return isSameTree(p.Left, q.Right) && isSameTree(p.Right, q.Left)
}
}
// iteration
// isSymmetric (iterative variant) reports whether the tree is a mirror of
// itself using an explicit queue instead of recursion. Node pairs are pushed
// two at a time; each dequeued pair must either both be nil or carry equal
// values, and their children are enqueued crossed (left with right).
// NOTE(review): this redeclares isSymmetric, which conflicts with the
// recursive version above and will not compile in one package — rename one
// of the two (e.g. isSymmetricIter) before use.
func isSymmetric(root *TreeNode) bool {
	n1 := root
	n2 := root
	queue := []*TreeNode{}
	queue = append(queue, n1)
	queue = append(queue, n2)
	for len(queue) > 0 {
		// Dequeue one pair to compare.
		n1, n2 := queue[0], queue[1]
		queue = queue[2:]
		if n1 == nil && n2 == nil {
			continue
		}
		if n1 == nil || n2 == nil {
			return false
		}
		if n1.Val != n2.Val {
			return false
		}
		// Enqueue children crossed to enforce the mirror property.
		queue = append(queue, n1.Left)
		queue = append(queue, n2.Right)
		queue = append(queue, n1.Right)
		queue = append(queue, n2.Left)
	}
	return true
}
|
package Controllers
import (
"TaibaiSupport/Models"
"TaibaiSupport/TaibaiDBHelper"
"encoding/json"
"github.com/gorilla/websocket"
"sync"
)
// TaibaiClassroomManager tracks the websocket connection of every user in
// every classroom. WSConns is keyed by classroom id, then by user id.
type TaibaiClassroomManager struct {
	// OperationRWMux guards all access to WSConns.
	OperationRWMux sync.RWMutex
	WSConns map[int]map[int]*Models.TaibaiWSConn
}
// NewTaibaiClassroomManager returns a manager with an empty, ready-to-use
// connection map.
func NewTaibaiClassroomManager() *TaibaiClassroomManager {
	return &TaibaiClassroomManager{
		WSConns: make(map[int]map[int]*Models.TaibaiWSConn),
	}
}
// RegisterTaibaiWSConn registers a websocket connection for the given user in
// the given classroom, closing any previous connection the user had, and
// returns the newly created wrapper. Safe for concurrent use.
func (this *TaibaiClassroomManager) RegisterTaibaiWSConn(classroomId, userId int, conn *websocket.Conn) *Models.TaibaiWSConn {
	this.OperationRWMux.Lock()
	defer this.OperationRWMux.Unlock()
	// Record the classroom in redis.
	TaibaiDBHelper.AddClassroom(classroomId)
	// Ensure the per-classroom connection map exists before inserting.
	classroomContainer, classroomOk:= this.WSConns[classroomId]
	if !classroomOk{
		classroomContainer = make(map[int]*Models.TaibaiWSConn)
		this.WSConns[classroomId] = classroomContainer
	}
	// Record the user as a member of the classroom in redis.
	TaibaiDBHelper.AddUserIntoClassroom(classroomId, userId)
	// Close the user's previous connection first, if any.
	taibaiWSConn, wsConnOk := classroomContainer[userId]
	if wsConnOk{
		if taibaiWSConn.Conn != nil{
			// Best-effort close; the error is deliberately ignored.
			_ = taibaiWSConn.Conn.Close()
		}
	}
	// Then register the fresh connection in its place.
	taibaiWSConn = Models.NewTaibaiWSConn(classroomId, userId, conn)
	this.WSConns[classroomId][userId] = taibaiWSConn
	return taibaiWSConn
}
// BroadcastClassroomStatus assembles the current status of the classroom
// (including per-participant status) from redis and sends it as a JSON
// message to every connection registered for that classroom.
// NOTE(review): this only reads WSConns, so a read lock would likely
// suffice; it takes the write lock — confirm whether SendMessage requires
// exclusive access before relaxing it.
func (this *TaibaiClassroomManager)BroadcastClassroomStatus(classroomId int) {
	this.OperationRWMux.Lock()
	defer this.OperationRWMux.Unlock()
	classroomStatus := TaibaiDBHelper.GetClassroomStatus(classroomId)
	classroomStatus["classroomId"] = classroomId
	// Collect each participant's status keyed by user id.
	participantList := []interface{}{}
	for _, userId := range TaibaiDBHelper.GetUserList(classroomId) {
		participantStatus := TaibaiDBHelper.GetUserStatus(userId)
		participantStatus["userId"] = userId
		participantList = append(participantList, participantStatus)
	}
	classroomStatus["participantList"] = participantList
	message := Models.NewClassroomMessage(Models.MessageType_UpdateClassroomStatus, 0, []int{})
	message.MessageContent = classroomStatus
	// Marshal error deliberately ignored; the payload is built from
	// JSON-safe map/slice values.
	clsStatus, _:= json.Marshal(message)
	userWSContainer := this.WSConns[classroomId]
	for _, conn := range userWSContainer {
		conn.SendMessage(clsStatus)
	}
}
// BroadcastOriginEvent marshals event as JSON and sends it to every user
// connected to the given classroom.
func (this *TaibaiClassroomManager) BroadcastOriginEvent(classroomId int, event Models.TaibaiClassroomEvent) {
	message,_ := json.Marshal(event)
	// Fix: WSConns is mutated by RegisterTaibaiWSConn under OperationRWMux,
	// so reading it without a lock was a data race. A read lock suffices
	// because this method does not modify the map.
	this.OperationRWMux.RLock()
	defer this.OperationRWMux.RUnlock()
	userWSContainer := this.WSConns[classroomId]
	for _, conn := range userWSContainer {
		conn.SendMessage(message)
	}
}
// TaibaiClassroomManagerInstance is the process-wide singleton manager,
// created at package initialization time.
var TaibaiClassroomManagerInstance *TaibaiClassroomManager

func init() {
	TaibaiClassroomManagerInstance = NewTaibaiClassroomManager()
}
|
package main
import (
"fmt"
"strconv"
)
// maps seeds the "app" key with two initial application names; initApps later
// appends generated entries to the same slice.
var maps = map[string][]string{
	// gofmt -s: the inner []string type is implied by the map's value type.
	"app": {"apptest1", "apptest2"},
}
// main grows the shared maps variable via initApps and reports how many
// entries the "app" key ends up holding.
func main() {
	initApps(maps)
	appCount := len(maps["app"])
	fmt.Println(appCount)
}
// initApps appends ten generated entries ("test0" through "test9") to the
// slice stored under the "app" key, then writes the grown slice back into
// the map so the caller observes the change.
func initApps(maps map[string][]string) {
	entries := maps["app"]
	for n := 0; n < 10; n++ {
		entries = append(entries, "test"+strconv.Itoa(n))
	}
	maps["app"] = entries
}
|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rwutil
import "io"
// PlainReader provides an io.Reader for a bytes slice. It intentionally does
// not provide any other methods.
// Each call to Read delivers the slice from the beginning; there is no
// read position.
type PlainReader []byte
// Read copies the underlying byte slice into b and reports io.EOF, since the
// entire content is delivered in a single call.
// Fix: n is now the number of bytes actually copied (capped at len(b)), as
// the io.Reader contract requires 0 <= n <= len(b). The previous version
// always returned len(p), which could exceed len(b) and make callers index
// past the end of their buffer.
func (p PlainReader) Read(b []byte) (n int, err error) {
	n = copy(b, p)
	return n, io.EOF
}
// PlainWriter provides an io.Writer for a bytes slice. It intentionally does
// not provide any other methods. Clients should initialize length with make.
// Each call to Write copies into the slice from the beginning; there is no
// write position.
type PlainWriter []byte
// Write writes all of b to p.
// Fix: n is now the number of bytes actually copied, and io.ErrShortWrite is
// reported when p is too small to hold all of b, as the io.Writer contract
// requires. The previous version silently truncated and returned len(p),
// which could even exceed len(b).
func (p PlainWriter) Write(b []byte) (n int, err error) {
	n = copy(p, b)
	if n < len(b) {
		return n, io.ErrShortWrite
	}
	return n, nil
}
|
package main
// HOGE and Fuga start with an upper-case letter and are therefore exported;
// moge is lower-case and visible only inside this package.
const HOGE = true
const Fuga = 2
const moge = "moge"
// export prints a function-local constant. Despite its capitalized name, a
// constant declared inside a function body is scoped to that function and is
// not exported from the package.
func export() {
	const localMessage = "Pugya"
	println(localMessage)
}
|
package main
import (
"fmt"
"os"
"os/exec"
"github.com/hashicorp/go-plugin"
"platform-plugin/shared"
"bytes"
"io"
"strings"
"platform-plugin/plugin-utils"
"io/ioutil"
"path/filepath"
"github.com/hashicorp/go-hclog"
"platform-plugin/output"
"encoding/json"
)
// PluginMap is the map of plugins we can dispense.
// It is populated at runtime from the platform name read out of platform.json.
var PluginMap = map[string]plugin.Plugin{}

// Platform mirrors the fields read from the platform.json descriptor:
// the plugin's name and the executable to launch.
type Platform struct {
	Name string
	Exec string
}
// main is the plugin host: it reads the platform descriptor next to this
// executable, launches the described plugin process, calls its build and run
// functions, and finally replays the plugin's captured stdout as structured
// output.
func main() {
	///////// READ Plugin name from platform.json file /////////
	wioPlatform := &Platform{}
	execPath, err := os.Executable()
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(2)
	}
	// Resolve the directory containing this executable.
	execPath, err = filepath.Abs(execPath + "/../")
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(2)
	}
	buff, err := ioutil.ReadFile(execPath + "/avr-platform/platform.json")
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(2)
	}
	err = json.Unmarshal(buff, wioPlatform)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(2)
	}
	//////////////////////////////////////////////////////////////
	/////////////////////// Create Client ///////////////////////
	// Redirect os.Stdout into a pipe so all plugin output can be captured and
	// re-emitted in a structured form at the end.
	// NOTE(review): the os.Pipe error is ignored — confirm that is acceptable.
	old := os.Stdout
	r, w, _ := os.Pipe()
	os.Stdout = w
	// create a logger
	logger := hclog.New(&hclog.LoggerOptions{
		Output: os.Stdout,
		Level:  hclog.Trace,
		Name:   "wio",
	})
	PluginMap[wioPlatform.Name] = &shared.PlatformPlugin{}
	// We're a host. Start by launching the plugin process.
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: shared.Handshake,
		Plugins:         PluginMap,
		Logger:          logger,
		Cmd:             exec.Command(execPath + "/avr-platform/" + wioPlatform.Exec),
		AllowedProtocols: []plugin.Protocol{
			plugin.ProtocolNetRPC, plugin.ProtocolGRPC},
		SyncStdout: os.Stdout,
	})
	defer client.Kill()
	// Connect via RPC
	rpcClient, err := client.Client()
	if err != nil {
		fmt.Println("Error:", err.Error())
		os.Exit(1)
	}
	////////////////////////////////////////////////////////////
	///////////////////////// Get Plugin ///////////////////////
	// Request the plugin
	// NOTE(review): the dispense key is hard-coded while the registration key
	// above uses wioPlatform.Name — confirm these always agree for this
	// platform, or dispense by wioPlatform.Name instead.
	raw, err := rpcClient.Dispense("platform-atmelavr")
	if err != nil {
		fmt.Println("Error:", err.Error())
		os.Exit(1)
	}
	platform := raw.(shared.Platform)
	///////////////////////////////////////////////////////////
	//////////////////////// Call functions ///////////////////
	obj := &shared.TargetInformation{}
	obj.Platform = "Hola"
	// build function
	executables, err := platform.BuildProject(obj)
	fmt.Printf("wio message:: {{Number of executables given %d\n}}", len(executables))
	if err != nil {
		// Errors go to the real stdout (old), not the capture pipe.
		fmt.Fprintln(old, plugin_utils.GetPluginError(err))
		os.Exit(2)
	}
	// run function
	_, err = platform.RunProject(&shared.RunInformation{})
	if err != nil {
		fmt.Fprintln(old, plugin_utils.GetPluginError(err))
		os.Exit(2)
	}
	// print the output because we need to structure the logs
	// Close the write end, restore stdout, then drain and decode the captured
	// plugin messages line by line.
	w.Close()
	os.Stdout = old
	var buf bytes.Buffer
	io.Copy(&buf, r)
	for _, line := range strings.Split(buf.String(), "\n") {
		if plugin_utils.IsPluginMessage(line) {
			fmt.Println(output.DecodeMessage(line))
		}
	}
}
|
/*
* Databricks
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 0.0.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package models
// ClustersAwsAttributes models the AWS-specific attributes of a Databricks
// cluster, as described by the Swagger-generated API schema. All fields are
// optional and omitted from JSON when empty.
type ClustersAwsAttributes struct {
	// FirstOnDemand is the number of nodes placed on on-demand instances.
	FirstOnDemand int32 `json:"first_on_demand,omitempty"`
	Availability *ClustersAwsAvailability `json:"availability,omitempty"`
	ZoneId string `json:"zone_id,omitempty"`
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	SpotBidPricePercent int32 `json:"spot_bid_price_percent,omitempty"`
	// EBS volume configuration for the cluster's instances.
	EbsVolumeType *ClustersEbsVolumeType `json:"ebs_volume_type,omitempty"`
	EbsVolumeCount int32 `json:"ebs_volume_count,omitempty"`
	EbsVolumeSize int32 `json:"ebs_volume_size,omitempty"`
}
|
package main
import (
"fmt"
"strconv"
)
// main finds the largest palindrome that is the product of two three-digit
// numbers (Project Euler problem 4) and prints it.
func main() {
	largest := 0
	for a := 100; a < 1000; a++ {
		// Start b at a so each unordered pair is considered only once.
		for b := a; b < 1000; b++ {
			if p := a * b; p > largest && isPalindrome(p) {
				largest = p
			}
		}
	}
	fmt.Println(largest)
}
// isPalindrome reports whether the decimal representation of num reads the
// same forwards and backwards.
//
// Fixes: the original named a local variable `string`, shadowing the builtin
// type, and used the non-idiomatic `i += 1` / `j -= 1` forms.
func isPalindrome(num int) bool {
	s := strconv.Itoa(num)
	// Compare characters from both ends, moving inward.
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		if s[i] != s[j] {
			return false
		}
	}
	return true
}
|
package main
import (
"bytes"
"encoding/pem"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"github.com/davidwalter0/fetchhostcerts"
)
var version = ""
// main fetches TLS certificate chains for the hosts given as positional
// arguments, writes each host's complete chain to a temp PEM file in the
// current directory, and prints the certificates in the requested format
// (markdown, JSON, plain, or a user-supplied Go template).
//
// Fixes over the original: the temp file was created and written once per
// chain element (inside the inner loop) with partially accumulated content;
// a redundant nested err check wrapped the pem.Encode error; the Write
// failure log used two format verbs for three arguments; and the temp file
// was never closed.
func main() {
	var format string
	var template string
	var skipVerify bool
	var utc bool
	var timeout int
	var showVersion bool
	flag.StringVar(&format, "f", "simple table", "Output format. md: as markdown, json: as JSON. ")
	flag.StringVar(&format, "format", "simple table", "Output format. md: as markdown, json: as JSON. ")
	flag.StringVar(&template, "t", "", "Output format as Go template string or Go template file path.")
	flag.StringVar(&template, "template", "", "Output format as Go template string or Go template file path.")
	flag.BoolVar(&skipVerify, "k", false, "Skip verification of server's certificate chain and host name.")
	flag.BoolVar(&skipVerify, "skip-verify", false, "Skip verification of server's certificate chain and host name.")
	flag.BoolVar(&utc, "u", false, "Use UTC to represent NotBefore and NotAfter.")
	flag.BoolVar(&utc, "utc", false, "Use UTC to represent NotBefore and NotAfter.")
	flag.IntVar(&timeout, "s", 3, "Timeout seconds.")
	flag.IntVar(&timeout, "timeout", 3, "Timeout seconds.")
	flag.BoolVar(&showVersion, "v", false, "Show version.")
	flag.BoolVar(&showVersion, "version", false, "Show version.")
	flag.Parse()
	if showVersion {
		fmt.Println("cert version ", version)
		return
	}
	fetchhostcerts.SkipVerify = skipVerify
	fetchhostcerts.UTC = utc
	fetchhostcerts.TimeoutSeconds = timeout
	certs, err := fetchhostcerts.NewCerts(flag.Args())
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	for _, certificate := range certs {
		// Accumulate the entire chain for this host into one PEM buffer.
		var pemBytes bytes.Buffer
		encodeOK := true
		for _, cert := range certificate.CertChain() {
			if err := pem.Encode(&pemBytes, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil {
				log.Println("CertChainToPEM", err)
				encodeOK = false
				break
			}
		}
		if !encodeOK {
			// Skip writing a partial chain.
			continue
		}
		// Write the complete chain exactly once per certificate.
		f, err := ioutil.TempFile(".", certificate.DomainName+".*")
		if err != nil {
			log.Println("Tempfile", err)
			continue
		}
		if n, err := f.Write(pemBytes.Bytes()); err != nil {
			log.Printf("Wrote %d of %d bytes: %v", n, pemBytes.Len(), err)
		}
		// Close to flush and avoid leaking the descriptor.
		if err := f.Close(); err != nil {
			log.Println("Close", err)
		}
	}
	if template == "" {
		switch format {
		case "md":
			fmt.Printf("%s", certs.Markdown())
		case "json":
			fmt.Printf("%s", certs.JSON())
		default:
			fmt.Printf("%s", certs)
		}
		return
	}
	if err := fetchhostcerts.SetUserTempl(template); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	fmt.Printf("%s", certs)
}
|
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vfs
import (
"bytes"
"fmt"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/atomicbitops"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/uniqueid"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
// inotifyEventBaseSize is the base size of linux's struct inotify_event. This
// must be a power of 2 for the rounding in Event.setName below.
const inotifyEventBaseSize = 16

// EventType defines different kinds of inotify events.
//
// The way events are labelled appears somewhat arbitrary, but they must match
// Linux so that IN_EXCL_UNLINK behaves as it does in Linux.
//
// +stateify savable
type EventType uint8

// PathEvent and InodeEvent correspond to FSNOTIFY_EVENT_PATH and
// FSNOTIFY_EVENT_INODE in Linux.
const (
	PathEvent  EventType = iota
	InodeEvent EventType = iota
)
// Inotify represents an inotify instance created by inotify_init(2) or
// inotify_init1(2). Inotify implements FileDescriptionImpl.
//
// +stateify savable
type Inotify struct {
	vfsfd FileDescription
	FileDescriptionDefaultImpl
	DentryMetadataFileDescriptionImpl
	NoLockFD

	// Unique identifier for this inotify instance. We don't just reuse the
	// inotify fd because fds can be duped. These should not be exposed to the
	// user, since we may aggressively reuse an id on S/R.
	//
	// id is immutable after creation (set once in NewInotifyFD).
	id uint64

	// queue is used to notify interested parties when the inotify instance
	// becomes readable or writable.
	queue waiter.Queue

	// evMu *only* protects the events list. We need a separate lock while
	// queuing events: using mu may violate lock ordering, since at that point
	// the calling goroutine may already hold Watches.mu.
	evMu inotifyEventMutex `state:"nosave"`

	// A list of pending events for this inotify instance. Protected by evMu.
	events eventList

	// A scratch buffer, used to serialize inotify events. Allocate this
	// ahead of time for the sake of performance. Protected by evMu.
	scratch []byte

	// mu protects the fields below.
	mu inotifyMutex `state:"nosave"`

	// nextWatchMinusOne is used to allocate watch descriptors on this Inotify
	// instance. Note that Linux starts numbering watch descriptors from 1.
	nextWatchMinusOne int32

	// Map from watch descriptors to watch objects. Protected by mu.
	watches map[int32]*Watch
}
var _ FileDescriptionImpl = (*Inotify)(nil)
// NewInotifyFD constructs a new Inotify instance.
//
// Apart from O_CLOEXEC (stripped here because it affects file descriptors,
// not descriptions), only O_NONBLOCK may be set in flags; any other bit
// results in EINVAL.
func NewInotifyFD(ctx context.Context, vfsObj *VirtualFilesystem, flags uint32) (*FileDescription, error) {
	// O_CLOEXEC affects file descriptors, so it must be handled outside of vfs.
	flags &^= linux.O_CLOEXEC
	if flags&^linux.O_NONBLOCK != 0 {
		return nil, linuxerr.EINVAL
	}
	id := uniqueid.GlobalFromContext(ctx)
	vd := vfsObj.NewAnonVirtualDentry(fmt.Sprintf("[inotifyfd:%d]", id))
	defer vd.DecRef(ctx)
	fd := &Inotify{
		id:      id,
		scratch: make([]byte, inotifyEventBaseSize),
		watches: make(map[int32]*Watch),
	}
	// PRead/PWrite are denied up front; see also the ESPIPE stubs below.
	if err := fd.vfsfd.Init(fd, flags, vd.Mount(), vd.Dentry(), &FileDescriptionOptions{
		UseDentryMetadata: true,
		DenyPRead:         true,
		DenyPWrite:        true,
	}); err != nil {
		return nil, err
	}
	return &fd.vfsfd, nil
}
// Release implements FileDescriptionImpl.Release. Release removes all
// watches and frees all resources for an inotify instance.
func (i *Inotify) Release(ctx context.Context) {
	// Dentries whose watch sets drop to zero are collected here and notified
	// after i.mu is released, to respect lock ordering.
	var ds []*Dentry

	// We need to hold i.mu to avoid a race with concurrent calls to
	// Inotify.handleDeletion from Watches. There's no risk of Watches
	// accessing this Inotify after the destructor ends, because we remove all
	// references to it below.
	i.mu.Lock()
	for _, w := range i.watches {
		// Remove references to the watch from the watches set on the target. We
		// don't need to worry about the references from i.watches, since this
		// file description is about to be destroyed.
		d := w.target
		ws := d.Watches()
		// Watchable dentries should never return a nil watch set.
		if ws == nil {
			panic("Cannot remove watch from an unwatchable dentry")
		}
		ws.Remove(i.id)
		if ws.Size() == 0 {
			ds = append(ds, d)
		}
	}
	i.mu.Unlock()

	// Notify outside of i.mu; OnZeroWatches may take other locks.
	for _, d := range ds {
		d.OnZeroWatches(ctx)
	}
}
// Allocate implements FileDescription.Allocate.
//
// Inotify fds are read-only; calling Allocate on one is a programmer error,
// hence the panic rather than a returned errno.
func (i *Inotify) Allocate(ctx context.Context, mode, offset, length uint64) error {
	panic("Allocate should not be called on read-only inotify fds")
}

// EventRegister implements waiter.Waitable.
func (i *Inotify) EventRegister(e *waiter.Entry) error {
	i.queue.EventRegister(e)
	return nil
}

// EventUnregister implements waiter.Waitable.
func (i *Inotify) EventUnregister(e *waiter.Entry) {
	i.queue.EventUnregister(e)
}

// Readiness implements waiter.Waitable.Readiness.
//
// Readiness indicates whether there are pending events for an inotify
// instance: the instance is readable iff the pending event list is non-empty.
func (i *Inotify) Readiness(mask waiter.EventMask) waiter.EventMask {
	ready := waiter.EventMask(0)
	i.evMu.Lock()
	defer i.evMu.Unlock()
	if !i.events.Empty() {
		ready |= waiter.ReadableEvents
	}
	return mask & ready
}

// Epollable implements FileDescriptionImpl.Epollable.
func (i *Inotify) Epollable() bool {
	return true
}

// PRead implements FileDescriptionImpl.PRead. Inotify fds are not seekable;
// positional reads fail with ESPIPE.
func (*Inotify) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts ReadOptions) (int64, error) {
	return 0, linuxerr.ESPIPE
}

// PWrite implements FileDescriptionImpl.PWrite. See PRead.
func (*Inotify) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts WriteOptions) (int64, error) {
	return 0, linuxerr.ESPIPE
}

// Write implements FileDescriptionImpl.Write. Inotify fds are never writable.
func (*Inotify) Write(ctx context.Context, src usermem.IOSequence, opts WriteOptions) (int64, error) {
	return 0, linuxerr.EBADF
}
// Read implements FileDescriptionImpl.Read.
//
// Read copies out as many whole pending events as fit in dst. It returns
// EINVAL when dst cannot hold even a base-sized event (or the next pending
// event, if nothing has been written yet), and ErrWouldBlock when the queue
// is empty.
func (i *Inotify) Read(ctx context.Context, dst usermem.IOSequence, opts ReadOptions) (int64, error) {
	if dst.NumBytes() < inotifyEventBaseSize {
		return 0, linuxerr.EINVAL
	}

	i.evMu.Lock()
	defer i.evMu.Unlock()

	if i.events.Empty() {
		// Nothing to read yet, tell caller to block.
		return 0, linuxerr.ErrWouldBlock
	}

	var writeLen int64
	for it := i.events.Front(); it != nil; {
		// Advance `it` before the element is removed from the list, or else
		// it.Next() will always be nil.
		event := it
		it = it.Next()

		// Does the buffer have enough remaining space to hold the event we're
		// about to write out?
		if dst.NumBytes() < int64(event.sizeOf()) {
			if writeLen > 0 {
				// Buffer wasn't big enough for all pending events, but we did
				// write some events out.
				return writeLen, nil
			}
			return 0, linuxerr.EINVAL
		}

		// Linux always dequeues an available event as long as there's enough
		// buffer space to copy it out, even if the copy below fails. Emulate
		// this behaviour.
		i.events.Remove(event)

		// Buffer has enough space, copy event to the read buffer.
		n, err := event.CopyTo(ctx, i.scratch, dst)
		if err != nil {
			return 0, err
		}

		writeLen += n
		dst = dst.DropFirst64(n)
	}
	return writeLen, nil
}
// Ioctl implements FileDescriptionImpl.Ioctl.
//
// Only FIONREAD is supported: it writes the total serialized size (in bytes)
// of all queued events to the user address in args[2]. Any other request
// fails with ENOTTY.
func (i *Inotify) Ioctl(ctx context.Context, uio usermem.IO, sysno uintptr, args arch.SyscallArguments) (uintptr, error) {
	switch args[1].Int() {
	case linux.FIONREAD:
		i.evMu.Lock()
		var n uint32
		for e := i.events.Front(); e != nil; e = e.Next() {
			n += uint32(e.sizeOf())
		}
		i.evMu.Unlock()
		var buf [4]byte
		hostarch.ByteOrder.PutUint32(buf[:], n)
		_, err := uio.CopyOut(ctx, args[2].Pointer(), buf[:], usermem.IOOpts{})
		return 0, err

	default:
		return 0, linuxerr.ENOTTY
	}
}
// queueEvent appends ev to the pending event list and notifies readers,
// unless ev is identical to the most recently queued event, in which case it
// is coalesced (dropped).
func (i *Inotify) queueEvent(ev *Event) {
	i.evMu.Lock()

	// Check if we should coalesce the event we're about to queue with the last
	// one currently in the queue. Events are coalesced if they are identical.
	if last := i.events.Back(); last != nil {
		if ev.equals(last) {
			// "Coalesce" the two events by simply not queuing the new one. We
			// don't need to raise a waiter.EventIn notification because no new
			// data is available for reading.
			i.evMu.Unlock()
			return
		}
	}

	i.events.PushBack(ev)

	// Release mutex before notifying waiters because we don't control what they
	// can do.
	i.evMu.Unlock()

	i.queue.Notify(waiter.ReadableEvents)
}
// newWatchLocked creates and adds a new watch to target.
//
// Precondition: i.mu must be locked. ws must be the watch set for target d.
func (i *Inotify) newWatchLocked(d *Dentry, ws *Watches, mask uint32) *Watch {
	w := &Watch{
		owner:  i,
		wd:     i.nextWatchIDLocked(),
		target: d,
		mask:   atomicbitops.FromUint32(mask),
	}

	// Hold the watch in this inotify instance as well as the watch set on the
	// target.
	i.watches[w.wd] = w
	ws.Add(w)
	return w
}

// nextWatchIDLocked allocates and returns a new watch descriptor.
//
// Precondition: i.mu must be locked.
func (i *Inotify) nextWatchIDLocked() int32 {
	// Incrementing first means descriptors start at 1, matching Linux (see
	// the nextWatchMinusOne field comment).
	i.nextWatchMinusOne++
	return i.nextWatchMinusOne
}
// AddWatch constructs a new inotify watch and adds it to the target. It
// returns the watch descriptor returned by inotify_add_watch(2).
//
// If this instance already watches target, the existing watch's mask is
// replaced (or OR-ed, with IN_MASK_ADD) and its descriptor is returned.
//
// The caller must hold a reference on target.
func (i *Inotify) AddWatch(target *Dentry, mask uint32) int32 {
	// Note: Locking this inotify instance protects the result returned by
	// Lookup() below. With the lock held, we know for sure the lookup result
	// won't become stale because it's impossible for *this* instance to
	// add/remove watches on target.
	i.mu.Lock()
	defer i.mu.Unlock()

	ws := target.Watches()

	// Does the target already have a watch from this inotify instance?
	if existing := ws.Lookup(i.id); existing != nil {
		newmask := mask
		if mask&linux.IN_MASK_ADD != 0 {
			// "Add (OR) events to watch mask for this pathname if it already
			// exists (instead of replacing mask)." -- inotify(7)
			newmask |= existing.mask.Load()
		}
		existing.mask.Store(newmask)
		return existing.wd
	}

	// No existing watch, create a new watch.
	w := i.newWatchLocked(target, ws, mask)
	return w.wd
}
// RmWatch looks up an inotify watch for the given 'wd' and configures the
// target to stop sending events to this inotify instance. It queues an
// IN_IGNORED event for the removed watch, and returns EINVAL when wd does
// not name a watch on this instance.
func (i *Inotify) RmWatch(ctx context.Context, wd int32) error {
	i.mu.Lock()

	// Find the watch we were asked to remove.
	w, ok := i.watches[wd]
	if !ok {
		i.mu.Unlock()
		return linuxerr.EINVAL
	}

	// Remove the watch from this instance.
	delete(i.watches, wd)

	// Remove the watch from the watch target.
	ws := w.target.Watches()
	// AddWatch ensures that w.target has a non-nil watch set.
	if ws == nil {
		panic("Watched dentry cannot have nil watch set")
	}
	ws.Remove(w.OwnerID())
	remaining := ws.Size()
	i.mu.Unlock()

	// Notify outside of i.mu; OnZeroWatches may take other locks.
	if remaining == 0 {
		w.target.OnZeroWatches(ctx)
	}

	// Generate the event for the removal.
	i.queueEvent(newEvent(wd, "", linux.IN_IGNORED, 0))

	return nil
}
// Watches is the collection of all inotify watches on a single file.
//
// +stateify savable
type Watches struct {
	// mu protects the fields below.
	mu sync.RWMutex `state:"nosave"`

	// ws is the map of active watches in this collection, keyed by the inotify
	// instance id of the owner. A nil map indicates that the watch set is
	// being destroyed (see HandleDeletion and the early return in Remove).
	ws map[uint64]*Watch
}
// Size returns the number of watches held by w.
func (w *Watches) Size() int {
	// Read-only access: take the read lock so concurrent readers (Notify and
	// cleanupExpiredWatches already use RLock) are not needlessly serialized.
	w.mu.RLock()
	defer w.mu.RUnlock()
	return len(w.ws)
}

// Lookup returns the watch owned by an inotify instance with the given id.
// Returns nil if no such watch exists.
//
// Precondition: the inotify instance with the given id must be locked to
// prevent the returned watch from being concurrently modified or replaced in
// Inotify.watches.
func (w *Watches) Lookup(id uint64) *Watch {
	// Read-only map access; the exclusive lock is unnecessary here.
	w.mu.RLock()
	defer w.mu.RUnlock()
	return w.ws[id]
}
// Add adds watch into this set of watches.
//
// Precondition: the inotify instance with the given id must be locked.
func (w *Watches) Add(watch *Watch) {
	w.mu.Lock()
	defer w.mu.Unlock()

	owner := watch.OwnerID()
	// Sanity check, we should never have two watches for one owner on the
	// same target. (Reading a nil map is safe, so this may run before the
	// lazy initialization below.)
	if _, exists := w.ws[owner]; exists {
		panic(fmt.Sprintf("Watch collision with ID %+v", owner))
	}
	// Lazily initialize the map on first insertion.
	if w.ws == nil {
		w.ws = make(map[uint64]*Watch)
	}
	w.ws[owner] = watch
}
// Remove removes a watch with the given id from this set of watches and
// releases it. The caller is responsible for generating any watch removal
// event, as appropriate. The provided id must match an existing watch in this
// collection.
//
// Precondition: the inotify instance with the given id must be locked.
func (w *Watches) Remove(id uint64) {
	w.mu.Lock()
	defer w.mu.Unlock()

	if w.ws == nil {
		// This watch set is being destroyed. The thread executing the
		// destructor is already in the process of deleting all our watches. We
		// got here with no references on the target because we raced with the
		// destructor notifying all the watch owners of destruction. See the
		// comment in Watches.HandleDeletion for why this race exists.
		return
	}

	// It is possible for w.Remove() to be called for the same watch multiple
	// times (see the treatment of one-shot watches in Watches.Notify), so the
	// id may already be absent. delete is a no-op in that case, which makes a
	// separate existence check redundant.
	delete(w.ws, id)
}
// Notify queues a new event with watches in this set. Watches with
// IN_EXCL_UNLINK are skipped if the event is coming from a child that has been
// unlinked.
func (w *Watches) Notify(ctx context.Context, name string, events, cookie uint32, et EventType, unlinked bool) {
	// hasExpired is set when any watch reports itself as a spent one-shot;
	// those are removed after the read lock is dropped (lock ordering).
	var hasExpired bool
	w.mu.RLock()
	for _, watch := range w.ws {
		if unlinked && watch.ExcludeUnlinked() && et == PathEvent {
			continue
		}
		if watch.Notify(name, events, cookie) {
			hasExpired = true
		}
	}
	w.mu.RUnlock()

	if hasExpired {
		w.cleanupExpiredWatches(ctx)
	}
}
// cleanupExpiredWatches removes every expired one-shot watch from its owning
// inotify instance. This function is relatively expensive and should only be
// called when there are expired watches.
func (w *Watches) cleanupExpiredWatches(ctx context.Context) {
	// Because of lock ordering, we cannot acquire Inotify.mu for each watch
	// owner while holding w.mu. As a result, store expired watches locally
	// before removing.
	var toRemove []*Watch
	w.mu.RLock()
	for _, watch := range w.ws {
		if watch.expired.Load() == 1 {
			toRemove = append(toRemove, watch)
		}
	}
	w.mu.RUnlock()
	for _, watch := range toRemove {
		// RmWatch also queues the IN_IGNORED event for the removed watch.
		watch.owner.RmWatch(ctx, watch.wd)
	}
}
// HandleDeletion is called when the watch target is destroyed. Clear the
// watch set, detach watches from the inotify instances they belong to, and
// generate the appropriate events (IN_DELETE_SELF followed by IN_IGNORED per
// watch).
func (w *Watches) HandleDeletion(ctx context.Context) {
	w.Notify(ctx, "", linux.IN_DELETE_SELF, 0, InodeEvent, true /* unlinked */)

	// As in Watches.Notify, we can't hold w.mu while acquiring Inotify.mu for
	// the owner of each watch being deleted. Instead, atomically store the
	// watches map in a local variable and set it to nil so we can iterate over
	// it with the assurance that there will be no concurrent accesses.
	var ws map[uint64]*Watch
	w.mu.Lock()
	ws = w.ws
	w.ws = nil // signals the destructor race to Watches.Remove
	w.mu.Unlock()

	// Remove each watch from its owner's watch set, and generate a corresponding
	// watch removal event.
	for _, watch := range ws {
		i := watch.owner
		i.mu.Lock()
		_, found := i.watches[watch.wd]
		delete(i.watches, watch.wd)

		// Release mutex before notifying waiters because we don't control what
		// they can do.
		i.mu.Unlock()

		// If watch was not found, it was removed from the inotify instance before
		// we could get to it, in which case we should not generate an event.
		if found {
			i.queueEvent(newEvent(watch.wd, "", linux.IN_IGNORED, 0))
		}
	}
}
// Watch represents a particular inotify watch created by inotify_add_watch.
//
// +stateify savable
type Watch struct {
	// Inotify instance which owns this watch.
	//
	// This field is immutable after creation.
	owner *Inotify

	// Descriptor for this watch. This is unique across an inotify instance.
	//
	// This field is immutable after creation.
	wd int32

	// target is a dentry representing the watch target. Its watch set contains this watch.
	//
	// This field is immutable after creation.
	target *Dentry

	// Events being monitored via this watch.
	mask atomicbitops.Uint32

	// expired is set to 1 to indicate that this watch is a one-shot that has
	// already sent a notification and therefore can be removed (see
	// Watches.cleanupExpiredWatches).
	expired atomicbitops.Int32
}
// OwnerID returns the id of the inotify instance that owns this watch.
func (w *Watch) OwnerID() uint64 {
	return w.owner.id
}

// ExcludeUnlinked indicates whether the watched object should continue to be
// notified of events originating from a path that has been unlinked, i.e.
// whether IN_EXCL_UNLINK is set in the watch mask.
//
// For example, if "foo/bar" is opened and then unlinked, operations on the
// open fd may be ignored by watches on "foo" and "foo/bar" with IN_EXCL_UNLINK.
func (w *Watch) ExcludeUnlinked() bool {
	return w.mask.Load()&linux.IN_EXCL_UNLINK != 0
}
// Notify queues a new event on this watch. Returns true if this is a one-shot
// watch that should be deleted, after this event was successfully queued.
func (w *Watch) Notify(name string, events uint32, cookie uint32) bool {
	if w.expired.Load() == 1 {
		// This is a one-shot watch that is already in the process of being
		// removed. This may happen if a second event reaches the watch target
		// before this watch has been removed.
		return false
	}

	mask := w.mask.Load()
	if mask&events == 0 {
		// We weren't watching for this event.
		return false
	}

	// Event mask should include bits matched from the watch plus all control
	// event bits.
	unmaskableBits := ^uint32(0) &^ linux.IN_ALL_EVENTS
	effectiveMask := unmaskableBits | mask
	matchedEvents := effectiveMask & events
	w.owner.queueEvent(newEvent(w.wd, name, matchedEvents, cookie))
	if mask&linux.IN_ONESHOT != 0 {
		// Mark the one-shot watch as spent; it is actually removed later by
		// Watches.cleanupExpiredWatches.
		w.expired.Store(1)
		return true
	}
	return false
}
// Event represents a struct inotify_event from linux.
//
// +stateify savable
type Event struct {
	eventEntry

	wd     int32
	mask   uint32
	cookie uint32

	// len is computed based on the name field and is set automatically by
	// Event.setName. It should be 0 when no name is set; otherwise it is the
	// length of the (padded) name slice.
	len uint32

	// The name field has special padding requirements and should only be set by
	// calling Event.setName.
	name []byte
}

// newEvent constructs an Event for watch descriptor wd with the given event
// mask and cookie, setting the padded name field only when name is non-empty.
func newEvent(wd int32, name string, events, cookie uint32) *Event {
	e := &Event{
		wd:     wd,
		mask:   events,
		cookie: cookie,
	}
	if name != "" {
		e.setName(name)
	}
	return e
}
// paddedBytes converts a go string to a null-terminated c-string, padded with
// null bytes to a total size of 'l'. 'l' must be large enough for all the
// bytes of 's' plus at least one trailing null byte.
func paddedBytes(s string, l uint32) []byte {
	if l < uint32(len(s)+1) {
		panic("Converting string to byte array results in truncation, this can lead to buffer-overflow due to the missing null-byte!")
	}
	// Allocate the full l bytes up front (zeroed by make), append the string
	// into the spare capacity, then reslice back out to the padded length.
	// Everything past len(s) is already a null byte.
	out := append(make([]byte, 0, l), s...)
	return out[:l]
}
// setName sets the optional name for this event, padding it to a multiple of
// inotifyEventBaseSize and updating e.len accordingly.
func (e *Event) setName(name string) {
	// We need to pad the name such that the entire event length ends up a
	// multiple of inotifyEventBaseSize.
	unpaddedLen := len(name) + 1
	// Round up to nearest multiple of inotifyEventBaseSize. This relies on
	// inotifyEventBaseSize being a power of 2.
	e.len = uint32((unpaddedLen + inotifyEventBaseSize - 1) & ^(inotifyEventBaseSize - 1))
	// Make sure we haven't overflowed and wrapped around when rounding.
	if unpaddedLen > int(e.len) {
		panic("Overflow when rounding inotify event size, the 'name' field was too big.")
	}
	e.name = paddedBytes(name, e.len)
}

// sizeOf returns the total serialized size of the event: the fixed base
// header plus the padded name length.
func (e *Event) sizeOf() int {
	s := inotifyEventBaseSize + int(e.len)
	if s < inotifyEventBaseSize {
		panic("Overflowed event size")
	}
	return s
}
// CopyTo serializes this event to dst. buf is used as a scratch buffer to
// construct the output. We use a buffer allocated ahead of time for
// performance. buf must be at least inotifyEventBaseSize bytes.
func (e *Event) CopyTo(ctx context.Context, buf []byte, dst usermem.IOSequence) (int64, error) {
	// Fixed header: wd, mask, cookie, len — 4 bytes each.
	hostarch.ByteOrder.PutUint32(buf[0:], uint32(e.wd))
	hostarch.ByteOrder.PutUint32(buf[4:], e.mask)
	hostarch.ByteOrder.PutUint32(buf[8:], e.cookie)
	hostarch.ByteOrder.PutUint32(buf[12:], e.len)

	writeLen := 0

	n, err := dst.CopyOut(ctx, buf)
	if err != nil {
		return 0, err
	}
	writeLen += n
	dst = dst.DropFirst(n)

	if e.len > 0 {
		// The optional, already-padded name follows the header.
		n, err = dst.CopyOut(ctx, e.name)
		if err != nil {
			return 0, err
		}
		writeLen += n
	}

	// Sanity check.
	if writeLen != e.sizeOf() {
		panic(fmt.Sprintf("Serialized unexpected amount of data for an event, expected %d, wrote %d.", e.sizeOf(), writeLen))
	}

	return int64(writeLen), nil
}

// equals reports whether two events are identical; used by queueEvent to
// coalesce consecutive duplicates.
func (e *Event) equals(other *Event) bool {
	return e.wd == other.wd &&
		e.mask == other.mask &&
		e.cookie == other.cookie &&
		e.len == other.len &&
		bytes.Equal(e.name, other.name)
}
// InotifyEventFromStatMask generates the appropriate events for an operation
// that set the stats specified in mask.
func InotifyEventFromStatMask(mask uint32) uint32 {
	var ev uint32
	if mask&(linux.STATX_UID|linux.STATX_GID|linux.STATX_MODE) != 0 {
		ev |= linux.IN_ATTRIB
	}
	if mask&linux.STATX_SIZE != 0 {
		ev |= linux.IN_MODIFY
	}

	if (mask & (linux.STATX_ATIME | linux.STATX_MTIME)) == (linux.STATX_ATIME | linux.STATX_MTIME) {
		// Both times indicates a utime(s) call.
		ev |= linux.IN_ATTRIB
	} else if mask&linux.STATX_ATIME != 0 {
		ev |= linux.IN_ACCESS
	} else if mask&linux.STATX_MTIME != 0 {
		// mtime alone (without atime) reads as a modification.
		ev |= linux.IN_MODIFY
	}
	return ev
}
// InotifyRemoveChild sends the appropriate notifications to the watch sets of
// the child being removed and its parent. Note that unlike most pairs of
// parent/child notifications, the child is notified first in this case.
func InotifyRemoveChild(ctx context.Context, self, parent *Watches, name string) {
	if self != nil {
		self.Notify(ctx, "", linux.IN_ATTRIB, 0, InodeEvent, true /* unlinked */)
	}
	if parent != nil {
		parent.Notify(ctx, name, linux.IN_DELETE, 0, InodeEvent, true /* unlinked */)
	}
}

// InotifyRename sends the appropriate notifications to the watch sets of the
// file being renamed and its old/new parents.
func InotifyRename(ctx context.Context, renamed, oldParent, newParent *Watches, oldName, newName string, isDir bool) {
	var dirEv uint32
	if isDir {
		dirEv = linux.IN_ISDIR
	}
	// The same cookie links the MOVED_FROM/MOVED_TO pair.
	cookie := uniqueid.InotifyCookie(ctx)
	if oldParent != nil {
		oldParent.Notify(ctx, oldName, dirEv|linux.IN_MOVED_FROM, cookie, InodeEvent, false /* unlinked */)
	}
	if newParent != nil {
		newParent.Notify(ctx, newName, dirEv|linux.IN_MOVED_TO, cookie, InodeEvent, false /* unlinked */)
	}
	// Somewhat surprisingly, self move events do not have a cookie.
	if renamed != nil {
		renamed.Notify(ctx, "", linux.IN_MOVE_SELF, 0, InodeEvent, false /* unlinked */)
	}
}
|
package main
import (
"fmt"
"github.com/xeb/backq/modules/private"
"gopkg.in/alecthomas/kingpin.v2"
)
// Command-line flags (all required); parsed in main via kingpin.Parse.
var (
	// reqport is the 0MQ port for publishing requests to bqprivate.
	reqport = kingpin.Flag("request_port", "The 0MQ port for publishing requests to bqprivate, e.g. a value of 20000 means binding to 'tcp://*:20000'").Required().Int()
	// repport is the 0MQ port for listening for replies from bqprivate.
	repport = kingpin.Flag("reply_port", "The 0MQ port for listening for replies from bqprivate").Required().Int()
	// publichost is the host name or IP address of the bqpublic server.
	publichost = kingpin.Flag("public_host", "The host name or IP address of the bqpublic server").Required().String()
)
// main parses the flags, builds the two 0MQ endpoint addresses from the
// public host and ports, and hands them to private.Subscribe.
func main() {
	kingpin.Parse()

	requestAddr := fmt.Sprintf("tcp://%s:%d", *publichost, *reqport)
	replyAddr := fmt.Sprintf("tcp://%s:%d", *publichost, *repport)

	fmt.Printf("[PRIVATE] Binding request-0mq channel to '%s'\n", requestAddr)
	fmt.Printf("[PRIVATE] Binding reply-0mq channel to '%s'\n", replyAddr)

	private.Subscribe(requestAddr, replyAddr)
}
|
package safemap
// SafeMap is a map safe for concurrent use: every operation is forwarded to
// a single owning goroutine over a channel (see safeMap.run), so callers need
// no external locking.
type SafeMap interface {
	// Each iterates over all entries until the callback returns false and
	// returns the number of entries visited.
	Each(EachFunc) int
	// Set stores a value under a key.
	Set(interface{}, interface{})
	// Del removes a key.
	Del(interface{})
	// Get returns the value for a key and whether it was present.
	Get(interface{}) (interface{}, bool)
	// GetInterface returns the value for a key, or nil when absent.
	GetInterface(interface{}) interface{}
	// GetInt returns a *int, or nil when absent or not an int.
	GetInt(interface{}) *int
	// GetInt64 returns a *int64, or nil when absent or not an int64.
	GetInt64(interface{}) *int64
	// GetString returns a *string, or nil when absent or not a string.
	GetString(interface{}) *string
	// GetBool returns a *bool, or nil when absent or not a bool.
	GetBool(interface{}) *bool
	// Len returns the number of stored entries.
	Len() int
	// Update atomically transforms the value under a key via UpdateFunc.
	Update(interface{}, UpdateFunc)
	// Close stops the owning goroutine and returns the final contents.
	Close() map[interface{}]interface{}
	// MultiGet returns the present subset of the given keys with their values.
	MultiGet(keys map[interface{}]interface{}) map[interface{}]interface{}
	// MultiSet stores all given key/value pairs.
	MultiSet(keys map[interface{}]interface{})
	// SumI adds an int delta to a key's value (missing treated as zero).
	SumI(interface{}, int)
	// SumF adds a float64 delta to a key's value (missing treated as zero).
	SumF(interface{}, float64)
	// Inc increments the int value under a key by one.
	Inc(interface{})
	// Dec decrements the int value under a key by one.
	Dec(interface{})
}
// itemRes is a single key/value result (or request) exchanged between the
// API methods and the owning goroutine.
type itemRes struct {
	key   interface{}
	value interface{}
	found bool // whether the key was present in the store
}

// commandData is one request to the owning goroutine; which fields are
// meaningful depends on action.
type commandData struct {
	itemRes
	action  commandAction
	result  chan<- itemRes // reply channel for get/length/each
	items   map[interface{}]interface{} // bulk payload for mget/mset
	data    chan<- map[interface{}]interface{} // reply channel for end/mget
	updater UpdateFunc // callback for update
}

// safeMap is the concrete SafeMap implementation: a channel of commands
// consumed by run().
type safeMap chan commandData

// commandAction enumerates the operations understood by run().
type commandAction int

const (
	rem commandAction = iota // delete one key
	end // shut down and hand back the store
	get // read one key
	mget // read many keys
	mset // write many keys
	set // write one key
	length // report the entry count
	update // transform one value via UpdateFunc
	each // stream all entries
)

// UpdateFunc receives the current value (and whether it existed) and returns
// the new value to store.
type UpdateFunc func(interface{}, bool) interface{}

// EachFunc receives a key, its value, and the running count; returning false
// stops iteration.
type EachFunc func(key interface{}, val interface{}, cnt int) bool
// SumF adds delta to the float64 value stored under key, treating a missing
// key as zero.
func (sm safeMap) SumF(key interface{}, delta float64) {
	sm.Update(key, func(val interface{}, found bool) interface{} {
		if !found {
			return delta
		}
		return val.(float64) + delta
	})
}

// SumI adds delta to the int value stored under key, treating a missing key
// as zero.
func (sm safeMap) SumI(key interface{}, delta int) {
	sm.Update(key, func(val interface{}, found bool) interface{} {
		if !found {
			return delta
		}
		return val.(int) + delta
	})
}

// Inc increments the int value stored under key by one.
func (sm safeMap) Inc(key interface{}) {
	sm.SumI(key, 1)
}

// Dec decrements the int value stored under key by one.
func (sm safeMap) Dec(key interface{}) {
	sm.SumI(key, -1)
}
// Each calls fn for every key/value pair until fn returns false or the store
// is exhausted, and returns the number of invocations made.
//
// NOTE(review): the reply channel is buffered with sm.Len() taken *before*
// the each command is queued; entries added in between can make run() block
// or, after the early close below, panic on a send — that panic is swallowed
// by the recover() in run()'s each case. Fragile but apparently deliberate;
// confirm before changing.
func (sm safeMap) Each(fn EachFunc) int {
	n := 0
	reply := make(chan itemRes, sm.Len())
	sm <- commandData{action: each, result: reply}
	for itm := range reply {
		// found == false is the end-of-iteration sentinel sent by run().
		if itm.found == false {
			break
		}
		if !fn(itm.key, itm.value, n) {
			break
		}
		n++
	}
	close(reply)
	return n
}
// Update atomically applies fn to the value stored under key.
func (sm safeMap) Update(key interface{}, fn UpdateFunc) {
	cmd := commandData{action: update, updater: fn, itemRes: itemRes{key: key}}
	sm <- cmd
}

// Set stores value under key.
func (sm safeMap) Set(key interface{}, value interface{}) {
	cmd := commandData{action: set, itemRes: itemRes{key: key, value: value}}
	sm <- cmd
}

// Del removes key from the map.
func (sm safeMap) Del(key interface{}) {
	cmd := commandData{action: rem, itemRes: itemRes{key: key}}
	sm <- cmd
}

// Get returns the value stored under key and whether it was present.
func (sm safeMap) Get(key interface{}) (value interface{}, found bool) {
	reply := make(chan itemRes)
	sm <- commandData{action: get, result: reply, itemRes: itemRes{key: key}}
	res := <-reply
	close(reply)
	return res.value, res.found
}
// GetInterface returns the value stored under key, or nil when absent.
func (sm safeMap) GetInterface(key interface{}) interface{} {
	value, ok := sm.Get(key)
	if !ok {
		return nil
	}
	return value
}

// GetBool returns a pointer to the bool stored under key, or nil when the
// key is absent or holds a non-bool value.
func (sm safeMap) GetBool(key interface{}) *bool {
	raw, ok := sm.Get(key)
	if !ok {
		return nil
	}
	v, isBool := raw.(bool)
	if !isBool {
		return nil
	}
	return &v
}

// GetInt returns a pointer to the int stored under key, or nil when the key
// is absent or holds a non-int value.
func (sm safeMap) GetInt(key interface{}) *int {
	raw, ok := sm.Get(key)
	if !ok {
		return nil
	}
	v, isInt := raw.(int)
	if !isInt {
		return nil
	}
	return &v
}

// GetInt64 returns a pointer to the int64 stored under key, or nil when the
// key is absent or holds a non-int64 value.
func (sm safeMap) GetInt64(key interface{}) *int64 {
	raw, ok := sm.Get(key)
	if !ok {
		return nil
	}
	v, isInt64 := raw.(int64)
	if !isInt64 {
		return nil
	}
	return &v
}

// GetString returns a pointer to the string stored under key, or nil when
// the key is absent or holds a non-string value.
func (sm safeMap) GetString(key interface{}) *string {
	raw, ok := sm.Get(key)
	if !ok {
		return nil
	}
	v, isString := raw.(string)
	if !isString {
		return nil
	}
	return &v
}
// Len reports the number of entries currently stored.
func (sm safeMap) Len() int {
	reply := make(chan itemRes)
	sm <- commandData{action: length, result: reply}
	res := <-reply
	return res.value.(int)
}

// Close shuts down the owning goroutine and returns the final contents.
func (sm safeMap) Close() map[interface{}]interface{} {
	contents := make(chan map[interface{}]interface{})
	sm <- commandData{action: end, data: contents}
	return <-contents
}

// MultiGet returns the subset of keys that are present, with their values.
func (sm safeMap) MultiGet(keys map[interface{}]interface{}) map[interface{}]interface{} {
	contents := make(chan map[interface{}]interface{})
	sm <- commandData{action: mget, items: keys, data: contents}
	return <-contents
}

// MultiSet stores every key/value pair from keys.
func (sm safeMap) MultiSet(keys map[interface{}]interface{}) {
	sm <- commandData{action: mset, items: keys}
}
// run is the event loop that owns the backing map. It runs in its own
// goroutine (started by New) and serializes all access to the store by
// processing one command at a time.
//
// Fix: `for key, _ := range` replaced with the idiomatic `for key := range`.
func (sm safeMap) run() {
	store := make(map[interface{}]interface{})
	for command := range sm {
		switch command.action {
		case set:
			store[command.key] = command.value
		case rem:
			delete(store, command.key)
		case get:
			value, found := store[command.key]
			command.result <- itemRes{nil, value, found}
		case length:
			command.result <- itemRes{nil, len(store), true}
		case update:
			// Apply the caller-supplied updater to the current value (or the
			// zero value when absent) and store the result.
			value, found := store[command.key]
			store[command.key] = command.updater(value, found)
		case end:
			// Close the channel first so the range loop drains and exits,
			// then hand the final map contents back to Close().
			close(sm)
			command.data <- store
		case mget:
			out := make(map[interface{}]interface{})
			for key := range command.items {
				if val, f := store[key]; f {
					out[key] = val
				}
			}
			command.data <- out
		case mset:
			for key, val := range command.items {
				store[key] = val
			}
		case each:
			func() {
				// Each() may close the reply channel early when its callback
				// stops iteration; recover from the resulting send panic.
				defer func() { recover() }()
				for i := range store {
					command.result <- itemRes{key: i, value: store[i], found: true}
				}
				// Sentinel signalling end of iteration.
				command.result <- itemRes{nil, nil, false}
			}()
		}
	}
}
// New creates a SafeMap backed by a command channel with the given buffer
// size and starts the goroutine that owns the underlying map.
func New(bufsize int) SafeMap {
	sm := make(safeMap, bufsize) // safeMap is `chan commandData`
	go sm.run()
	return sm
}
|
package main
import (
"context"
"fmt"
"os"
)
// main builds a GitHub client, fetches repositories via
// RepositoryListByReviewRequest, and renders them with the view; any fetch
// error is printed and exits with status 1.
func main() {
	ctx := context.Background()
	ghClient := NewGithubClient(ctx)

	repos, err := RepositoryListByReviewRequest(ctx, ghClient)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	NewView(repos).Show()
}
|
package pagination
import (
"github.com/gin-gonic/gin"
"net/url"
)
// paginationRenderData carries everything the pagination template needs to
// render the pager widget.
type paginationRenderData struct {
	URL                string // root URL for page links (the page number is appended by the template)
	CurrentPage        int    // current page number
	OnFirstPage        bool   // whether we are on the first page
	HasMorePages       bool   // whether more pages follow the current one
	Elements           []int  // page numbers rendered as buttons
	PreviousButtonText string // label of the "previous page" button
	PreviousPageIndex  int    // page number the "previous page" button points at
	NextButtonText     string // label of the "next page" button
	NextPageIndex      int    // page number the "next page" button points at
}
// CreatePaginationFillToTplData builds the data the pagination template
// needs and merges it into otherData under the "pagination" key, returning
// the same map.
func CreatePaginationFillToTplData(c *gin.Context, pageQueryKeyName string, currentPage, totalPage int, otherData map[string]interface{}) map[string]interface{} {
	// Preserve every query parameter except the page number itself, so page
	// links keep the caller's filters.
	preserved := url.Values{}
	for key, values := range c.Request.URL.Query() {
		if key == pageQueryKeyName {
			continue
		}
		preserved.Add(key, values[0])
	}

	prefix := preserved.Encode()
	if prefix != "" {
		prefix += "&"
	}

	otherData["pagination"] = paginationRenderData{
		URL:                c.Request.URL.Path + "?" + prefix + pageQueryKeyName + "=",
		CurrentPage:        currentPage,
		OnFirstPage:        currentPage == 1,
		HasMorePages:       currentPage != totalPage,
		Elements:           countStartAndEndPageIndex(currentPage, totalPage, 3),
		PreviousButtonText: "前一页",
		PreviousPageIndex:  currentPage - 1,
		NextButtonText:     "下一页",
		NextPageIndex:      currentPage + 1,
	}
	return otherData
}
// countStartAndEndPageIndex returns the inclusive range of page numbers to
// render as buttons, keeping up to baseOnCurrentPageButtonOffset buttons on
// each side of the current page and clamping the window to [1, totalPage].
func countStartAndEndPageIndex(currentPage, totalPage, baseOnCurrentPageButtonOffset int) []int {
	maxButtons := baseOnCurrentPageButtonOffset*2 + 1
	var start, end int
	if currentPage > baseOnCurrentPageButtonOffset {
		// Far enough from page 1: center the window on the current page.
		start = currentPage - baseOnCurrentPageButtonOffset
		end = currentPage + baseOnCurrentPageButtonOffset
		if end > totalPage {
			end = totalPage
		}
	} else {
		// Near the beginning: anchor the window at page 1.
		start = 1
		end = maxButtons
		if end > totalPage {
			end = totalPage
		}
	}
	// Near the end: shift the window left so it stays full-width.
	if currentPage+baseOnCurrentPageButtonOffset > totalPage {
		start -= currentPage + baseOnCurrentPageButtonOffset - end
	}
	if start <= 0 {
		start = 1
	}
	result := make([]int, 0)
	for p := start; p <= end; p++ {
		result = append(result, p)
	}
	return result
}
|
package server
import (
"github.com/mercadolibre/time-zone-front/src/api/controller"
"github.com/mercadolibre/time-zone-front/src/api/service"
)
// Controllers aggregates every HTTP controller exposed by the server.
type Controllers struct {
	TimeZoneController controller.TimeZoneController
}
// AppendControllers builds the Controllers container with all of its
// controller dependencies wired up.
func AppendControllers() *Controllers {
	return &Controllers{
		TimeZoneController: *newTZController(),
	}
}
// newTZController constructs a TimeZoneController wired to a fresh
// time-zones service.
func newTZController() *controller.TimeZoneController {
	c := &controller.TimeZoneController{}
	c.TimeZonesServiceInterface = newtimeZonesServiceInterface()
	return c
}
// newtimeZonesServiceInterface returns a zero-value TimeZonesService.
func newtimeZonesServiceInterface() *service.TimeZonesService {
	var svc service.TimeZonesService
	return &svc
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package wifi
import (
"context"
"chromiumos/tast/common/wifi/security/wpa"
"chromiumos/tast/remote/wificell"
"chromiumos/tast/remote/wificell/hostapd"
"chromiumos/tast/testing"
)
// init registers the MultiAuth test with the tast testing framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: MultiAuth,
		Desc: "Tests the ability to select network correctly among APs with similar network configurations, by configuring two APs with the same SSID/channel/mode but different security config and connecting to each in turn",
		Contacts: []string{
			"chromeos-wifi-champs@google.com", // WiFi oncall rotation; or http://b/new?component=893827
		},
		Attr: []string{"group:wificell", "wificell_func"},
		ServiceDeps: []string{wificell.TFServiceName},
		Fixture: "wificellFixtWithCapture",
	})
}
// MultiAuth configures two APs sharing SSID/channel/mode but differing in
// security (open vs pure-WPA) and verifies the DUT connects correctly to
// each in turn.
func MultiAuth(ctx context.Context, s *testing.State) {
	tf := s.FixtValue().(*wificell.TestFixture)
	// Both APs share these options; only the security config differs.
	apOpts := []hostapd.Option{hostapd.SSID(hostapd.RandomSSID("TAST_TEST_MultiAuth")), hostapd.Mode(hostapd.Mode80211g), hostapd.Channel(1)}
	wpaCfg := wpa.NewConfigFactory("chromeos", wpa.Mode(wpa.ModePureWPA), wpa.Ciphers(wpa.CipherCCMP))
	s.Log("Configuring AP 0 (Open)")
	ap0, err := tf.ConfigureAP(ctx, apOpts, nil)
	if err != nil {
		s.Fatal("Failed to configure AP 0: ", err)
	}
	// The deferred deconfig captures the pre-reservation ctx so it still
	// has the time reserved for cleanup below.
	defer func(ctx context.Context) {
		if err := tf.DeconfigAP(ctx, ap0); err != nil {
			s.Error("Failed to deconfig AP 0: ", err)
		}
	}(ctx)
	ctx, cancel := tf.ReserveForDeconfigAP(ctx, ap0)
	defer cancel()
	s.Log("Configuring AP 1 (WPA)")
	ap1, err := tf.ConfigureAP(ctx, apOpts, wpaCfg)
	if err != nil {
		s.Fatal("Failed to configure AP 1: ", err)
	}
	defer func(ctx context.Context) {
		if err := tf.DeconfigAP(ctx, ap1); err != nil {
			s.Error("Failed to deconfig AP 1: ", err)
		}
	}(ctx)
	ctx, cancel = tf.ReserveForDeconfigAP(ctx, ap1)
	defer cancel()
	s.Log("Connecting to AP 0")
	if _, err := tf.ConnectWifiAP(ctx, ap0); err != nil {
		s.Fatal("Failed to connect to AP 0: ", err)
	}
	// A single disconnect cleanup covers both connections; connecting to
	// AP 1 below replaces the association with AP 0.
	defer func(ctx context.Context) {
		if err := tf.CleanDisconnectWifi(ctx); err != nil {
			s.Error("Failed to disconnect WiFi: ", err)
		}
	}(ctx)
	ctx, cancel = tf.ReserveForDisconnect(ctx)
	defer cancel()
	s.Log("Verifying connection to AP 0")
	if err := tf.VerifyConnection(ctx, ap0); err != nil {
		s.Fatal("Failed to verify connection: ", err)
	}
	s.Log("Connecting to AP 1")
	if _, err := tf.ConnectWifiAP(ctx, ap1); err != nil {
		s.Fatal("Failed to connect to AP 1: ", err)
	}
	s.Log("Verifying connection to AP 1")
	if err := tf.VerifyConnection(ctx, ap1); err != nil {
		s.Fatal("Failed to verify connection: ", err)
	}
}
|
package models
import (
"time"
// import mysql driver
_ "github.com/jinzhu/gorm/dialects/mysql"
)
// TournamentSignup represents one player's signup row for a tournament.
// Timestamps and the soft-delete marker are managed by gorm and hidden
// from JSON output.
type TournamentSignup struct {
	ID uint `gorm:"primary_key" json:"id"`
	CreatedAt time.Time `json:"-"`
	UpdatedAt time.Time `json:"-"`
	DeletedAt *time.Time `sql:"index" json:"-"`
	Team string `json:"team"`
	Name string `json:"name"`
	IGN string `json:"ign"`
	Leader bool `json:"leader"`
	Email string `json:"email"`
	IDTournament uint `json:"id_tournament"`
}
// TournamentSignupManager provides CRUD operations over TournamentSignup
// rows through the wrapped database handle.
type TournamentSignupManager struct {
	db *DB
}
// NewTournamentSignupManager creates a *TournamentSignupManager that can be
// used for managing all the tournament signups, running schema
// auto-migration first. Migration failures are now reported instead of
// being silently dropped (the error return was previously always nil).
func NewTournamentSignupManager(db *DB) (*TournamentSignupManager, error) {
	if err := db.AutoMigrate(&TournamentSignup{}).Error; err != nil {
		return nil, err
	}
	return &TournamentSignupManager{db: db}, nil
}
// Create inserts a new TournamentSignup row and returns any database error.
func (state TournamentSignupManager) Create(t *TournamentSignup) error {
	return state.db.Create(t).Error
}
// FindAll returns every tournament signup in the database.
func (state TournamentSignupManager) FindAll() ([]TournamentSignup, error) {
	signups := []TournamentSignup{}
	err := state.db.Find(&signups).Error
	return signups, err
}
// FindTeams returns the team names signed up for the tournament with the
// given id.
func (state TournamentSignupManager) FindTeams(id uint) ([]string, error) {
	teams := []string{}
	err := state.db.Model(&TournamentSignup{}).Where("id_tournament = ?", id).Pluck("team", &teams).Error
	return teams, err
}
// FindAllTeams returns every signup row belonging to the tournament with
// the given id.
func (state TournamentSignupManager) FindAllTeams(id uint) ([]TournamentSignup, error) {
	signups := []TournamentSignup{}
	err := state.db.Where("id_tournament = ?", id).Find(&signups).Error
	return signups, err
}
// Exists reports whether a signup with the given team name is present.
// Note: any error from the lookup (including connection failures, not only
// "record not found") is treated as "does not exist".
func (state TournamentSignupManager) Exists(name string) bool {
	err := state.db.Where("team=?", name).Find(&TournamentSignup{}).Error
	return err == nil
}
// Delete removes an existing tournament signup from the database.
// NOTE(review): &t here is a **TournamentSignup since t is already a
// pointer — presumably gorm resolves the double indirection; confirm.
func (state TournamentSignupManager) Delete(t *TournamentSignup) error {
	return state.db.Delete(&t).Error
}
// DeleteWithTournamentID is a wrapper for Delete that allows deleting all
// signups of a tournament by the tournament id instead of a signup struct.
func (state TournamentSignupManager) DeleteWithTournamentID(id uint) error {
	return state.db.Where("id_tournament = ?", id).Delete(TournamentSignup{}).Error
}
|
package FlatFS
// UUIDToQuery associates a file's uuid with the parsed key/value query
// that referenced it.
type UUIDToQuery struct {
	uuid string
	querykeyValue QueryKeyValue
}
// QueryKeyValue wraps the key/value attribute pairs extracted from a query.
type QueryKeyValue struct {
	keyValue map[string]string
}
// QueryType records which kind of FlatFS operation a parsed path
// represents; presumably exactly one flag is set at a time (not enforced
// by the type itself).
type QueryType struct {
	addSpec bool
	querySpec bool
	replaceSpec bool
	deleteSpec bool
	fileSpec bool
	emptyType bool
}
|
// This file is part of CycloneDX GoMod
//
// Licensed under the Apache License, Version 2.0 (the “License”);
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an “AS IS” BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) OWASP Foundation. All Rights Reserved.
package sbom
import (
"testing"
cdx "github.com/CycloneDX/cyclonedx-go"
"github.com/stretchr/testify/require"
)
// TestCalculateFileHashes exercises CalculateFileHashes against the
// repository's NOTICE file for every supported algorithm, and checks the
// error path for unsupported algorithms and the empty-algorithm-list case.
func TestCalculateFileHashes(t *testing.T) {
	t.Run("AllSupported", func(t *testing.T) {
		algos := []cdx.HashAlgorithm{
			cdx.HashAlgoMD5,
			cdx.HashAlgoSHA1,
			cdx.HashAlgoSHA256,
			cdx.HashAlgoSHA384,
			cdx.HashAlgoSHA512,
			cdx.HashAlgoSHA3_256,
			cdx.HashAlgoSHA3_512,
		}
		// Expected digests are pinned to the current content of NOTICE;
		// editing that file will break this test.
		hashes, err := CalculateFileHashes("../../NOTICE", algos...) // TODO: use another file (create a tempfile?)
		require.NoError(t, err)
		require.Len(t, hashes, 7)
		require.Equal(t, "90b8bc82c30341e88830b0ea82f18548", hashes[0].Value)
		require.Equal(t, "8767825dace783fb1570510e21ab84ad59baa39c", hashes[1].Value)
		require.Equal(t, "02fa11d51d573ee6f4e1133cb4b5c7b8ade1eeadb951875dfc2a67c0122add65", hashes[2].Value)
		require.Equal(t, "3200f7c24a80080a7d7979aaaad480749b1fc5b07f0609749d47004c7e39265569ed17b2db5eea1f961543cc7a9627f2", hashes[3].Value)
		require.Equal(t, "afef70a115ee95c3e7d966322898909964399186b9cdd877b5d7ea12352b2b5f8b54902e674875be0fc84affe86d28fdca7893b5e7da45241f3e1e646ab0f32b", hashes[4].Value)
		require.Equal(t, "436042da3bf8a7b9bebeed1913c8e6ebf3b800aaaa1864690351754ece07caea", hashes[5].Value)
		require.Equal(t, "cb6b4798adf21d3604dbf089f410edb1d2be31d958d2c859a3bf64a7c3d8b8df29c2218a47e80e026e44ff2932771123a8e5ea9019b18bdce7a0781d4379dd9a", hashes[6].Value)
	})
	t.Run("UnsupportedAlgorithm", func(t *testing.T) {
		algos := []cdx.HashAlgorithm{
			cdx.HashAlgoBlake2b_256,
			cdx.HashAlgoBlake2b_384,
			cdx.HashAlgoBlake2b_512,
			cdx.HashAlgoBlake3,
		}
		for _, algo := range algos {
			t.Run(string(algo), func(t *testing.T) {
				_, err := CalculateFileHashes("", algo)
				require.Error(t, err)
				require.Contains(t, err.Error(), "unsupported hash algorithm")
			})
		}
	})
	t.Run("NoAlgorithms", func(t *testing.T) {
		// With no algorithms requested, no file access is needed and the
		// result is empty.
		hashes, err := CalculateFileHashes("")
		require.NoError(t, err)
		require.Empty(t, hashes)
	})
}
// TestNewProperty verifies that NewProperty namespaces the property name
// with the "cdx:gomod:" prefix and passes the value through unchanged.
func TestNewProperty(t *testing.T) {
	prop := NewProperty("name", "value")
	require.Equal(t, "cdx:gomod:name", prop.Name)
	require.Equal(t, "value", prop.Value)
}
|
// Package libasciidoc is an open source Go library that converts Asciidoc
// content into HTML.
package libasciidoc
import (
"io"
"os"
"strings"
"time"
"github.com/bytesparadise/libasciidoc/pkg/configuration"
"github.com/bytesparadise/libasciidoc/pkg/parser"
"github.com/bytesparadise/libasciidoc/pkg/renderer"
"github.com/bytesparadise/libasciidoc/pkg/types"
"github.com/bytesparadise/libasciidoc/pkg/validator"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
var (
// BuildCommit lastest build commit (set by Makefile)
BuildCommit = ""
// BuildTag if the `BuildCommit` matches a tag
BuildTag = ""
// BuildTime set by build script (set by Makefile)
BuildTime = ""
)
// ConvertFile converts the content of the given filename into an output document.
// The conversion result is written in the given writer `output`, whereas the document metadata (title, etc.) (or an error if a problem occurred) is returned
// as the result of the function call. The output format is determined by config.Backend (HTML5 default).
func ConvertFile(output io.Writer, config *configuration.Configuration) (types.Metadata, error) {
	file, err := os.Open(config.Filename)
	if err != nil {
		return types.Metadata{}, errors.Wrapf(err, "error opening %s", config.Filename)
	}
	defer file.Close()
	// use the file mtime as the `last updated` value
	stat, err := os.Stat(config.Filename)
	if err != nil {
		// Fixed: a stat failure was previously reported as "error opening".
		return types.Metadata{}, errors.Wrapf(err, "error reading stats of %s", config.Filename)
	}
	config.LastUpdated = stat.ModTime()
	return Convert(file, output, config)
}
// Convert converts the content of the given reader `source` into a full output document, written in the given writer `output`.
// Returns an error if a problem occurred. The default will be HTML5, but depends on the config.BackEnd value.
func Convert(source io.Reader, output io.Writer, config *configuration.Configuration) (types.Metadata, error) {
	// Phase timestamps for the timing log emitted on return.
	// (Fixed local-variable typo: emdOfParse -> endOfParse.)
	var start, endOfPreprocess, endOfParse, endOfValidate, endOfRender time.Time
	start = time.Now()
	defer func() {
		log.Infof("time to preprocess %d microseconds", endOfPreprocess.Sub(start).Microseconds())
		log.Infof("time to parse %d microseconds", endOfParse.Sub(endOfPreprocess).Microseconds())
		log.Infof("time to validate %d microseconds", endOfValidate.Sub(endOfParse).Microseconds())
		log.Infof("time to render %d microseconds", endOfRender.Sub(endOfValidate).Microseconds())
		log.Infof("total time %d microseconds", endOfRender.Sub(start).Microseconds())
	}()
	p, err := parser.Preprocess(source, config)
	if err != nil {
		return types.Metadata{}, err
	}
	endOfPreprocess = time.Now()
	// log.Debugf("parsing the asciidoc source...")
	doc, err := parser.ParseDocument(strings.NewReader(p), config)
	if err != nil {
		return types.Metadata{}, err
	}
	endOfParse = time.Now()
	// validate the document
	doctype := config.Attributes.GetAsStringWithDefault(types.AttrDocType, "article")
	problems, err := validator.Validate(doc, doctype)
	if err != nil {
		return types.Metadata{}, err
	}
	endOfValidate = time.Now()
	if len(problems) > 0 {
		// if any problem found, change the doctype to render the document as a regular article
		log.Warnf("changing doctype to 'article' because problems were found in the document: %v", problems)
		config.Attributes[types.AttrDocType] = "article" // switch to `article` rendering (in case it was a manpage with problems)
		for _, problem := range problems {
			switch problem.Severity {
			case validator.Error:
				log.Error(problem.Message)
			case validator.Warning:
				log.Warn(problem.Message)
			}
		}
	}
	// render
	metadata, err := renderer.Render(doc, config, output)
	if err != nil {
		return types.Metadata{}, err
	}
	endOfRender = time.Now()
	// log.Debugf("Done processing document")
	return metadata, nil
}
|
/*
* Copyright (c) 2018-present unTill Pro, Ltd. and Contributors
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*
*/
package cli
import (
"fmt"
"io/ioutil"
"path/filepath"
"strings"
"github.com/spf13/cobra"
gc "github.com/untillpro/gochips"
"gopkg.in/yaml.v2"
)
// Swarm roles recorded per node in nodes.yml.
const (
	worker = "worker"
	manager = "manager"
	leader = "leader"
)
// mode selects manager installation; presumably bound to the command's -m
// flag elsewhere in this package — confirm.
var mode bool
// AddToSwarm joins nodes from nodes.yml to the docker swarm. With manager
// set, the aliases in args become manager nodes (initializing the swarm
// first if no leader exists yet); otherwise all remaining nodes join as
// workers. The updated node list is persisted back to nodes.yml.
func AddToSwarm(manager bool, args []string) {
	clusterFile := unmarshalClusterYml()
	nodesFromYml := getNodesFromYml(getWorkingDir())
	gc.ExitIfFalse(len(nodesFromYml) > 0, "Can't find nodes from nodes.yml. Add some nodes first")
	// Index nodes by host so join results can be merged back in below.
	nodeHostAndNode := make(map[string]node)
	for _, value := range nodesFromYml {
		nodeHostAndNode[value.Host] = value
	}
	clusterLeaderNode, clusterManagerNodes, clusterWorkerNodes := getHostsFromNodesGroupingBySwarmModeValue(nodesFromYml)
	// No leader recorded means the swarm must be initialized first, which
	// is only allowed in manager mode with at least one alias.
	if clusterLeaderNode == (node{}) {
		gc.ExitIfFalse(manager, "Use `-manager` flag to init swarm")
		gc.ExitIfFalse(len(args) > 0, "Need to pass at least one alias to init swarm")
	}
	// Collect the nodes that are not part of the swarm yet.
	nodesWithoutSwarm := make([]node, 0, len(nodesFromYml))
	for _, nodeFromYml := range nodesFromYml {
		if nodeFromYml == clusterLeaderNode || containsNode(clusterManagerNodes, nodeFromYml) ||
			containsNode(clusterWorkerNodes, nodeFromYml) {
			if contains(args, nodeFromYml.Alias) {
				gc.Info(nodeFromYml.Alias + " already in swarm")
			}
			continue
		}
		// In manager mode only the explicitly named aliases are joined.
		if manager {
			if ok := contains(args, nodeFromYml.Alias); !ok {
				continue
			}
		}
		nodesWithoutSwarm = append(nodesWithoutSwarm, nodeFromYml)
	}
	gc.ExitIfFalse(len(nodesWithoutSwarm) > 0, "All nodes already in swarm")
	var nodeVar node
	if clusterLeaderNode == (node{}) {
		nodeVar, nodesWithoutSwarm = initSwarm(nodesWithoutSwarm, args,
			clusterFile)
		nodeHostAndNode[nodeVar.Host] = nodeVar
		clusterLeaderNode = nodeVar
	}
	// Join the remaining nodes concurrently; each goroutine reports its
	// (possibly failed) node back over the channel.
	var channelForNodes = make(chan nodeAndError)
	for _, currentNode := range nodesWithoutSwarm {
		go func(nodeVar node) {
			nodeFromGoroutine, err := joinToSwarm(nodeVar, clusterLeaderNode.Host, clusterFile, manager)
			nodeFromFunc := nodeAndError{
				nodeFromGoroutine,
				err,
			}
			channelForNodes <- nodeFromFunc
		}(currentNode)
	}
	errMsgs := make([]string, 0, len(args))
	for range nodesWithoutSwarm {
		nodeWithPossibleError := <-channelForNodes
		node := nodeWithPossibleError.nodeWithPossibleError
		err := nodeWithPossibleError.err
		if err != nil {
			errMsgs = append(errMsgs, fmt.Sprintf("Host: %v, returns error: %v", node.Host,
				err.Error()))
		}
		// Bug fix: key by the host of the node actually received. Results
		// arrive in arbitrary order, so keying by the loop variable could
		// attach a result to the wrong host.
		nodeHostAndNode[node.Host] = node
	}
	for _, errMsg := range errMsgs {
		gc.Info(errMsg)
	}
	close(channelForNodes)
	// Flatten the host map back into a slice and persist it to nodes.yml.
	nodes := make([]node, len(nodeHostAndNode))
	i := 0
	for _, value := range nodeHostAndNode {
		nodes[i] = value
		i++
	}
	marshaledNode, err := yaml.Marshal(&nodes)
	gc.ExitIfError(err)
	nodesFilePath := filepath.Join(getWorkingDir(), nodesFileName)
	gc.ExitIfError(ioutil.WriteFile(nodesFilePath, marshaledNode, 0600))
	gc.ExitIfFalse(len(errMsgs) == 0, "Failed to install on some node(s)")
}
// swarmCmd represents the swarm command.
// "swarm -m <Alias1> <Alias2>" installs managers on the given nodes; plain
// "swarm" installs workers on the remaining nodes (a manager must already
// exist for that).
var swarmCmd = &cobra.Command{
	Use: "swarm -m <Alias1> <Alias2> or swarm without params (you should create one manager before doing that)",
	Short: "swarm -m installs managers on given node, swarm installs workers",
	Long: `swarm with -m installs swarm manager nodes on given Aliases, swarm installs swarm workers on other nodes in
cluster`,
	Run: func(cmd *cobra.Command, args []string) {
		initCommand("swarm")
		defer finitCommand()
		// Manager mode needs at least one alias to act on.
		if mode && len(args) == 0 {
			gc.Fatal("Need at least one node alias")
		}
		checkSSHAgent()
		AddToSwarm(mode, args)
	},
}
// getToken asks the swarm leader at host for a join token of the given
// mode ("worker" or "manager") and returns the full join command with the
// advertise and data-path addresses pointed at targetHost.
func getToken(mode, host string, client *SSHClient, targetHost string) (string, error) {
	// The "$" prefix masks the command output in logs.
	output, err := client.Exec(host, "$sudo docker swarm join-token "+mode)
	if err != nil {
		return "", err
	}
	cmd := strings.Trim(substringAfterIncludeValue(output, "docker swarm join"), "\n ")
	addrs := fmt.Sprintf("swarm join --advertise-addr \"%s\" --data-path-addr \"%s\" ", targetHost, targetHost)
	return strings.Replace(cmd, "swarm join ", addrs, 1), nil
}
// reloadUfwAndDocker reloads the ufw firewall on the given host over SSH.
func reloadUfwAndDocker(host string, client *SSHClient) error {
	logWithPrefix(host, "Restarting ufw...")
	if _, err := client.Exec(host, "sudo ufw reload"); err != nil {
		return err
	}
	logWithPrefix(host, "Ufw restarted!")
	return nil
}
// initSwarm initializes a new docker swarm on the node matching the first
// alias in args, making it the leader. It returns the leader node and the
// remaining nodes (with the leader removed from the slice).
func initSwarm(nodes []node, args []string, file *clusterFile) (node, []node) {
	alias := args[0]
	node, index := findNodeByAliasFromNodesYml(alias, nodes)
	host := node.Host
	client := getSSHClient(file)
	err := configUfwToWorkInSwarmMode(host, client)
	gc.ExitIfError(err)
	gc.Info("Starting swarm initialization...")
	// Open the swarm management port on the future leader.
	_, err = client.Exec(host, "sudo ufw allow 2377/tcp")
	gc.ExitIfError(err)
	err = reloadUfwAndDocker(host, client)
	gc.ExitIfError(err)
	_, err = client.Exec(host, "sudo docker swarm init --advertise-addr "+host+" --data-path-addr "+host)
	// Bug fix: this error was previously ignored, so a failed `swarm init`
	// was reported as success.
	gc.ExitIfError(err)
	nodes = append(nodes[:index], nodes[index+1:]...)
	node.SwarmMode = leader
	gc.Info("Swarm initialized, leader node is " + alias)
	return node, nodes
}
// findNodeByAliasFromNodesYml returns the node with the given alias and its
// index in nodesFromYml (if several match, the last match wins). When no
// node matches, the user is asked to choose one interactively and the
// lookup recurses with the chosen alias.
func findNodeByAliasFromNodesYml(alias string, nodesFromYml []node) (node, int) {
	var leaderNode node
	var index int
	for i, node := range nodesFromYml {
		if node.Alias == alias {
			leaderNode = node
			index = i
		}
	}
	if leaderNode == (node{}) {
		gc.Info("Can't find host by given alias in nodes.yml, choose it interactive")
		alias := numberHostsFromNodesFile(nodesFromYml)
		return findNodeByAliasFromNodesYml(alias, nodesFromYml)
	}
	return leaderNode, index
}
// getHostsFromNodesGroupingBySwarmModeValue splits nodes by their swarm
// role and returns the leader node (zero value if none), the manager nodes
// and the worker nodes. It aborts if any node lacks a docker installation.
func getHostsFromNodesGroupingBySwarmModeValue(nodes []node) (node, []node, []node) {
	var leaderNode node
	var managers, workers []node
	for _, n := range nodes {
		if len(n.DockerVersion) == 0 {
			gc.Fatal("Need to install docker on all nodes from nodes.yml, please exec `swarmgo docker`")
		}
		switch n.SwarmMode {
		case leader:
			leaderNode = n
		case manager:
			managers = append(managers, n)
		case worker:
			workers = append(workers, n)
		}
	}
	return leaderNode, managers, workers
}
// configUfwToWorkInSwarmMode installs ufw on the host and opens every port
// docker swarm needs: SSH (22), docker daemon (2376), gossip (7946
// tcp/udp), overlay network VXLAN (4789/udp) and ESP traffic.
func configUfwToWorkInSwarmMode(host string, client *SSHClient) error {
	// gofmt -s: element type elided from the composite literal entries.
	commands := []SSHCommand{
		{cmd: "sudo apt-get -y install ufw", title: "Installing ufw"},
		{cmd: "sudo ufw allow 22/tcp", title: "Adding ufw rule 22/tcp"},
		{cmd: "sudo ufw allow 2376/tcp", title: "Adding ufw rule 2376/tcp"},
		{cmd: "sudo ufw allow 7946/tcp", title: "Adding ufw rule 7946/tcp"},
		{cmd: "sudo ufw allow 7946/udp", title: "Adding ufw rule 7946/udp"},
		{cmd: "sudo ufw allow 4789/udp", title: "Adding ufw rule 4789/udp"},
		{cmd: "sudo ufw allow proto esp from any", title: "Adding ufw rule for ESP proto"},
	}
	if err := sshKeyAuthCmds(host, client, commands); err != nil {
		return err
	}
	logWithPrefix(host, "Ufw configured")
	return nil
}
// joinToSwarm configures the firewall on the given node and joins it to
// the swarm behind leaderHost — as a manager when mgr is true, otherwise
// as a worker. On failure after the role was assigned, the node's
// SwarmMode is cleared before the node and the error are returned.
func joinToSwarm(node node, leaderHost string, file *clusterFile, mgr bool) (node, error) {
	client := getSSHClient(file)
	err := configUfwToWorkInSwarmMode(node.Host, client)
	if err != nil {
		return node, err
	}
	var token string
	if mgr {
		// Managers additionally need the swarm management port open.
		_, err = client.Exec(node.Host, "sudo ufw allow 2377/tcp")
		if err != nil {
			return node, err
		}
		token, err = getToken("manager", leaderHost, client, node.Host)
		if err != nil {
			return node, err
		}
		node.SwarmMode = manager
	} else {
		token, err = getToken("worker", leaderHost, client, node.Host)
		if err != nil {
			return node, err
		}
		node.SwarmMode = worker
	}
	err = reloadUfwAndDocker(node.Host, client)
	if err != nil {
		node.SwarmMode = ""
		return node, err
	}
	gc.Doing("Joining " + node.Host + " to swarm")
	// NOTE(review): the original comment claimed "!" is used to avoid
	// logging, but the command below carries no "!" prefix — confirm
	// whether the join token should be masked in logs.
	_, err = client.Exec(node.Host, "sudo "+token)
	if err != nil {
		node.SwarmMode = ""
		return node, err
	}
	logWithPrefix(node.Host, node.Alias+" successfully joined swarm")
	return node, nil
}
// getSwarmLeaderNodeAndClusterFile loads the cluster config and returns an
// entry for the swarm leader node (nil when no leader is recorded in
// nodes.yml) together with the parsed cluster file.
func getSwarmLeaderNodeAndClusterFile() (*entry, *clusterFile) {
	clusterFile := unmarshalClusterYml()
	nodesFromYml := getNodesFromYml(getWorkingDir())
	gc.ExitIfFalse(len(nodesFromYml) > 0, "Can't find nodes from nodes.yml. Add some nodes first")
	var firstEntry *entry
	//need to create networks in manager node
	for _, value := range nodesFromYml {
		//if value.SwarmMode == 0 {
		//	gc.Fatal("All nodes must be in swarm! Node " + value.Host + " isn't part of the swarm")
		//}
		if value.SwarmMode == leader {
			firstEntry = &entry{
				value.Host,
				clusterFile.ClusterUserName,
				value,
			}
		}
	}
	return firstEntry, clusterFile
}
|
package models
import(
"encoding/json"
)
// ApplicationEnvironmentEnum identifies the environment/platform kind of a
// protection application (e.g. kVMware, kSQL, kAWS).
type ApplicationEnvironmentEnum int
// Value collection for the ApplicationEnvironmentEnum enum. Numbering
// starts at 1, so the zero value is not a valid member.
const (
	ApplicationEnvironment_KVMWARE ApplicationEnvironmentEnum = 1 + iota
	ApplicationEnvironment_KHYPERV
	ApplicationEnvironment_KSQL
	ApplicationEnvironment_KVIEW
	ApplicationEnvironment_KPUPPETEER
	ApplicationEnvironment_KPHYSICAL
	ApplicationEnvironment_KPURE
	ApplicationEnvironment_KAZURE
	ApplicationEnvironment_KNETAPP
	ApplicationEnvironment_KAGENT
	ApplicationEnvironment_KGENERICNAS
	ApplicationEnvironment_KACROPOLIS
	ApplicationEnvironment_KPHYSICALFILES
	ApplicationEnvironment_KISILON
	ApplicationEnvironment_KKVM
	ApplicationEnvironment_KAWS
	ApplicationEnvironment_KEXCHANGE
	ApplicationEnvironment_KHYPERVVSS
	ApplicationEnvironment_KORACLE
	ApplicationEnvironment_KGCP
	ApplicationEnvironment_KFLASHBLADE
	ApplicationEnvironment_KAWSNATIVE
	ApplicationEnvironment_KVCD
	ApplicationEnvironment_KO365
	ApplicationEnvironment_KO365OUTLOOK
	ApplicationEnvironment_KHYPERFLEX
	ApplicationEnvironment_KGCPNATIVE
	ApplicationEnvironment_KAZURENATIVE
)
// MarshalJSON encodes the enum value as its canonical JSON string form.
func (r ApplicationEnvironmentEnum) MarshalJSON() ([]byte, error) {
	return json.Marshal(ApplicationEnvironmentEnumToValue(r))
}
// UnmarshalJSON decodes the enum from its JSON string representation.
// Unknown string values map to the default (kVMware). Malformed JSON is
// now reported instead of being silently ignored.
func (r *ApplicationEnvironmentEnum) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	*r = ApplicationEnvironmentEnumFromValue(s)
	return nil
}
// ApplicationEnvironmentEnumToValue converts an ApplicationEnvironmentEnum
// to its string representation. Unknown values fall back to "kVMware".
func ApplicationEnvironmentEnumToValue(applicationEnvironmentEnum ApplicationEnvironmentEnum) string {
	switch applicationEnvironmentEnum {
	case ApplicationEnvironment_KVMWARE:
		return "kVMware"
	case ApplicationEnvironment_KHYPERV:
		return "kHyperV"
	case ApplicationEnvironment_KSQL:
		return "kSQL"
	case ApplicationEnvironment_KVIEW:
		return "kView"
	case ApplicationEnvironment_KPUPPETEER:
		return "kPuppeteer"
	case ApplicationEnvironment_KPHYSICAL:
		return "kPhysical"
	case ApplicationEnvironment_KPURE:
		return "kPure"
	case ApplicationEnvironment_KAZURE:
		return "kAzure"
	case ApplicationEnvironment_KNETAPP:
		return "kNetapp"
	case ApplicationEnvironment_KAGENT:
		return "kAgent"
	case ApplicationEnvironment_KGENERICNAS:
		return "kGenericNas"
	case ApplicationEnvironment_KACROPOLIS:
		return "kAcropolis"
	case ApplicationEnvironment_KPHYSICALFILES:
		return "kPhysicalFiles"
	case ApplicationEnvironment_KISILON:
		return "kIsilon"
	case ApplicationEnvironment_KKVM:
		return "kKVM"
	case ApplicationEnvironment_KAWS:
		return "kAWS"
	case ApplicationEnvironment_KEXCHANGE:
		return "kExchange"
	case ApplicationEnvironment_KHYPERVVSS:
		return "kHyperVVSS"
	case ApplicationEnvironment_KORACLE:
		return "kOracle"
	case ApplicationEnvironment_KGCP:
		return "kGCP"
	case ApplicationEnvironment_KFLASHBLADE:
		return "kFlashBlade"
	case ApplicationEnvironment_KAWSNATIVE:
		return "kAWSNative"
	case ApplicationEnvironment_KVCD:
		return "kVCD"
	case ApplicationEnvironment_KO365:
		return "kO365"
	case ApplicationEnvironment_KO365OUTLOOK:
		return "kO365Outlook"
	case ApplicationEnvironment_KHYPERFLEX:
		return "kHyperFlex"
	case ApplicationEnvironment_KGCPNATIVE:
		return "kGCPNative"
	case ApplicationEnvironment_KAZURENATIVE:
		return "kAzureNative"
	default:
		return "kVMware"
	}
}
// ApplicationEnvironmentEnumArrayToValue converts a slice of enum values to
// their string representations, preserving order.
func ApplicationEnvironmentEnumArrayToValue(applicationEnvironmentEnum []ApplicationEnvironmentEnum) []string {
	converted := make([]string, len(applicationEnvironmentEnum))
	// range idiom replaces the hand-written index loop (gofmt-clean).
	for i, v := range applicationEnvironmentEnum {
		converted[i] = ApplicationEnvironmentEnumToValue(v)
	}
	return converted
}
// ApplicationEnvironmentEnumFromValue converts the given string to its enum
// representation. Unknown strings fall back to the kVMware default.
func ApplicationEnvironmentEnumFromValue(value string) ApplicationEnvironmentEnum {
	switch value {
	case "kVMware":
		return ApplicationEnvironment_KVMWARE
	case "kHyperV":
		return ApplicationEnvironment_KHYPERV
	case "kSQL":
		return ApplicationEnvironment_KSQL
	case "kView":
		return ApplicationEnvironment_KVIEW
	case "kPuppeteer":
		return ApplicationEnvironment_KPUPPETEER
	case "kPhysical":
		return ApplicationEnvironment_KPHYSICAL
	case "kPure":
		return ApplicationEnvironment_KPURE
	case "kAzure":
		return ApplicationEnvironment_KAZURE
	case "kNetapp":
		return ApplicationEnvironment_KNETAPP
	case "kAgent":
		return ApplicationEnvironment_KAGENT
	case "kGenericNas":
		return ApplicationEnvironment_KGENERICNAS
	case "kAcropolis":
		return ApplicationEnvironment_KACROPOLIS
	case "kPhysicalFiles":
		return ApplicationEnvironment_KPHYSICALFILES
	case "kIsilon":
		return ApplicationEnvironment_KISILON
	case "kKVM":
		return ApplicationEnvironment_KKVM
	case "kAWS":
		return ApplicationEnvironment_KAWS
	case "kExchange":
		return ApplicationEnvironment_KEXCHANGE
	case "kHyperVVSS":
		return ApplicationEnvironment_KHYPERVVSS
	case "kOracle":
		return ApplicationEnvironment_KORACLE
	case "kGCP":
		return ApplicationEnvironment_KGCP
	case "kFlashBlade":
		return ApplicationEnvironment_KFLASHBLADE
	case "kAWSNative":
		return ApplicationEnvironment_KAWSNATIVE
	case "kVCD":
		return ApplicationEnvironment_KVCD
	case "kO365":
		return ApplicationEnvironment_KO365
	case "kO365Outlook":
		return ApplicationEnvironment_KO365OUTLOOK
	case "kHyperFlex":
		return ApplicationEnvironment_KHYPERFLEX
	case "kGCPNative":
		return ApplicationEnvironment_KGCPNATIVE
	case "kAzureNative":
		return ApplicationEnvironment_KAZURENATIVE
	default:
		return ApplicationEnvironment_KVMWARE
	}
}
|
// Copyright ©2017 Dan Kortschak. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// gen_nlo_const outputs candidate constants and a deBruijn look up table
// for implementing a fast bit fiddling number of leading ones function.
package main
import "fmt"
// nlo computes a candidate hash of x's number of leading ones using the
// given add/multiply/shift parameters; andnot selects a variant of the
// final smearing step. The result is only meaningful as a perfect-hash
// candidate, validated against loopNLO by the search in main.
func nlo(x, add, mult, shift byte, andnot bool) int {
	inv := ^x
	inv |= inv >> 1
	inv |= inv >> 2
	if andnot {
		inv &^= inv >> 4
	} else {
		inv |= inv >> 4
	}
	return int(((inv + add) * mult) >> shift)
}
// loopNLO returns the number of leading one bits in x, counted from the
// most significant bit, using a simple reference loop.
func loopNLO(x byte) int {
	n := 0
	for mask := byte(0x80); mask > 0 && x&mask != 0; mask >>= 1 {
		n++
	}
	return n
}
// main brute-forces (add, mult, shift, andnot) parameter combinations for
// nlo and prints those that hash all 256 byte inputs consistently into at
// most 16 buckets, together with the resulting lookup table.
func main() {
	for add := byte(0); add < 2; add++ {
		for mult := byte(1); mult < 64; mult++ {
			for shift := byte(0); shift < 8; shift++ {
				for _, andnot := range []bool{false, true} {
					// hits maps candidate hash value -> leading-ones count.
					hits := make(map[int]int)
					for x := 0; x < 256; x++ {
						i := nlo(byte(x), add, mult, shift, andnot)
						prev, ok := hits[i]
						n := loopNLO(byte(x))
						// Reject the candidate if two inputs with different
						// leading-ones counts collide on the same hash.
						if ok && n != prev {
							goto fail
						}
						hits[i] = n
					}
					// A valid table has exactly 9 distinct outputs (0..8).
					if len(hits) == 9 {
						var max int
						for k := range hits {
							if k > max {
								max = k
							}
						}
						// Only report compact tables (indices fit in 4 bits).
						if max < 16 {
							fmt.Printf("add=%d mult=%d shift=%d andnot=%t table=%#v\n", add, mult, shift, andnot, hits)
						}
					}
				fail:
				}
			}
		}
	}
}
|
package scalars
import (
"io"
"strconv"
"time"
"github.com/99designs/gqlgen/graphql"
"github.com/neighborly/go-errors"
)
// MarshalDate renders t as a quoted "YYYY-MM-DD" JSON string for GraphQL.
func MarshalDate(t time.Time) graphql.Marshaler {
	return graphql.WriterFunc(func(w io.Writer) {
		io.WriteString(w, strconv.Quote(t.Format("2006-01-02")))
	})
}
// UnmarshalDate parses a GraphQL Date input. The value must be a string,
// either in "YYYY-MM-DD" form or a full RFC3339 timestamp, which is
// truncated to its date component (time of day and zone are discarded).
func UnmarshalDate(v interface{}) (time.Time, error) {
	if tmpStr, ok := v.(string); ok {
		result, err := time.Parse("2006-01-02", tmpStr)
		if err != nil {
			// Fall back to RFC3339; on failure report the original
			// (date-format) parse error, not the RFC3339 one.
			result2, err2 := time.Parse(time.RFC3339, tmpStr)
			if err2 != nil {
				return time.Time{}, err
			}
			// Re-parse the formatted date to drop the time-of-day and
			// timezone information from the RFC3339 value.
			return time.Parse("2006-01-02", result2.Format("2006-01-02"))
		}
		return result, nil
	}
	return time.Time{}, errors.InvalidArgument.New("date should be YYYY-MM-DD formatted string")
}
|
package main
import (
"net/http"
"github.com/gorilla/mux"
)
// Route describes a single HTTP endpoint: its name, method, URL pattern
// and handler.
type Route struct {
	Name string
	Method string
	Pattern string
	HandlerFunc http.HandlerFunc
}
// Routes is the route-table type consumed by NewRouter.
type Routes []Route
// NewRouter builds a mux router with every entry of the routes table
// registered, using strict-slash matching.
func NewRouter() *mux.Router {
	r := mux.NewRouter().StrictSlash(true)
	for _, rt := range routes {
		r.Methods(rt.Method).
			Path(rt.Pattern).
			Name(rt.Name).
			Handler(rt.HandlerFunc)
	}
	return r
}
// routes is the table of all HTTP endpoints exposed by the service; it is
// consumed by NewRouter at startup.
var routes = Routes{
	Route{
		"State",
		"GET",
		"/state",
		GetState,
	},
	Route{
		"OffState",
		"PUT",
		"/off",
		PutOffState,
	},
	Route{
		"CoolState",
		"PUT",
		"/cool",
		PutCoolState,
	},
	Route{
		"DryState",
		"PUT",
		"/dry",
		PutDryState,
	},
	/*Route{
		"AutoState",
		"PUT",
		"/auto",
		PutAutoState,
	},*/
	Route{
		"HeatState",
		"PUT",
		"/heat",
		PutHeatState,
	},
	Route{
		"FanState",
		"PUT",
		"/fan",
		PutFanState,
	},
}
|
package api
import (
"bytes"
"crypto/tls"
"encoding/json"
"errors"
"github.com/bitmaelum/bitmaelum-server/core"
"github.com/bitmaelum/bitmaelum-server/core/encrypt"
"io/ioutil"
"net/http"
"time"
)
// Api is a client for the bitmaelum mail-server HTTP API; requests are
// authenticated with a JWT derived from the account's private key.
type Api struct {
	account *core.AccountInfo
	jwt string
	client *http.Client
}
// CreateNewClient creates a new mailserver API client for the given
// account, generating a JWT from the account's private key for
// authentication.
func CreateNewClient(ai *core.AccountInfo) (*Api, error) {
	// Create JWT token based on the private key of the user
	privKey, err := encrypt.PEMToPrivKey([]byte(ai.PrivKey))
	if err != nil {
		return nil, err
	}
	jwtToken, err := core.GenerateJWTToken(core.StringToHash(ai.Address), privKey)
	if err != nil {
		return nil, err
	}
	// Create API
	tr := &http.Transport{
		// @TODO: We don't want this...
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	api := &Api{
		account: ai,
		jwt: jwtToken,
		client: &http.Client{
			Transport: tr,
			Timeout: 30 * time.Second,
		},
	}
	// gofmt: dropped the stray trailing semicolon from the return.
	return api, nil
}
// GetJSON performs an authenticated GET request against the given API path
// and decodes the JSON response body into v.
func (api *Api) GetJSON(path string, v interface{}) error {
	body, err := api.Get(path)
	if err != nil {
		return err
	}
	// Idiom: return the unmarshal result directly instead of the redundant
	// err-check-then-return-nil dance.
	return json.Unmarshal(body, v)
}
// Get performs an authenticated GET request against path on the account's
// server and returns the raw response body. Any non-2xx status is
// reported as an error.
func (api *Api) Get(path string) ([]byte, error) {
	req, err := http.NewRequest("GET", api.account.Server+path, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+api.jwt)
	res, err := api.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if code := res.StatusCode; code < 200 || code > 299 {
		return nil, errors.New("incorrect status code returned")
	}
	return ioutil.ReadAll(res.Body)
}
// Post marshals body to JSON and POSTs it to path on the account's server.
// Any non-2xx response status is reported as an error.
func (api *Api) Post(path string, body interface{}) error {
	payload, err := json.Marshal(body)
	if err != nil {
		return err
	}
	req, err := http.NewRequest("POST", api.account.Server+path, bytes.NewBuffer(payload))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+api.jwt)
	res, err := api.client.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode < 200 || res.StatusCode > 299 {
		return errors.New("incorrect status code returned")
	}
	return nil
}
|
package main
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/pjirsa/azure-costmanagement-samples/export"
"github.com/pjirsa/azure-costmanagement-samples/internal/config"
)
// main loads configuration from the environment and lists the cost-management
// exports configured for the subscription.
func main() {
	err := config.ParseEnvironment()
	if err != nil {
		// Previously this error was silently swallowed, making misconfiguration
		// impossible to diagnose.
		fmt.Println("failed to parse environment:", err)
		return
	}
	// Generous timeout: the export API can be slow to respond.
	ctx, cancel := context.WithTimeout(context.Background(), 6000*time.Second)
	defer cancel()
	ListExports(ctx)
}
// ListExports prints the cost-management exports configured at the
// subscription scope as a JSON array.
func ListExports(ctx context.Context) {
	scope := "subscriptions/" + config.SubscriptionID()
	resp, err := export.List(ctx, scope)
	if err != nil {
		// Bail out: resp is not meaningful when List failed; the original code
		// fell through and marshaled a zero-value response.
		fmt.Println("listing exports failed:", err)
		return
	}
	result, err := json.Marshal(resp.Value)
	if err != nil {
		fmt.Println("marshaling exports failed:", err)
		return
	}
	fmt.Println("List of configured exports:")
	fmt.Println(string(result))
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package crash
import (
"context"
"path"
"path/filepath"
"strings"
"time"
"github.com/golang/protobuf/ptypes/empty"
"chromiumos/tast/ctxutil"
"chromiumos/tast/dut"
"chromiumos/tast/rpc"
crash_service "chromiumos/tast/services/cros/crash"
"chromiumos/tast/ssh/linuxssh"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the WatchdogCrash remote test with the Tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         WatchdogCrash,
		Desc:         "Verify artificial watchdog crash creates crash files",
		Contacts:     []string{"mutexlox@chromium.org", "cros-telemetry@google.com"},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"device_crash", "pstore", "reboot", "watchdog"},
		ServiceDeps:  []string{"tast.cros.crash.FixtureService"},
		HardwareDeps: hwdep.D(hwdep.SkipOnPlatform(
			// See https://crbug.com/1069618 for discussion of bob, scarlet, kevin issues.
			"bob",
			"scarlet",
			"kevin")),
		// The DUT reboots via a watchdog reset during this test, so allow ample time.
		Timeout: 10 * time.Minute,
	})
}
// saveAllFiles copies every file referenced by matches from the DUT into dir.
// Individual failures are logged; the first error encountered is returned
// (nil when all transfers succeed).
func saveAllFiles(ctx context.Context, d *dut.DUT, matches []*crash_service.RegexMatch, dir string) error {
	var firstErr error
	for _, match := range matches {
		for _, remote := range match.Files {
			dest := filepath.Join(dir, path.Base(remote))
			err := linuxssh.GetFile(ctx, d.Conn(), remote, dest, linuxssh.PreserveSymlinks)
			if err == nil {
				continue
			}
			testing.ContextLogf(ctx, "Failed to save file %s: %s", remote, err)
			if firstErr == nil {
				firstErr = err
			}
		}
	}
	return firstErr
}
// WatchdogCrash triggers a hardware watchdog reset on the DUT and verifies
// that the expected kernel crash report files (.kcrash/.meta/.log) appear in
// the system crash directory afterwards. The sequence is strictly ordered:
// set up the crash fixture, starve the watchdog, wait for the DUT to go down
// and come back, then reconnect and inspect the generated reports.
func WatchdogCrash(ctx context.Context, s *testing.State) {
	const systemCrashDir = "/var/spool/crash"
	d := s.DUT()
	cl, err := rpc.Dial(ctx, d, s.RPCHint())
	if err != nil {
		s.Fatal("Failed to connect to the RPC service on the DUT: ", err)
	}
	fs := crash_service.NewFixtureServiceClient(cl.Conn)
	// Mock consent avoids depending on real user consent state for crash collection.
	req := crash_service.SetUpCrashTestRequest{
		Consent: crash_service.SetUpCrashTestRequest_MOCK_CONSENT,
	}
	// Shorten deadline to leave time for cleanup
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	if _, err := fs.SetUp(ctx, &req); err != nil {
		s.Error("Failed to set up: ", err)
		cl.Close(cleanupCtx)
		return
	}
	// This is a bit delicate. If the test fails _before_ we panic the machine,
	// we need to do TearDown then, and on the same connection (so we can close Chrome).
	//
	// If it fails to reconnect, we do not need to clean these up.
	//
	// Otherwise, we need to re-establish a connection to the machine and
	// run TearDown.
	defer func() {
		s.Log("Cleaning up")
		if fs != nil {
			if _, err := fs.TearDown(cleanupCtx, &empty.Empty{}); err != nil {
				s.Error("Couldn't tear down: ", err)
			}
		}
		if cl != nil {
			cl.Close(cleanupCtx)
		}
	}()
	// Sync filesystem to minimize impact of the crash on other tests
	if out, err := d.Conn().CommandContext(ctx, "sync").CombinedOutput(); err != nil {
		s.Fatalf("Failed to sync filesystems: %s. err: %v", out, err)
	}
	// Trigger a watchdog reset
	// Run the triggering command in the background to avoid the DUT potentially going down before
	// success is reported over the SSH connection. Redirect all I/O streams to ensure that the
	// SSH exec request doesn't hang (see https://en.wikipedia.org/wiki/Nohup#Overcoming_hanging).
	// Daisydog is the watchdog service
	cmd := `nohup sh -c 'sleep 2
stop daisydog
sleep 60 > /dev/watchdog' >/dev/null 2>&1 </dev/null &`
	if err := d.Conn().CommandContext(ctx, "bash", "-c", cmd).Run(); err != nil {
		s.Fatal("Failed to panic DUT: ", err)
	}
	s.Log("Waiting for DUT to become unreachable")
	if err := d.WaitUnreachable(ctx); err != nil {
		s.Fatal("Failed to wait for DUT to become unreachable: ", err)
	}
	s.Log("DUT became unreachable (as expected)")
	// When we lost the connection, these connections broke.
	cl.Close(ctx)
	cl = nil
	fs = nil
	s.Log("Reconnecting to DUT")
	if err := d.WaitConnect(ctx); err != nil {
		s.Fatal("Failed to reconnect to DUT: ", err)
	}
	s.Log("Reconnected to DUT")
	cl, err = rpc.Dial(ctx, d, s.RPCHint())
	if err != nil {
		s.Fatal("Failed to connect to the RPC service on the DUT: ", err)
	}
	fs = crash_service.NewFixtureServiceClient(cl.Conn)
	// base matches kernel crash report basenames; the digit groups are
	// presumably a date, time, and counter — confirm against crash_reporter docs.
	base := `kernel\.\d{8}\.\d{6}\.\d+\.0`
	biosLogMatches := &crash_service.RegexMatch{
		Regex: base + `\.bios_log`,
		Files: nil,
	}
	waitReq := &crash_service.WaitForCrashFilesRequest{
		Dirs:    []string{systemCrashDir},
		Regexes: []string{base + `\.kcrash`, base + `\.meta`, base + `\.log`},
	}
	s.Log("Waiting for files to become present")
	res, err := fs.WaitForCrashFiles(ctx, waitReq)
	if err != nil {
		// Save the system log to the test output directory to aid debugging.
		if err := d.GetFile(cleanupCtx, "/var/log/messages",
			filepath.Join(s.OutDir(), "messages")); err != nil {
			s.Log("Failed to get messages log")
		}
		s.Fatal("Failed to find crash files: ", err.Error())
	}
	for _, m := range res.Matches {
		if strings.HasSuffix(m.Regex, ".meta") {
			// Also remove the bios log if it was created.
			for _, f := range m.Files {
				biosLogMatches.Files = append(biosLogMatches.Files, strings.TrimSuffix(f, filepath.Ext(f))+".bios_log")
			}
			if len(m.Files) != 1 {
				s.Errorf("Unexpected number of kernel crashes: %d, want 1", len(m.Files))
				continue
			}
			// The meta file must attribute the crash to the watchdog.
			if err := d.Conn().CommandContext(ctx, "/bin/grep", "-q", "sig=kernel-(WATCHDOG)", m.Files[0]).Run(); err != nil {
				// get all files to help debug test failures
				if err := saveAllFiles(cleanupCtx, d, append(res.Matches, biosLogMatches), s.OutDir()); err != nil {
					s.Log("Failed to get meta file: ", err)
				}
				s.Error("Did not find correct pattern in meta file: ", err)
			}
		}
	}
	// Remove the generated reports so they don't pollute later tests.
	removeReq := &crash_service.RemoveAllFilesRequest{
		Matches: append(res.Matches, biosLogMatches),
	}
	if _, err := fs.RemoveAllFiles(ctx, removeReq); err != nil {
		s.Error("Error removing files: ", err)
	}
}
|
// Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package builder
import (
tcp_filter "github.com/envoyproxy/go-control-plane/envoy/api/v2/listener"
http_filter "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2"
tcp_config "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/rbac/v2"
istiolog "istio.io/pkg/log"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
authz_model "istio.io/istio/pilot/pkg/security/authz/model"
"istio.io/istio/pilot/pkg/security/authz/policy"
"istio.io/istio/pilot/pkg/security/authz/policy/v1alpha1"
"istio.io/istio/pilot/pkg/security/authz/policy/v1beta1"
"istio.io/istio/pilot/pkg/security/trustdomain"
"istio.io/istio/pkg/config/labels"
)
var (
	// rbacLog is the scoped logger used for RBAC filter-building debug output.
	rbacLog = istiolog.RegisterScope("rbac", "rbac debugging", 0)
)
// Builder wraps all needed information for building the RBAC filter for a service.
type Builder struct {
	isXDSMarshalingToAnyEnabled bool             // marshal filter config as typed Any instead of Struct
	generator                   policy.Generator // produces the RBAC config (v1alpha1 or v1beta1)
}
// NewBuilder creates a builder instance that can be used to build corresponding RBAC filter config.
// It returns nil when no authorization policy applies: v1beta1 policies are
// consulted first and take precedence; otherwise v1alpha1 RBAC config is used
// if enabled for the service.
func NewBuilder(trustDomainBundle trustdomain.Bundle, serviceInstance *model.ServiceInstance,
	workloadLabels labels.Collection, configNamespace string,
	policies *model.AuthorizationPolicies, isXDSMarshalingToAnyEnabled bool) *Builder {
	var gen policy.Generator
	if matched := policies.ListAuthorizationPolicies(configNamespace, workloadLabels); len(matched) != 0 {
		gen = v1beta1.NewGenerator(trustDomainBundle, matched)
		rbacLog.Debugf("v1beta1 authorization enabled for workload %v in %s", workloadLabels, configNamespace)
	} else {
		if serviceInstance == nil {
			return nil
		}
		if serviceInstance.Service == nil {
			rbacLog.Errorf("no service for serviceInstance: %v", serviceInstance)
			return nil
		}
		ns := serviceInstance.Service.Attributes.Namespace
		host := string(serviceInstance.Service.Hostname)
		if policies.IsRBACEnabled(host, ns) {
			name := serviceInstance.Service.Attributes.Name
			md, err := authz_model.NewServiceMetadata(name, ns, serviceInstance)
			if err != nil {
				rbacLog.Errorf("failed to create ServiceMetadata for %s: %s", name, err)
				return nil
			}
			gen = v1alpha1.NewGenerator(trustDomainBundle, md, policies, policies.IsGlobalPermissiveEnabled())
			rbacLog.Debugf("v1alpha1 RBAC enabled for service %s", host)
		}
	}
	if gen == nil {
		return nil
	}
	return &Builder{
		isXDSMarshalingToAnyEnabled: isXDSMarshalingToAnyEnabled,
		generator:                   gen,
	}
}
// BuildHTTPFilter builds the RBAC HTTP filter.
// A nil receiver or an empty generated config yields nil.
func (b *Builder) BuildHTTPFilter() *http_filter.HttpFilter {
	if b == nil {
		return nil
	}
	generated := b.generator.Generate(false /* forTCPFilter */)
	if generated == nil {
		return nil
	}
	filter := http_filter.HttpFilter{Name: authz_model.RBACHTTPFilterName}
	// Marshaling mode decides whether the config travels as a typed Any or a Struct.
	if b.isXDSMarshalingToAnyEnabled {
		filter.ConfigType = &http_filter.HttpFilter_TypedConfig{TypedConfig: util.MessageToAny(generated)}
	} else {
		filter.ConfigType = &http_filter.HttpFilter_Config{Config: util.MessageToStruct(generated)}
	}
	rbacLog.Debugf("built http filter config: %v", filter)
	return &filter
}
// BuildTCPFilter builds the RBAC TCP filter.
// A nil receiver or an empty generated config yields nil.
func (b *Builder) BuildTCPFilter() *tcp_filter.Filter {
	if b == nil {
		return nil
	}
	// The build function always return the config for HTTP filter, we need to extract the
	// generated rules and set it in the config for TCP filter.
	generated := b.generator.Generate(true /* forTCPFilter */)
	if generated == nil {
		return nil
	}
	tcpRBAC := &tcp_config.RBAC{
		Rules:       generated.Rules,
		ShadowRules: generated.ShadowRules,
		StatPrefix:  authz_model.RBACTCPFilterStatPrefix,
	}
	filter := tcp_filter.Filter{Name: authz_model.RBACTCPFilterName}
	// Marshaling mode decides whether the config travels as a typed Any or a Struct.
	if b.isXDSMarshalingToAnyEnabled {
		filter.ConfigType = &tcp_filter.Filter_TypedConfig{TypedConfig: util.MessageToAny(tcpRBAC)}
	} else {
		filter.ConfigType = &tcp_filter.Filter_Config{Config: util.MessageToStruct(tcpRBAC)}
	}
	rbacLog.Debugf("built tcp filter config: %v", filter)
	return &filter
}
|
package queue
import "sync"
// Queue is a fixed-capacity FIFO queue backed by a ring buffer.
// All methods are safe for concurrent use.
type Queue[T any] struct {
	ringBuffer []T
	read       int
	write      int
	capacity   int
	size       int
	mutex      sync.Mutex
}

// New creates a new queue with the specified capacity.
func New[T any](capacity int) *Queue[T] {
	q := &Queue[T]{capacity: capacity}
	q.ringBuffer = make([]T, capacity)
	return q
}

// Size returns the number of elements currently stored.
func (q *Queue[T]) Size() int {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	return q.size
}

// Capacity reports the maximum number of elements the queue can hold.
func (q *Queue[T]) Capacity() int {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	return q.capacity
}

// Offer appends element and reports true; when the queue is full the
// element is dropped and Offer reports false.
func (q *Queue[T]) Offer(element T) bool {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	if q.size == q.capacity {
		return false
	}
	q.ringBuffer[q.write] = element
	q.write++
	if q.write == q.capacity {
		q.write = 0 // wrap the write cursor
	}
	q.size++
	return true
}

// Poll removes and returns the oldest element; success is false when the
// queue is empty. The vacated slot is zeroed so the GC can reclaim it.
func (q *Queue[T]) Poll() (element T, success bool) {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	if q.size == 0 {
		return
	}
	element = q.ringBuffer[q.read]
	var zero T
	q.ringBuffer[q.read] = zero
	q.read++
	if q.read == q.capacity {
		q.read = 0 // wrap the read cursor
	}
	q.size--
	success = true
	return
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package kops
import (
"os"
"os/exec"
"path"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
const (
	outputDirName  = "output"     // subdirectory of tempDir for kops output artifacts
	kubeConfigName = "kubeconfig" // file name inside tempDir for the generated kubeconfig
)

// Cmd is the kops command to execute.
type Cmd struct {
	kopsPath     string // absolute path to the kops binary found on PATH
	s3StateStore string // kops state store location (S3)
	tempDir      string // per-instance temporary working directory; removed by Close
	logger       log.FieldLogger
}
// New creates a new instance of Cmd through which to execute kops.
// It locates the kops binary on PATH and allocates a private temporary
// directory for this instance's working files.
func New(s3StateStore string, logger log.FieldLogger) (*Cmd, error) {
	binary, err := exec.LookPath("kops")
	if err != nil {
		return nil, errors.Wrap(err, "failed to find kops installed on your PATH")
	}
	dir, err := os.MkdirTemp("", "kops-")
	if err != nil {
		return nil, errors.Wrap(err, "failed to create temporary kops directory")
	}
	cmd := &Cmd{
		kopsPath:     binary,
		s3StateStore: s3StateStore,
		tempDir:      dir,
		logger:       logger,
	}
	return cmd, nil
}
// SetLogger sets a new logger for kops commands.
func (c *Cmd) SetLogger(logger log.FieldLogger) {
	c.logger = logger
}

// GetTempDir returns the root temporary directory used by kops.
func (c *Cmd) GetTempDir() string {
	return c.tempDir
}

// GetOutputDirectory returns the temporary output directory used by kops.
// NOTE(review): path.Join is slash-only; filepath.Join would be OS-correct.
// Fine on Unix — verify if Windows support is ever needed.
func (c *Cmd) GetOutputDirectory() string {
	return path.Join(c.tempDir, outputDirName)
}

// GetKubeConfigPath returns the temporary kubeconfig file path used by kops.
func (c *Cmd) GetKubeConfigPath() string {
	return path.Join(c.tempDir, kubeConfigName)
}

// Close removes the entire temporary directory used by kops.
func (c *Cmd) Close() error {
	return os.RemoveAll(c.tempDir)
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package scanner contains local Tast tests that exercise scanning
// functionality for ChromeOS.
package scanner
|
package main
// Stack is a fixed-size ring buffer of chatLines that always keeps the
// latest <size> messages, overwriting the oldest once the buffer wraps.
// Despite the name, All returns the entries in chronological (FIFO) order.
type Stack struct {
	items  []chatLine
	oldest int  // index of oldest item in buffer (advances only once full)
	next   int  // next write mark
	size   int  // current number of stored items
	full   bool // true once the buffer has wrapped at least once
}
// newStack creates a new Stack ring buffer holding at most size chatLines.
func newStack(size int) *Stack {
	return &Stack{items: make([]chatLine, size)}
}
// Push stores a chatLine item, overwriting the oldest entry once the
// buffer has wrapped.
func (q *Stack) Push(l chatLine) {
	n := len(q.items)
	q.items[q.next] = l
	if !q.full {
		// Still filling the buffer for the first time.
		q.size++
	} else {
		// The slot just overwritten held the oldest entry; advance past it.
		q.oldest = (q.oldest + 1) % n
	}
	// Advance the write mark, wrapping (and latching full) at the end.
	q.next++
	if q.next == n {
		q.next = 0
		q.full = true
	}
}
// All returns the buffered chatLine items in chronological order, oldest
// first.
func (q *Stack) All() []chatLine {
	all := make([]chatLine, q.size)
	if !q.full {
		// Buffer has not wrapped: entries live in items[0:size] in insertion
		// order (oldest stays 0 until the first wrap).
		// The original condition here was `q.next >= q.next && !q.full` — the
		// self-comparison is vacuously true, so only !q.full mattered.
		copy(all, q.items[q.oldest:q.size])
		return all
	}
	// Buffer has wrapped: the oldest entries run from `oldest` to the end of
	// the slice, followed by the newer entries from the start up to the
	// write mark.
	n := copy(all, q.items[q.oldest:])
	copy(all[n:], q.items[:q.next])
	return all
}
|
package accesscontrol
import (
"context"
"net/http"
"github.com/corioders/gokit/errors"
"github.com/corioders/gokit/web"
"github.com/corioders/gokit/web/middleware/accesscontrol/role"
"gopkg.in/square/go-jose.v2"
"gopkg.in/square/go-jose.v2/jwt"
)
// loginOptions holds optional settings for NewLogin (currently none).
type loginOptions struct{}

// loginOption mutates loginOptions; reserved for future configuration.
type loginOption func(o *loginOptions)

// LoginAcceptHandler decides whether a request may log in. It returns the
// user claims to embed in the token, the role to grant, whether login
// should proceed, and any error.
type LoginAcceptHandler func(ctx context.Context, r *http.Request) (claims interface{}, roleGranted *role.Role, shouldLogin bool, err error)

var (
	// ErrNilLoginAcceptHandler is returned by NewLogin when lah is nil.
	ErrNilLoginAcceptHandler = errors.New("Login accept handler (lah) cannot be nil")
	// ErrNilRoleGranted is returned at request time when lah approves the
	// login but grants no role.
	ErrNilRoleGranted = errors.New("Login accept handler (lah) returned roleGranted=nil and shouldLogin=true, this is incorrect")
)
// NewLogin returns a web.Handler that processes login requests. The
// accept/deny decision is delegated to lah; on acceptance the handler issues
// a signed-then-encrypted JWT carrying the returned claims and role, and
// sets it as a cookie named ac.tokenCookieName. Returns
// ErrNilLoginAcceptHandler immediately when lah is nil.
func (ac *Accesscontrol) NewLogin(lah LoginAcceptHandler, options ...loginOption) (web.Handler, error) {
	if lah == nil {
		return nil, errors.WithStack(ErrNilLoginAcceptHandler)
	}
	// Add stack when NewLogin is called so search for invalid lah is easy.
	errNilRoleGranted := errors.WithStack(ErrNilRoleGranted)
	return func(ctx context.Context, rw http.ResponseWriter, r *http.Request) error {
		// Bail out early if the request context was already canceled.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		claims, roleGranted, shouldLogin, err := lah(ctx, r)
		if err != nil {
			return errors.WithStack(err)
		}
		if !shouldLogin {
			rw.WriteHeader(statusAccessDenied)
			return nil
		}
		if roleGranted == nil {
			// errNilRoleGranted is tied to lah so we want to make search for invalid lah as easy as possible.
			return errNilRoleGranted
		}
		token, err := newLoginToken(ac.tokenSigner, ac.tokenEncrypter, &internalClaims{
			UserClaims: &userClaims{value: claims},
			Role:       roleGranted,
		})
		if err != nil {
			return errors.WithStack(err)
		}
		http.SetCookie(rw, &http.Cookie{
			Name:  ac.tokenCookieName,
			Value: token,
		})
		return nil
	}, nil
}
// newLoginToken signs and then encrypts claims into a compact serialized JWT.
func newLoginToken(signer jose.Signer, encrypter jose.Encrypter, claims *internalClaims) (string, error) {
	return jwt.SignedAndEncrypted(signer, encrypter).Claims(claims).CompactSerialize()
}
|
package models
// Currency is a currency record with its database identifier and name.
type Currency struct {
	ID   int64
	Name string
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.